repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cdoremus/udacity-python_web_development-cs253 | src/unit2/rot13/rot13_main.py | 1 | 2113 | '''
In order to be graded correctly for this homework, there are a few things
to keep in mind. We'll be grading your web app by POSTing to your form and
retrieving the text that has been encoded with ROT13. For this to work,
keep the following points in mind:
1. The textarea form element where the user inputs the text to encode must be
named 'text'. In other words, you must have 'textarea name="text"' for us to post to.
2. The form method must be POST, not GET.
3. You must enter the full url into the supplied textbox above, including the
path. For example, our example app is running at http://udacity-cs253.appspot.com/unit2/rot13,
but if we instead only entered http://udacity-cs253.appspot.com/ then the grading script would not work.
4. Don't forget to escape your output!
VIDEO NOTES:
Rot13 shifts every letter forward by 13 places.
Past the end of the alphabet, the count wraps around;
for instance, z becomes m.
Rot13-encrypting a string that has already been Rot13-encrypted recovers the original string.
Case must be preserved
Punctuation must be preserved
Also preserve whitespace
Escape the HTML
Udacity Test site
http://udacity-cs253.appspot.com/unit2/rot13
My Production URL:
http://cdoremus-udacity-cs253.appspot.com/unit2/rot13
'''
import os
import webapp2
from google.appengine.ext.webapp import template
from rot13 import Rot13
class Rot13MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/html'
values = {'rot13_string':''}
path = os.path.join(os.path.dirname(__file__), 'rot13.html')
self.response.out.write(template.render(path, values))
def post(self):
self.response.headers['Content-Type'] = 'text/html'
text = self.request.get('text')
rot13 = Rot13()
encrypted = rot13.encrypt(text) # escaping done in template using 'escape' attribute
values = {'rot13_string':encrypted}
path = os.path.join(os.path.dirname(__file__), 'rot13.html')
self.response.out.write(template.render(path, values))
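
# --- Hedged sketch (not part of the original repo): the Rot13 class imported
# above is defined in rot13.py, which this dump does not include. A minimal
# implementation consistent with the docstring notes (shift letters by 13,
# wrap around, preserve case/punctuation/whitespace) could look like this:
#
#     import string
#     class Rot13(object):
#         _TABLE = string.maketrans(
#             string.ascii_lowercase + string.ascii_uppercase,
#             string.ascii_lowercase[13:] + string.ascii_lowercase[:13] +
#             string.ascii_uppercase[13:] + string.ascii_uppercase[:13])
#         def encrypt(self, text):
#             # translate leaves digits, punctuation and whitespace untouched
#             return str(text).translate(self._TABLE)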
| apache-2.0 | 8,370,183,515,845,830,000 | 38.148148 | 104 | 0.723616 | false | 3.605802 | false | false | false |
amd77/parker | inventario/migrations/0007_auto_20171014_1756.py | 1 | 1706 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventario', '0006_auto_20170930_1629'),
]
operations = [
migrations.CreateModel(
name='ComandoRemoto',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre', models.CharField(help_text='nombre del comando', max_length=100, null=True, blank=True)),
('comando', models.CharField(help_text='comando', max_length=100, null=True, blank=True)),
],
options={
'verbose_name': 'comando Remoto',
'verbose_name_plural': 'Comandos Remotos',
},
),
migrations.CreateModel(
name='NodoRemoto',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(help_text=' url del demonio nameko', max_length=100, null=True, blank=True)),
('nombre', models.CharField(help_text='Nombre del demonio nameko', max_length=100, null=True, blank=True)),
('parking', models.ForeignKey(to='inventario.Parking')),
],
options={
'verbose_name': 'Nodo Remoto',
'verbose_name_plural': 'Nodos Remotos',
},
),
migrations.AddField(
model_name='comandoremoto',
name='nodoremoto',
field=models.ForeignKey(to='inventario.NodoRemoto'),
),
]
| gpl-2.0 | -877,057,221,647,865,600 | 37.772727 | 123 | 0.552755 | false | 3.939954 | false | false | false |
magus424/powerline | powerline/lint/markedjson/reader.py | 1 | 3808 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import codecs
from powerline.lint.markedjson.error import MarkedError, Mark, NON_PRINTABLE
from powerline.lib.unicode import unicode
# This module contains abstractions for the input stream. You don't have to
# look further; there is no pretty code here.
class ReaderError(MarkedError):
pass
class Reader(object):
# Reader:
# - determines the data encoding and converts it to a unicode string,
# - checks if characters are in allowed range,
# - adds '\0' to the end.
# Reader accepts
# - a file-like object with its `read` method returning `str`,
# Yeah, it's ugly and slow.
def __init__(self, stream):
self.name = None
self.stream = None
self.stream_pointer = 0
self.eof = True
self.buffer = ''
self.pointer = 0
self.full_buffer = unicode('')
self.full_pointer = 0
self.raw_buffer = None
self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.index = 0
self.line = 0
self.column = 0
self.stream = stream
self.name = getattr(stream, 'name', "<file>")
self.eof = False
self.raw_buffer = None
while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
self.update_raw()
self.update(1)
def peek(self, index=0):
try:
return self.buffer[self.pointer + index]
except IndexError:
self.update(index + 1)
return self.buffer[self.pointer + index]
def prefix(self, length=1):
if self.pointer + length >= len(self.buffer):
self.update(length)
return self.buffer[self.pointer:self.pointer + length]
def update_pointer(self, length):
while length:
ch = self.buffer[self.pointer]
self.pointer += 1
self.full_pointer += 1
self.index += 1
if ch == '\n':
self.line += 1
self.column = 0
else:
self.column += 1
length -= 1
def forward(self, length=1):
if self.pointer + length + 1 >= len(self.buffer):
self.update(length + 1)
self.update_pointer(length)
def get_mark(self):
return Mark(self.name, self.line, self.column, self.full_buffer, self.full_pointer)
def check_printable(self, data):
match = NON_PRINTABLE.search(data)
if match:
self.update_pointer(match.start())
raise ReaderError(
'while reading from stream', None,
'found special characters which are not allowed',
Mark(self.name, self.line, self.column, self.full_buffer, self.full_pointer)
)
def update(self, length):
if self.raw_buffer is None:
return
self.buffer = self.buffer[self.pointer:]
self.pointer = 0
while len(self.buffer) < length:
if not self.eof:
self.update_raw()
try:
data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof)
except UnicodeDecodeError as exc:
character = self.raw_buffer[exc.start]
position = self.stream_pointer - len(self.raw_buffer) + exc.start
data, converted = self.raw_decode(self.raw_buffer[:exc.start], 'strict', self.eof)
self.buffer += data
self.full_buffer += data + '<' + str(ord(character)) + '>'
self.raw_buffer = self.raw_buffer[converted:]
self.update_pointer(exc.start - 1)
raise ReaderError(
'while reading from stream', None,
'found character #x%04x that cannot be decoded by UTF-8 codec' % ord(character),
Mark(self.name, self.line, self.column, self.full_buffer, position)
)
self.buffer += data
self.full_buffer += data
self.raw_buffer = self.raw_buffer[converted:]
self.check_printable(data)
if self.eof:
self.buffer += '\0'
self.raw_buffer = None
break
def update_raw(self, size=4096):
data = self.stream.read(size)
if self.raw_buffer is None:
self.raw_buffer = data
else:
self.raw_buffer += data
self.stream_pointer += len(data)
if not data:
self.eof = True
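
# Hedged usage sketch (not in the original module): Reader wraps a byte stream
# and exposes look-ahead over the decoded UTF-8 text, e.g.:
#
#     from io import BytesIO
#     r = Reader(BytesIO(b'{"a": 1}'))
#     r.peek()       # '{'   -- look one character ahead without consuming
#     r.prefix(4)    # '{"a"' -- the next four characters, still not consumed
#     r.forward(1)   # consume one character, updating index/line/column
#     r.get_mark()   # Mark(name, line, column, full_buffer, full_pointer)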
| mit | 8,412,327,568,441,967,000 | 27 | 86 | 0.676996 | false | 3.051282 | false | false | false |
lidavidm/sympy | sympy/liealgebras/type_f.py | 1 | 4555 | from sympy.core import Set, Dict, Tuple, Rational
from .cartan_type import Standard_Cartan
from sympy.matrices import Matrix
class TypeF(Standard_Cartan):
def __init__(self, n):
assert n == 4
Standard_Cartan.__init__(self, "F", 4)
def dimension(self):
"""
Returns the dimension of the vector space
V underlying the Lie algebra
Example
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("F4")
>>> c.dimension()
4
"""
return 4
def basic_root(self, i, j):
"""
This is a method just to generate roots
        with a 1 in the ith position and a -1
        in the jth position.
"""
n = self.n
root = [0]*n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""
        Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
This method returns the ith simple root of F_4
Example
=======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("F4")
>>> c.simple_root(3)
[0, 0, 0, 1]
"""
        if i < 3:
            return self.basic_root(i-1, i)
if i == 3:
root = [0]*4
root[3] = 1
return root
if i == 4:
root = [Rational(-1, 2)]*4
return root
def positive_roots(self):
"""
        This method generates all the positive roots of
        F_4. This is half of all of the roots of F_4;
by multiplying all the positive roots by -1 we
get the negative roots.
Example
======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 1
posroots[k] = root
k += 1
root = [Rational(1, 2)]*n
posroots[k] = root
for i in range(1, 4):
k += 1
root = [Rational(1, 2)]*n
root[i] = Rational(-1, 2)
posroots[k] = root
posroots[k+1] = [Rational(1, 2), Rational(1, 2), Rational(-1, 2), Rational(-1, 2)]
posroots[k+2] = [Rational(1, 2), Rational(-1, 2), Rational(1, 2), Rational(-1, 2)]
posroots[k+3] = [Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
posroots[k+4] = [Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(-1, 2)]
return posroots
def roots(self):
"""
Returns the total number of roots for F_4
"""
return 48
def cartan_matrix(self):
"""
Returns the Cartan matrix for F_4
        The Cartan matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Example
=======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('A4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -1, 2]])
"""
m = Matrix( 4, 4, [2, -1, 0, 0, -1, 2, -2, 0, 0,
-1, 2, -1, 0, 0, -1, 2])
return m
def basis(self):
"""
Returns the number of independent generators of F_4
"""
return 52
def dynkin_diagram(self):
diag = "0---0=>=0---0\n"
diag += " ".join(str(i) for i in range(1, 5))
return diag
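
# Hedged sanity check (not part of the original file): the 48 roots of F_4 plus
# its rank-4 Cartan subalgebra account for the 52 generators reported by basis():
#
#     c = TypeF(4)
#     assert c.roots() == 48 and c.dimension() == 4
#     assert c.roots() + c.dimension() == c.basis()      # 48 + 4 == 52
#     assert len(c.positive_roots()) == c.roots() // 2   # 24 positive roots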
| bsd-3-clause | -5,288,564,138,958,715,000 | 27.117284 | 91 | 0.478156 | false | 3.458618 | false | false | false |
flailingsquirrel/asciimapper | OSMTileLoader.py | 1 | 3184 | #!/usr/bin/python
######################################################################
# Ascii TMS Viewer
#
#--------------------------------------------------------------------
# Brian Hone | Initial Release
#--------------------------------------------------------------------
#
#--------------------------------------------------------------------
# Copyright (c) 2009 Brian Hone
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
######################################################################
import curses, time, sys, os, string, random, math
import pprint
from Wget import *
from TileMap import TileMap
from TileLoader import TileLoader
import img_to_aa
false = 0
true = 1
class OSMTileLoader( TileLoader):
def __init__(self, (sizeX, sizeY), url, cacheUrl ):
TileLoader.__init__( self, (sizeX, sizeY), cacheUrl )
self.baseUrl = url
self.mapChars = "....,;clodxkO.XNOM"
# end __init__
def fetchTile( self, x, y, z ):
tileArr = self.getEmptyTile()
pngFile = self.cacheUrl + "/%s/%s/%s.png" % ( z,x,y )
url = self.baseUrl + "/%s/%s/%s.png" % ( z,x,y )
args = [ '-x', url ]
wget( args )
# convert to ascii
row_ctr = 0
col_ctr = 0
img_text = img_to_aa.load_and_scale_image( pngFile, self.mapChars, width=self.sizeX, height=self.sizeY, grayscale=True )
for line in img_text:
for c in line:
tileArr[ row_ctr ][ col_ctr ] = c
col_ctr = col_ctr+1
row_ctr = row_ctr + 1
col_ctr = 0
return tileArr
#end getMap
# end class OSMTileLoader
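
# Hedged helper (not in the original file): the x/y/z in the tile URL follow the
# standard OSM "slippy map" scheme, so a lon/lat pair maps to tile indices as:
#
#     import math
#     def deg2tile( lat_deg, lon_deg, zoom ):
#       n = 2 ** zoom
#       lat = math.radians( lat_deg )
#       x = int( ( lon_deg + 180.0 ) / 360.0 * n )
#       y = int( ( 1.0 - math.log( math.tan( lat ) + 1.0 / math.cos( lat ) ) / math.pi ) / 2.0 * n )
#       return x, y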
if __name__=="__main__":
#def __init__(self, (x,y,z), (sizeX, sizeY), kmlFile, cacheUrl ):
T = OSMTileLoader((55,55), "http://tile.openstreetmap.org", "tile.openstreetmap.org" )
print T.getTile( 0,0,1 )
| bsd-3-clause | -7,132,981,872,047,901,000 | 39.820513 | 125 | 0.605842 | false | 3.930864 | false | false | false |
texttochange/vusion-backend | vusion/persist/content_variable/content_variable_table.py | 1 | 4240 | from vusion.persist import Model
class ContentVariableTable(Model):
MODEL_TYPE = 'content_variable_table'
MODEL_VERSION = '2'
fields= {
'name': {
'required': True
},
'columns': {
'required': True
},
'column-key-selection': {
'required': True
}
}
def upgrade(self, **kwargs):
if kwargs['model-version'] == '1':
kwargs['column-key-selection'] = 'auto'
kwargs['model-version'] = '2'
return kwargs
def validate_fields(self):
self._validate(self, self.fields)
def _find_indexes(self, match):
key1_indexes = self._get_indexes(self['columns'][0], match['key1'])
if not key1_indexes:
return None
if 'key3' in match:
key2_indexes = self._get_indexes(self['columns'][1], match['key2'])
if len(key1_indexes & key2_indexes) == 0:
return None
row_index = (key1_indexes & key2_indexes).pop()
col_index = self._get_column_index(match['key3'])
else:
row_index = key1_indexes.pop()
col_index = self._get_column_index(match['key2'])
if col_index is None or row_index is None:
return None
return {'col_index': col_index, 'row_index': row_index}
def get_value(self, match):
indexes = self._find_indexes(match)
if indexes is None:
return None
return self._get_index_value(indexes['col_index'],
indexes['row_index'])
def set_value(self, match, value, upsert=True):
indexes = self._find_indexes(match)
if indexes is None:
            if not upsert:
                return False
self._add_match(match)
indexes = self._find_indexes(match)
self._set_index_value(indexes['col_index'],
indexes['row_index'],
value)
return True
## function that will add the necessary col or row for a match
def _add_match(self, match):
if 'key3' not in match:
if not self._get_indexes(self['columns'][0], match['key1']):
self._create_row(match['key1'], None)
if not self._get_column_index(match['key2']):
self._create_column(match['key2'])
else:
key1_indexes = self._get_indexes(self['columns'][0], match['key1'])
key2_indexes = self._get_indexes(self['columns'][1], match['key2'])
if len(key1_indexes & key2_indexes) == 0:
self._create_row(match['key1'], match['key2'])
if not self._get_column_index(match['key3']):
self._create_column(match['key3'])
def _create_column(self, key):
index = self._count_columns()
values = []
for i in range(0, self._count_rows()):
values.append(None)
self['columns'].append(
{'header': key,
'values': values,
'validation': None,
'type': 'contentvariable'})
def _create_row(self, key1, key2):
index = 1
self['columns'][0]['values'].append(key1)
if not key2 is None:
self['columns'][1]['values'].append(key2)
index = 2
for i in range(index, self._count_columns()):
self['columns'][i]['values'].append(None)
def _count_columns(self):
return len(self['columns'])
def _count_rows(self):
return len(self['columns'][0]['values'])
def _get_index_value(self, col_index, row_index):
return self['columns'][col_index]['values'][row_index]
def _set_index_value(self, col_index, row_index, value):
self['columns'][col_index]['values'][row_index] = value
def _get_indexes(self, column, key):
indexes = set()
for i,x in enumerate(column['values']):
if x == key:
indexes.add(i)
return indexes
def _get_column_index(self, key):
for i, col in enumerate(self['columns']):
if col['header'] == key:
return i
return None
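
# Hedged usage sketch (not part of the original module; assumes the Model base
# class accepts the document fields as keyword arguments). With two columns,
# key1 selects the row via the first column and key2 names the target column:
#
#     table = ContentVariableTable(
#         name='prices', **{'column-key-selection': 'auto',
#                           'columns': [{'header': 'city', 'values': ['Paris']},
#                                       {'header': 'price', 'values': [None],
#                                        'validation': None, 'type': 'contentvariable'}]})
#     table.set_value({'key1': 'Paris', 'key2': 'price'}, '10')
#     assert table.get_value({'key1': 'Paris', 'key2': 'price'}) == '10'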
| bsd-3-clause | -1,479,600,885,574,234,400 | 33.471545 | 79 | 0.519811 | false | 3.973758 | false | false | false |
phantomii/restalchemy | examples/migrations/8d3025-2st-migration.py | 1 | 1163 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2016 Eugene Frolov <[email protected]>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from restalchemy.storage.sql import migrations
class MigrationStep(migrations.AbstarctMigrationStep):
def __init__(self):
self._depends = ["bf4d04-1st-migration.py"]
@property
def migration_id(self):
return "8d302575-a1ce-43db-b312-e070e8d0cf7f"
def upgrade(self, session):
six.print_("upgrade 2st")
def downgrade(self, session):
six.print_("downgrade 2st")
migration_step = MigrationStep()
| apache-2.0 | -8,124,373,744,143,169,000 | 28.075 | 78 | 0.706793 | false | 3.611801 | false | false | false |
brendangregg/bcc | tools/exitsnoop.py | 1 | 10349 | #!/usr/bin/python
# @lint-avoid-python-3-compatibility-imports
from __future__ import print_function
import argparse
import ctypes as ct
import os
import platform
import re
import signal
import sys
from bcc import BPF
from datetime import datetime
from time import strftime
#
# exitsnoop Trace all process termination (exit, fatal signal)
# For Linux, uses BCC, eBPF. Embedded C.
#
# USAGE: exitsnoop [-h] [-x] [-t] [--utc] [--label[=LABEL]] [-p PID]
#
_examples = """examples:
exitsnoop # trace all process termination
exitsnoop -x # trace only fails, exclude exit(0)
exitsnoop -t # include timestamps (local time)
exitsnoop --utc # include timestamps (UTC)
exitsnoop -p 181 # only trace PID 181
exitsnoop --label=exit # label each output line with 'exit'
"""
"""
Exit status (from <include/sysexits.h>):
0 EX_OK Success
2 argparse error
70 EX_SOFTWARE syntax error detected by compiler, or
verifier error from kernel
77 EX_NOPERM Need sudo (CAP_SYS_ADMIN) for BPF() system call
The template for this script was Brendan Gregg's execsnoop
https://github.com/iovisor/bcc/blob/master/tools/execsnoop.py
More information about this script is in bcc/tools/exitsnoop_example.txt
Copyright 2016 Netflix, Inc.
Copyright 2019 Instana, Inc.
Licensed under the Apache License, Version 2.0 (the "License")
07-Feb-2016 Brendan Gregg (Netflix) Created execsnoop
04-May-2019 Arturo Martin-de-Nicolas (Instana) Created exitsnoop
13-May-2019 Jeroen Soeters (Instana) Refactor to import as module
"""
def _getParser():
parser = argparse.ArgumentParser(
description="Trace all process termination (exit, fatal signal)",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=_examples)
a=parser.add_argument
a("-t", "--timestamp", action="store_true", help="include timestamp (local time default)")
a("--utc", action="store_true", help="include timestamp in UTC (-t implied)")
a("-p", "--pid", help="trace this PID only")
a("--label", help="label each line")
a("-x", "--failed", action="store_true", help="trace only fails, exclude exit(0)")
# print the embedded C program and exit, for debugging
a("--ebpf", action="store_true", help=argparse.SUPPRESS)
# RHEL 7.6 keeps task->start_time as struct timespec, convert to u64 nanoseconds
a("--timespec", action="store_true", help=argparse.SUPPRESS)
return parser.parse_args
class Global():
parse_args = _getParser()
args = None
argv = None
SIGNUM_TO_SIGNAME = dict((v, re.sub("^SIG", "", k))
for k,v in signal.__dict__.items() if re.match("^SIG[A-Z]+$", k))
class Data(ct.Structure):
"""Event data matching struct data_t in _embedded_c()."""
_TASK_COMM_LEN = 16 # linux/sched.h
_pack_ = 1
_fields_ = [
("start_time", ct.c_ulonglong), # task->start_time, see --timespec arg
("exit_time", ct.c_ulonglong), # bpf_ktime_get_ns()
("pid", ct.c_uint), # task->tgid, thread group id == sys_getpid()
("tid", ct.c_uint), # task->pid, thread id == sys_gettid()
("ppid", ct.c_uint),# task->parent->tgid, notified of exit
("exit_code", ct.c_int),
("sig_info", ct.c_uint),
("task", ct.c_char * _TASK_COMM_LEN)
]
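
# Hedged note (not in the original): task->exit_code packs termination status the
# same way wait(2) does -- exit code in the high byte, signal info in the low
# byte, with 0x80 flagging a core dump -- which is why the eBPF program below
# computes exit_code >> 8 and & 0xFF. For example:
#
#     status = 134                   # hypothetical raw value: SIGABRT, core dumped
#     status >> 8, status & 0x7F     # (0, 6): no exit code, signal 6
#     bool(status & 0x80)            # True: core was dumped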
def _embedded_c(args):
"""Generate C program for sched_process_exit tracepoint in kernel/exit.c."""
c = """
EBPF_COMMENT
#include <linux/sched.h>
BPF_STATIC_ASSERT_DEF
struct data_t {
u64 start_time;
u64 exit_time;
u32 pid;
u32 tid;
u32 ppid;
int exit_code;
u32 sig_info;
char task[TASK_COMM_LEN];
} __attribute__((packed));
BPF_STATIC_ASSERT(sizeof(struct data_t) == CTYPES_SIZEOF_DATA);
BPF_PERF_OUTPUT(events);
TRACEPOINT_PROBE(sched, sched_process_exit)
{
struct task_struct *task = (typeof(task))bpf_get_current_task();
if (FILTER_PID || FILTER_EXIT_CODE) { return 0; }
struct data_t data = {
.start_time = PROCESS_START_TIME_NS,
.exit_time = bpf_ktime_get_ns(),
.pid = task->tgid,
.tid = task->pid,
.ppid = task->parent->tgid,
.exit_code = task->exit_code >> 8,
.sig_info = task->exit_code & 0xFF,
};
bpf_get_current_comm(&data.task, sizeof(data.task));
events.perf_submit(args, &data, sizeof(data));
return 0;
}
"""
# TODO: this macro belongs in bcc/src/cc/export/helpers.h
bpf_static_assert_def = r"""
#ifndef BPF_STATIC_ASSERT
#define BPF_STATIC_ASSERT(condition) __attribute__((unused)) \
extern int bpf_static_assert[(condition) ? 1 : -1]
#endif
"""
code_substitutions = [
('EBPF_COMMENT', '' if not Global.args.ebpf else _ebpf_comment()),
("BPF_STATIC_ASSERT_DEF", bpf_static_assert_def),
("CTYPES_SIZEOF_DATA", str(ct.sizeof(Data))),
('FILTER_PID', '0' if not Global.args.pid else "task->tgid != %s" % Global.args.pid),
('FILTER_EXIT_CODE', '0' if not Global.args.failed else 'task->exit_code == 0'),
('PROCESS_START_TIME_NS', 'task->start_time' if not Global.args.timespec else
'(task->start_time.tv_sec * 1000000000L) + task->start_time.tv_nsec'),
]
for old,new in code_substitutions:
c = c.replace(old, new)
return c
def _ebpf_comment():
"""Return a C-style comment with information about the generated code."""
comment=('Created by %s at %s:\n\t%s' %
(sys.argv[0], strftime("%Y-%m-%d %H:%M:%S %Z"), _embedded_c.__doc__))
args = str(vars(Global.args)).replace('{','{\n\t').replace(', ',',\n\t').replace('}',',\n }\n\n')
return ("\n /*" + ("\n %s\n\n ARGV = %s\n\n ARGS = %s/" %
(comment, ' '.join(Global.argv), args))
.replace('\n','\n\t*').replace('\t',' '))
def _print_header():
if Global.args.timestamp:
title = 'TIME-' + ('UTC' if Global.args.utc else strftime("%Z"))
print("%-13s" % title, end="")
if Global.args.label is not None:
print("%-6s" % "LABEL", end="")
print("%-16s %-6s %-6s %-6s %-7s %-10s" %
("PCOMM", "PID", "PPID", "TID", "AGE(s)", "EXIT_CODE"))
def _print_event(cpu, data, size): # callback
"""Print the exit event."""
e = ct.cast(data, ct.POINTER(Data)).contents
if Global.args.timestamp:
now = datetime.utcnow() if Global.args.utc else datetime.now()
print("%-13s" % (now.strftime("%H:%M:%S.%f")[:-3]), end="")
if Global.args.label is not None:
label = Global.args.label if len(Global.args.label) else 'exit'
print("%-6s" % label, end="")
age = (e.exit_time - e.start_time) / 1e9
print("%-16s %-6d %-6d %-6d %-7.2f " %
(e.task.decode(), e.pid, e.ppid, e.tid, age), end="")
if e.sig_info == 0:
print("0" if e.exit_code == 0 else "code %d" % e.exit_code)
else:
sig = e.sig_info & 0x7F
if sig:
print("signal %d (%s)" % (sig, signum_to_signame(sig)), end="")
if e.sig_info & 0x80:
print(", core dumped ", end="")
print()
# =============================
# Module: These functions are available for import
# =============================
def initialize(arg_list = sys.argv[1:]):
"""Trace all process termination.
arg_list - list of args, if omitted then uses command line args
arg_list is passed to argparse.ArgumentParser.parse_args()
For example, if arg_list = [ '-x', '-t' ]
args.failed == True
args.timestamp == True
Returns a tuple (return_code, result)
0 = Ok, result is the return value from BPF()
1 = args.ebpf is requested, result is the generated C code
os.EX_NOPERM: need CAP_SYS_ADMIN, result is error message
os.EX_SOFTWARE: internal software error, result is error message
"""
Global.argv = arg_list
Global.args = Global.parse_args(arg_list)
if Global.args.utc and not Global.args.timestamp:
Global.args.timestamp = True
if not Global.args.ebpf and os.geteuid() != 0:
return (os.EX_NOPERM, "Need sudo (CAP_SYS_ADMIN) for BPF() system call")
if re.match('^3\.10\..*el7.*$', platform.release()): # Centos/Red Hat
Global.args.timespec = True
for _ in range(2):
c = _embedded_c(Global.args)
if Global.args.ebpf:
return (1, c)
try:
return (os.EX_OK, BPF(text=c))
except Exception as e:
error = format(e)
            if (not Global.args.timespec
                    and 'struct timespec' in error
                    and 'start_time' in error):
print('This kernel keeps task->start_time in a struct timespec.\n' +
'Retrying with --timespec')
Global.args.timespec = True
continue
return (os.EX_SOFTWARE, "BPF error: " + error)
except:
return (os.EX_SOFTWARE, "Unexpected error: {0}".format(sys.exc_info()[0]))
def snoop(bpf, event_handler):
"""Call event_handler for process termination events.
bpf - result returned by successful initialize()
event_handler - callback function to handle termination event
args.pid - Return after event_handler is called, only monitoring this pid
"""
bpf["events"].open_perf_buffer(event_handler)
while True:
bpf.perf_buffer_poll()
if Global.args.pid:
return
def signum_to_signame(signum):
"""Return the name of the signal corresponding to signum."""
return Global.SIGNUM_TO_SIGNAME.get(signum, "unknown")
# =============================
# Script: invoked as a script
# =============================
def main():
try:
rc, buffer = initialize()
if rc:
print(buffer)
sys.exit(0 if Global.args.ebpf else rc)
_print_header()
snoop(buffer, _print_event)
except KeyboardInterrupt:
print()
sys.exit()
return 0
if __name__ == '__main__':
main()
| apache-2.0 | -4,594,911,471,573,904,400 | 36.361011 | 101 | 0.574935 | false | 3.475151 | false | false | false |
airanmehr/Utils | Simulation.py | 1 | 40529 | '''
Copyleft Oct 10, 2015 Arya Iranmehr, PhD Student, Bafna's Lab, UC San Diego, Email: [email protected]
'''
from __future__ import division
import numpy as np;
import pandas as pd;
np.set_printoptions(linewidth=140, precision=5, suppress=True)
import subprocess, uuid, os,sys
import pylab as plt
import UTILS.Util as utl
stdout_old=sys.stdout;sys.stdout=open('/dev/null','w');import simuPOP as sim;sys.stdout=stdout_old # to avoid simuPop welcome message!
def sig(x): return 1./(1+np.exp(-x));
def logit(p): return (np.inf if p==1 else np.log(p/(1.-p)))
a='';
def fff(msg):
global a
a += msg
class MSMS:
@staticmethod
def Simulate(n=200, mu=2*1e-9, L=50000, Ne=1e6,r=1e-9,verbose=False,seed=None,intPos=False):
L=int(L)
a= MSMS.Song(F=n, mu=mu, L=L, Ne=Ne, r=r,verbose=verbose,seed=seed)
c=pd.Series(a.columns)
if c.round().value_counts().max()==1:
a.columns=c.round().astype(int)
elif c.astype(int).value_counts().max()==1:
a.columns = c.astype(int)
if intPos:
a.columns=map(int,np.sort(np.random.choice(L, a.shape[1], replace=False)))
return a
@staticmethod
def Song(F=200, mu=2*1e-9, L=50000, Ne=1e6,r=4e-9, uid=None, theta=None, msmsFile=None, dir=None,verbose=False,seed=None):
"""
        Everything is exactly the same.
"""
# print 'mu: {} r:{} NE:{} ,theta={} '.format(mu,r,Ne,4*Ne*mu*L), theta
if msmsFile is not None:
pop=MSMS.load(filename=msmsFile)[0]
else:
if theta:
pop=MSMS.MSMS(n=F, numReps=1, theta=theta, rho=2*Ne*(L-1)*r, L=L, Ne=Ne, uid=uid, dir=dir,verbose=verbose,seed=seed)[0]
else:
pop=MSMS.MSMS(n=F, numReps=1, theta=2*Ne*mu*L, rho=2*Ne*(L-1)*r, L=L, Ne=Ne, uid=uid, dir=dir,verbose=verbose,seed=seed)[0]
pop.r=r
pop.Ne=Ne
pop.L=L
return pop
@staticmethod
def MSMS(n, numReps, theta, rho, L, Ne=None,uid=None,oneMutationEvery=None, dir=dir,verbose=False,seed=None):
"""
Returns a list of dataframe for each replicate
"""
if dir is None:
dir= utl.PATH.simout;dir+= 'msms/';
os.system('mkdir -p ' +dir)
if oneMutationEvery is not None:
nSS=L/oneMutationEvery
theta=nSS/sum(1./np.arange(1,n))
if uid is None:
uid=str(uuid.uuid4())
unique_filename = dir+uid+'.msms'
if seed is None:
seed=''
else:
seed=' -seed {} '.format(seed)
cmd="java -jar -Xmx2g ~/bin/msms/lib/msms.jar -ms {} {} -t {:.0f} -r {:.0f} {:.0f} -oFP 0.000000000000E00 {} > {}".format(n, numReps, theta, rho, L, seed,unique_filename)
if verbose:
print cmd
subprocess.call(cmd,shell=True)
return MSMS.load(unique_filename)
@staticmethod
def getSeed(filename):
file=open(filename);cmd=np.array(file.readline().strip().split(' '));seed=file.readline().strip()
return seed
@staticmethod
def load(filename):
n, R, L, posUnderSelection = MSMS.getParams(open(filename).readline())
lines=np.array(map(str.strip,open(filename).readlines()) )
posIdx= np.where(map(lambda x: x[:len('positions:')]=='positions:',lines))[0]
try:
theta = lines[np.where(map(lambda x: 'ThetaW Estimate Summaray:' in x, lines))[0][0]].split(':')[1].strip()
except:
theta = None
POS=[map(lambda x: (float(x)*L), lines[ii].split()[1:]) for ii in posIdx]
dfs=[pd.DataFrame(map(list ,lines[i +1 +range(n)]),columns=pos ) for i,pos in zip(posIdx,POS)]
for df in dfs:
df[df!='0']=1
df[df=='0']=0
df.L = L
if posUnderSelection is not None:
df.posUnderSelection = posUnderSelection * L
if theta is not None:
df.stat = pd.Series(theta.split(), index=['W', 'Pi', 'D']).astype(float)
return dfs
@staticmethod
def getParams(line):
"""
Args:
            line: the first line of an msms file
        Returns:
            n, R, L, posUnderSelection: sample size, number of replicates, genome length, and the position under selection (if any)
"""
params=np.array(line.strip().split(' '))
offset=np.where(map(lambda x: 'ms'in x, params))[0][0]
if params[offset+1] == '-N':
i=3
else:
i=1
posUnderSelection = None
if '-Sp' in params: posUnderSelection = float(params[np.where(params == '-Sp')[0][0] + 1])
return int(params[offset + i]), int(params[offset + i + 1]), int(
params[np.where(params == '-r')[0][0] + 2]), posUnderSelection
@staticmethod
def fixDuplicatePositions(pos,L):
pos=pd.Series(range(len(pos)),index=pos)
posHits=pos.index.value_counts()
invalidPOS=posHits[posHits>1]
if not invalidPOS.shape[0]:
return pos.index.values
for invalidPos in invalidPOS.index:
mini=pos.loc[invalidPos].min()
maxi=pos.loc[invalidPos].max()
lowerBound=pos[pos==mini-1].index.max()
upperBound=pos[pos==maxi+1].index.min();
if maxi==pos.shape[0]-1: upperBound=L
if mini==0: lowerBound=0
            validRange=np.arange((upperBound-lowerBound)/2) # only the second and third quartiles
            offset=validRange+validRange.shape[0]/2 # skip the first quartile
newPos=pos.index.values;
newPos[mini:maxi+1]=np.sort(np.random.choice(offset,pos.loc[invalidPos].shape[0],replace=False))+lowerBound
pos.index=newPos
assert pos.index.value_counts().max()==1
return pos.index.values
@staticmethod
def Selection(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count, posUnderSelection, gens, path):
seed = ''
for ii, gen in enumerate(gens):
fname = path + '{}.msms'.format(int(gen))
if (not ii) and s != 0:
# while (nu0 < 0.95) or (nu0 > 0.99):
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SForceKeep -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
else:
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -SForceKeep -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
if not ii: seed = MSMS.getSeed(fname)
@staticmethod
def SelectionFinale(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count, posUnderSelection, gens,
path):
seed = ''
nu0 = 0
for ii, gen in enumerate(gens):
fname = path + '{}.msms'.format(int(gen))
if (not ii) and s != 0:
while (nu0 < 0.9):
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SForceKeep -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
nu0 = MSMS.load(fname)[0].mean(0).loc[25000]
else:
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -SForceKeep -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
if not ii: seed = MSMS.getSeed(fname)
@staticmethod
def SelectionNu(msms, Ne, n, numReplicates, theta, rho, window_size, s, posUnderSelection, nu, path=None):
seed = ''
if path is None: path = '~/tmp.msms'
fname = path + '{}.msms'.format(nu)
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SF 0 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, nu, posUnderSelection,
('-seed {}'.format(seed), '')[seed is ''], fname)
print cmd
os.system(cmd)
return MSMS.load(fname)
@staticmethod
def SelectionNuForward(msms, Ne, n, numReplicates, theta, rho, window_size, s, origin_count, posUnderSelection,
                            gens, path):
        seed = ''
        nu0 = 0
for ii, gen in enumerate(gens):
fname = path + '{}.msms'.format(gen)
if (not ii) and s != 0:
while (nu0 < 0.95) or (nu0 > 0.99):
cmd = "{} -N {} -ms {} {} -t {} -r {} {:.0f} -SAA {} -SaA {} -SI {} 1 {} -Sp {} -oOC -Smark -oFP 0.000000000000E00 {} -SFC -oTW >{}".format(
msms, Ne, n, numReplicates, theta, rho, window_size, 2 * Ne * s, Ne * s, gen / (4. * Ne),
origin_count / Ne,
posUnderSelection, ('-seed {}'.format(seed), '')[seed is ''], fname)
os.system(cmd)
nu0 = MSMS.load(fname)[0].mean(0).loc[25000]
print nu0, gen, cmd
if not ii: seed = MSMS.getSeed(fname)
class Simulation:
@staticmethod
def setSeed(seed):
if seed is None: return
sim.setRNG('rand', seed + 1);
np.random.seed(seed)
@staticmethod
def load(ExperimentName, s=0.1, L=50000, experimentID=0, nu0=0.005, isFolded=False, All=False, startGeneration=0,
maxGeneration=50, numReplicates=3, numSamples=5, step=10, replicates=None, coverage=np.inf):
path='{}{}/simpop/'.format(utl.PATH.simout, ExperimentName) + Simulation.getSimulationName(s=s, L=L, experimentID=experimentID, initialCarrierFreq=nu0, isFolded=isFolded) + '.pkl'
sim= pd.read_pickle(path)
sim.savedPath=path
if replicates is not None: sim.setReplicates(sorted(replicates))
elif numReplicates is not None: sim.setReplicates(range(numReplicates))
if coverage != np.inf:
sim.Xi = sim.X
sim.X = sim.C.loc[coverage] / sim.D.loc[coverage].astype(float)
sim.X = np.array(map(lambda x: utl.roundto(x, 5), sim.X.reshape(-1) * 1e4)).reshape(sim.X.shape) / 1e4
sim.CD=sim.getCD(coverage)
sim.CD.columns.names=['REP','GEN','READ']
if not All: sim.setSamplingTimes(maxGeneration=min(maxGeneration,sim.getGenerationTimes()[-1]),numSamples=numSamples,step=step,startGeneration=startGeneration)
return sim
@staticmethod
def getSimulationName(s,L,experimentID,initialCarrierFreq,isFolded,msms=False):
if msms:
return 'L{:.0f}K.{:04.0f}'.format(L/1000,experimentID)
if s:
return 'Nu{:E}.s{:E}.L{:.0f}K.{:04.0f}{}'.format(np.round(float(initialCarrierFreq), 3), s, L / 1000,
experimentID, ('', '.Folded')[isFolded])
else:
return 'Nu{:E}.s{:E}.L{:.0f}K.{:04.0f}{}'.format(0, s * 100, L / 1000, experimentID,
('', '.Folded')[isFolded])
def setReplicates(self,replicates):
self.numReplicates=len(replicates)
self.X=self.X[:,:,replicates]
self.C = self.C.apply(lambda x: x[:, :, replicates])
self.D = self.D.apply(lambda x: x[:, :, replicates])
def __init__(self, outpath=utl.PATH.simout, N=1000, generationStep=10, maxGeneration=None,
s=0.05, r=4e-9, Ne=1e6, mu=2e-9, F=200, h=0.5, L=50000, startGeneration=0, numReplicates=3, H0=None,
foldInitialAFs=False, save=True, foutName=None,
doForwardSimulationNow=True, experimentID=-1,
msmsFile=None,initialCarrierFreq=0, ExperimentName=None, simulateNeutrallyFor=0,
initialNeutralGenerations=0, ignoreInitialNeutralGenerations=True,
makeSureSelectedSiteDontGetLost=True, onlyKeep=None, verbose=0, sampingTimes=None, minIncrease=0,
model=None,initDiploidPop=None,posUnderSelection=-1,haplotypes=False,seed=None,recombinator=None
):
"""
A General Simulation Class; with params
H0: Dataframe F x m for F individuals and m segregation sites ; Initial Haplotypes; dataframe with columns as positions
"""
self.recombinator=recombinator
if seed is not None:
Simulation.setSeed(seed)
self.s = s;
self.r = r;
self.Ne = Ne;
self.mu = mu;
self.F = F;
self.h = h;
self.L = int(L);
self.startGeneration = startGeneration;
self.numReplicates = numReplicates;
self.posUnderSelection = -1
self.initDiploidPop = initDiploidPop
self.initialCarrierFreq= initialCarrierFreq if initialCarrierFreq else 1./self.F
if foutName is not None:
self.uid=foutName
self.uidMSMS=None
elif experimentID>=0:
self.uid=Simulation.getSimulationName(self.s, self.L, self.experimentID, initialCarrierFreq=self.initialCarrierFreq, isFolded=self.foldInitialAFs)
self.uidMSMS=Simulation.getSimulationName(self.s, self.L, self.experimentID, initialCarrierFreq=self.initialCarrierFreq, isFolded=self.foldInitialAFs,msms=True)
else:
self.uid=str(uuid.uuid4())
self.uidMSMS=self.uid
if H0 is None:
self.simulateH0()
H0=self.H0
else:
self.setH0(H0);
if posUnderSelection >= 0:
if self.positions is None:
self.positions=map(int, self.initDiploidPop.lociPos())
self.set_posUnderSelection(posUnderSelection)
assert ExperimentName != None
self.save=save
self.model=model
self.minIncrease = minIncrease
self.samplingTimes=sampingTimes
self.initialNeutralGenerations=initialNeutralGenerations
self.onlyKeep=onlyKeep
self.makeSureSelectedSiteDontGetLost=makeSureSelectedSiteDontGetLost
self.ignoreInitialNeutralGenerations=ignoreInitialNeutralGenerations
self.msmsFile=msmsFile;self.outpath=outpath; self.outpath=outpath ; self.N=N; self.generationStep=generationStep; self.maxGeneration= maxGeneration;
self.foldInitialAFs=foldInitialAFs;self.doForwardSimulationNow=doForwardSimulationNow;self.experimentID=experimentID
self.simulateNeutrallyFor=simulateNeutrallyFor
self.setH0(H0);
if not os.path.exists(self.outpath) : os.makedirs(self.outpath)
self.outpath+=ExperimentName
if not os.path.exists(self.outpath) : os.makedirs(self.outpath)
self.outpathmsms=self.outpath+'/msms/';self.outpath+='/simpop/'
if not os.path.exists(self.outpath) : os.makedirs(self.outpath)
if not os.path.exists(self.outpathmsms) : os.makedirs(self.outpathmsms)
if self.maxGeneration is None: self.maxGeneration=Simulation.getFixationTime(self.s, Ne=self.F, roundto10=True)
self.theta=2*self.Ne*self.mu*self.L
self.pops=[]
if self.model is None:
import simuPOP.demography as dmg
self.model=dmg.LinearGrowthModel(T=self.maxGeneration, N0=self.N, NT=self.N)
if self.doForwardSimulationNow:
self.forwardSimulation()
@staticmethod
def simulateSingleLoci(nu0=0.005, T=100, s=0.1, N=1000,verbose=True,h=0.5,seed=None):
if verbose:
print '.',
step = 1
Simulation.setSeed(seed)
pop = sim.Population(size=N, ploidy=2, loci=[1],infoFields=['fitness']);sim.initGenotype(pop, prop=[1-nu0,nu0]);simulator = sim.Simulator(pop.clone(), rep=1);
# sim.stat(pop, alleleFreq=[0]); print pop.dvars().alleleFreq[0][1]
global a;a = "0;;{}\n".format(nu0)
simulator.evolve(initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=0, fitness={(0, 0): 1, (0, 1): 1 + s *h, (1, 1): 1 + s}),
matingScheme=sim.RandomMating(), postOps=[sim.Stat(alleleFreq=[0], step=step),
sim.PyEval("'%d;;' % (gen+1)", reps=0, step=step,
output=fff), sim.PyEval(
r"'{}\n'.format(map(lambda x: round(x[1],5),alleleFreq.values())[0])", step=step, output=fff)],
gen=T)
return pd.DataFrame(zip(*map(lambda x: x.split(';;'), a.strip().split('\n')))).T.set_index(0)[1].astype(float)
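
    # Hedged usage sketch (not in the original): the returned Series is the
    # beneficial-allele frequency trajectory indexed by generation, e.g.
    #
    #     traj = Simulation.simulateSingleLoci(nu0=0.05, T=50, s=0.1, N=1000, seed=0)
    #     traj.plot()    # frequency vs. generation for one replicate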
def createInitialDiploidPopulation(self):
"""
        initHaps: m x nSS 2D array, where m is the number of individual haplotypes and nSS the number of segregating sites.
        Returns a homozygous diploid population in which every haplotype is copied n times.
"""
if self.initDiploidPop is not None: return self.initDiploidPop
        assert int(2*self.N/self.F)==2*self.N/float(self.F) # N should be a multiple of F
nSS=self.H0.shape[1];n=int(self.N/self.F)
try:
pop = sim.Population(size=self.N, ploidy=2, loci=nSS,lociPos=list(self.positions), infoFields='fitness')
except:
import traceback
print(traceback.format_exc())
print list(self.positions), nSS,n,self.H0.shape[0]
exit()
assert (self.N % self.H0.shape[0]) ==0
H= [[list(h.values),list(h.values)] for _ in range(n) for _,h in self.H0.iterrows()]
for (i,h) in zip(pop.individuals(),H): # for each indv assing first and second chromosome
i.setGenotype(h[0],0 );i.setGenotype(h[1],1 ) #homozygote population of diploid
# sim.stat(pop, alleleFreq=range(nSS));print np.array([pop.dvars().alleleFreq[x][1] for x in range(nSS)])
return pop
@staticmethod
def getGT(pop, i=None, pos=None):
if i == None and pos == None:
df = pd.concat([pd.DataFrame([list(i.genotype(0)) for i in pop.individuals()]),
pd.DataFrame([list(i.genotype(1)) for i in pop.individuals()])],
keys=[0, 1]).sort_index().reorder_levels([1, 0]).sort_index()
df.columns = map(int, pop.lociPos())
return df
i = np.where(np.array(pop.lociPos()).astype(int) == pos)[0][0]
a, b = [], []
for ind in pop.individuals():
a += [ind.genotype(0)[i]]
b += [ind.genotype(1)[i]]
return pd.concat([pd.Series(a), pd.Series(b)], keys=[0, 1]).reorder_levels([1, 0]).sort_index()
@staticmethod
def createDiploidPopulationFromDataFrame(df):
"""
        initHaps: m x nSS 2D array, where m is the number of individual haplotypes and nSS the number of segregating sites.
        Returns a homozygous diploid population in which every haplotype is copied n times.
"""
pop = sim.Population(size=df.shape[0]/2, ploidy=2, loci=df.shape[1], lociPos=list(df.columns), infoFields='fitness')
for j,i in enumerate(pop.individuals()): # for each indv assing first and second chromosome
i.setGenotype(df.loc[j].loc[0].tolist(),0 );i.setGenotype(df.loc[j].loc[1].tolist(),1 )
return pop
@staticmethod
def _simualtePop(pop, s=0, h=0.5, r=2e-8, siteUnderSelection=0,gen=1,recombinator=None,seed=None):
"Gets population and returns population"
Simulation.setSeed(seed)
simulator = sim.Simulator(pop.clone(), rep=1)
if recombinator is None:recombinator=sim.Recombinator(intensity=r)
simulator.evolve(
initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=siteUnderSelection, fitness={(0, 0): 1, (0, 1): 1 + s * h, (1, 1): 1 + s}),
matingScheme=sim.RandomMating(ops=recombinator),
gen=gen)
return simulator.population(0).clone()
@staticmethod
def _simualte(pop,s,h,r,siteUnderSelection,positions,startGeneration,generationStep,maxGeneration,model=None,makeSureSelectedSiteDontGetLost=True):
        "Takes a population and returns the allele-frequency array over time (static method)."
N = int(pop.popSize())
if model is None:
import simuPOP.demography as dmg
model = dmg.LinearGrowthModel(T=maxGeneration, N0=N, NT=N)
simulator = sim.Simulator(pop.clone(), rep=1)
global a;a = ""
pops=[]
step=1# this is slow but safe, dont change it
simulator.evolve(
initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=siteUnderSelection, fitness={(0, 0): 1, (0, 1): 1 + s * h, (1, 1): 1 + s}),
matingScheme=sim.RandomMating(ops=sim.Recombinator(intensity=r),subPopSize=model),
postOps=[sim.Stat(alleleFreq=range(int(pop.numLoci()[0])), step=step), sim.PyEval("'Gen %4d;;' % (gen+1)", reps=0,step= step, output=fff), sim.PyEval(r"'{},'.format(map(lambda x: round(x[1],5),alleleFreq.values()))", step=step, output=fff),sim.PyOutput('\n', reps=-1, step=step, output=fff)],
gen = maxGeneration)
# idx=np.arange(self.generationStep-1,self.maxGeneration,self.generationStep)+self.initialNeutralGenerations
print a
_,data=zip(*map(lambda x: x.split(';;'),a.strip().split('\n')))
data=np.array(map(eval,data))[:,0,:]
print data
# if data[-1, self.siteUnderSelection] >= self.initialCarrierFreq + self.minIncrease or self.s == 0 or not self.makeSureSelectedSiteDontGetLost:
if data[-1, siteUnderSelection] or s == 0 or not makeSureSelectedSiteDontGetLost:
try:
pops+=[simulator.extract(0) ]
except:
print 'Error'
return data[int(startGeneration/generationStep):,:]
        else:
            return Simulation._simualte(pop, s, h, r, siteUnderSelection, positions, startGeneration,
                                        generationStep, maxGeneration, model, makeSureSelectedSiteDontGetLost)
def simualte(self):
        "Runs the forward simulation and returns the allele-frequency array over time."
import simuPOP.demography as dmg
# model=dmg.ExponentialGrowthModel(T=50, N0=1000, NT=200)
simulator = sim.Simulator(self.initDiploidPop.clone(), rep=1)
# sim.dump(self.initDiploidPop)
global a;a = ""
if self.recombinator is None:
self.recombinator=sim.Recombinator(intensity=self.r)
step=1# this is slow but safe, dont change it
simulator.evolve(
initOps=[sim.InitSex()],
preOps=sim.MapSelector(loci=self.siteUnderSelection, fitness={(0,0):1, (0,1):1+self.s*self.h, (1,1):1+self.s}),
matingScheme=sim.RandomMating(ops=self.recombinator,subPopSize=self.model),
postOps=[sim.Stat(alleleFreq=range(len(self.positions)), step=step),
sim.PyEval("'Gen %4d;;' % (gen+1)", reps=0,step= step, output=fff), sim.PyEval(r"'{},'.format(map(lambda x: round(x[1],5),alleleFreq.values()))", step=step, output=fff),sim.PyOutput('\n', reps=-1, step=step, output=fff)],
gen = self.maxGeneration)
# idx=np.arange(self.generationStep-1,self.maxGeneration,self.generationStep)+self.initialNeutralGenerations
_,data=zip(*map(lambda x: x.split(';;'),a.strip().split('\n')))
data=np.array(map(eval,data))[:,0,:]
# if data[-1, self.siteUnderSelection] >= self.initialCarrierFreq + self.minIncrease or self.s == 0 or not self.makeSureSelectedSiteDontGetLost:
if data[-1, self.siteUnderSelection] or self.s == 0 or not self.makeSureSelectedSiteDontGetLost:
try:
self.pops+=[simulator.extract(0) ]
except:
print 'Error'
return data[int(self.startGeneration/self.generationStep):,:]
else:
# print pd.Series(data[:, self.siteUnderSelection])
return self.simualte()
def simulateH0(self):
self.H0=MSMS.Song(F=self.F, L=self.L, Ne=self.Ne, r=self.r, mu=self.mu,uid=self.uidMSMS)
def set_siteUnderSelection(self,x):
self.siteUnderSelection=x
self.posUnderSelection=self.positions[self.siteUnderSelection]
def set_posUnderSelection(self,x):
self.posUnderSelection=x
self.siteUnderSelection=np.where(self.positions==self.posUnderSelection)[0][0]
def setH0(self,H0):
self.H0=H0
self.positions=self.H0.columns.values
self.F=self.H0.shape[0]
def set_BeneficialLoci(self,selectionOnRandomSite=False,siteUnderSelection=None,posUnderSelection =None):
if selectionOnRandomSite:
self.set_siteUnderSelection(np.random.randint(0,self.H0.shape[1]))
elif siteUnderSelection is not None:
self.set_siteUnderSelection(siteUnderSelection)
elif posUnderSelection is not None:
self.set_siteUnderSelection(posUnderSelection)
else:
if not self.s:
self.set_siteUnderSelection(self.X0.argmax())
else:
sites=np.sort(np.where(self.X0== self.initialCarrierFreq)[0]);
if not len(sites):
sites=np.sort(np.where(( self.X0 <= self.initialCarrierFreq +0.025) & ( self.X0 >= self.initialCarrierFreq -0.025) ) [0]);
if not len(sites):
print 'Try again. No site at freq ',self.initialCarrierFreq, self.uid; return
self.set_siteUnderSelection(sites[np.random.randint(0,len(sites))])
def createInitHaps(self):
assignPositions=True
if self.H0 is None:
H0 = MSMS.Song(F=self.F, L=self.L, Ne=self.Ne, r=self.r, mu=self.mu, uid=self.uidMSMS,
msmsFile=self.msmsFile, dir=self.outpathmsms)
else:
H0 = self.H0
assignPositions=False
if self.foldInitialAFs:
idx = H0.mean(0) > 0.5
H0.iloc[:, idx.values] = 1 - H0.iloc[:, idx.values]
self.setH0(H0)
if assignPositions:
self.positions_msms = self.H0.columns.values.copy(True)
self.positions = sorted(np.random.choice(self.L, self.H0.shape[1], replace=False))
self.H0 = pd.DataFrame(self.H0.values, columns=self.positions)
self.X0 = self.H0.mean(0).values
def forwardSimulation(self):
"""
returns np 3D array T x nSS x R which T=|{t_1,t_2,..}| (nnumber of times), nSS is number of SS , and R is the number of replicates
"""
import numpy as np
# df = pd.DataFrame([list(i.genotype(j)) for j in range(2) for i in self.initDiploidPop.individuals()])
if self.posUnderSelection<0 and self.initDiploidPop is None:
self.createInitHaps()
self.set_BeneficialLoci()
self.initDiploidPop=self.createInitialDiploidPopulation()
elif self.initDiploidPop is None:
self.createInitHaps()
self.initDiploidPop = self.createInitialDiploidPopulation()
# self.X0=self.H0.mean().values
else:
self.X0=Simulation.getGT(self.initDiploidPop).mean().values
# df = pd.DataFrame([list(i.genotype(j)) for j in range(2) for i in self.initDiploidPop.individuals()])
# print pd.concat([df.mean(),self.H0.mean().reset_index(drop=True)],1)
self.X=np.array([self.simualte() for _ in range(self.numReplicates)]).swapaxes(0, 2).swapaxes(0, 1)
self.X=np.append(np.tile(self.X0[:,None],(1,self.X.shape[2]))[None,:,:],self.X,axis=0)
self.sampleDepths()
if self.save:
pd.to_pickle(self,self.outpath+self.uid+'.pkl')
# self.createDF()
def getGenerationTimes(self,step=None,includeZeroGeneration=True):
if step is None: step=self.generationStep
times= np.arange(0,self.maxGeneration-self.startGeneration+1,step)
if includeZeroGeneration:
return times
else:
return times[1:]
def getTrueGenerationTimes(self,step=None,includeZeroGeneration=True):
if step is None: step=self.generationStep
times= np.arange(self.startGeneration,self.maxGeneration+1,step)
if includeZeroGeneration:
return times
else:
return times[1:]
@staticmethod
def getFixationTime(s,Ne=200,roundto10=True):
if s==0: s=0.01
t=-4*int(logit(1./Ne)/s)
if roundto10:
return (t//10 +1)*10
else:
return t
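
    # Hedged worked example (not in the original): with Ne=200 and s=0.1,
    # logit(1/200) = -log(199) ~ -5.29, so t = -4*int(-5.29/0.1) = 208
    # generations, which (t//10 + 1)*10 rounds up to 210.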
@staticmethod
def sampleInitSamplingTime(s,Ne=200,phase=0,samplingWindow=50,startOfEpoch=False):
fix=Simulation.getFixationTime(s, Ne=Ne)
if phase==0: lower,upper=(0, fix-samplingWindow)
if phase==1: lower,upper=(0, fix/3-samplingWindow)
if phase==2: lower,upper=(fix/3, 2*fix/3-samplingWindow)
if phase==3: lower,upper=(2*fix/3, fix-samplingWindow)
if startOfEpoch:
rnd=lower
else:
rnd=np.random.randint(lower,max(lower,upper)+1)
return int(rnd)//10 *10
@staticmethod
def sampleStartTimesforAlls(samplingWindow=50):
S=[0.1, 0.05, 0.02, 0.01,0]
for phase in [1,2,3]:
pd.DataFrame([[Simulation.sampleInitSamplingTime(s, phase=phase, samplingWindow=samplingWindow, startOfEpoch=True) for _ in range(100)] for s in S], index=S).T.to_pickle('/home/arya/out/startSamplingTimes.phase{}.sampleWin{}.pkl'.format(phase, samplingWindow))
def setSamplingTimes(self,maxGeneration=None,numSamples=5,step=None,startGeneration=None):
GT=pd.Series(range(len(self.getTrueGenerationTimes(includeZeroGeneration=True))),index=self.getTrueGenerationTimes(includeZeroGeneration=True))
if startGeneration is not None: self.startGeneration=startGeneration
if maxGeneration is not None: self.maxGeneration = maxGeneration
if step is not None:self.generationStep=step
else: self.generationStep=(self.maxGeneration-self.startGeneration)/numSamples
i = GT.loc[self.getTrueGenerationTimes(includeZeroGeneration=True)[:self.X.shape[0]]].values
self.X = self.X[i, :, :]
self.C = self.C.apply(lambda x: x[i, :, :])
self.D = self.D.apply(lambda x: x[i, :, :])
self.X0=self.X[0,:,0]
@staticmethod
def getSamplingTimeBasedOnFreq(sim,phase,samplingWin=50):
carrier_freq=[0.1,0.5,0.9][phase-1]
a= np.where(sim.X[:,sim.siteUnderSelection,:].mean(1)>carrier_freq)[0]
ft=sim.getTrueGenerationTimes().max()
if len(a):
t= sim.getTrueGenerationTimes()[np.where(sim.X[:,sim.siteUnderSelection,:].mean(1)>carrier_freq)[0].min()]
else:
t=sim.getTrueGenerationTimes().max()
return min(t,ft-samplingWin)
@staticmethod
def Load(s=0.1, experimentID=0, nu0=0.005, numReplicates=3, step=10, ModelName='TimeSeries', samplingWindow=50,
L=50000, depthRate=30):
if not s: nu0=0.005
sim = Simulation.load(s=s, experimentID=experimentID % 100, nu0=nu0, numReplicates=numReplicates, step=step,
ExperimentName=ModelName, All=True, L=L, replicates=range(numReplicates),
coverage=depthRate)
sim.experimentID=experimentID
startGen=0
sim.setSamplingTimes(maxGeneration=min(startGen+samplingWindow,sim.getTrueGenerationTimes()[-1]),step=step,startGeneration=startGen)
sim.createDF()
return sim
def getHardSweepMutations(self):
MAF=1./self.H0.shape[0]
dups=self.H0[self.H0.duplicated()]
x0=pd.Series(self.X0, index=self.positions)
hard=[]
for _,dup in dups.iterrows():
numDup=self.H0.apply(lambda x:(x==dup).all(),axis=1).sum()
hard=np.append(hard, (dup*x0==numDup*MAF).replace({False:None}).dropna().index.values)
hard=np.sort(np.append(hard,(x0==MAF).replace({False:None}).dropna().index.values).astype(int))
return hard
@property
def df(self):
        reps=range(self.numReplicates)
        df=pd.concat([pd.DataFrame(self.X[:,:,r],columns=self.positions,index=pd.MultiIndex.from_product([[r],range(self.X.shape[0])],names=['REP','TIME'])).T for r in reps],axis=1)
        if self.numReplicates==1:
            df=df[0]
        return df
def computeCDi(self, EE, depthRate):
E = EE.loc[depthRate]
index = pd.Series(range(E.shape[0]), E.index)
C = pd.concat([pd.DataFrame(self.C.loc[depthRate][:, :, r], columns=self.H0.columns,
index=pd.MultiIndex.from_product([[r], self.getTrueGenerationTimes()],
names=['REP', 'GEN'])).T for r in
range(self.numReplicates)], axis=1)
D = pd.concat([pd.DataFrame(self.D.loc[depthRate][:, :, r], columns=self.H0.columns,
index=pd.MultiIndex.from_product([[r], self.getTrueGenerationTimes()],
names=['REP', 'GEN'])).T for r in
range(self.numReplicates)], axis=1)
self.cd = pd.concat([pd.Series(zip(C[i], D[i])) for i in C.columns], axis=1)
self.cd.columns = C.columns;
self.cd.index = C.index
self.cdi = self.cd.applymap(lambda x: index.loc[x])
def sampleDepths(self,depths = [30, 100, 300]):
self.D = pd.Series(None, index=depths)
self.C = pd.Series(None, index=depths)
for depthRate in depths:
self.D.loc[depthRate] = np.random.poisson(depthRate,
self.X.shape[0] * self.X.shape[1] * self.X.shape[2]).reshape(
self.X.shape).astype(object)
self.C.loc[depthRate] = np.array([np.random.binomial(d, x) for x, d in
zip(self.X.reshape(-1), self.D.loc[depthRate].reshape(-1))]).reshape(
self.X.shape).astype(object)
@staticmethod
def sampleDepthX(X,cov):
D= np.random.poisson(cov,X.size)
C= np.array([np.random.binomial(d, x) for x, d in zip(X, D)])
return C,D
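
    # Hedged usage sketch (not in the original): pool-seq read counts are drawn
    # with a Poisson depth per site and Binomial(depth, frequency) derived reads:
    #
    #     X = np.array([0.1, 0.5, 0.9])
    #     C, D = Simulation.sampleDepthX(X, cov=30)   # derived counts C out of depths D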
@staticmethod
def sampleDepthXSeries(X,cov):
C,D=Simulation.sampleDepthX(X.values,cov)
a=pd.DataFrame([C,D],columns=X.index,index=['C','D']).T
return a
@staticmethod
def computeCDdf(a, E):
index = pd.Series(range(E.shape[0]), E.index)
def f(x):
try:
return index.loc[x]
except:
return -1
z=a.groupby(level=[0,1],axis=1).apply(lambda x: x.apply(lambda y:(y.iloc[0],y.iloc[1]),1)).applymap(f)
return z[(z<0).sum(1)==0]
def getCD(self,coverage):
T=self.getTrueGenerationTimes()
Ti=T
if T[-1]!=self.C[coverage].shape[0]-1: Ti=range(self.C[coverage].shape[0])
C=pd.concat([pd.DataFrame(self.C[coverage][Ti,:,i],columns=self.positions,index=T).T for i in range(self.numReplicates)],1,keys=range(self.C[coverage].shape[2]))
D=pd.concat([pd.DataFrame(self.D[coverage][Ti,:,i],columns=self.positions,index=T).T for i in range(self.numReplicates)],1,keys=range(self.C[coverage].shape[2]))
CD=pd.concat([C,D],1,keys=['C','D']).reorder_levels([1,2,0],1).sort_index(1)
CD.columns.names=['REP','GEN','READ']
return CD
@staticmethod
def Recombinator(rate, loci):
"""
Recombination at loci, after variant index. Loci can take value in [0, NumSNPs-1]
Args:
rate: recombination rate
loci: index of the loci in which rec is is being performed
Returns: recombinator which is an argument of Simulation, _simulation2 and evolve. It can be list of loci
"""
if not isinstance(loci, list):
loci = [loci]
return sim.Recombinator(intensity=rate, loci=loci)
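    # Illustrative call (rate and loci values are hypothetical): recombination
    # at rate 0.01 after loci 10 and 20, suitable for passing to evolve():
    #   rec = Simulation.Recombinator(0.01, [10, 20])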
class POP:
@staticmethod
def createISOGenicDiploidPopulation(df):
"""
initHaps : np 2D array which m x nSS where m i number of individual haps and nSS is number of SS
return a homozygote diploid population which every haplotype is copied n times
"""
pop = sim.Population(size=df.shape[0], ploidy=2, loci=df.shape[1], lociPos=list(df.columns),
infoFields='fitness')
for (i, (_, h)) in zip(pop.individuals(), df.iterrows()):
i.setGenotype(h.tolist(), 0);
i.setGenotype(h.tolist(), 1)
return pop
@staticmethod
def toDF(pop):
x = pd.concat(map(pd.DataFrame, [map(list, [i.genotype(0), i.genotype(1)]) for i in pop.allIndividuals()]),
keys=range(pop.popSize()))
x.columns = list(pop.lociPos())
return x
@staticmethod
def freq(pop):
sim.stat(pop, alleleFreq=range(pop.numLoci()[0]), vars=['alleleFreq'])
return pd.Series(pd.DataFrame(pop.vars()['alleleFreq']).loc[1].reindex().values,map(int,pop.lociPos())).fillna(0)
@staticmethod
def Haplotypes(pop,counts=False,unique=True):
if isinstance(pop,sim.Population):
a=POP.toDF(pop)
else:
a=pop
H=a.reset_index(drop=True)
H.columns=map(int,H.columns)
b=H.loc[H.sum(1).sort_values().index].astype(str).apply(lambda x: ''.join(x), 1).reset_index(drop=True)
if counts:
return b.value_counts().sort_index()
else:
if unique:
b=b.drop_duplicates()
return b.loc[b.sort_values().index].reset_index(drop=True)
@staticmethod
def establish(H, ba, k=5):
N = H.shape[0]
car = H[H[ba] == 1]
n = car.shape[0]
return pd.concat([car.iloc[np.random.choice(n, k)], H.iloc[np.random.choice(N, N - k)]]).reset_index(drop=True)
class Drift:
@staticmethod
def nextGeneration(N,x):
return (np.random.random(N)<=x).mean()
@staticmethod
def sampleReads(D,x):
return [Drift.sampleReadsDerived(D,x),D]
@staticmethod
def sampleReadsDerived(D,x):
return (np.random.random(D)<=x).sum()
@staticmethod
def simulateAF(N,x,T):
Xt=[]
for i in range(1, T[-1]+1):
x=Drift.nextGeneration(N,x)
if i in T:Xt.append(x)
return Xt
@staticmethod
def simulatePoolCD(N,n,cd):
x=cd[0].C/float(cd[0].D)
D=cd.xs('D',level=1)
Xt=[]
for i in range(1, D.index[-1]+1):
x=Drift.nextGeneration(N,x)
if i in D.index:
y=Drift.nextGeneration(n,x)
Xt.append(Drift.sampleReads(D[i], y))
return pd.DataFrame([[cd[0].C,cd[0].D]]+Xt,index=D.index,columns=['C','D'])
@staticmethod
def simulatePoolDerivd(N,n,cd):
x=cd[0].C/float(cd[0].D)
D=cd.xs('D',level=1)
Xt=[]
for i in range(1, D.index[-1]+1):
x=Drift.nextGeneration(N,x)
if i in D.index:
Xt+=[Drift.sampleReadsDerived(D[i], Drift.nextGeneration(n,x))]
return [cd[0].C]+Xt
@staticmethod
def simulatePools(N,cd,M):
return pd.concat([Drift.simulatePool(N,cd) for _ in range(M)],keys=range(M))
@staticmethod
def simulateAFs(N,x,T,M):
return pd.DataFrame([Drift.simulateAF(N,x,T) for _ in range(M)],columns=T)
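# Minimal usage sketch (all values illustrative): track an allele starting at
# frequency 0.1 in a population of N=1000, sampled at generations 10, 20 and
# 30, across 5 replicate simulations.
if __name__ == '__main__':
    print(Drift.simulateAFs(N=1000, x=0.1, T=[10, 20, 30], M=5))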
| mit | 8,298,902,231,767,576,000 | 46.236597 | 304 | 0.578031 | false | 3.298527 | false | false | false |
turdusmerula/kipartman | test/TESTpluginImportCSV.py | 1 | 1083 | import sys, os
# TODO: look up the current directory.
# sys.argv[0] is <fulldirectory>\\<this filename>; in IPython it does not
# describe this filename, so use os.getcwd() instead.
# For this TEST just add both possible paths to the necessary imports.
sys.path.append(
os.path.join(os.path.split(os.path.dirname(sys.argv[0]))[0],'kipartman'))
sys.path.append(os.path.join(os.getcwd(),'kipartman'))
print(sys.path)
from plugins import plugin_loader
from plugins import import_plugins as import_plugins
# RETRIEVE the find_parts
import rest
'''
Gets a file path via popup, then imports content
'''
importers = plugin_loader.load_import_plugins()
wildcards = '|'.join([x.wildcard for x in importers])
wildcards
importers[0]
importpath=os.path.join(os.getcwd(),'','17W50TESTimportCSV.csv')
importpath
base, ext = os.path.splitext(importpath)
# ast.literal_eval is safer than eval for parsing a dict literal
import ast
thecategory = ast.literal_eval(u"{'childs': None,\n 'description': '',\n 'id': 4,\n 'name': 'Test',\n 'parent': {'id': 1},\n 'path': '/Resistor/Test'}")
# 1: For sqldb 0: for CsvImport
importItems = importers[0]().fetch(base, thecategory, rest.model)
pass | gpl-3.0 | 13,814,710,417,899,064 | 22.06383 | 140 | 0.713758 | false | 3.016713 | false | false | false |
ericholscher/django | django/views/i18n.py | 1 | 10581 | import importlib
import json
import os
import gettext as gettext_module
from django import http
from django.conf import settings
from django.template import Context, Template
from django.utils.translation import check_for_language, to_locale, get_language
from django.utils.encoding import smart_text
from django.utils.formats import get_format_modules, get_format
from django.utils._os import upath
from django.utils.http import is_safe_url
from django.utils import six
def set_language(request):
"""
Redirect to a given url while setting the chosen language in the
session or cookie. The url and the language code need to be
specified in the request parameters.
Since this view changes how the user will see the rest of the site, it must
only be accessed as a POST request. If called as a GET request, it will
redirect to the page in the request (the 'next' parameter) without changing
any state.
"""
next = request.POST.get('next', request.GET.get('next'))
if not is_safe_url(url=next, host=request.get_host()):
next = request.META.get('HTTP_REFERER')
if not is_safe_url(url=next, host=request.get_host()):
next = '/'
response = http.HttpResponseRedirect(next)
if request.method == 'POST':
lang_code = request.POST.get('language', None)
if lang_code and check_for_language(lang_code):
if hasattr(request, 'session'):
request.session['_language'] = lang_code
else:
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
return response
def get_formats():
"""
Returns all formats strings required for i18n to work
"""
FORMAT_SETTINGS = (
'DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT',
'YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT', 'SHORT_DATE_FORMAT',
'SHORT_DATETIME_FORMAT', 'FIRST_DAY_OF_WEEK', 'DECIMAL_SEPARATOR',
'THOUSAND_SEPARATOR', 'NUMBER_GROUPING',
'DATE_INPUT_FORMATS', 'TIME_INPUT_FORMATS', 'DATETIME_INPUT_FORMATS'
)
result = {}
for module in [settings] + get_format_modules(reverse=True):
for attr in FORMAT_SETTINGS:
result[attr] = get_format(attr)
formats = {}
for k, v in result.items():
if isinstance(v, (six.string_types, int)):
formats[k] = smart_text(v)
elif isinstance(v, (tuple, list)):
formats[k] = [smart_text(value) for value in v]
return formats
js_catalog_template = r"""
{% autoescape off %}
(function (globals) {
var django = globals.django || (globals.django = {});
{% if plural %}
django.pluralidx = function (n) {
var v={{ plural }};
if (typeof(v) == 'boolean') {
return v ? 1 : 0;
} else {
return v;
}
};
{% else %}
django.pluralidx = function (count) { return (count == 1) ? 0 : 1; };
{% endif %}
{% if catalog_str %}
/* gettext library */
django.catalog = {{ catalog_str }};
django.gettext = function (msgid) {
var value = django.catalog[msgid];
if (typeof(value) == 'undefined') {
return msgid;
} else {
return (typeof(value) == 'string') ? value : value[0];
}
};
django.ngettext = function (singular, plural, count) {
    var value = django.catalog[singular];
if (typeof(value) == 'undefined') {
return (count == 1) ? singular : plural;
} else {
return value[django.pluralidx(count)];
}
};
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) {
var value = django.gettext(context + '\x04' + msgid);
if (value.indexOf('\x04') != -1) {
value = msgid;
}
return value;
};
django.npgettext = function (context, singular, plural, count) {
var value = django.ngettext(context + '\x04' + singular, context + '\x04' + plural, count);
if (value.indexOf('\x04') != -1) {
value = django.ngettext(singular, plural, count);
}
return value;
};
{% else %}
/* gettext identity library */
django.gettext = function (msgid) { return msgid; };
django.ngettext = function (singular, plural, count) { return (count == 1) ? singular : plural; };
django.gettext_noop = function (msgid) { return msgid; };
django.pgettext = function (context, msgid) { return msgid; };
django.npgettext = function (context, singular, plural, count) { return (count == 1) ? singular : plural; };
{% endif %}
django.interpolate = function (fmt, obj, named) {
if (named) {
return fmt.replace(/%\(\w+\)s/g, function(match){return String(obj[match.slice(2,-2)])});
} else {
return fmt.replace(/%s/g, function(match){return String(obj.shift())});
}
};
/* formatting library */
django.formats = {{ formats_str }};
django.get_format = function (format_type) {
var value = django.formats[format_type];
if (typeof(value) == 'undefined') {
return format_type;
} else {
return value;
}
};
/* add to global namespace */
globals.pluralidx = django.pluralidx;
globals.gettext = django.gettext;
globals.ngettext = django.ngettext;
globals.gettext_noop = django.gettext_noop;
globals.pgettext = django.pgettext;
globals.npgettext = django.npgettext;
globals.interpolate = django.interpolate;
globals.get_format = django.get_format;
}(this));
{% endautoescape %}
"""
def render_javascript_catalog(catalog=None, plural=None):
template = Template(js_catalog_template)
indent = lambda s: s.replace('\n', '\n ')
context = Context({
'catalog_str': indent(json.dumps(
catalog, sort_keys=True, indent=2)) if catalog else None,
'formats_str': indent(json.dumps(
get_formats(), sort_keys=True, indent=2)),
'plural': plural,
})
return http.HttpResponse(template.render(context), 'text/javascript')
def get_javascript_catalog(locale, domain, packages):
default_locale = to_locale(settings.LANGUAGE_CODE)
packages = [p for p in packages if p == 'django.conf' or p in settings.INSTALLED_APPS]
t = {}
paths = []
en_selected = locale.startswith('en')
en_catalog_missing = True
# paths of requested packages
for package in packages:
p = importlib.import_module(package)
path = os.path.join(os.path.dirname(upath(p.__file__)), 'locale')
paths.append(path)
# add the filesystem paths listed in the LOCALE_PATHS setting
paths.extend(list(reversed(settings.LOCALE_PATHS)))
# first load all english languages files for defaults
for path in paths:
try:
catalog = gettext_module.translation(domain, path, ['en'])
t.update(catalog._catalog)
except IOError:
pass
else:
# 'en' is the selected language and at least one of the packages
# listed in `packages` has an 'en' catalog
if en_selected:
en_catalog_missing = False
# next load the settings.LANGUAGE_CODE translations if it isn't english
if default_locale != 'en':
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [default_locale])
except IOError:
catalog = None
if catalog is not None:
t.update(catalog._catalog)
# last load the currently selected language, if it isn't identical to the default.
if locale != default_locale:
# If the currently selected language is English but it doesn't have a
# translation catalog (presumably due to being the language translated
# from) then a wrong language catalog might have been loaded in the
# previous step. It needs to be discarded.
if en_selected and en_catalog_missing:
t = {}
else:
locale_t = {}
for path in paths:
try:
catalog = gettext_module.translation(domain, path, [locale])
except IOError:
catalog = None
if catalog is not None:
locale_t.update(catalog._catalog)
if locale_t:
t = locale_t
plural = None
if '' in t:
for l in t[''].split('\n'):
if l.startswith('Plural-Forms:'):
plural = l.split(':', 1)[1].strip()
if plural is not None:
# this should actually be a compiled function of a typical plural-form:
# Plural-Forms: nplurals=3; plural=n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2;
plural = [el.strip() for el in plural.split(';') if el.strip().startswith('plural=')][0].split('=', 1)[1]
pdict = {}
maxcnts = {}
catalog = {}
for k, v in t.items():
if k == '':
continue
if isinstance(k, six.string_types):
catalog[k] = v
elif isinstance(k, tuple):
msgid = k[0]
cnt = k[1]
maxcnts[msgid] = max(cnt, maxcnts.get(msgid, 0))
pdict.setdefault(msgid, {})[cnt] = v
else:
raise TypeError(k)
for k, v in pdict.items():
        catalog[k] = [v.get(i, '') for i in range(maxcnts[k] + 1)]
return catalog, plural
def null_javascript_catalog(request, domain=None, packages=None):
"""
Returns "identity" versions of the JavaScript i18n functions -- i.e.,
versions that don't actually do anything.
"""
return render_javascript_catalog()
def javascript_catalog(request, domain='djangojs', packages=None):
"""
Returns the selected language catalog as a javascript library.
Receives the list of packages to check for translations in the
packages parameter either from an infodict or as a +-delimited
string from the request. Default is 'django.conf'.
Additionally you can override the gettext domain for this view,
but usually you don't want to do that, as JavaScript messages
go to the djangojs domain. But this might be needed if you
deliver your JavaScript source from Django templates.
"""
locale = to_locale(get_language())
if request.GET and 'language' in request.GET:
if check_for_language(request.GET['language']):
locale = to_locale(request.GET['language'])
if packages is None:
packages = ['django.conf']
if isinstance(packages, six.string_types):
packages = packages.split('+')
catalog, plural = get_javascript_catalog(locale, domain, packages)
return render_javascript_catalog(catalog, plural)
| bsd-3-clause | 7,840,672,329,311,219,000 | 33.691803 | 124 | 0.617049 | false | 3.895803 | false | false | false |
Alir3z4/django-databrowse | django_databrowse/datastructures.py | 1 | 11642 | """
These classes are light wrappers around Django's database API that provide
convenience functionality and permalink functions for the databrowse app.
"""
from django.db import models
from django.utils import formats
from django.utils.text import capfirst
from django.utils.encoding import smart_text, iri_to_uri
from django.utils.safestring import mark_safe
from django.db.models.query import QuerySet
from django.core.exceptions import ObjectDoesNotExist
from django.utils.encoding import python_2_unicode_compatible
EMPTY_VALUE = '(None)'
DISPLAY_SIZE = 100
class EasyModel(object):
def __init__(self, site, model):
self.site = site
self.model = model
self.model_list = site.registry.keys()
self.verbose_name = model._meta.verbose_name
self.verbose_name_plural = model._meta.verbose_name_plural
def __repr__(self):
return '<EasyModel for %s>' % \
smart_text(self.model._meta.object_name)
def model_databrowse(self):
"Returns the ModelDatabrowse class for this model."
return self.site.registry[self.model]
def url(self):
return mark_safe('%s%s/%s/' % (self.site.root_url,
self.model._meta.app_label,
self.model._meta.model_name))
def objects(self, **kwargs):
return self.get_query_set().filter(**kwargs)
def get_query_set(self):
qs = self.model._default_manager.get_queryset()
easy_qs = EasyQuerySet(model=qs.model, query=qs.query.clone(),
using=qs._db, hints=qs._hints)
easy_qs._easymodel = self
return easy_qs
def object_by_pk(self, pk):
return EasyInstance(self, self.model._default_manager.get(pk=pk))
def sample_objects(self):
for obj in self.model._default_manager.all()[:3]:
yield EasyInstance(self, obj)
def field(self, name):
try:
f = self.model._meta.get_field(name)
except models.FieldDoesNotExist:
return None
return EasyField(self, f)
def fields(self):
return [EasyField(self, f) for f in (self.model._meta.fields +
self.model._meta.many_to_many)]
class EasyField(object):
def __init__(self, easy_model, field):
self.model, self.field = easy_model, field
def __repr__(self):
return smart_text(u'<EasyField for %s.%s>' %
(self.model.model._meta.object_name,
self.field.name))
def choices(self):
for value, label in self.field.choices:
yield EasyChoice(self.model, self, value, label)
def url(self):
if self.field.choices:
return mark_safe('%s%s/%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
self.field.name))
elif self.field.rel:
return mark_safe('%s%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name))
class EasyChoice(object):
def __init__(self, easy_model, field, value, label):
self.model, self.field = easy_model, field
self.value, self.label = value, label
def __repr__(self):
return smart_text(u'<EasyChoice for %s.%s>' %
(self.model.model._meta.object_name,
self.field.name))
def url(self):
return mark_safe('%s%s/%s/%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
self.field.field.name,
iri_to_uri(self.value)))
@python_2_unicode_compatible
class EasyInstance(object):
def __init__(self, easy_model, instance):
self.model, self.instance = easy_model, instance
def __repr__(self):
return smart_text(u'<EasyInstance for %s (%s)>' %
(self.model.model._meta.object_name,
self.instance._get_pk_val()))
def __str__(self):
val = smart_text(self.instance)
if len(val) > DISPLAY_SIZE:
return val[:DISPLAY_SIZE] + u'...'
return val
def pk(self):
return self.instance._get_pk_val()
def url(self):
return mark_safe('%s%s/%s/objects/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
iri_to_uri(self.pk())))
def fields(self):
"""
Generator that yields EasyInstanceFields for each field in this
EasyInstance's model.
"""
for f in self.model.model._meta.fields +\
self.model.model._meta.many_to_many:
yield EasyInstanceField(self.model, self, f)
def related_objects(self):
"""
Generator that yields dictionaries of all models that have this
EasyInstance's model as a ForeignKey or ManyToManyField, along with
lists of related objects.
"""
related_objects = [
f for f in self.model.model._meta.get_fields()
if (f.one_to_many or f.one_to_one)
and f.auto_created and not f.concrete
]
related_m2m = [
f for f in self.model.model._meta.get_fields(include_hidden=True)
if f.many_to_many and f.auto_created
]
for rel_object in related_objects + related_m2m:
if rel_object.model not in self.model.model_list:
continue # Skip models that aren't in the model_list
em = EasyModel(self.model.site, rel_object.related_model)
try:
rel_accessor = getattr(self.instance, rel_object.get_accessor_name())
except ObjectDoesNotExist:
continue
if rel_object.field.rel.multiple:
object_list = [EasyInstance(em, i) for i in rel_accessor.all()]
else: # for one-to-one fields
object_list = [EasyInstance(em, rel_accessor)]
yield {
'model': em,
'related_field': rel_object.field.verbose_name,
'object_list': object_list,
}
class EasyInstanceField(object):
def __init__(self, easy_model, instance, field):
self.model, self.field, self.instance = easy_model, field, instance
self.raw_value = getattr(instance.instance, field.name)
def __repr__(self):
return smart_text(u'<EasyInstanceField for %s.%s>' %
(self.model.model._meta.object_name,
self.field.name))
def values(self):
"""
Returns a list of values for this field for this instance. It's a list
        so we can accommodate many-to-many fields.
"""
# This import is deliberately inside the function because it causes
# some settings to be imported, and we don't want to do that at the
# module level.
if self.field.rel:
if isinstance(self.field.rel, models.ManyToOneRel):
objs = getattr(self.instance.instance, self.field.name)
elif isinstance(self.field.rel,
models.ManyToManyRel): # ManyToManyRel
return list(getattr(self.instance.instance,
self.field.name).all())
elif self.field.choices:
objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)
elif isinstance(self.field, models.DateField) or \
isinstance(self.field, models.TimeField):
if self.raw_value:
if isinstance(self.field, models.DateTimeField):
objs = capfirst(formats.date_format(self.raw_value,
'DATETIME_FORMAT'))
elif isinstance(self.field, models.TimeField):
objs = capfirst(formats.time_format(self.raw_value,
'TIME_FORMAT'))
else:
objs = capfirst(formats.date_format(self.raw_value,
'DATE_FORMAT'))
else:
objs = EMPTY_VALUE
elif isinstance(self.field, models.BooleanField) or \
isinstance(self.field, models.NullBooleanField):
objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value]
else:
objs = self.raw_value
return [objs]
def urls(self):
"Returns a list of (value, URL) tuples."
# First, check the urls() method for each plugin.
plugin_urls = []
for plugin_name, plugin in \
self.model.model_databrowse().plugins.items():
urls = plugin.urls(plugin_name, self)
if urls is not None:
                return zip(self.values(), urls)
if self.field.rel:
m = EasyModel(self.model.site, self.field.rel.to)
if self.field.rel.to in self.model.model_list:
lst = []
for value in self.values():
if value is None:
continue
url = mark_safe('%s%s/%s/objects/%s/' %
(self.model.site.root_url,
m.model._meta.app_label,
m.model._meta.model_name,
iri_to_uri(value._get_pk_val())))
lst.append((smart_text(value), url))
else:
lst = [(value, None) for value in self.values()]
elif self.field.choices:
lst = []
for value in self.values():
url = mark_safe('%s%s/%s/fields/%s/%s/' %
(self.model.site.root_url,
self.model.model._meta.app_label,
self.model.model._meta.model_name,
self.field.name,
iri_to_uri(self.raw_value)))
lst.append((value, url))
elif isinstance(self.field, models.URLField):
val = self.values()[0]
lst = [(val, iri_to_uri(val))]
else:
lst = [(self.values()[0], None)]
return lst
class EasyQuerySet(QuerySet):
"""
When creating (or cloning to) an `EasyQuerySet`, make sure to set the
`_easymodel` variable to the related `EasyModel`.
"""
def iterator(self, *args, **kwargs):
for obj in super(EasyQuerySet, self).iterator(*args, **kwargs):
yield EasyInstance(self._easymodel, obj)
def _clone(self, *args, **kwargs):
c = super(EasyQuerySet, self)._clone(*args, **kwargs)
c._easymodel = self._easymodel
return c
| bsd-3-clause | 8,765,281,568,235,199,000 | 39.423611 | 85 | 0.519155 | false | 4.288029 | false | false | false |
SelvorWhim/competitive | Codewars/LinkedListsAlternatingSplit.py | 1 | 1195 | # this solution preserves original list structure, but new nodes shallow copy old data, so if the data is a reference type, changing it in one list will affect one of the others
class Node(object):
def __init__(self, data=None):
self.data = data
self.next = None
# shallow copy of the data, no copy of next
def clone(self):
return Node(self.data)
class Context(object):
def __init__(self, first, second):
self.first = first
self.second = second
def alternating_split(head):
if head == None or head.next == None: # fewer than 2 Nodes in the list
#return Context(head, None) # that made sense to me but examples say raise an error
raise ValueError()
ret = Context(head.clone(), head.next.clone())
main_it = head.next.next
ret_its = [ret.first, ret.second]
i = 2 # or 0, or work with booleans, all I need here is parity. But this way, solution is easily generalized to alternating split between 3 or more lists
while main_it != None:
ret_its[i % 2].next = main_it.clone()
ret_its[i % 2] = ret_its[i % 2].next
main_it = main_it.next
i += 1
return ret
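# Hypothetical driver (not part of the kata): split 1->2->3->4 into
# 1->3 (first) and 2->4 (second).
if __name__ == '__main__':
    head = Node(1)
    head.next = Node(2)
    head.next.next = Node(3)
    head.next.next.next = Node(4)
    ctx = alternating_split(head)
    node = ctx.first
    while node is not None:
        print(node.data)  # prints 1, then 3
        node = node.next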
| unlicense | 2,663,904,410,082,395,600 | 38.833333 | 177 | 0.632636 | false | 3.734375 | false | false | false |
OpusVL/odoo | openerp/cli/scaffold.py | 1 | 4096 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import re
import sys
import jinja2
from . import Command
from openerp.modules.module import (get_module_root, MANIFEST, load_information_from_description_file as load_manifest)
class Scaffold(Command):
""" Generates an Odoo module skeleton. """
def run(self, cmdargs):
# TODO: bash completion file
parser = argparse.ArgumentParser(
prog="%s scaffold" % sys.argv[0].split(os.path.sep)[-1],
description=self.__doc__,
epilog=self.epilog(),
)
parser.add_argument(
'-t', '--template', type=template, default=template('default'),
help="Use a custom module template, can be a template name or the"
" path to a module template (default: %(default)s)")
parser.add_argument('name', help="Name of the module to create")
parser.add_argument(
'dest', default='.', nargs='?',
help="Directory to create the module in (default: %(default)s)")
if not cmdargs:
sys.exit(parser.print_help())
args = parser.parse_args(args=cmdargs)
args.template.render_to(
snake(args.name),
directory(args.dest, create=True),
{'name': args.name})
def epilog(self):
return "Built-in templates available are: %s" % ', '.join(
d for d in os.listdir(builtins())
if d != 'base'
)
builtins = lambda *args: os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'templates',
*args)
def snake(s):
""" snake cases ``s``
:param str s:
:return: str
"""
# insert a space before each uppercase character preceded by a
# non-uppercase letter
s = re.sub(r'(?<=[^A-Z])\B([A-Z])', r' \1', s)
# lowercase everything, split on whitespace and join
return '_'.join(s.lower().split())
def pascal(s):
return ''.join(
ss.capitalize()
        for ss in re.sub(r'[_\s]+', ' ', s).split()
)
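# Illustrative round-trip: snake('MyModule') == 'my_module' and
# pascal('my_module') == 'MyModule'.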
def directory(p, create=False):
expanded = os.path.abspath(
os.path.expanduser(
os.path.expandvars(p)))
if create and not os.path.exists(expanded):
os.makedirs(expanded)
if not os.path.isdir(expanded):
die("%s is not a directory" % p)
return expanded
env = jinja2.Environment()
env.filters['snake'] = snake
env.filters['pascal'] = pascal
class template(object):
def __init__(self, identifier):
# TODO: directories, archives (zipfile, tarfile)
self.id = identifier
if not os.path.isdir(self.path):
die("{} is not a valid module template".format(identifier))
def __str__(self):
return self.id
@property
def path(self):
return builtins(self.id)
def files(self):
""" Lists the (local) path and content of all files in the template
"""
for root, _, files in os.walk(self.path):
for f in files:
path = os.path.join(root, f)
yield path, open(path, 'rb').read()
def render_to(self, modname, directory, params=None):
""" Render this module template to ``dest`` with the provided
rendering parameters
"""
# overwrite with local
for path, content in self.files():
_, ext = os.path.splitext(path)
local = os.path.relpath(path, self.path)
dest = os.path.join(directory, modname, local)
destdir = os.path.dirname(dest)
if not os.path.exists(destdir):
os.makedirs(destdir)
with open(dest, 'wb') as f:
if ext not in ('.py', '.xml', '.csv', '.js'):
f.write(content)
else:
env.from_string(content)\
.stream(params or {})\
.dump(f, encoding='utf-8')
def die(message, code=1):
print >>sys.stderr, message
sys.exit(code)
def warn(message):
# ASK: shall we use logger ?
print "WARNING: " + message
| agpl-3.0 | 5,110,078,230,369,223,000 | 29.567164 | 119 | 0.561768 | false | 3.878788 | false | false | false |
tensorflow/lingvo | lingvo/tasks/asr/tools/simple_wer.py | 1 | 9729 | # Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stand-alone script to evalute the word error rate (WER) for ASR tasks.
THIS SCRIPT IS NO LONGER SUPPORTED. PLEASE USE simple_wer_v2.py INSTEAD.
Tensorflow and Lingvo are not required to run this script.
Example of Usage::
python simple_wer.py file_hypothesis file_reference
python simple_wer.py file_hypothesis file_reference diagnosis_html
where `file_hypothesis` is the file name for hypothesis text and
`file_reference` is the file name for reference text.
`diagnosis_html` (optional) is the html filename to diagnose the errors.
Or you can use this file as a library, and call either of the following:
- ``ComputeWER(hyp, ref)`` compute WER for one pair of hypothesis/reference
- ``AverageWERs(hyps, refs)`` average WER for a list of hypotheses/references
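For example (strings are illustrative)::

  errs, nref, _ = ComputeWER('hello world', 'hello brave world')
  # errs == {'sub': 0, 'ins': 0, 'del': 1}, nref == 3  ->  WER = 33.33%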
Note to evaluate the ASR, we consider the following pre-processing:
- change transcripts to lower-case
- remove punctuation: ``" , . ! ? ( ) [ ]``
- remove extra empty spaces
"""
import re
import sys
def ComputeEditDistanceMatrix(hs, rs):
"""Compute edit distance between two list of strings.
Args:
hs: the list of words in the hypothesis sentence
rs: the list of words in the reference sentence
Returns:
Edit distance matrix (in the format of list of lists), where the first
index is the reference and the second index is the hypothesis.
"""
dr, dh = len(rs) + 1, len(hs) + 1
dists = [[]] * dr
# Initialization.
for i in range(dr):
dists[i] = [0] * dh
for j in range(dh):
if i == 0:
dists[0][j] = j
elif j == 0:
dists[i][0] = i
# Do dynamic programming.
for i in range(1, dr):
for j in range(1, dh):
if rs[i - 1] == hs[j - 1]:
dists[i][j] = dists[i - 1][j - 1]
else:
tmp0 = dists[i - 1][j - 1] + 1
tmp1 = dists[i][j - 1] + 1
tmp2 = dists[i - 1][j] + 1
dists[i][j] = min(tmp0, tmp1, tmp2)
return dists
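# Tiny worked example (illustrative): ComputeEditDistanceMatrix(['a', 'b'],
# ['a', 'c'])[-1][-1] == 1, i.e. one substitution turns the hypothesis
# ['a', 'b'] into the reference ['a', 'c'].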
def PreprocessTxtBeforeWER(txt):
"""Preprocess text before WER caculation."""
# Lowercase, remove \t and new line.
txt = re.sub(r'[\t\n]', ' ', txt.lower())
# Remove punctuation before space.
txt = re.sub(r'[,.\?!]+ ', ' ', txt)
# Remove punctuation before end.
txt = re.sub(r'[,.\?!]+$', ' ', txt)
# Remove punctuation after space.
txt = re.sub(r' [,.\?!]+', ' ', txt)
# Remove quotes, [, ], ( and ).
txt = re.sub(r'["\(\)\[\]]', '', txt)
# Remove extra space.
txt = re.sub(' +', ' ', txt.strip())
return txt
def _GenerateAlignedHtml(hyp, ref, err_type):
"""Generate a html element to highlight the difference between hyp and ref.
Args:
hyp: Hypothesis string.
ref: Reference string.
err_type: one of 'none', 'sub', 'del', 'ins'.
Returns:
a html string where disagreements are highlighted.
- hyp highlighted in green, and marked with <del> </del>
- ref highlighted in yellow
"""
highlighted_html = ''
if err_type == 'none':
highlighted_html += '%s ' % hyp
elif err_type == 'sub':
highlighted_html += """<span style="background-color: greenyellow">
<del>%s</del></span><span style="background-color: yellow">
%s </span> """ % (hyp, ref)
elif err_type == 'del':
highlighted_html += """<span style="background-color: yellow">
%s</span> """ % (
ref)
elif err_type == 'ins':
highlighted_html += """<span style="background-color: greenyellow">
<del>%s</del> </span> """ % (
hyp)
else:
raise ValueError('unknown err_type ' + err_type)
return highlighted_html
def GenerateSummaryFromErrs(nref, errs):
"""Generate strings to summarize word errors.
Args:
nref: integer of total words in references
errs: dict of three types of errors. e.g. {'sub':10, 'ins': 15, 'del': 3}
Returns:
Two strings:
- string summarizing total error, total word, WER,
- string breaking down three errors: deleting, insertion, substitute
"""
total_error = sum(errs.values())
str_sum = 'total error = %d, total word = %d, wer = %.2f%%' % (
total_error, nref, total_error * 100.0 / nref)
str_details = 'Error breakdown: del = %.2f%%, ins=%.2f%%, sub=%.2f%%' % (
errs['del'] * 100.0 / nref, errs['ins'] * 100.0 / nref,
errs['sub'] * 100.0 / nref)
return str_sum, str_details
def ComputeWER(hyp, ref, diagnosis=False):
"""Computes WER for ASR by ignoring diff of punctuation, space, captions.
Args:
hyp: Hypothesis string.
ref: Reference string.
diagnosis (optional): whether to generate diagnosis str (in html format)
Returns:
A tuple of 3 elements:
- dict of three types of errors. e.g. ``{'sub':0, 'ins': 0, 'del': 0}``
- num of reference words, integer
- aligned html string for diagnois (empty if diagnosis = False)
"""
hyp = PreprocessTxtBeforeWER(hyp)
ref = PreprocessTxtBeforeWER(ref)
# Compute edit distance.
hs = hyp.split()
rs = ref.split()
distmat = ComputeEditDistanceMatrix(hs, rs)
# Back trace, to distinguish different errors: insert, deletion, substitution.
ih, ir = len(hs), len(rs)
errs = {'sub': 0, 'ins': 0, 'del': 0}
aligned_html = ''
while ih > 0 or ir > 0:
err_type = ''
# Distinguish error type by back tracking
if ir == 0:
err_type = 'ins'
elif ih == 0:
err_type = 'del'
else:
if hs[ih - 1] == rs[ir - 1]: # correct
err_type = 'none'
elif distmat[ir][ih] == distmat[ir - 1][ih - 1] + 1: # substitute
err_type = 'sub'
elif distmat[ir][ih] == distmat[ir - 1][ih] + 1: # deletion
err_type = 'del'
elif distmat[ir][ih] == distmat[ir][ih - 1] + 1: # insert
err_type = 'ins'
else:
raise ValueError('fail to parse edit distance matrix')
# Generate aligned_html
if diagnosis:
if ih == 0 or not hs:
tmph = ' '
else:
tmph = hs[ih - 1]
if ir == 0 or not rs:
tmpr = ' '
else:
tmpr = rs[ir - 1]
aligned_html = _GenerateAlignedHtml(tmph, tmpr, err_type) + aligned_html
# If no error, go to previous ref and hyp.
if err_type == 'none':
ih, ir = ih - 1, ir - 1
continue
# Update error.
errs[err_type] += 1
# Adjust position of ref and hyp.
if err_type == 'del':
ir = ir - 1
elif err_type == 'ins':
ih = ih - 1
else: # err_type == 'sub'
ih, ir = ih - 1, ir - 1
assert distmat[-1][-1] == sum(errs.values())
# Num of words. For empty ref we set num = 1.
nref = max(len(rs), 1)
return errs, nref, aligned_html
def AverageWERs(hyps, refs, verbose=True, diagnosis=False):
"""Computes average WER from a list of references/hypotheses.
Args:
hyps: list of hypothesis strings.
refs: list of reference strings.
verbose: optional (default True)
diagnosis (optional): whether to generate list of diagnosis html
Returns:
A tuple of 3 elements:
- dict of three types of errors. e.g. ``{'sub':0, 'ins': 0, 'del': 0}``
- num of reference words, integer
- list of aligned html string for diagnosis (empty if diagnosis = False)
"""
totalw = 0
total_errs = {'sub': 0, 'ins': 0, 'del': 0}
aligned_html_list = []
for hyp, ref in zip(hyps, refs):
errs_i, nref_i, diag_str = ComputeWER(hyp, ref, diagnosis)
if diagnosis:
aligned_html_list += [diag_str]
totalw += nref_i
total_errs['sub'] += errs_i['sub']
total_errs['ins'] += errs_i['ins']
total_errs['del'] += errs_i['del']
if verbose:
str_summary, str_details = GenerateSummaryFromErrs(totalw, total_errs)
print(str_summary)
print(str_details)
return total_errs, totalw, aligned_html_list
def main(argv):
hyp = open(argv[1], 'r').read()
ref = open(argv[2], 'r').read()
if len(argv) == 4:
diagnosis = True
fn_output = argv[3]
else:
diagnosis = False
fn_output = None
errs, nref, aligned_html = ComputeWER(hyp, ref, diagnosis)
str_summary, str_details = GenerateSummaryFromErrs(nref, errs)
print(str_summary)
print(str_details)
if fn_output:
with open(fn_output, 'wt') as fp:
fp.write('<body><html>')
fp.write('<div>%s</div>' % aligned_html)
fp.write('</body></html>')
if __name__ == '__main__':
print('THIS SCRIPT IS NO LONGER SUPPORTED.'
'PLEASE USE simple_wer_v2.py INSTEAD.')
if len(sys.argv) < 3 or len(sys.argv) > 4:
print("""
Example of Usage:
python simple_wer.py file_hypothesis file_reference
or
python simple_wer.py file_hypothesis file_reference diagnosis_html
where file_hypothesis is the file name for hypothesis text
file_reference is the file name for reference text.
diagnosis_html (optional) is the html filename to diagnose the errors.
Or you can use this file as a library, and call either of the following
- ComputeWER(hyp, ref) to compute WER for one pair of hypothesis/reference
- AverageWERs(hyps, refs) to average WER for a list of hypotheses/references
""")
sys.exit(1)
main(sys.argv)
| apache-2.0 | -7,434,293,313,371,145,000 | 27.614706 | 80 | 0.617638 | false | 3.308058 | false | false | false |
ActiveState/code | recipes/Python/576696_OrderedSet_with_Weakrefs/recipe-576696.py | 1 | 2863 | import collections
from weakref import proxy
class Link(object):
__slots__ = 'prev', 'next', 'key', '__weakref__'
class OrderedSet(collections.MutableSet):
    'Set that remembers the order elements were added'
# Big-O running times for all methods are the same as for regular sets.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The prev/next links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedSet.
def __init__(self, iterable=None):
self.__root = root = Link() # sentinel node for doubly linked list
root.prev = root.next = root
self.__map = {} # key --> link
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.__map)
def __contains__(self, key):
return key in self.__map
def add(self, key):
# Store new key in a new link at the end of the linked list
if key not in self.__map:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = root.prev = proxy(link)
def discard(self, key):
# Remove an existing item using self.__map to find the link which is
# then removed by updating the links in the predecessor and successors.
if key in self.__map:
link = self.__map.pop(key)
link.prev.next = link.next
link.next.prev = link.prev
def __iter__(self):
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = next(reversed(self)) if last else next(iter(self))
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return not self.isdisjoint(other)
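if __name__ == '__main__':
    # Brief demonstration (illustrative): insertion order is preserved across
    # the set operations inherited from collections.MutableSet.
    s = OrderedSet('abracadabra')
    t = OrderedSet('simsalabim')
    print(s | t)   # OrderedSet(['a', 'b', 'r', 'c', 'd', 's', 'i', 'm', 'l'])
    print(s & t)   # OrderedSet(['a', 'b'])
    print(s - t)   # OrderedSet(['r', 'c', 'd'])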
| mit | -5,884,483,915,631,324,000 | 35.705128 | 87 | 0.575969 | false | 4.254086 | false | false | false |
nvbn/django-discover-jenkins | tests/tests/test_runner.py | 1 | 2207 | from mock import MagicMock, patch
from django.test import TestCase
from discover_jenkins import runner, tasks
class FakeTestRunner(object):
"""
A fake object to stub out the base methods that the mixin's super() calls
require.
"""
def setup_test_environment(self):
pass
def teardown_test_environment(self):
pass
class Runner(runner.CIRunner, FakeTestRunner):
"""CIRunner is a mixin, so use the FakeTestRunner as a base"""
pass
class TestCIRunner(TestCase):
def test_get_tasks(self):
"""
Make sure the correct tasks are imported based on the
test_project.settings.
"""
self.assertEqual(runner.get_tasks(),
[tasks.with_coverage.CoverageTask,
tasks.run_pylint.PyLintTask,
tasks.run_jshint.JSHintTask,
tasks.run_sloccount.SlocCountTask])
def test_get_task_options(self):
"""
For now, just do a simple test to make sure the right number of options
are gleaned from the tasks.
"""
self.assertEqual(len(runner.get_task_options()), 14)
def test_setup_test_environment(self):
"""
Make sure the setup_test_environment method on a task is triggered by
the runner.
"""
mock_task = MagicMock()
with patch.object(Runner, '__init__') as mock_init:
mock_init.return_value = None
cirun = Runner()
cirun.jenkins = True
cirun.tasks = [mock_task]
cirun.setup_test_environment()
self.assertTrue(mock_task.setup_test_environment.called)
def test_teardown_test_environment(self):
"""
        Make sure the teardown_test_environment method on a task is triggered by
the runner.
"""
mock_task = MagicMock()
with patch.object(Runner, '__init__') as mock_init:
mock_init.return_value = None
cirun = Runner()
cirun.jenkins = True
cirun.tasks = [mock_task]
cirun.teardown_test_environment()
self.assertTrue(mock_task.teardown_test_environment.called)
| bsd-3-clause | 4,994,888,553,036,383,000 | 28.426667 | 79 | 0.593113 | false | 4.20381 | true | false | false |
will-hart/blitz | blitz/data/__init__.py | 1 | 8432 | import sys
__author__ = 'Will Hart'
from collections import OrderedDict
class DataContainer(object):
"""
A class for saving and managing data that can be used in the interface. It
also provides an interface for adding DataTransform objects which can be used
to apply filters (i.e. moving average, multiplication, etc) to the data
:param persistent: Indicates if all data is kept, (True) or only 200 values for each series (False, default)
"""
MAX_VALUES = 50
def __init__(self, persistent=False):
self.__series = OrderedDict()
self.__series_names = {}
self.number_of_series = 0
self.x = []
self.y = []
self.__transforms = []
self.x_transformed = []
self.y_transformed = []
self.__persistent = persistent
def clear_data(self):
"""
Clears all data from the data DataContainer
:returns: Nothing
"""
self.__series = OrderedDict()
self.x = []
self.y = []
self.__series_names = {}
self.number_of_series = 0
self.__transforms = []
self.x_transformed = []
self.y_transformed = []
def push(self, series_id, series_name, x, y):
"""
Adds the passed X and Y values to the given series. If the series has not been
registered with the DataContainer it registers it
:param series_id: The ID of the series
:param series_name: The human readable name of the series
:param x: the list of x-values to add
:param y: the list of y-values to add
        :raises ValueError: if the x and y lists are of different lengths
:returns bool: True if the series was created, false if data was appended
"""
if len(x) != len(y):
raise ValueError("X and Y lists must have the same number of elements")
created = False
if series_id not in self.__series.keys():
self.__series[series_id] = self.number_of_series
self.__series_names[series_id] = series_name
self.x.append([])
self.y.append([])
self.number_of_series += 1
created = True
idx = self.__series[str(series_id)]
self.x[idx] += x
self.y[idx] += y
if not self.__persistent:
self.x[idx] = self.x[idx][-self.MAX_VALUES:]
self.y[idx] = self.y[idx][-self.MAX_VALUES:]
return created
def get_name(self, series_id):
"""
Returns the name of a series in the DataContainer with the given series ID
:param series_id: the series name to return
:returns: The name of the series if it is in the Container, otherwise the series ID
"""
return self.__series_names[series_id].replace("_", " ").title() \
if series_id in self.__series_names.keys() else series_id
def all_series(self):
"""
A generator which yields the series x, y values
:returns: generated [x, y] value lists
"""
for key in self.__series.keys():
idx = self.__series[key]
yield [key, self.x[idx], self.y[idx]]
def get_latest(self, named=False):
"""
        Gets the latest readings for each variable type and returns them as a list of (variable name, value) pairs
:param named: If False (default), the variables will be indexed by variable name, otherwise by series name
:returns: A list of tuples. Each tuple is in the form `(variable_name, value)`
"""
result = []
for k in self.__series.keys():
val = self.y[self.__series[k]][-1]
if named:
k = self.get_name(k)
result.append((k, val))
return result
def get_x(self, series_id):
"""
        Gets a list of x-values for a specified series_id
:param series_id: the string name of the series to retrieve
:returns: a list of x values if the key is found, an empty list otherwise
"""
try:
idx = self.__series[str(series_id)]
except KeyError:
return []
return self.x[idx]
def get_y(self, series_id):
"""
        Gets a list of y-values for a specified series_id
:param series_id: the string name of the series to retrieve
:returns: a list of y values if the key is found, an empty list otherwise
"""
try:
idx = self.__series[str(series_id)]
except KeyError:
return []
return self.y[idx]
def get_series(self, series_id):
"""
Gets a single series and returns a list of [x,y] values
:param series_id: The name of the series to return
:returns: A list of [x,y] values for the given series, or empty lists if the series doesn't exist
"""
if series_id not in self.__series.keys():
return [[], []]
else:
idx = self.__series[series_id]
return [self.x[idx], self.y[idx]]
def get_transformed_series(self, series_id):
"""
Gets a single series and returns a list of [x,y] values from the transformed data
:param series_id: The name of the series to return
:returns: A list of [x,y] values for the given series, or empty lists if the series doesn't exist
"""
if series_id not in self.__series.keys() or not self.x_transformed:
return [[], []]
else:
idx = self.__series[series_id]
return [self.x_transformed[idx], self.y_transformed[idx]]
def get_series_index(self, series_id):
"""
Gets the index for a given series, or returns None if the series is not found
:param series_id: The name of the series to find the index for
:returns: An integer representing the 0 based index of this series name in the series dictionary
"""
try:
return self.__series[series_id]
except KeyError:
return None
def has_series(self, series_id):
"""
Checks is the given series name is registered in the DataContainer
:param series_id: The name of the series to check (will be converted to string)
:returns: True if the series exists, false otherwise
"""
return str(series_id) in self.__series.keys()
def get_series_names(self):
"""
Returns a list of series names that are registered in this DataContainer
:returns: A list of string series names registered to this DataContainer
"""
return self.__series.keys()
def add_transform(self, transform):
"""
Adds a data transform to the DataContainer
"""
if not isinstance(transform, BaseDataTransform):
raise ValueError("Attempted to add a data transformation class which doesn't derive from BaseDataTransform")
self.__transforms.append(transform)
def apply_transforms(self):
"""
Applies the transformation chain
"""
self.x_transformed = [data[:] for data in self.x]
self.y_transformed = [data[:] for data in self.y]
for transform in self.__transforms:
transform.apply(self)
def get_transforms(self):
"""
Gets all the current transforms applied
:returns: A list of BaseDataTransform classes
"""
return self.__transforms
def empty(self):
"""
Checks if a DataContainer is empty. An empty data container has no
data series. A container with data series but no data values is NOT empty
:returns: True if there are no data series, False otherwise
"""
return len(self.__series.keys()) == 0
class BaseDataTransform(object):
"""
A base class which must be inherited by DataTransform classes.
"""
def apply(self, container):
"""
Takes a DataContainer object and applies a transformation to the X and Y data in the
DataContainer. This is a base class which should be inherited from.
.. warning::
If no `apply` method is provided on the derived class then a `NotImplementedError` will be thrown
:raises: NotImplementedError
"""
raise NotImplementedError("BaseDataTransform.apply should be overridden by derived instances")
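# A minimal sketch of a concrete transform (the class name and scale factor
# are illustrative, not part of the blitz API): it multiplies every y value
# in the transformed series by a constant.
class ScaleTransform(BaseDataTransform):
    def __init__(self, factor=2.0):
        self.factor = factor

    def apply(self, container):
        # apply_transforms() copies x/y into *_transformed before calling this
        container.y_transformed = [[value * self.factor for value in series]
                                   for series in container.y_transformed]

# Typical flow (commented out to keep this module import-safe):
#   container = DataContainer()
#   container.push('t1', 'temp_one', [0, 1], [5.0, 6.0])
#   container.add_transform(ScaleTransform(2.0))
#   container.apply_transforms()
#   container.get_transformed_series('t1')  # -> [[0, 1], [10.0, 12.0]]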
| agpl-3.0 | -6,738,503,005,614,650,000 | 32.066667 | 120 | 0.591437 | false | 4.355372 | false | false | false |
cldavid/aquacc | temp_client.py | 1 | 1340 | #!/usr/bin/python
import socket
import re
import subprocess
sensors_db = {
"2857993450082": "in_temp",
"2866BC3C5006E": "out_temp"
}
ALERT_TEMP_IN_MIN = 27.5
ALERT_TEMP_IN_MAX = 29.5
print "Sensor DB"
for i in sensors_db:
print i, sensors_db[i]
print
s = socket.socket()
host = "localhost"
port = 5000
r = re.compile("^Epoch-Time:\s+(\d+)\s+Sensor:\s+(\w+),(\d+\.\d+),(\w+),(\d+\.\d+).*$")
s.connect((host, port))
f = s.makefile()
for i in range(0, 100):
    f.write("a")
f.write("\n")
f.flush()
while 1:
data = f.readline()
m = r.match(data)
if m :
epochTime = m.group(1)
sensorName1 = sensors_db[m.group(2)]
sensorValue1 = float(m.group(3))
sensorName2 = sensors_db[m.group(4)]
sensorValue2 = float(m.group(5))
sensor = { sensorName1: sensorValue1, sensorName2: sensorValue2 }
rrdString = "/usr/bin/rrdtool update /www/multirPItemp.rrd --template " + sensorName1 + ":" + sensorName2 + " -- " + str(epochTime) + ":" + str(sensorValue1) + ":" + str(sensorValue2)
print rrdString
subprocess.call(rrdString, shell=True)
if ((ALERT_TEMP_IN_MIN > sensor["in_temp"]) or (sensor["in_temp"] >= ALERT_TEMP_IN_MAX)) :
ifttt = "/usr/local/sbin/sendIFTTTmsg.sh new_temperature_event " + str(sensor["in_temp"])
print ifttt
subprocess.call(ifttt, shell=True)
s.close()
| gpl-3.0 | 3,318,766,553,013,326,300 | 24.283019 | 185 | 0.627612 | false | 2.528302 | false | false | false |
DG-i/openshift-ansible | roles/lib_openshift/library/oc_serviceaccount_secret.py | 1 | 58441 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/serviceaccount_secret -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_serviceaccount_secret
short_description: Module to manage openshift service account secrets
description:
- Manage openshift service account secrets programmatically.
options:
state:
description:
- If present, the service account will be linked with the secret if it is not already. If absent, the service account will be unlinked from the secret if it is already linked. If list, information about the service account secrets will be gathered and returned as part of the Ansible call results.
required: false
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: false
aliases: []
service_account:
description:
- Name of the service account.
required: true
default: None
aliases: []
namespace:
description:
- Namespace of the service account and secret.
required: true
default: None
aliases: []
secret:
description:
- The secret that should be linked to the service account.
required: false
default: None
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: get secrets of a service account
oc_serviceaccount_secret:
state: list
service_account: builder
namespace: default
register: sasecretout
- name: Link a service account to a specific secret
oc_serviceaccount_secret:
service_account: builder
secret: mynewsecret
namespace: default
register: sasecretout
'''
# -*- -*- -*- End included fragment: doc/serviceaccount_secret -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z{}/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add (or update) an item in a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
            key = a.b
            item = c
        '''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
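    # Worked example (illustrative): with data = {'a': {'b': ['c', 'd']}},
    # Yedit.get_entry(data, 'a.b[1]') returns 'd'.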
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
        # Only update the root path (entire document) when it's a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
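    # Illustrative sketch (not in the original source) of the `edits` list
    # consumed by process_edits; keys other than 'key' and 'value' are
    # optional and select the branches above:
    #
    #   edits = [
    #       {'key': 'spec.replicas', 'value': '3'},
    #       {'key': 'metadata.labels.env', 'value': 'prod', 'action': 'append'},
    #       {'key': 'spec.containers', 'value': {'name': 'app'},
    #        'action': 'update', 'index': 0},
    #   ]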
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
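# Illustrative usage sketch for the Yedit class above (not part of the
# original fragment; the file name is hypothetical and an empty dict is
# assumed as the starting yaml_dict when the file does not yet exist).
def _yedit_usage_example():
    '''Round-trip a key through a Yedit-managed YAML file.'''
    yed = Yedit(filename='/tmp/example.yml', backup=False)
    yed.load()                                 # None for a missing file
    yed.put('metadata.labels.env', 'prod')     # -> (changed, resulting dict)
    yed.write()                                # persists via the atomic _write
    return yed.get('metadata.labels.env')      # -> 'prod'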
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']: # E501
cmds.extend(['-n', self.namespace])
rval = {}
results = ''
err = None
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"results": results,
"cmd": ' '.join(cmds)}
if returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
err = verr.args
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {}})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
        # By default "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
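    # Illustrative sketch (not in the original source): with the default skip
    # list, autogenerated sections are ignored in the comparison.
    #
    #   user = {'spec': {'replicas': 2}}
    #   live = {'spec': {'replicas': 2}, 'status': {}, 'metadata': {}}
    #   Utils.check_def_equal(user, live)   # -> True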
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/serviceaccount.py -*- -*- -*-
class ServiceAccountConfig(object):
'''Service account config class
This class stores the options and returns a default service account
'''
# pylint: disable=too-many-arguments
def __init__(self, sname, namespace, kubeconfig, secrets=None, image_pull_secrets=None):
self.name = sname
self.kubeconfig = kubeconfig
self.namespace = namespace
self.secrets = secrets or []
self.image_pull_secrets = image_pull_secrets or []
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiate a properly structured volume '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'ServiceAccount'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['secrets'] = []
if self.secrets:
for sec in self.secrets:
self.data['secrets'].append({"name": sec})
self.data['imagePullSecrets'] = []
if self.image_pull_secrets:
for sec in self.image_pull_secrets:
self.data['imagePullSecrets'].append({"name": sec})
class ServiceAccount(Yedit):
''' Class to wrap the oc command line tools '''
image_pull_secrets_path = "imagePullSecrets"
secrets_path = "secrets"
def __init__(self, content):
'''ServiceAccount constructor'''
super(ServiceAccount, self).__init__(content=content)
self._secrets = None
self._image_pull_secrets = None
@property
def image_pull_secrets(self):
''' property for image_pull_secrets '''
if self._image_pull_secrets is None:
self._image_pull_secrets = self.get(ServiceAccount.image_pull_secrets_path) or []
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, secrets):
''' property for secrets '''
self._image_pull_secrets = secrets
@property
def secrets(self):
''' property for secrets '''
if not self._secrets:
self._secrets = self.get(ServiceAccount.secrets_path) or []
return self._secrets
@secrets.setter
def secrets(self, secrets):
''' property for secrets '''
self._secrets = secrets
def delete_secret(self, inc_secret):
''' remove a secret '''
remove_idx = None
for idx, sec in enumerate(self.secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
if remove_idx:
del self.secrets[remove_idx]
return True
return False
def delete_image_pull_secret(self, inc_secret):
''' remove a image_pull_secret '''
remove_idx = None
for idx, sec in enumerate(self.image_pull_secrets):
if sec['name'] == inc_secret:
remove_idx = idx
break
if remove_idx:
del self.image_pull_secrets[remove_idx]
return True
return False
def find_secret(self, inc_secret):
'''find secret'''
for secret in self.secrets:
if secret['name'] == inc_secret:
return secret
return None
def find_image_pull_secret(self, inc_secret):
'''find secret'''
for secret in self.image_pull_secrets:
if secret['name'] == inc_secret:
return secret
return None
def add_secret(self, inc_secret):
'''add secret'''
if self.secrets:
self.secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.secrets_path, [{"name": inc_secret}])
def add_image_pull_secret(self, inc_secret):
'''add image_pull_secret'''
if self.image_pull_secrets:
self.image_pull_secrets.append({"name": inc_secret}) # pylint: disable=no-member
else:
self.put(ServiceAccount.image_pull_secrets_path, [{"name": inc_secret}])
# -*- -*- -*- End included fragment: lib/serviceaccount.py -*- -*- -*-
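# Illustrative sketch (not part of the original fragment): building a default
# service account definition with the classes above; all names below are
# hypothetical.
def _service_account_sketch():
    '''Create a ServiceAccount wrapper from a ServiceAccountConfig.'''
    config = ServiceAccountConfig(sname='builder',
                                  namespace='default',
                                  kubeconfig='/etc/origin/master/admin.kubeconfig',
                                  secrets=['builder-token'],
                                  image_pull_secrets=['builder-dockercfg'])
    sa = ServiceAccount(content=config.data)
    sa.add_secret('extra-secret')        # appends {'name': 'extra-secret'}
    return sa.secrets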
# -*- -*- -*- Begin included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
class OCServiceAccountSecret(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
kind = 'sa'
def __init__(self, config, verbose=False):
''' Constructor for OpenshiftOC '''
super(OCServiceAccountSecret, self).__init__(config.namespace, kubeconfig=config.kubeconfig, verbose=verbose)
self.config = config
self.verbose = verbose
self._service_account = None
@property
def service_account(self):
''' Property for the service account '''
if not self._service_account:
self.get()
return self._service_account
@service_account.setter
def service_account(self, data):
''' setter for the service account '''
self._service_account = data
def exists(self, in_secret):
''' verifies if secret exists in the service account '''
result = self.service_account.find_secret(in_secret)
if not result:
return False
return True
def get(self):
''' get the service account definition from the master '''
sao = self._get(OCServiceAccountSecret.kind, self.config.name)
if sao['returncode'] == 0:
self.service_account = ServiceAccount(content=sao['results'][0])
sao['results'] = self.service_account.get('secrets')
return sao
def delete(self):
''' delete secrets '''
modified = []
for rem_secret in self.config.secrets:
modified.append(self.service_account.delete_secret(rem_secret))
if any(modified):
return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
return {'returncode': 0, 'changed': False}
def put(self):
''' place secrets into sa '''
modified = False
for add_secret in self.config.secrets:
if not self.service_account.find_secret(add_secret):
self.service_account.add_secret(add_secret)
modified = True
if modified:
return self._replace_content(OCServiceAccountSecret.kind, self.config.name, self.service_account.yaml_dict)
return {'returncode': 0, 'changed': False}
@staticmethod
# pylint: disable=too-many-return-statements,too-many-branches
# TODO: This function should be refactored into its individual parts.
def run_ansible(params, check_mode):
''' run the ansible idempotent code '''
sconfig = ServiceAccountConfig(params['service_account'],
params['namespace'],
params['kubeconfig'],
[params['secret']],
None)
oc_sa_sec = OCServiceAccountSecret(sconfig, verbose=params['debug'])
state = params['state']
api_rval = oc_sa_sec.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': "list"}
########
# Delete
########
if state == 'absent':
if oc_sa_sec.exists(params['secret']):
if check_mode:
                return {'changed': True, 'msg': 'Would have removed the ' + \
                        'secret from the service account.'}
api_rval = oc_sa_sec.delete()
return {'changed': True, 'results': api_rval, 'state': "absent"}
return {'changed': False, 'state': "absent"}
if state == 'present':
########
# Create
########
if not oc_sa_sec.exists(params['secret']):
if check_mode:
return {'changed': True, 'msg': 'Would have added the ' + \
'secret to the service account.'}
# Create it here
api_rval = oc_sa_sec.put()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_sa_sec.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': "present"}
return {'changed': False, 'results': api_rval, 'state': "present"}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state,
'state': 'unknown'}
# -*- -*- -*- End included fragment: class/oc_serviceaccount_secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
def main():
'''
ansible oc module to manage service account secrets.
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str',
choices=['present', 'absent', 'list']),
debug=dict(default=False, type='bool'),
namespace=dict(default=None, required=True, type='str'),
secret=dict(default=None, type='str'),
service_account=dict(required=True, type='str'),
),
supports_check_mode=True,
)
rval = OCServiceAccountSecret.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_serviceaccount_secret.py -*- -*- -*-
| apache-2.0 | -6,776,691,593,204,628,000 | 33.076385 | 301 | 0.535104 | false | 4.30949 | true | false | false |
atamazian/traffic-proc-tools | gg1_function.py | 1 | 2914 | #Simulate queuing system G/G/1
#@Author: Nguyen Duc Viet
import random as rd
import numpy as np
import simpy
#Function for empirical data --------------------------------------------------------------------------------------------
data_wt = []
def arrival(env, number,counter,interval,time_service):
for i in range(number):
t = interval[i]
yield env.timeout(t)
c = service(env,'Customer %02d'%i,counter,i,time_service[i])
env.process(c)
def service(env,name, counter,i, time_service):
arrive = env.now
with counter.request() as req:
yield req
wait = env.now - arrive
#print('%7.4f %s: Waited %6.3f' % (env.now, name, wait))
data_wt.append(wait)
ts = time_service
yield env.timeout(ts)
#print('%7.4f %s: Finished' % (env.now, name))
def simulate_gg1(n,interval_time,time_service):
env = simpy.Environment()
counter = simpy.Resource(env, capacity=1)
t = env.now
env.process(arrival(env,n,counter,interval_time,time_service))
env.run()
t = env.now - t
#print("\nTotal simulation time: %f"% t)
tw = np.array(data_wt)
ts = np.array(time_service)
del data_wt[:] #reset list variable containing waiting time
b=0 #busy time of server
for i in range(n):
b = b+ts[i]
    t_in_system = tw.sum() + b # Total time spent in system of all packets = total waiting time + total service time
    #print("Total waiting time of %i packets: %f" %(n,tw.sum()))
    #print("Total time spent in system of %i packets: %f\n" %(n,t_in_system))
    #Calculate output parameters: Utilization; mean time spent in system; mean number of clients
u = b/t
w = t_in_system/n #Mean time spent in system
l = t_in_system/t #Mean number of clients in the system
return (u,w,l)
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#function for simulating M/M/1
def simulate_MM1(lamb_da,mu):
u = lamb_da/mu
if u>1:
u=1
W =1/(mu-lamb_da)
Wq = W - 1/mu
L = lamb_da*W
return (u,W,L)
#-----------------------------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------------------------
#Function for simulating QE/QE/1
def qexp_rate(q, ave):
rate = 1/(ave*(3-2*q))
return rate
def ts_qlog(x,q):
if q==1:
y=np.log(x)
else:
y = (x**(1-q)-1)/(1-q)
return y
def rand_qexp(N,q,rate):
q1 = 1/(2-q)
u = np.random.uniform(0,1,size=(1,N))
y = -q1*ts_qlog(u,q1)/rate
return y
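#Illustrative usage sketch (not part of the original script): compare the
#simulated G/G/1 metrics against the analytic M/M/1 ones for the same load.
def _example_run(n=1000, lamb_da=0.8, mu=1.0):
    inter_arrivals = np.random.exponential(1.0/lamb_da, n)
    service_times = np.random.exponential(1.0/mu, n)
    u_sim, w_sim, l_sim = simulate_gg1(n, inter_arrivals, service_times)
    u_th, w_th, l_th = simulate_MM1(lamb_da, mu)
    return (u_sim, w_sim, l_sim), (u_th, w_th, l_th)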
| mit | 5,512,354,887,458,386,000 | 32.494253 | 128 | 0.46431 | false | 3.571078 | false | false | false |
yuma-m/pychord | pychord/progression.py | 1 | 3153 | # -*- coding: utf-8 -*-
from .chord import as_chord, Chord
class ChordProgression(object):
""" Class to handle chord progressions.
:param list[pychord.Chord] _chords: component chords of chord progression.
"""
def __init__(self, initial_chords=None):
""" Constructor of ChordProgression instance.
:type initial_chords: str|pychord.Chord|list
:param initial_chords: Initial chord or chords of the chord progressions
"""
if initial_chords is None:
initial_chords = []
if isinstance(initial_chords, Chord):
self._chords = [initial_chords]
elif isinstance(initial_chords, str):
self._chords = [as_chord(initial_chords)]
elif isinstance(initial_chords, list):
self._chords = [as_chord(chord) for chord in initial_chords]
else:
raise TypeError("Cannot initialize ChordProgression with argument of {} type".format(type(initial_chords)))
def __unicode__(self):
return " | ".join([chord.chord for chord in self._chords])
def __str__(self):
return " | ".join([chord.chord for chord in self._chords])
def __repr__(self):
return "<ChordProgression: {}>".format(" | ".join([chord.chord for chord in self._chords]))
def __add__(self, other):
self._chords += other.chords
return self
def __len__(self):
return len(self._chords)
def __getitem__(self, item):
return self._chords[item]
def __setitem__(self, key, value):
self._chords[key] = value
def __eq__(self, other):
if not isinstance(other, ChordProgression):
raise TypeError("Cannot compare ChordProgression object with {} object".format(type(other)))
if len(self) != len(other):
return False
for c, o in zip(self, other):
if c != o:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@property
def chords(self):
""" Get component chords of chord progression
:rtype: list[pychord.Chord]
"""
return self._chords
def append(self, chord):
""" Append a chord to chord progressions
:type chord: str|pychord.Chord
:param chord: A chord to append
:return:
"""
self._chords.append(as_chord(chord))
def insert(self, index, chord):
""" Insert a chord to chord progressions
:param int index: Index to insert a chord
:type chord: str|pychord.Chord
:param chord: A chord to insert
:return:
"""
self._chords.insert(index, as_chord(chord))
def pop(self, index=-1):
""" Pop a chord from chord progressions
:param int index: Index of the chord to pop (default: -1)
:return: pychord.Chord
"""
return self._chords.pop(index)
def transpose(self, trans):
""" Transpose whole chord progressions
:param int trans: Transpose key
:return:
"""
for chord in self._chords:
chord.transpose(trans)
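# Illustrative usage sketch (not part of the original module); assumes the
# chord names below are valid pychord chords.
def _progression_example():
    """Build, extend and transpose a progression."""
    cp = ChordProgression(["C", "G", "Am", "F"])
    cp.append("G7")
    cp.transpose(2)    # C G Am F G7 -> D A Bm G A7
    return str(cp)     # "D | A | Bm | G | A7"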
| mit | 278,924,370,740,419,650 | 28.745283 | 119 | 0.582937 | false | 3.735782 | false | false | false |
Kobzol/debug-visualizer | debugger/pycgdb/programinfo.py | 1 | 2577 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Jakub Beranek
#
# This file is part of Devi.
#
# Devi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Devi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Devi. If not, see <http://www.gnu.org/licenses/>.
#
import os
class ProgramInfo(object):
def __init__(self, elffile):
"""
@type elffile: elftools.elf.elffile.ELFFile
"""
dwarf_info = elffile.get_dwarf_info()
self.files = {}
self.addresses = {}
for cu in dwarf_info.iter_CUs():
line_program = dwarf_info.line_program_for_CU(cu)
if line_program:
for line_entry in line_program.get_entries():
if line_entry.state:
self._parse_line_state(line_program.header.file_entry,
line_entry.state)
for die in cu.iter_DIEs():
self._parse_die(die)
def has_file(self, file):
return os.path.abspath(file) in self.files
def has_location(self, file, line):
file = os.path.abspath(file)
return self.has_file(file) and line in self.files[file]
def get_address(self, file, line):
file = os.path.abspath(file)
if not self.has_location(file, line):
return None
else:
return self.files[file][line][0]
def get_location(self, address):
if address in self.addresses:
return self.addresses[address]
else:
return None
def _parse_die(self, die):
for child in die.iter_children():
self._parse_die(child)
def _parse_line_state(self, files, line_state):
file = os.path.abspath(files[line_state.file - 1].name)
line = line_state.line
address = line_state.address
if file not in self.files:
self.files[file] = {}
if line not in self.files[file]:
self.files[file][line] = []
self.files[file][line].append(address)
self.addresses[address] = (file, line)
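# Illustrative usage sketch (not part of the original module); assumes the
# pyelftools package and an ELF binary that carries DWARF debug info. The
# source file and line number below are hypothetical.
def _program_info_example(path):
    from elftools.elf.elffile import ELFFile
    with open(path, 'rb') as f:
        info = ProgramInfo(ELFFile(f))
        # map a source location to an address and back again
        addr = info.get_address('main.c', 42)
        return info.get_location(addr) if addr is not None else None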
| gpl-3.0 | 6,166,318,167,629,636,000 | 29.317647 | 78 | 0.589445 | false | 3.723988 | false | false | false |
MicrosoftGenomics/FaST-LMM | fastlmm/util/runner/examples.py | 1 | 2867 | import math
from fastlmm.util.mapreduce import map_reduce
from fastlmm.util.runner import Local, LocalMultiProc, HPC, LocalMultiThread
import os
def is_prime(n):
assert n == int(n) and n>1, "Expect integers greater than 1"
for j in xrange(2,int(math.sqrt(n))+1):
if n % j == 0:
return False
return True
# Iterative algorithm for finding prime numbers in a range
def prime_search0(start,stop):
assert start < stop, "start must be less than stop"
prime_list = []
for i in xrange(start,stop):
if is_prime(i):
prime_list.append(i)
return prime_list
# The similar map_reduce algorithm for finding prime numbers in a range
def prime_search1(start,stop,runner):
def mapper(i):
if is_prime(i):
#assert i != 5, "I just don't like fives"
return i
else:
return None
def reducer(sequence):
result = []
for i in sequence:
if i is not None:
result.append(i)
return result
return map_reduce(xrange(start,stop),
mapper=mapper,
reducer=reducer, #lambda sequence: [i for i in sequence if i is not None], #Filter out the None's
runner=runner)
if __name__ == '__main__':
#Run the iterative algorithm
#print prime_search0(2,10) #=> [2, 3, 5, 7]
#Run the map_reduce algorithm locally.
#print prime_search1(2,10,runner=Local()) #=> [2, 3, 5, 7]
#Now we run map_reduce on 20 processors.
#from PrimeNumbers.examples import prime_search1 #If not running local, must import your function. (Recall that for import to work, you also need an empty __init__.py).
#print prime_search1(2,10,runner=LocalMultiProc(20)) #=> [2, 3, 5, 7]
#Finally we run on HPC
#------- To run on HPC must create an hpc cluster object
#remote_python_parent=r"\\GCR\Scratch\B99\escience\{0}\ppv0".format(os.environ['USERNAME']) #where to copy your "python_path" code to.
#hpc_runner= HPC(10, 'GCR',r"\\GCR\Scratch\B99\escience",
# remote_python_parent=remote_python_parent,
# unit='node', #core, socket, node
# update_remote_python_parent=True,
# template="Preemptable",
# priority="Lowest",
# nodegroups="Preemptable",
# runtime="0:11:0", # day:hour:min
# )
#runner=LocalMultiProc(2,just_one_process=False)
#runner = Local()
runner = LocalMultiThread(2,just_one_process=False)
print prime_search1(2,10,runner=runner) #=> [2, 3, 5, 7]
print "done" | apache-2.0 | -1,530,155,641,020,250,400 | 39.394366 | 172 | 0.55354 | false | 3.699355 | false | false | false |
electronic-library/electronic-library-core | tests/library_test.py | 1 | 5083 | """
Unit tests for class Library.
"""
from library.bookmark import Bookmark
from library.book import Book
from library.library import Library
lib = Library()
book1_refs = {
'Authors': ['OS guy', 'Some guy'],
'Publisher': 'Forgot publisher',
'Edition': 9,
'Chapters': 18,
'Pages': 900
}
book2_refs = {
'Authors': 'Me',
'Edition': 5,
'Chapters': 7,
'Pages': 900
}
book3_refs = {
'Authors': ['Scott Chacon', 'Ben Straub'],
'Publisher': 'Apress',
'Edition': 2,
'Chapters': 10
}
book1 = Book(title='Operating System Concepts',
refs=book1_refs)
book2 = Book(title='Computer Organization and Design',
refs=book2_refs)
book3 = Book(title='Pro Git',
refs=book3_refs)
bookmark1_refs = {
'Book Title': 'Operating System Concepts',
'Edition': 9,
'Chapter': 10,
'Page': 485
}
bookmark2_refs = {
'Book Title': 'Operating System Concepts',
'Edition': 9,
'Chapter': '11.3.1',
'Page': 517
}
bookmark3_refs = {
'Book Title': 'Pro Git',
'Edition': 2,
'Chapter': 3,
'Page': 81
}
bookmark1 = Bookmark(title='File Systems',
category='Operating Systems',
refs=bookmark1_refs)
bookmark2 = Bookmark(title='Storage Structure',
category='Operating Systems',
refs=bookmark2_refs)
bookmark3 = Bookmark(title='Git Branching',
category='Git',
refs=bookmark3_refs)
lib.add_book(book1)
lib.add_book(book2)
lib.add_book(book3)
book1.add_bookmark(bookmark1)
book1.add_bookmark(bookmark2)
book3.add_bookmark(bookmark3)
class TestLibrary:
def test_book(self):
# test with ID
assert(lib.book(book1.id()) == book1)
assert(lib.book(book1.id()) != book2)
assert(lib.book(book1.id()) != book3)
assert(lib.book(book2.id()) != book1)
assert(lib.book(book2.id()) == book2)
assert(lib.book(book2.id()) != book3)
assert(lib.book(book3.id()) != book1)
assert(lib.book(book3.id()) != book2)
assert(lib.book(book3.id()) == book3)
# test with obj
assert(lib.book(book1) == book1)
assert(lib.book(book1) != book2)
assert(lib.book(book1) != book3)
assert(lib.book(book2) != book1)
assert(lib.book(book2) == book2)
assert(lib.book(book2) != book3)
assert(lib.book(book3) != book1)
assert(lib.book(book3) != book2)
assert(lib.book(book3) == book3)
# test with dict
assert(lib.book(book1_refs) == book1)
assert(lib.book(book1_refs) != book2)
assert(lib.book(book1_refs) != book3)
assert(lib.book(book2_refs) != book1)
assert(lib.book(book2_refs) == book2)
assert(lib.book(book2_refs) != book3)
assert(lib.book(book3_refs) != book1)
assert(lib.book(book3_refs) != book2)
assert(lib.book(book3_refs) == book3)
# test with str for title
assert(lib.book(book1.title()) == book1)
assert(lib.book(book1.title()) != book2)
assert(lib.book(book1.title()) != book3)
assert(lib.book(book2.title()) != book1)
assert(lib.book(book2.title()) == book2)
assert(lib.book(book2.title()) != book3)
assert(lib.book(book3.title()) != book1)
assert(lib.book(book3.title()) != book2)
assert(lib.book(book3.title()) == book3)
def test_book_with_bookmark(self):
assert(lib.book_with_bookmark(bookmark1) == book1)
assert(lib.book_with_bookmark(bookmark1) != book2)
assert(lib.book_with_bookmark(bookmark1) != book3)
assert(lib.book_with_bookmark(bookmark2) == book1)
assert(lib.book_with_bookmark(bookmark2) != book2)
assert(lib.book_with_bookmark(bookmark2) != book3)
assert(lib.book_with_bookmark(bookmark3) != book1)
assert(lib.book_with_bookmark(bookmark3) != book2)
assert(lib.book_with_bookmark(bookmark3) == book3)
def test_bookmarks_of_book(self):
assert(lib.bookmarks_of_book(book1) == {bookmark1, bookmark2})
assert(lib.bookmarks_of_book(book1) == {bookmark2, bookmark1})
assert(lib.bookmarks_of_book(book1) != {bookmark1, bookmark3})
assert(lib.bookmarks_of_book(book1) != {bookmark2, bookmark3})
assert(lib.bookmarks_of_book(book2) == set())
assert(lib.bookmarks_of_book(book2) != {bookmark1, bookmark2})
assert(lib.bookmarks_of_book(book2) != {bookmark1, bookmark3})
assert(lib.bookmarks_of_book(book2) != {bookmark2, bookmark3})
assert(lib.bookmarks_of_book(book3) == {bookmark3})
assert(lib.bookmarks_of_book(book3) != {bookmark1})
assert(lib.bookmarks_of_book(book3) != {bookmark2})
assert(lib.bookmarks_of_book(book3) != {bookmark1, bookmark3})
assert(lib.bookmarks_of_book(book3) != {bookmark2, bookmark3})
def test_add_category(self):
pass
def test_rm_category(self):
pass
def test_parent_category_of(self):
pass
| gpl-3.0 | 215,717,537,777,519,400 | 31.793548 | 70 | 0.598465 | false | 3.174891 | true | false | false |
jeamland/wsproto | test/test_upgrade.py | 1 | 5508 | # -*- coding: utf-8 -*-
"""
Test the HTTP upgrade phase of connection
"""
import base64
import email
import random
import sys
from wsproto.connection import WSConnection, CLIENT, SERVER
from wsproto.events import (
ConnectionEstablished, ConnectionFailed, ConnectionRequested
)
IS_PYTHON3 = sys.version_info >= (3, 0)
def parse_headers(headers):
if IS_PYTHON3:
headers = email.message_from_bytes(headers)
else:
headers = email.message_from_string(headers)
return dict(headers.items())
class TestClientUpgrade(object):
def initiate(self, host, path, **kwargs):
ws = WSConnection(CLIENT, host, path, **kwargs)
data = ws.bytes_to_send()
request, headers = data.split(b'\r\n', 1)
method, path, version = request.strip().split()
headers = parse_headers(headers)
print(method, path, version)
print(repr(headers))
return ws, method, path, version, headers
def test_initiate_connection(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(
_host, _path, subprotocols=["foo", "bar"])
assert method == b'GET'
assert path == _path.encode('ascii')
assert headers['host'] == _host
assert headers['connection'].lower() == 'upgrade'
assert headers['upgrade'].lower() == 'websocket'
assert 'sec-websocket-key' in headers
assert 'sec-websocket-version' in headers
assert headers['sec-websocket-protocol'] == 'foo, bar'
def test_no_subprotocols(self):
ws, method, path, version, headers = self.initiate("foo", "/bar")
assert 'sec-websocket-protocol' not in headers
def test_correct_accept_token(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(_host, _path)
key = headers['sec-websocket-key'].encode('ascii')
accept_token = ws._generate_accept_token(key)
response = b"HTTP/1.1 101 Switching Protocols\r\n"
response += b"Connection: Upgrade\r\n"
response += b"Upgrade: WebSocket\r\n"
response += b"Sec-WebSocket-Accept: " + accept_token + b"\r\n"
response += b"\r\n"
ws.receive_bytes(response)
assert isinstance(next(ws.events()), ConnectionEstablished)
def test_incorrect_accept_token(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(_host, _path)
key = b'This is wrong token'
accept_token = ws._generate_accept_token(key)
response = b"HTTP/1.1 101 Switching Protocols\r\n"
response += b"Connection: Upgrade\r\n"
response += b"Upgrade: WebSocket\r\n"
response += b"Sec-WebSocket-Accept: " + accept_token + b"\r\n"
response += b"\r\n"
ws.receive_bytes(response)
assert isinstance(next(ws.events()), ConnectionFailed)
def test_bad_connection_header(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(_host, _path)
key = headers['sec-websocket-key'].encode('ascii')
accept_token = ws._generate_accept_token(key)
response = b"HTTP/1.1 101 Switching Protocols\r\n"
response += b"Connection: Updraft\r\n"
response += b"Upgrade: WebSocket\r\n"
response += b"Sec-WebSocket-Accept: " + accept_token + b"\r\n"
response += b"\r\n"
ws.receive_bytes(response)
assert isinstance(next(ws.events()), ConnectionFailed)
def test_bad_upgrade_header(self):
_host = 'frob.nitz'
_path = '/fnord'
ws, method, path, version, headers = self.initiate(_host, _path)
key = headers['sec-websocket-key'].encode('ascii')
accept_token = ws._generate_accept_token(key)
response = b"HTTP/1.1 101 Switching Protocols\r\n"
response += b"Connection: Upgrade\r\n"
response += b"Upgrade: SebWocket\r\n"
response += b"Sec-WebSocket-Accept: " + accept_token + b"\r\n"
response += b"\r\n"
ws.receive_bytes(response)
assert isinstance(next(ws.events()), ConnectionFailed)
class TestServerUpgrade(object):
def test_correct_request(self):
test_host = 'frob.nitz'
test_path = '/fnord'
ws = WSConnection(SERVER)
nonce = bytes(random.getrandbits(8) for x in range(0, 16))
nonce = base64.b64encode(nonce)
request = b"GET " + test_path.encode('ascii') + b" HTTP/1.1\r\n"
request += b'Host: ' + test_host.encode('ascii') + b'\r\n'
request += b'Connection: Upgrade\r\n'
request += b'Upgrade: WebSocket\r\n'
request += b'Sec-WebSocket-Version: 13\r\n'
request += b'Sec-WebSocket-Key: ' + nonce + b'\r\n'
request += b'\r\n'
ws.receive_bytes(request)
event = next(ws.events())
assert isinstance(event, ConnectionRequested)
ws.accept(event)
data = ws.bytes_to_send()
response, headers = data.split(b'\r\n', 1)
version, code, reason = response.split(b' ')
headers = parse_headers(headers)
accept_token = ws._generate_accept_token(nonce)
assert int(code) == 101
assert headers['connection'].lower() == 'upgrade'
assert headers['upgrade'].lower() == 'websocket'
assert headers['sec-websocket-accept'] == accept_token.decode('ascii')
| mit | -7,825,416,513,577,806,000 | 31.4 | 78 | 0.604757 | false | 3.551257 | true | false | false |
ansobolev/regCMPostProc | src/plot.py | 1 | 2816 | #!/usr/bin/env python
# RegCM postprocessing tool
# Copyright (C) 2014 Aliou, Addisu, Kanhu, Andrey
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from value import Value
class Plotter(object):
def __init__(self, value):
self._value = value
self.lat, self.lon = value.latlon
def plot(self, coastlines=True,
countries=True,
places=True,
title=None,
levels = None):
if levels is not None:
l_min, l_max = levels
l = (l_max - l_min) / 10
levels = range(l_min, l_max + l, l)
projection = ccrs.PlateCarree()
self.fig, self.ax = plt.subplots(subplot_kw={'projection': projection})
if coastlines:
self.ax.coastlines('10m')
if countries:
countries = cfeature.NaturalEarthFeature(
scale='110m', category='cultural', name='admin_0_countries')
self.ax.add_feature(countries, color='r', alpha=0.1)
if places:
places = cfeature.NaturalEarthFeature(
scale='110m', category='cultural', name='populated_places')
self.ax.add_feature(places, color='b', hatch='o')
        cx = self.ax.contourf(self.lon, self.lat, self._value.data,
                              transform=ccrs.PlateCarree(), cmap='bwr', levels=levels)
# To mask out OCEAN or LAND
#ax.add_feature(cfeature.OCEAN)
#ax.add_feature(cfeature.LAND)
self.ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
linewidth=1, color='blue', alpha=0.5, linestyle='-')
self.fig.colorbar(cx)
times = self._value.limits['time']
plt.title(self._value.title + ' [' + self._value.units + ']\n' +
'mean between ' + str(times[0]) + ' and ' + str(times[1]) + '\n')
def show(self):
plt.show()
def save(self, filename, format):
plt.savefig(filename + '.' + format)
def close(self):
plt.close(self.fig)
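# Hedged usage sketch (the Value class comes from this package's `value`
# module; its constructor arguments are not shown here, so the construction
# below is an assumption, not a verified API):
#
#   value = Value(...)                  # hypothetical construction
#   plotter = Plotter(value)
#   plotter.plot(levels=(0, 100))       # contour levels from 0 to 100
#   plotter.save('mean_field', 'png')   # writes mean_field.png
#   plotter.close()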
if __name__ == "__main__":
pass | gpl-3.0 | -1,203,394,349,944,687,000 | 34.2125 | 123 | 0.604759 | false | 3.685864 | false | false | false |
Bismarrck/tensorflow | tensorflow/python/saved_model/function_serialization.py | 1 | 4869 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for serializing PolymorphicFunctions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as defun_lib
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.saved_model import saved_object_graph_pb2
def _serialize_polymorphic_function(polymorphic_function, node_ids):
"""Build a SavedPolymorphicProto."""
coder = nested_structure_coder.StructureCoder()
proto = saved_object_graph_pb2.SavedPolymorphicFunction()
proto.function_spec_tuple.CopyFrom(
coder.encode_structure(polymorphic_function.function_spec.as_tuple())) # pylint: disable=protected-access
for signature, concrete_function in list_all_concrete_functions(
polymorphic_function):
bound_inputs = []
try:
for capture in concrete_function.captured_inputs:
bound_inputs.append(node_ids[capture])
except KeyError:
# TODO(andresp): Would it better to throw an exception?
logging.warning(
"Concrete function %s not added to object based saved model as it "
"captures tensor %s which is unsupported or not reachable from root.",
concrete_function.name, capture)
continue
function_proto = proto.monomorphic_function.add()
function_proto.concrete_function = concrete_function.name
function_proto.canonicalized_input.CopyFrom(
coder.encode_structure(signature))
function_proto.bound_inputs.extend(bound_inputs)
return proto
def list_all_concrete_functions(polymorphic_function):
"""Given a polymorphic function, returns all of its concrete functions.
Args:
polymorphic_function: Instance of `PolymorphicFunction`.
Returns:
A list of tuples in the form (signature, concrete_function), where concrete
function is an instance of `Function`.
"""
input_signature = polymorphic_function._input_signature # pylint: disable=protected-access
if input_signature is not None:
polymorphic_function.get_concrete_function()
concrete_functions = []
for signature in polymorphic_function._cached_input_signatures: # pylint: disable=protected-access
if any(isinstance(arg, defun_lib.UnknownArgument) for arg in signature):
continue
concrete_function = polymorphic_function.get_concrete_function(*signature)
concrete_functions.append((signature, concrete_function))
return concrete_functions
def list_all_polymorphic_functions(checkpointable_object):
"""Given a checkpointable object, returns all of its polymorphic functions."""
polymorphic_functions = dict()
for attribute_name in dir(checkpointable_object):
try:
attribute_value = getattr(checkpointable_object, attribute_name, None)
except: # pylint: disable=bare-except
# We really don't want to throw an exception just because some object's
# attribute accessor is broken.
attribute_value = None
# TODO(allenl): Consider de-duplicating functions which are referenced
# from multiple attributes.
if isinstance(attribute_value, def_function.PolymorphicFunction):
polymorphic_functions[attribute_name] = attribute_value
return polymorphic_functions
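# Hedged usage sketch (relies on the TF-1.13-era internals imported above;
# the decorator/class pairing is inferred from this module, not from a
# stable public API):
#
#   class _Model(object):
#     @def_function.function
#     def double(self, x):
#       return x * 2
#
#   list_all_polymorphic_functions(_Model())  # -> {'double': <PolymorphicFunction>}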
def add_polymorphic_functions_to_object_graph_proto(checkpointable_objects,
saved_object_graph,
node_ids):
"""Finds PolymorphicFunctions attached to objects and saves them."""
existing_objects = list(zip(checkpointable_objects, saved_object_graph.nodes))
for obj, obj_proto in existing_objects:
for name, polymorphic_function in list_all_polymorphic_functions(
obj).items():
function_node_id = len(saved_object_graph.nodes)
function_node = saved_object_graph.nodes.add()
function_node.function.CopyFrom(
_serialize_polymorphic_function(polymorphic_function, node_ids))
reference = obj_proto.children.add()
reference.node_id = function_node_id
reference.local_name = name
| apache-2.0 | 3,175,076,075,852,673,500 | 43.669725 | 112 | 0.720476 | false | 4.394404 | false | false | false |
robotican/ric | ric_board/scripts/RiCConfigurator/BAL/Devices/PPMReader.py | 1 | 2203 | __author__ = 'tom1231'
from PyQt4.QtCore import QUrl
from PyQt4.QtGui import *
from BAL.Interface.DeviceFrame import DeviceFrame, EX_DEV, PPMReader
from lxml.etree import Element, SubElement, XML
class PPMReader(DeviceFrame):
def __init__(self, frame, data):
DeviceFrame.__init__(self, EX_DEV, frame, data)
self._diffTopic = '/diff'
self._ppmTopic = '/RiC_PPM'
def fromDict(self, data):
self._diffTopic = data['diff']
self._ppmTopic = data['ppm']
def toDict(self):
        data = dict()
        # Note: the class statement above rebinds the module-level name, so this
        # stores the PPMReader class itself, not the device-type constant that
        # was imported from BAL.Interface.DeviceFrame.
        data['type'] = PPMReader
data['diff'] = self._diffTopic
data['ppm'] = self._ppmTopic
return data
def add(self):
if not self.nameIsValid():
error = QErrorMessage()
error.setWindowTitle("Same name error")
error.showMessage("Name already taken.")
error.exec_()
self._isValid = False
return
self._diffTopic = str(self.diffTopic.text())
self._ppmTopic = str(self.ppmTopic.text())
self._isValid = True
def showDetails(self, items=None):
self.diffTopic = QLineEdit(self._diffTopic)
self.ppmTopic = QLineEdit(self._ppmTopic)
self._frame.layout().addRow(QLabel('Differential drive topic: '), self.diffTopic)
self._frame.layout().addRow(QLabel('PPM topic: '), self.ppmTopic)
def printDetails(self):
self._frame.layout().addRow(QLabel('Differential drive topic: '), QLabel(self._diffTopic))
self._frame.layout().addRow(QLabel('PPM topic: '), QLabel(self._ppmTopic))
def saveToFile(self, parent):
keysAtt = parent.keys()
ns = ''
if len(keysAtt) > 0 and keysAtt[0] == 'ns':
ns = '/' + parent.get('ns')
element = SubElement(parent, 'include', {
'file': '$(find ric_board)/scripts/RiCPPMReader.launch'
})
SubElement(element, 'arg', {
'name': 'ppmTopic',
'value': ns + self._ppmTopic
})
SubElement(element, 'arg', {
'name': 'diffTopic',
'value': ns + self._diffTopic
})
def getName(self):
return 'ppm_reader'
| bsd-3-clause | 7,213,415,209,181,855,000 | 30.028169 | 98 | 0.574671 | false | 3.696309 | false | false | false |
bokeh/bokeh | examples/integration/glyphs/categorical_multi_glyphs.py | 1 | 1210 | from bokeh.io import show
from bokeh.layouts import gridplot
from bokeh.plotting import figure
x_range = ['a', 'b', 'c', 'd']
y_values = [1., 2., 3., 4.]
y_errors = [.1, .2, .3, .4]
err_xs = []
err_ys = []
for x, y, yerr in zip(x_range, y_values, y_errors):
err_xs.append((x, x))
err_ys.append((y - yerr, y + yerr))
p1 = figure(x_range=x_range, title="multi_line", width=300, height=300)
p1.square(x_range, y_values, size=7, line_alpha=0)
p1.multi_line(err_xs, err_ys)
p2 = figure(x_range=x_range, title="line", width=300, height=300)
p2.square(x_range, y_values, size=7, line_alpha=0)
for i in range(len(err_xs)):
p2.line(err_xs[i], err_ys[i])
patch1_x = ['foo','bar','bar','foo']
patch1_y = [1,1,2,2]
patch2_x = ['bar','ting','bar','foo']
patch2_y = [2,2,4,4]
patch_list_x = [patch1_x, patch2_x]
patch_list_y = [patch1_y, patch2_y]
p3 = figure(x_range=['foo', 'bar', 'ting'], y_range=(0, 5), title="patches", width=300, height=300)
p3.patches(patch_list_x, patch_list_y)
p4 = figure(x_range=['foo', 'bar', 'ting'], y_range=(0, 5), title="patch", width=300, height=300)
p4.patch(patch1_x, patch1_y)
p4.patch(patch2_x, patch2_y)
show(gridplot([[p1, p2], [p3, p4]], merge_tools=False))
| bsd-3-clause | -1,479,721,902,649,406,000 | 27.139535 | 99 | 0.621488 | false | 2.232472 | false | false | false |
awslabs/chalice | tests/codelinter.py | 1 | 2180 | # These are linting checks used in the chalice codebase itself.
# These are used to enforce specific coding standards and constraints.
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
from astroid.exceptions import InferenceError
import astroid
def register(linter):
linter.register_checker(ConditionalImports(linter))
class ConditionalImports(BaseChecker):
# This is used to ensure that any imports that rely on conditional
# dependencies must be wrapped in a try/except ImportError.
__implements__ = (IAstroidChecker,)
name = 'must-catch-import-error'
msgs = {
'C9997': ('Importing this module must catch ImportError.',
'must-catch-import-error',
'Importing this module must catch ImportError.'),
}
def visit_import(self, node):
names = [name[0] for name in node.names]
if 'chalice.cli.filewatch.eventbased' in names:
if not self._is_in_try_except_import_error(node):
self.add_message('must-catch-import-error', node=node)
return
def visit_importfrom(self, node):
if node.modname == 'chalice.cli.filewatch.eventbased':
names = [name[0] for name in node.names]
if 'WatchdogWorkerProcess' in names:
# Ensure this is wrapped in a try/except.
# Technically we should ensure anywhere in the call stack
# we're wrapped in a try/except, but in practice we'll just
# enforce you did that in the same scope as your import.
if not self._is_in_try_except_import_error(node):
self.add_message('must-catch-import-error', node=node)
return
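    # Hedged illustration of the pattern this checker enforces (module and
    # class names are the ones hard-coded above):
    #
    #   try:
    #       from chalice.cli.filewatch.eventbased import WatchdogWorkerProcess
    #   except ImportError:
    #       WatchdogWorkerProcess = None  # conditional dependency missing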
def _is_in_try_except_import_error(self, node):
if not isinstance(node.parent, astroid.TryExcept):
return False
caught_exceptions = [
handler.type.name for handler in node.parent.handlers]
if 'ImportError' not in caught_exceptions:
# They wrapped a try/except but aren't catching
# ImportError.
return False
return True
| apache-2.0 | -3,445,982,109,611,971,000 | 40.923077 | 75 | 0.638532 | false | 4.3083 | false | false | false |
smarkets/marge-bot | tests/git_repo_mock.py | 1 | 8075 | import logging as log
from collections import defaultdict
from datetime import timedelta
import functools
import shlex
import marge.git as git
class RepoMock(git.Repo):
@classmethod
def init_for_merge_request(cls, merge_request, initial_target_sha, project, forked_project=None):
assert bool(forked_project) == (
merge_request.source_project_id != merge_request.target_project_id
)
target_url = project.ssh_url_to_repo
source_url = forked_project.ssh_url_to_repo if forked_project else target_url
remote_repos = defaultdict(GitRepoModel)
remote_repos[source_url].set_ref(merge_request.source_branch, merge_request.sha)
remote_repos[target_url].set_ref(merge_request.target_branch, initial_target_sha)
result = cls(
remote_url=target_url,
local_path='/tmp/blah',
ssh_key_file='/home/homer/.ssh/id_rsa',
timeout=timedelta(seconds=1000000),
reference='the_reference',
)
# pylint: disable=attribute-defined-outside-init
result.mock_impl = GitModel(origin=target_url, remote_repos=remote_repos)
return result
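    # Hedged usage sketch (merge_request and project stand in for the stub
    # objects the test suite builds; attribute names follow the code above):
    #
    #   repo = RepoMock.init_for_merge_request(merge_request,
    #                                          initial_target_sha='cafe',
    #                                          project=project)
    #   repo.git('fetch', '--prune', 'origin')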
def git(self, *args, from_repo=True):
command = args[0]
command_args = args[1:]
log.info('Run: git %r %s', command, ' '.join(map(repr, command_args)))
assert from_repo == (command != 'clone')
command_impl_name = command.replace('-', '_')
command_impl = getattr(self.mock_impl, command_impl_name, None)
assert command_impl, ('git: Unexpected command %s' % command)
try:
result = command_impl(*command_args)
except Exception:
log.warning('Failed to simulate: git %r %s', command, command_args)
raise
else:
return self._pretend_result_comes_from_popen(result)
@staticmethod
def _pretend_result_comes_from_popen(result):
result_bytes = ('' if result is None else str(result)).encode('ascii')
return stub(stdout=result_bytes)
class stub: # pylint: disable=invalid-name,too-few-public-methods
def __init__(self, **kwargs):
self.__dict__ = kwargs
class GitRepoModel:
def __init__(self, copy_of=None):
# pylint: disable=protected-access
self._refs = dict(copy_of._refs) if copy_of else {}
def set_ref(self, ref, commit):
self._refs[ref] = commit
def get_ref(self, ref):
return self._refs[ref]
def has_ref(self, ref):
return ref in self._refs
def del_ref(self, ref):
self._refs.pop(ref, None)
def __repr__(self):
return "<%s: %s>" % (type(self), self._refs)
class GitModel:
def __init__(self, origin, remote_repos):
assert origin in remote_repos
self.remote_repos = remote_repos
self._local_repo = GitRepoModel()
self._remotes = dict(origin=origin)
self._remote_refs = {}
self._branch = None
self.on_push_callbacks = []
@property
def _head(self):
return self._local_repo.get_ref(self._branch)
def remote(self, *args):
action = args[0]
if action == 'rm':
_, remote = args
try:
self._remotes.pop(remote)
except KeyError as err:
raise git.GitError('No such remote: %s' % remote) from err
elif action == 'add':
_, remote, url = args
self._remotes[remote] = url
else:
assert False, args
def fetch(self, *args):
_, remote_name = args
assert args == ('--prune', remote_name)
remote_url = self._remotes[remote_name]
remote_repo = self.remote_repos[remote_url]
self._remote_refs[remote_name] = GitRepoModel(copy_of=remote_repo)
def checkout(self, *args):
if args[0] == '-B': # -B == create if it doesn't exist
_, branch, start_point, _ = args
assert args == ('-B', branch, start_point, '--')
assert start_point == '' or '/' in start_point # '' when "local"
# create if it doesn't exist
if not self._local_repo.has_ref(branch):
if start_point:
remote_name, remote_branch = start_point.split('/')
assert remote_branch == branch
remote_url = self._remotes[remote_name]
remote_repo = self.remote_repos[remote_url]
commit = remote_repo.get_ref(branch)
self._local_repo.set_ref(branch, commit)
else:
self._local_repo.set_ref(branch, self._head)
else:
branch, _ = args
assert args == (branch, '--')
assert self._local_repo.has_ref(branch)
# checkout
self._branch = branch
def branch(self, *args):
if args[0] == "-D":
_, branch = args
assert self._branch != branch
self._local_repo.del_ref(branch)
else:
assert False
def rev_parse(self, arg):
if arg == 'HEAD':
return self._head
remote, branch = arg.split('/')
return self._remote_refs[remote].get_ref(branch)
def rebase(self, arg):
remote, branch = arg.split('/')
new_base = self._remote_refs[remote].get_ref(branch)
if new_base != self._head:
new_sha = 'rebase(%s onto %s)' % (self._head, new_base)
self._local_repo.set_ref(self._branch, new_sha)
def merge(self, arg):
remote, branch = arg.split('/')
other_ref = self._remote_refs[remote].get_ref(branch)
if other_ref != self._head:
new_sha = 'merge(%s with %s)' % (self._head, other_ref)
self._local_repo.set_ref(self._branch, new_sha)
def push(self, *args):
force_flag, remote_name, refspec = args
assert force_flag in ('', '--force')
branch, remote_branch = refspec.split(':')
remote_url = self._remotes[remote_name]
remote_repo = self.remote_repos[remote_url]
old_sha = remote_repo.get_ref(remote_branch)
new_sha = self._local_repo.get_ref(branch)
if force_flag:
remote_repo.set_ref(remote_branch, new_sha)
else:
expected_remote_sha = self._remote_refs[remote_name].get_ref(remote_branch)
if old_sha != expected_remote_sha:
raise git.GitError("conflict: can't push")
remote_repo.set_ref(remote_branch, new_sha)
for callback in self.on_push_callbacks:
callback(
remote_url=remote_url,
remote_branch=remote_branch,
old_sha=old_sha,
new_sha=new_sha,
)
def config(self, *args):
assert len(args) == 2 and args[0] == '--get'
_, remote, _ = elems = args[1].split('.')
assert elems == ['remote', remote, 'url'], elems
return self._remotes[remote]
def diff_index(self, *args):
assert args == ('--quiet', 'HEAD')
# we don't model dirty index
def ls_files(self, *args):
assert args == ('--others',)
# we don't model untracked files
def filter_branch(self, *args):
_, _, filter_cmd, commit_range = args
assert args == ('--force', '--msg-filter', filter_cmd, commit_range)
trailers_var, python, script_path = shlex.split(filter_cmd)
_, trailers_str = trailers_var.split('=')
assert trailers_var == "TRAILERS=%s" % trailers_str
assert python == "python3"
assert script_path.endswith("marge/trailerfilter.py")
trailers = list(sorted(set(line.split(':')[0] for line in trailers_str.split('\n'))))
assert trailers
new_sha = functools.reduce(
lambda x, f: "add-%s(%s)" % (f, x),
[trailer.lower() for trailer in trailers],
self._head
)
self._local_repo.set_ref(self._branch, new_sha)
return new_sha
| bsd-3-clause | -5,253,496,412,043,105,000 | 32.367769 | 101 | 0.560991 | false | 3.773364 | false | false | false |
ph1l/halo_radio | WebRoot/Preferences.py | 1 | 2083 | import HaloRadio.TopWeb as TopWeb
import HaloRadio.StyleListMaker as StyleListMaker
import HaloRadio.Style as Style
class plugin(TopWeb.TopWeb):
def GetReqs(self):
return "amv"
def handler(self, context):
import HaloRadio.UserSongStatsListMaker as UserSongStatsListMaker
import HaloRadio.UserSongStats as UserSongStats
import HaloRadio.Song as Song
import HaloRadio.User as User
import HaloRadio.Exception as Exception
        # Which user's preferences to edit: explicit id from the form, else the logged-in user
if self.form.has_key("id"):
userid = int(self.form['id'].value)
else:
userid = self.user.id
user = User.User(userid)
if (self.do_authorize(self.user.rights, "a")):
is_user=0
canmod_user=1
ro_user=0
canmod_rights=1
ro_rights=0
canmod_email=1
ro_email=0
canmod_passwd=1
ro_passwd=0
elif (self.do_authorize(self.user.rights, "m")):
is_user=0
canmod_user=0
ro_user=1
canmod_rights=0
ro_rights=1
canmod_email=1
ro_email=0
canmod_passwd=1
ro_passwd=0
else:
is_user=1
canmod_user=0
ro_user=1
canmod_rights=0
ro_rights=1
canmod_email=0
ro_email=1
canmod_passwd=1
ro_passwd=0
context.addGlobal ("is_user", is_user)
context.addGlobal ("canmod_user", canmod_user )
context.addGlobal ("ro_user", ro_user)
context.addGlobal ("canmod_rights", canmod_rights)
context.addGlobal ("ro_rights", ro_rights)
context.addGlobal ("canmod_email", canmod_email)
context.addGlobal ("ro_email", ro_email)
context.addGlobal ("canmod_passwd", canmod_passwd)
context.addGlobal ("ro_passwd", ro_passwd)
context.addGlobal ("userid", userid )
context.addGlobal ("username", user.name )
context.addGlobal ("email", user.email )
context.addGlobal ("rights", user.rights )
context.addGlobal ("createdate", user.create_time )
slm = StyleListMaker.StyleListMaker()
slm.GetAll()
styles = []
for styleid in slm.list:
style = Style.Style(styleid)
entity = {}
entity['style'] = style.GetName()
entity['id'] = style.GetId()
styles.append(entity)
context.addGlobal ("styles", styles )
| gpl-2.0 | -5,218,146,967,847,114,000 | 26.773333 | 67 | 0.68555 | false | 2.849521 | false | false | false |
CommonsDev/dataserver | commons/migrations/0006_auto__chg_field_pertinence_comment.py | 1 | 7545 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Pertinence.comment'
db.alter_column(u'commons_pertinence', 'comment', self.gf('django.db.models.fields.TextField')(null=True))
def backwards(self, orm):
# Changing field 'Pertinence.comment'
db.alter_column(u'commons_pertinence', 'comment', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
models = {
'commons.pertinence': {
'Meta': {'object_name': 'Pertinence'},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.Project']"}),
'usage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['commons.Usage']"})
},
'commons.prestation': {
'Meta': {'object_name': 'Prestation'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'modules'", 'symmetrical': 'False', 'through': "orm['commons.SelectedModules']", 'to': "orm['commons.PrestationModule']"}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'commons.prestationmodule': {
'Meta': {'object_name': 'PrestationModule'},
'commonsretribution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'commonsselected': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'prestation_module'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['projects.Project']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'providerretribution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'providersupport': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'commons.selectedmodules': {
'Meta': {'object_name': 'SelectedModules'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modules': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['commons.PrestationModule']"}),
'prestation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['commons.Prestation']"})
},
'commons.usage': {
'Meta': {'object_name': 'Usage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'project': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['projects.Project']", 'through': "orm['commons.Pertinence']", 'symmetrical': 'False'})
},
u'projects.project': {
'Meta': {'object_name': 'Project'},
'baseline': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'begin_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['scout.Place']", 'null': 'True', 'blank': 'True'}),
'progress': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectProgress']", 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': 'None', 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'projects.projectprogress': {
'Meta': {'ordering': "['order']", 'object_name': 'ProjectProgress'},
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'progress_range': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['projects.ProjectProgressRange']"})
},
u'projects.projectprogressrange': {
'Meta': {'object_name': 'ProjectProgressRange'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'name'", 'unique_with': '()'})
},
u'scout.place': {
'Meta': {'object_name': 'Place'},
'address': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place'", 'to': u"orm['scout.PostalAddress']"}),
'geo': ('django.contrib.gis.db.models.fields.PointField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'scout.postaladdress': {
'Meta': {'object_name': 'PostalAddress'},
'address_locality': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'address_region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_office_box_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'street_address': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['commons'] | agpl-3.0 | 8,106,612,999,738,941,000 | 70.188679 | 219 | 0.557323 | false | 3.636145 | false | false | false |
edubecks/vaidecaronaorg | caronasbrasilapp/djangoapp/apps/caronasbrasil/migrations/0009_auto__del_field_caronamodel_date__add_field_caronamodel_from_datetime_.py | 1 | 3750 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'CaronaModel.date'
db.delete_column(u'caronasbrasil_caronamodel', 'date')
# Adding field 'CaronaModel.from_datetime'
db.add_column(u'caronasbrasil_caronamodel', 'from_datetime',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 10, 30, 0, 0)),
keep_default=False)
# Adding field 'CaronaModel.to_datetime'
db.add_column(u'caronasbrasil_caronamodel', 'to_datetime',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 10, 30, 0, 0)),
keep_default=False)
def backwards(self, orm):
# Adding field 'CaronaModel.date'
db.add_column(u'caronasbrasil_caronamodel', 'date',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 10, 30, 0, 0)),
keep_default=False)
# Deleting field 'CaronaModel.from_datetime'
db.delete_column(u'caronasbrasil_caronamodel', 'from_datetime')
# Deleting field 'CaronaModel.to_datetime'
db.delete_column(u'caronasbrasil_caronamodel', 'to_datetime')
models = {
u'caronasbrasil.caronagroupmodel': {
'Meta': {'object_name': 'CaronaGroupModel'},
'city1': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'city1_list': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'city1_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'city2': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'city2_list': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'city2_state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'fb_group_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'caronasbrasil.caronamodel': {
'Meta': {'object_name': 'CaronaModel'},
'destiny': ('django.db.models.fields.CharField', [], {'max_length': '33'}),
'fb_content': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'fb_group_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'fb_post_id': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'from_datetime': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_vagas': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'ofereco_procuro': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '33'}),
'to_datetime': ('django.db.models.fields.DateTimeField', [], {})
},
u'caronasbrasil.parsererrorsmodel': {
'Meta': {'object_name': 'ParserErrorsModel'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'fb_group_id': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'fb_post_id': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['caronasbrasil'] | mit | 5,747,936,618,193,574,000 | 51.097222 | 118 | 0.574667 | false | 3.501401 | false | false | false |
davidecaminati/Handcart-lift-rotary | Python/facedetect_mio.py | 1 | 3991 | #!/usr/bin/env python
import numpy as np
import cv2
import cv2.cv as cv
from multiprocessing.pool import ThreadPool
from video import create_capture
from common import clock, draw_str
from pyfirmata import Arduino, util
ArduinoPresent = False
if ArduinoPresent :
board = Arduino('/dev/ttyACM0')
#board.digital[2].write(1)
#board.digital[4].write(1)
help_message = '''
USAGE: facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<video_source>]
'''
minsize_occhi = 60
def rotateImage(image, angle):
row,col = image.shape
center=tuple(np.array([row,col])/2)
rot_mat = cv2.getRotationMatrix2D(center,angle,1.0)
new_image = cv2.warpAffine(image, rot_mat, (col,row))
return new_image
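# Note: rotateImage assumes a single-channel (grayscale) image, since it
# unpacks image.shape into exactly (row, col). A hypothetical call on one of
# the equalized frames produced in the main loop below:
#   tilted = rotateImage(gray, 15)   # rotate by 15 degrees around the center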
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.2, minNeighbors=4, minSize=(minsize_occhi, minsize_occhi), flags = cv.CV_HAAR_SCALE_IMAGE)
if len(rects) == 0:
return []
rects[:,2:] += rects[:,:2]
return rects
def draw_rects(img, rects, color):
for x1, y1, x2, y2 in rects:
cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
if __name__ == '__main__':
import sys, getopt
#print help_message
args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
try: video_src = video_src[0]
except: video_src = 0
args = dict(args)
#cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_frontalface_alt.xml")
#nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")
cascade_fn = args.get('--cascade', "../../data/haarcascades/haarcascade_eye.xml")
#nested_fn = args.get('--nested-cascade', "../../data/haarcascades/haarcascade_eye.xml")
cascade = cv2.CascadeClassifier(cascade_fn)
#nested = cv2.CascadeClassifier(nested_fn)
cam = create_capture(video_src, fallback='synth:bg=../cpp/lena.jpg:noise=0.05')
numero = 0
while True:
ret, img = cam.read()
#gray = img[200:400,100:400]
#gray = img[100:300,100:300]
gray = img[100:400,100:500]
gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
gray = cv2.equalizeHist(gray)
t = clock()
rects = detect(gray, cascade)
vis = gray.copy()
draw_rects(vis, rects, (0, 255, 0))
if ArduinoPresent:
board.digital[4].write(0)
board.digital[2].write(0)
for x1, y1, x2, y2 in rects:
#roi = gray[y1:y2, x1:x2]
#vis_roi = vis[y1:y2, x1:x2]
numero = numero + 1
larghezza = x2-x1
altezza = y2-y1
'''
if x1 >= 150: #dx
if ArduinoPresent:
board.digital[2].write(1)
dx = cv2.getRectSubPix(vis, (larghezza, altezza),(x1+larghezza/2,y1+altezza/2))
cv2.imshow('dx', dx)
'''
if ArduinoPresent:
board.digital[4].write(1)
sx = cv2.getRectSubPix(vis, (larghezza, altezza),(x1+larghezza/2,y1+altezza/2))
#edges = cv2.Canny(sx,100,300)
#cv2.imshow('sx', edges)
cv2.imshow('sx', sx)
#file = "/home/pi/opencv-2.4.10/samples/python2/occhi/test_image" + str(numero) + ".png"
# A nice feature of the imwrite method is that it will automatically choose the
# correct format based on the file extension you provide. Convenient!
#cv2.imwrite(file, sx)
#subrects = detect(roi.copy(), nested)
#draw_rects(vis_roi, subrects, (255, 0, 0))
dt = clock() - t
draw_str(vis, (20, 20), 'time: %.1f ms' % (dt*1000))
cv2.imshow('facedetect', vis)
if 0xFF & cv2.waitKey(5) == 27:
break
cv2.destroyAllWindows()
| gpl-2.0 | -5,949,255,796,883,102,000 | 29.7 | 146 | 0.560261 | false | 3.084235 | false | false | false |
peoplepower/composer-sdk-python | com.ppc.Microservices/intelligence/data_request/tools/download_data.py | 1 | 5155 | #!/usr/bin/env python
# encoding: utf-8
'''
Created on January 4, 2019
@author: David Moss
'''
# Data Stream Address
DATASTREAM_ADDRESS = "download_data"
# Data Stream Content
DATASTREAM_CONTENT = {
"force": True
}
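# Hedged example invocations (hostname and key are placeholders, not real
# credentials); the script posts DATASTREAM_CONTENT to DATASTREAM_ADDRESS:
#
#   python download_data.py -s app.presencepro.com -a YOUR_API_KEY
#   python download_data.py -u [email protected] -p secret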
# input function behaves differently in Python 2.x and 3.x. And there is no raw_input in 3.x.
if hasattr(__builtins__, 'raw_input'):
input=raw_input
import requests
import sys
import json
import logging
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
def main(argv=None):
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-u", "--username", dest="username", help="Username")
parser.add_argument("-p", "--password", dest="password", help="Password")
parser.add_argument("-s", "--server", dest="server", help="Base server URL (app.presencepro.com)")
parser.add_argument("-a", "--api_key", dest="apikey", help="User's API key instead of a username/password")
parser.add_argument("--httpdebug", dest="httpdebug", action="store_true", help="HTTP debug logger output");
# Process arguments
args = parser.parse_args()
# Extract the arguments
username = args.username
password = args.password
server = args.server
httpdebug = args.httpdebug
app_key = args.apikey
# Define the bot server
if not server:
server = "https://app.presencepro.com"
if "http" not in server:
server = "https://" + server
# HTTP Debugging
if httpdebug:
try:
import http.client as http_client
except ImportError:
# Python 2
import httplib as http_client
http_client.HTTPConnection.debuglevel = 1
# You must initialize logging, otherwise you'll not see debug output.
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
# Login to your user account
if app_key is None:
app_key, user_info = _login(server, username, password)
send_datastream_message(server, app_key, DATASTREAM_ADDRESS, DATASTREAM_CONTENT)
print("Done!")
def send_datastream_message(server, app_key, address, content):
http_headers = {"API_KEY": app_key, "Content-Type": "application/json"}
params = {
"address": address,
"organizational": 1
}
body = {
"feed": content
}
print("Body: " + json.dumps(body, indent=2, sort_keys=True))
print("Server: " + server)
r = requests.post(server + "/cloud/appstore/stream/", params=params, data=json.dumps(body), headers=http_headers)
j = json.loads(r.text)
_check_for_errors(j)
print(str(r.text))
def _login(server, username, password):
"""Get an Bot API key and User Info by login with a username and password"""
if not username:
username = input('Email address: ')
if not password:
import getpass
password = getpass.getpass('Password: ')
try:
import requests
# login by username and password
http_headers = {"PASSWORD": password, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/login", params={"username":username}, headers=http_headers)
j = json.loads(r.text)
_check_for_errors(j)
app_key = j['key']
# get user info
http_headers = {"PRESENCE_API_KEY": app_key, "Content-Type": "application/json"}
r = requests.get(server + "/cloud/json/user", headers=http_headers)
j = json.loads(r.text)
_check_for_errors(j)
return app_key, j
except BotError as e:
sys.stderr.write("Error: " + e.msg)
sys.stderr.write("\nCreate an account on " + server + " and use it to sign in")
sys.stderr.write("\n\n")
raise e
def _check_for_errors(json_response):
"""Check some JSON response for BotEngine errors"""
if not json_response:
raise BotError("No response from the server!", -1)
if json_response['resultCode'] > 0:
msg = "Unknown error!"
if 'resultCodeMessage' in json_response.keys():
msg = json_response['resultCodeMessage']
elif 'resultCodeDesc' in json_response.keys():
msg = json_response['resultCodeDesc']
raise BotError(msg, json_response['resultCode'])
del(json_response['resultCode'])
class BotError(Exception):
"""BotEngine exception to raise and log errors."""
def __init__(self, msg, code):
        super(BotError, self).__init__(msg)  # the original unbound super() call never reached Exception.__init__
self.msg = msg
self.code = code
def __str__(self):
return self.msg
def __unicode__(self):
return self.msg
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -2,137,572,307,962,109,000 | 26.275132 | 117 | 0.60582 | false | 3.983771 | false | false | false |
eonpatapon/lollypop | src/database_upgrade.py | 1 | 1934 | #!/usr/bin/python
# Copyright (c) 2014-2015 Cedric Bellegarde <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from lollypop.sqlcursor import SqlCursor
class DatabaseUpgrade:
"""
Manage database schema upgrades
"""
def __init__(self, version, db):
"""
Init object
@param version as int
@param db as Database
"""
self._version = version
self._db = db
# Here are schema upgrade, key is database version,
# value is sql request
self._UPGRADES = {
1: "update tracks set duration=CAST(duration as INTEGER);",
2: "update albums set artist_id=-2001 where artist_id=-999;"
}
"""
Return upgrade count
@return int
"""
def count(self):
return len(self._UPGRADES)
"""
Upgrade database based on version
@return new db version as int
"""
def do_db_upgrade(self):
with SqlCursor(self._db) as sql:
for i in range(self._version+1, len(self._UPGRADES)+1):
try:
sql.execute(self._UPGRADES[i])
except Exception as e:
print("Database upgrade failed: ", e)
sql.commit()
return len(self._UPGRADES)
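# Hedged usage sketch (Database comes from the surrounding application; the
# current schema version would normally be read from persistent settings):
#
#   upgrade = DatabaseUpgrade(current_version, db)
#   if current_version < upgrade.count():
#       new_version = upgrade.do_db_upgrade()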
| gpl-3.0 | 8,553,216,954,296,847,000 | 33.535714 | 76 | 0.61272 | false | 4.186147 | false | false | false |
Linaro/squad | squad/frontend/comparison.py | 1 | 4547 | from functools import reduce
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator
from django.db.models import Q, Prefetch
from squad.core.models import Project, Group, Build
from squad.core.comparison import TestComparison, MetricComparison
from squad.frontend.utils import alphanum_sort
RESULT_STATES = ['pass', 'fail', 'xfail', 'skip', 'n/a']
TRANSITIONS = {(_from, _to): False for _from in RESULT_STATES for _to in RESULT_STATES}
DEFAULT_CHECKED_TRANSITIONS = [('pass', 'fail'), ('fail', 'pass')]
def __get_comparison_class(comparison_type):
if 'metric' == comparison_type:
return MetricComparison
else:
return TestComparison
def __paginate(results, request):
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
paginator = Paginator(tuple(results.items()), 50)
return paginator.page(page)
def __get_transitions(request):
transitions = TRANSITIONS.copy()
marked_transitions = request.GET.getlist('transitions', [])
if 'ignore' in marked_transitions:
return {}
if len(marked_transitions) > 0:
for t in marked_transitions:
if t is None or t == 'None':
continue
_from, _to = t.split(':')
if _from in RESULT_STATES and _to in RESULT_STATES:
transitions[(_from, _to)] = True
else:
for default in DEFAULT_CHECKED_TRANSITIONS:
transitions[(default[0], default[1])] = True
return transitions
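# Hedged example of the query-string format parsed above (parameter names are
# the ones this view reads; values are illustrative):
#
#   ?transitions=pass:fail&transitions=fail:pass   -> only those transitions
#   ?transitions=ignore                            -> empty dict, no filtering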
def compare_projects(request):
comparison = None
group = None
projects = None
comparison_type = request.GET.get('comparison_type', 'test')
transitions = __get_transitions(request)
group_slug = request.GET.get('group')
if group_slug:
group = get_object_or_404(Group, slug=group_slug)
qs = group.projects.accessible_to(request.user).prefetch_related(
Prefetch('builds', queryset=Build.objects.order_by('-datetime'))
)
projects = alphanum_sort(qs, 'slug')
filters = []
for key, value in request.GET.items():
if 'project_' in key and len(key.split('_')) == 2:
project_id = key.split('_')[1]
filters.append(Q(project_id=project_id) & Q(version=value))
if len(filters) > 1:
build_filters = reduce(lambda x, y: x | y, filters)
builds = Build.objects.filter(build_filters)
comparison_class = __get_comparison_class(comparison_type)
comparison = comparison_class.compare_builds(*builds)
if comparison_type == 'test' and len(transitions):
comparison.apply_transitions([t for t, checked in transitions.items() if checked])
comparison.results = __paginate(comparison.results, request)
context = {
'group': group,
'projects': projects,
'comparison': comparison,
'comparison_type': comparison_type,
'transitions': transitions,
}
return render(request, 'squad/compare_projects.jinja2', context)
def compare_test(request):
context = {}
return render(request, 'squad/compare.jinja2', context)
def compare_builds(request):
project_slug = request.GET.get('project')
comparison_type = request.GET.get('comparison_type', 'test')
transitions = __get_transitions(request)
comparison = None
project = None
if project_slug:
group_slug, project_slug = project_slug.split('/')
project = get_object_or_404(Project, group__slug=group_slug, slug=project_slug)
baseline_build = request.GET.get('baseline')
target_build = request.GET.get('target')
if baseline_build and target_build:
baseline = get_object_or_404(project.builds, version=baseline_build)
target = get_object_or_404(project.builds, version=target_build)
comparison_class = __get_comparison_class(comparison_type)
comparison = comparison_class.compare_builds(baseline, target)
if comparison_type == 'test' and len(transitions):
comparison.apply_transitions([t for t, checked in transitions.items() if checked])
comparison.results = __paginate(comparison.results, request)
context = {
'project': project,
'comparison': comparison,
'comparison_type': comparison_type,
'transitions': transitions,
}
return render(request, 'squad/compare_builds.jinja2', context)
| agpl-3.0 | -3,463,290,576,986,732,000 | 32.932836 | 98 | 0.635144 | false | 4.0097 | true | false | false |
EnviroCentre/jython-upgrade | jython/lib/site-packages/pip/download.py | 1 | 26290 | import cgi
import email.utils
import hashlib
import getpass
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
import pip
from pip.backwardcompat import urllib, urlparse, raw_input
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.compat import IncompleteRead
from pip._vendor.requests.exceptions import InvalidURL, ChunkedEncodingError
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def user_agent():
"""Return a string representing the user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([
_implementation_version,
sys.pypy_version_info.releaselevel,
])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['pip/%s' % pip.__version__,
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urlparse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.split("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simple return the response
if not self.prompting:
return resp
parsed = urlparse.urlparse(resp.url)
# Prompt the user for a new username and password
username = raw_input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
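# Hedged usage sketch (standalone requests usage; inside pip this handler is
# attached automatically by PipSession below):
#
#   session = requests.Session()
#   session.auth = MultiDomainBasicAuth()
#   session.get("https://user:[email protected]/simple/")
#   # credentials are stripped from the URL and cached per-netloc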
class LocalFSResponse(object):
def __init__(self, fileobj):
self.fileobj = fileobj
def __getattr__(self, name):
return getattr(self.fileobj, name)
def read(self, amt=None, decode_content=None, cache_content=False):
return self.fileobj.read(amt)
# Insert Hacks to Make Cookie Jar work w/ Requests
@property
def _original_response(self):
class FakeMessage(object):
def getheaders(self, header):
return []
def get_all(self, header, default):
return []
class FakeResponse(object):
@property
def msg(self):
return FakeMessage()
return FakeResponse()
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
parsed_url = urlparse.urlparse(request.url)
# We only work for requests with a host of localhost
if parsed_url.netloc.lower() != "localhost":
raise InvalidURL(
"Invalid URL %r: Only localhost is allowed" %
request.url
)
real_url = urlparse.urlunparse(parsed_url[:1] + ("",) + parsed_url[2:])
pathname = url_to_path(real_url)
resp = Response()
resp.status_code = 200
resp.url = real_url
stats = os.stat(pathname)
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
resp.headers = CaseInsensitiveDict({
"Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = LocalFSResponse(open(pathname, "rb"))
resp.close = resp.raw.close
return resp
def close(self):
pass
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
retries = kwargs.pop('retries', None)
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Configure retries
if retries:
http_adapter = requests.adapters.HTTPAdapter(max_retries=retries)
self.mount("http://", http_adapter)
self.mount("https://", http_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
def request(self, method, url, *args, **kwargs):
# Make file:// urls not fail due to lack of a hostname
parsed = urlparse.urlparse(url)
if parsed.scheme == "file":
url = urlparse.urlunparse(parsed[:1] + ("localhost",) + parsed[2:])
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
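# Hedged usage sketch (timeout and retry counts are illustrative):
#
#   session = PipSession(retries=3)
#   session.timeout = 15
#   resp = session.get("https://pypi.python.org/simple/pip/")
#   resp.raise_for_status()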
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
session = PipSession()
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
f = open(url)
content = f.read()
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
else:
f.close()
return url, content
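# Hedged usage sketch (the URL is illustrative):
#
#   url, content = get_file_content("file:///tmp/requirements.txt",
#                                   session=PipSession())
#   for line in content.splitlines():
#       ...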
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
path = url[len('file:'):].lstrip('/')
path = urllib.unquote(path)
if _url_drive_re.match(path):
path = path[0] + ':' + path[2:]
else:
path = '/' + path
return path
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url
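# Hedged round-trip examples (POSIX paths; Windows drive letters follow the
# regexes above):
#
#   path_to_url('/tmp/some file')            -> 'file:///tmp/some%20file'
#   url_to_path('file:///tmp/some%20file')   -> '/tmp/some file'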
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
archives = (
'.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle', '.whl'
)
ext = splitext(name)[1].lower()
if ext in archives:
return True
return False
def unpack_vcs_link(link, location, only_download=False):
vcs_backend = _get_used_vcs_backend(link)
if only_download:
vcs_backend.export(location)
else:
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.fatal(
"Hash digest size of the package %d (%s) doesn't match the "
"expected hash name %s!" %
(download_hash.digest_size, link, link.hash_name)
)
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.fatal(
"Hash of the package %s (%s) doesn't match the expected hash %s!" %
(link, download_hash.hexdigest(), link.hash)
)
raise HashMismatch(
'Bad %s hash for package %s' % (link.hash_name, link)
)
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn(
"Unsupported hash name %s for package %s" % (link.hash_name, link)
)
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
fp = open(temp_location, 'wb')
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warn(
"Unsupported hash name %s for package %s" %
(link.hash_name, link)
)
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
downloaded = 0
show_progress = total_length > 40 * 1000 or not total_length
show_url = link.show_url
try:
if show_progress:
# FIXME: the URL can get really long in this message:
if total_length:
logger.start_progress(
'Downloading %s (%s): ' %
(show_url, format_size(total_length))
)
else:
logger.start_progress(
'Downloading %s (unknown size): ' % show_url
)
else:
logger.notify('Downloading %s' % show_url)
logger.info('Downloading from URL %s' % link)
def resp_read(chunk_size):
try:
# Special case for urllib3.
try:
for chunk in resp.raw.stream(
chunk_size,
# We use decode_content=False here because we do
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
for chunk in resp_read(4096):
downloaded += len(chunk)
if show_progress:
if not total_length:
logger.show_progress('%s' % format_size(downloaded))
else:
logger.show_progress(
'%3i%% %s' %
(
100 * downloaded / total_length,
format_size(downloaded)
)
)
if download_hash is not None:
download_hash.update(chunk)
fp.write(chunk)
fp.close()
finally:
if show_progress:
logger.end_progress('%s downloaded' % format_size(downloaded))
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warn(
'Backing up %s to %s' %
(display_path(download_location), display_path(dest_file))
)
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None,
session=None):
if session is None:
session = PipSession()
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
temp_location = None
target_url = link.url.split('#', 1)[0]
already_cached = False
cache_file = None
cache_content_type_file = None
download_hash = None
# If a download cache is specified, is the file cached there?
if download_cache:
cache_file = os.path.join(
download_cache,
urllib.quote(target_url, '')
)
cache_content_type_file = cache_file + '.content-type'
already_cached = (
os.path.exists(cache_file) and
os.path.exists(cache_content_type_file)
)
if not os.path.isdir(download_cache):
create_download_cache_folder(download_cache)
# If a download dir is specified, is the file already downloaded there?
already_downloaded = None
if download_dir:
already_downloaded = os.path.join(download_dir, link.filename)
if not os.path.exists(already_downloaded):
already_downloaded = None
# If already downloaded, does its hash match?
if already_downloaded:
temp_location = already_downloaded
content_type = mimetypes.guess_type(already_downloaded)[0]
logger.notify('File was already downloaded %s' % already_downloaded)
if link.hash:
download_hash = _get_hash_from_file(temp_location, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(already_downloaded)
already_downloaded = None
# If not a valid download, let's confirm the cached file is valid
if already_cached and not temp_location:
with open(cache_content_type_file) as fp:
content_type = fp.read().strip()
temp_location = cache_file
logger.notify('Using download cache from %s' % cache_file)
if link.hash and link.hash_name:
download_hash = _get_hash_from_file(cache_file, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Cached file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(cache_file)
os.unlink(cache_content_type_file)
already_cached = False
# We don't have either a cached or a downloaded copy
# let's download to a tmp dir
if not temp_location:
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
# By setting this to request only the identity encoding We're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.fatal("HTTP error %s while getting %s" %
(exc.response.status_code, link))
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
temp_location = os.path.join(temp_dir, filename)
download_hash = _download_url(resp, link, temp_location)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded:
_copy_file(temp_location, download_dir, content_type, link)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(temp_location, location, content_type, link)
# if using a download cache, cache it, if needed
if cache_file and not already_cached:
cache_download(cache_file, temp_location, content_type)
if not (already_cached or already_downloaded):
os.unlink(temp_location)
os.rmdir(temp_dir)
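# Minimal sketch of the request made above, reduced to the two settings the
# long comment explains (assumes only the public `requests` API; the URL and
# chunk handling are placeholders):
#
#     import requests
#     resp = requests.get(url, headers={"Accept-Encoding": "identity"},
#                         stream=True)
#     for chunk in resp.raw.stream(4096, decode_content=False):
#         pass  # hash and write each chunk exactly as served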
def unpack_file_url(link, location, download_dir=None):
link_path = url_to_path(link.url_without_fragment)
already_downloaded = False
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
if download_dir:
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
content_type = mimetypes.guess_type(download_path)[0]
logger.notify('File was already downloaded %s' % download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
already_downloaded = True
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % link_path
)
os.unlink(download_path)
else:
already_downloaded = True
if already_downloaded:
from_path = download_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded:
_copy_file(from_path, download_dir, content_type, link)
| mit | -5,345,007,701,288,024,000 | 35.0631 | 79 | 0.568581 | false | 4.304191 | false | false | false |
cartologic/cartoview | cartoview/apps_handler/config.py | 1 | 4552 | # -*- coding: utf-8 -*-
import json
import os
from collections import Mapping
import portalocker
class AppsDict(Mapping):
def __init__(self, *args, **kw):
self._app_data = dict(*args, **kw)
def __setitem__(self, key, item):
self._app_data[key] = item
self.__sort_apps()
def __getitem__(self, key):
return self._app_data[key]
def __repr__(self):
return repr(self._app_data)
def __len__(self):
return len(self._app_data)
def __delitem__(self, key):
del self._app_data[key]
def clear(self):
return self._app_data.clear()
def copy(self):
return self._app_data.copy()
def has_key(self, k):
return k in self._app_data
def update(self, *args, **kwargs):
self._app_data.update(*args, **kwargs)
self.__sort_apps()
def keys(self):
return self._app_data.keys()
def values(self):
return self._app_data.values()
def items(self):
return self._app_data.items()
def pop(self, *args):
return self._app_data.pop(*args)
    def __cmp__(self, dict_):
        # Python 2 comparison hook; compare the wrapped dict directly
        # instead of recursing into this method.
        return cmp(self._app_data, dict_)
def __contains__(self, item):
return item in self._app_data
def __iter__(self):
return iter(self._app_data)
def __unicode__(self):
return str(repr(self._app_data))
def __sort_apps(self):
self._app_data = dict(
sorted(self._app_data.items(), key=lambda item: item[1].order))
def to_json(self):
data = {k: v.to_dict() for k, v in self._app_data.items()}
return json.dumps(data, indent=4, sort_keys=True)
def from_json(self, data):
def cartoview_app_dict(name, data):
d = {'name': name}
d.update(data)
return d
try:
apps = json.loads(data)
self._app_data = {
k: CartoviewApp(cartoview_app_dict(k, v))
for k, v in apps.items()
}
self.__sort_apps()
return self._app_data
except BaseException:
return AppsDict()
def get_active_apps(self):
return {k: v for k, v in self._app_data.items() if v.active}
def get_pending_apps(self):
return {k: v for k, v in self._app_data.items() if v.pending}
def app_exists(self, app_name):
return self._app_data.get(app_name, None)
class CartoviewApp(object):
app_attrs = frozenset(['name', 'active', 'pending', 'order'])
objects = AppsDict()
apps_dir = None
    def __init__(self, data):
        # Reject anything that is not a non-empty dict.
        if not data or not isinstance(data, dict):
            raise ValueError("data must be dict type")
for k, v in data.items():
setattr(self, k, v)
self._validate()
self.cleanup()
self.commit()
def _validate(self):
for attr in CartoviewApp.app_attrs:
if not hasattr(self, attr):
raise ValueError('attr {} not found'.format(attr))
    def cleanup(self):
        # Drop any instance attribute that is not a recognized app attr;
        # iterate over a copy since attributes are deleted while looping.
        for attr in list(vars(self).keys()):
            if attr not in [
                    'objects', 'app_attrs'
            ] and attr not in CartoviewApp.app_attrs and (
                    not attr.startswith('_')):
                delattr(self, attr)
    def __setattr__(self, name, value):
        if name in ('objects', 'app_attrs'):
            raise ValueError(
                "{} should be altered using the class name".format(name))
if name not in CartoviewApp.app_attrs:
raise AttributeError("attribute '{}' not found ".format(name))
super(CartoviewApp, self).__setattr__(name, value)
def to_dict(self):
return {
k: getattr(self, k)
for k in CartoviewApp.app_attrs if k != 'name'
}
@classmethod
def get_apps_json_path(cls):
return os.path.join(cls.apps_dir, 'apps.json')
def commit(self):
CartoviewApp.objects.update({self.name: self})
return self
@classmethod
def load(cls):
if os.path.exists(cls.get_apps_json_path()):
with portalocker.Lock(
cls.get_apps_json_path(), 'r',
portalocker.LOCK_EX) as jf:
data = jf.read()
CartoviewApp.objects.from_json(data)
@classmethod
def save(cls):
with portalocker.Lock(
cls.get_apps_json_path(), 'w',
portalocker.LOCK_EX) as jf:
data = CartoviewApp.objects.to_json()
jf.write(data)
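# Illustrative usage sketch (assumes a writable apps directory; the app name
# and path are made up):
#
#     CartoviewApp.apps_dir = '/opt/cartoview/apps'
#     CartoviewApp.load()    # read apps.json if it exists
#     CartoviewApp({'name': 'viewer', 'active': True,
#                   'pending': False, 'order': 1})  # commits itself on init
#     CartoviewApp.save()    # write apps.json back under a file lock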
| bsd-2-clause | -8,725,916,909,306,204,000 | 26.92638 | 75 | 0.541301 | false | 3.697807 | false | false | false |
sevenian3/ChromaStarPy | solartest.py | 1 | 6462 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 30 10:54:21 2017
@author: ishort
"""
#plotting:
import matplotlib
import matplotlib.pyplot as plt
#%matplotlib inline
import pylab
#General file for printing ad hoc quantities
#dbgHandle = open("debug.out", 'w')
#Get the data
dataPath = "SolFluxAtlas2005/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = ""
fields = [" " for i in range(2)]
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = dataPath + "fluxspliced.2005"
with open(inFile, 'r') as inputHandle:
#Expects number of records on first lines, then white space delimited columns of
#wavelengths in nm and continuum rectified fluxes
inLine = inputHandle.readline() #Special one-line header
print(inLine)
fields = inLine.split()
numStr = fields[0].strip() #first field is number of following records
num = int(numStr)
waveSun = [0.0 for i in range(num)]
fluxSun = [0.0 for i in range(num)]
for i in range(num):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
waveSun[i] = float(wavStr); fluxSun[i] = float(flxStr)
pylab.plot(waveSun, fluxSun, color='black')
#Now get the synthetic spectrum pre-computed with ChromaStarPy
modelPath = "Outputs/"
#outPath = absPath + "Outputs/"
numStr = ""
num = 0.0
wavStr = ""
flxStr = ""
inLine = " "
#fields = [" " for i in range(2)]
"""
runVers = "pyLoop"
#Model atmosphere
teffStr = "5777.0"
loggStr = "4.44"
logZStr = "0.0"
massStarStr = "1.0"
xiTStr = "1.0"
logHeFeStr = "0.0"
logCOStr = "0.0"
logAlphaFeStr = "0.0"
#Spectrum synthesis
lambdaStartStr = "390.0"
lambdaStopStr = "400.0"
lineThreshStr = "-3.0"
voigtThreshStr = "-3.0"
logGammaColStr = "0.5"
logKapFudgeStr = "0.0"
macroVStr = "1.0"
rotVStr = "2.0"
rotIStr = "90.0"
RVStr = "0.0"
strucStem = "Teff" + teffStr + "Logg" + loggStr + "Z" + logZStr + "M" + massStarStr+"xiT"+xiTStr + \
"HeFe" + logHeFeStr + "CO" + logCOStr + "AlfFe" + logAlphaFeStr + "v" + runVers
strucFile = "struc." + strucStem + ".out"
specFile = "spec." + strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"LThr"+lineThreshStr+ \
"GamCol"+logGammaColStr+"Mac"+macroVStr+"Rot"+rotVStr+"-"+rotIStr+"RV"+RVStr + ".out"
#with open("", 'r', encoding='utf-8') as inputHandle:
inFile = modelPath + specFile;
"""
project = "Project"
runVers = "Run"
teff = 5777.0
logg = 4.44
log10ZScale = 0.0
lambdaStart = 390.0
lambdaStop = 400.0
fileStem = project + "-"\
+ str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\
+ "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\
+ "-" + runVers
inFile = modelPath + fileStem + ".spec.txt"
invnAir = 1.0 / 1.000277 #// reciprocal of refractive index of air at STP
#numStr = fields[0].strip() #first field is number of following records
#num = int(numStr)
waveMod = []
fluxMod = []
wav = 0.0 #//initialization
wavStr = ""
lblStr = ""
with open(inFile, 'r') as inputHandle:
#Expects number of records on first lines, then white space delimited columns of
#wavelengths in nm and continuum rectified fluxes
inLine = inputHandle.readline() #line of header
print(inLine)
inLine = inputHandle.readline()
print(inLine)
fields = inLine.split()
#number of line IDs is last field:
numLineIdsStr = fields[len(fields)-1]
numLineIds = int(numLineIdsStr) - 1 # to be on safe side
print("Recovered that there are " + numLineIdsStr + " lines to ID")
inLine = inputHandle.readline()
print(inLine)
fields = inLine.split()
#number of wavelengths in spectrum is last field:
numWavsStr = fields[len(fields)-1]
numWavs = int(numWavsStr) # to be on safe side
print("Recovered that there are " + numWavsStr + " wavelengths")
#One more line of header
inLine = inputHandle.readline() #line of header
print(inLine)
waveMod = [0.0 for i in range(numWavs)]
fluxMod = [0.0 for i in range(numWavs)]
#Get the synthetic spectrum
for i in range(numWavs):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = invnAir * float(wavStr)
waveMod[i] = wav
fluxMod[i] = float(flxStr)
waveIds = [0.0 for i in range(numLineIds)]
lblIds = ["" for i in range(numLineIds)]
#Get the line IDs
#Expects four white-space-delimited fields:
# wavelength, element, ion. stage, and rounded wavelength
#Another line of header for line id section
inLine = inputHandle.readline() #line of header
print(inLine)
for i in range(numLineIds):
inLine = inputHandle.readline()
fields = inLine.split()
wavStr = fields[0].strip()
wav = invnAir * float(wavStr)
waveIds[i] = wav
lblStr = fields[1].strip() + " " + fields[2].strip() + " " + fields[3].strip()
lblIds[i] = lblStr
"""
#If we do NOT know number of records:
#for i in inputHandle: #doesn't work - 0 iterations
while (inLine != ""):
inLine = inputHandle.readline()
if not inLine:
break
#print(inLine)
fields = inLine.split()
wavStr = fields[0].strip(); flxStr = fields[1].strip()
wav = invnAir * float(wavStr)
waveMod.append(wav)
fluxMod.append(float(flxStr))
"""
#plot the spectrum
#plt.title('Synthetic spectrum')
plt.ylabel(r'$F_\lambda/F^C_\lambda$')
plt.xlabel(r'$\lambda$ (nm)')
xMin = min(waveMod)
xMax = max(waveMod)
pylab.xlim(xMin, xMax)
pylab.ylim(0.0, 1.6)
pylab.plot(waveMod, fluxMod, color="gray")
#add the line IDs
for i in range(numLineIds):
if "Ca II" in lblIds[i]:
thisLam = waveIds[i]
thisLbl = lblIds[i]
xPoint = [thisLam, thisLam]
yPoint = [1.05, 1.1]
pylab.plot(xPoint, yPoint, color='black')
pylab.text(thisLam, 1.5, thisLbl, rotation=270)
#Save as encapsulated postscript (eps) for LaTex
epsName = fileStem + ".eps"
plt.savefig(epsName, format='eps', dpi=1000) | mit | 3,639,136,743,345,214,000 | 28.780952 | 106 | 0.604302 | false | 2.935938 | false | false | false |
lunixbochs/uberserver | server.py | 1 | 4578 | #!/usr/bin/env python
# coding=utf-8
import thread, traceback, signal, socket, sys
from urllib import urlopen
from DataHandler import DataHandler
from Client import Client
from NATServer import NATServer
from Dispatcher import Dispatcher
import ip2country # just to make sure it's downloaded
import ChanServ
_root = DataHandler()
_root.parseArgv(sys.argv)
try:
signal.SIGHUP
def sighup(sig, frame):
_root.console_write('Received SIGHUP.')
if _root.sighup:
_root.reload()
signal.signal(signal.SIGHUP, sighup)
except AttributeError:
pass
_root.console_write('-'*40)
_root.console_write('Starting uberserver...\n')
host = ''
port = _root.port
natport = _root.natport
backlog = 100
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR,
server.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) | 1 )
# fixes TIME_WAIT :D
server.bind((host,port))
server.listen(backlog)
try:
natserver = NATServer(natport)
thread.start_new_thread(natserver.start,())
natserver.bind(_root)
except socket.error:
print 'Error: Could not start NAT server - hole punching will be unavailable.'
_root.console_write()
_root.console_write('Detecting local IP:')
try: local_addr = socket.gethostbyname(socket.gethostname())
except: local_addr = '127.0.0.1'
_root.console_write(local_addr)
_root.console_write('Detecting online IP:')
try:
timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(5)
web_addr = urlopen('http://automation.whatismyip.com/n09230945.asp').read()
socket.setdefaulttimeout(timeout)
_root.console_write(web_addr)
except:
web_addr = local_addr
_root.console_write('not online')
_root.console_write()
_root.local_ip = local_addr
_root.online_ip = web_addr
_root.console_write('Listening for clients on port %i'%port)
_root.console_write('Using %i client handling thread(s).'%_root.max_threads)
dispatcher = Dispatcher(_root, server)
_root.dispatcher = dispatcher
chanserv = True
if chanserv:
address = ((web_addr or local_addr), 0)
chanserv = ChanServ.ChanServClient(_root, address, _root.session_id)
dispatcher.addClient(chanserv)
_root.chanserv = chanserv
try:
dispatcher.pump()
except KeyboardInterrupt:
_root.console_write()
_root.console_write('Server killed by keyboard interrupt.')
except:
_root.error(traceback.format_exc())
_root.console_write('Deep error, exiting...')
# _root.console_write('Killing handlers.')
# for handler in _root.clienthandlers:
# handler.running = False
_root.console_write('Killing clients.')
for client in dict(_root.clients):
try:
conn = _root.clients[client].conn
if conn: conn.close()
except: pass # for good measure
server.close()
_root.running = False
_root.console_print_step()
if _root.dbtype == 'legacy':
print 'Writing account database to file...'
try:
while True:
try:
_root.userdb.writeAccounts()
print 'Accounts written.'
if _root.channelfile:
print 'Writing channels...'
__import__('tasserver').LegacyChannels.Writer().dump(_root.channels, _root.getUserDB().clientFromID)
print 'Channels written.'
_root.channelfile = None
break
except KeyboardInterrupt:
print 'You probably shouldn\'t interrupt this, starting account dump over.'
except:
print '-'*60
print traceback.format_exc()
print '-'*60
memdebug = False
if memdebug:
recursion = []
names = {}
def dump(obj, tabs=''):
if obj in recursion: return str(obj)
else: recursion.append(obj)
try:
			if type(obj) in (list, set):
				return [dump(var, tabs) for var in obj]
elif type(obj) in (str, unicode, int, float):
return obj
elif type(obj) == dict:
output = {}
for key in obj:
output[key] = dump(obj[key], tabs+'\t')
else:
output = {}
ovars = vars(obj)
for key in ovars:
if key in names: names[key] += 1
else: names[key] = 1
output[key] = dump(ovars[key], tabs+'\t')
return '\n'.join(['%s%s:\n%s\t%s' % (tabs, key, tabs, output[key]) for key in output]) if output else {}
except: return 'no __dict__'
print 'Dumping memleak info.'
f = open('dump.txt', 'w')
f.write(dump(_root))
f.close()
counts = {}
for name in names:
count = names[name]
if count in counts:
counts[count].append(name)
else:
counts[count] = [name]
f = open('counts.txt', 'w')
for key in reversed(sorted(counts)):
f.write('%s: %s\n' % (key, counts[key]))
f.close() | mit | -7,224,281,884,972,797,000 | 24.783626 | 107 | 0.669506 | false | 3.031788 | false | false | false |
zooko/egtp_new | egtp/CommHints.py | 1 | 2565 | # Copyright (c) 2001 Autonomous Zone Industries
# This file is licensed under the
# GNU Lesser General Public License v2.1.
# See the file COPYING or visit http://www.gnu.org/ for details.
__revision__ = "$Id: CommHints.py,v 1.2 2002/12/02 19:58:44 myers_carpenter Exp $"
### standard modules
import types
# The following hints can be passed to `send_msg()' to allow the comms handler to optimize
# usage of the underlying communication system. A correct comms handler implementation
# could, of course, ignore these hints, and the comms handler should not fail to send a
# message, send it to the wrong counterparty, or otherwise do something incorrect no matter
# what hints are passed.
# This hint means that you expect an immediate response. For example, the TCPCommsHandler
# holds the connection open after sending until it gets a message on that connection, then
# closes it. (Unless HINT_EXPECT_MORE_TRANSACTIONS is also passed, in which case see
# below.)
HINT_EXPECT_RESPONSE = 1
# This hint means that you expect to send and receive messages with this counterparty in the
# near future. (Who knows what that means? This is just a hint.) For example, the
# TCPCommsHandler holds the connection open after sending unless it has too many open
# connections, in which case it closes it.
HINT_EXPECT_MORE_TRANSACTIONS = 2
# For example, if both HINT_EXPECT_RESPONSE and HINT_EXPECT_MORE_TRANSACTIONS are passed,
# then the TCPCommsHandler holds the connection open until it receives a message on that
# connection, then reverts to HINT_EXPECT_MORE_TRANSACTIONS -style mode in which it keeps
# the connection open unless it has too many open connections.
# This hint means that you expect no more messages to or from this counterparty. For
# example, the TCPCommsHandler closes the connection immediately after sending the message.
# If you pass both HINT_EXPECT_NO_MORE_COMMS and one of the previous hints then you are
# silly.
HINT_EXPECT_NO_MORE_COMMS = 4
# This hint means that you are going to send something. For example, the TCPCommsHandler
# holds open a connection after it receives a query and then closes it after sending the reply.
HINT_EXPECT_TO_RESPOND = 8
# This hint, when passed with a call to `send()', indicates that the message is a response to an
# earlier received query.
HINT_THIS_IS_A_RESPONSE = 16
HINT_NO_HINT = 0
def is_hint(thingie, IntType=types.IntType, LongType=types.LongType):
if not type(thingie) in (IntType, LongType,):
return 0 # `false'
return (thingie >= 0) and (thingie < 32)
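# Illustrative example (not from the original module): the hints are plain
# integer flags, so callers combine them with bitwise OR before passing the
# result to a comms handler's `send_msg()`:
#
#     hints = HINT_EXPECT_RESPONSE | HINT_EXPECT_MORE_TRANSACTIONS
#     assert is_hint(hints)        # 3 falls inside the valid 0..31 range
#     assert not is_hint("soon")   # non-integers are never hints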
| lgpl-2.1 | 629,575,291,455,173,500 | 46.5 | 95 | 0.759454 | false | 3.690647 | false | false | false |
SatAgro/ecoclima | ecoclima_parser/init_all.py | 1 | 2540 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import psycopg2
import sys
def init_all(db_name, user, host, password):
try:
conn = psycopg2.connect("dbname='" + db_name + "' user='" + user +
"' host='" + host + "' password='" + password +
"'")
cur = conn.cursor()
cur.execute("""DROP TABLE IF EXISTS """ + 'stations')
cur.execute("""CREATE TABLE """ + 'stations' +
"""(id Serial, name Text,
lat REAL, lon REAL, owner TEXT, url TEXT)""")
cur.execute("""DROP TABLE IF EXISTS """ + 'measures')
cur.execute("""CREATE TABLE """ + 'measures' +
"""(
station_id INTEGER,
m_date DATE,
m_time TIME,
temp_out REAL,
hi_temp REAL,
low_temp REAL,
out_hum INTEGER,
dew_pt REAL,
wind_speed REAL,
wind_dir TEXT,
wind_run REAL,
hi_speed REAL,
hi_dir TEXT,
wind_chill REAL,
heat_index REAL,
thw_index REAL,
bar REAL,
rain REAL,
rain_rate REAL,
uv_index REAL,
uv_dose REAL,
hi_uv REAL,
heat_dd REAL,
cool_dd REAL,
in_temp REAL,
in_hum INTEGER,
in_dew REAL,
in_heat REAL,
in_emc REAL,
in_air_density REAL,
soil_moist INTEGER,
soil_temp REAL,
leaf_wet INTEGER,
wind_samp REAL,
wind_tx INTEGER,
iss_recept REAL,
arc_int INTEGER,
CONSTRAINT """ +
"""station_time_unique UNIQUE (station_id, m_date, m_time))""")
cur.close()
conn.commit()
conn.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
raise
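# Note (illustrative, not part of the original script): psycopg2 also accepts
# keyword arguments, which sidesteps quoting problems when credentials
# contain quotes or spaces:
#
#     conn = psycopg2.connect(dbname=db_name, user=user,
#                             host=host, password=password)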
if __name__ == '__main__':
init_all(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
print ("tables for stations and measures have been created")
| lgpl-3.0 | -574,160,321,714,270,700 | 34.774648 | 83 | 0.385827 | false | 4.364261 | false | false | false |
ferdisdot/elbe | elbepack/pkgutils.py | 1 | 7605 | # ELBE - Debian Based Embedded Rootfilesystem Builder
# Copyright (C) 2013 Linutronix GmbH
#
# This file is part of ELBE.
#
# ELBE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ELBE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ELBE. If not, see <http://www.gnu.org/licenses/>.
import os
from tempfile import mkdtemp
import urllib2
import hashlib
from elbepack.shellhelper import CommandError
try:
from elbepack import virtapt
from apt_pkg import TagFile
virtapt_imported = True
except ImportError:
print "WARNING - python-apt not available: if there are multiple versions of"
print " elbe-bootstrap packages on the mirror(s) elbe selects the first package it"
print " has found. There is no guarantee that the latest package is used."
print " To ensure this, the python-apt package needs to be installed."
import urllib2
virtapt_imported = False
class NoKinitrdException(Exception):
pass
def get_sources_list( prj, defs ):
suite = prj.text("suite")
slist = ""
if prj.has("mirror/primary_host"):
mirror = "%s://%s/%s" % ( prj.text("mirror/primary_proto"), prj.text("mirror/primary_host"), prj.text("mirror/primary_path") )
slist += "deb %s %s main\n" % (mirror, suite)
slist += "deb-src %s %s main\n" % (mirror, suite)
if prj.has("mirror/cdrom"):
tmpdir = mkdtemp()
kinitrd = prj.text("buildimage/kinitrd", default=defs, key="kinitrd")
os.system( '7z x -o%s "%s" pool/main/%s/%s dists' % (tmpdir, prj.text("mirror/cdrom"), kinitrd[0], kinitrd) )
slist += "deb file://%s %s main\n" % (tmpdir,suite)
if prj.node("mirror/url-list"):
for n in prj.node("mirror/url-list"):
if n.has("binary"):
tmp = n.text("binary").replace("LOCALMACHINE", "localhost")
slist += "deb %s\n" % tmp.strip()
if n.has("source"):
tmp = n.text("source").replace("LOCALMACHINE", "localhost")
slist += "deb-src %s\n" % tmp.strip()
return slist
def get_key_list (prj):
retval = []
if prj.node("mirror/url-list"):
for n in prj.node("mirror/url-list"):
if n.has("key"):
tmp = n.text("key").replace("LOCALMACHINE", "localhost")
retval.append (tmp.strip ())
return retval
def get_initrd_pkg( prj, defs ):
initrdname = prj.text("buildimage/kinitrd", default=defs, key="kinitrd")
return initrdname
def get_url ( arch, suite, target_pkg, mirror ):
try:
packages = urllib2.urlopen("%s/dists/%s/main/binary-%s/Packages" %
(mirror.replace("LOCALMACHINE", "localhost"), suite, arch))
packages = packages.readlines()
packages = filter( lambda x: x.startswith( "Filename" ), packages )
packages = filter( lambda x: x.find( target_pkg ) != -1, packages )
tmp = packages.pop()
urla = tmp.split()
url = "%s/%s" % (mirror.replace("LOCALMACHINE", "localhost"), urla[1])
except IOError:
url = ""
except IndexError:
url = ""
return url
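# For reference (illustrative excerpt, not shipped with elbe): the Packages
# index fetched above is a series of stanzas, and only "Filename" lines that
# mention the target package survive the two filters, e.g.:
#
#     Package: elbe-bootstrap
#     Filename: pool/main/e/elbe-bootstrap/elbe-bootstrap_1.0_armel.deb
#
# which yields "<mirror>/pool/main/e/elbe-bootstrap/elbe-bootstrap_1.0_armel.deb".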
def get_initrd_uri( prj, defs, arch ):
if arch == "default":
arch = prj.text("buildimage/arch", default=defs, key="arch")
suite = prj.text("suite")
name = prj.text("name", default=defs, key="name")
apt_sources = get_sources_list(prj, defs)
apt_keys = get_key_list (prj)
target_pkg = get_initrd_pkg(prj, defs)
if virtapt_imported:
v = virtapt.VirtApt( name, arch, suite, apt_sources, "", apt_keys )
d = virtapt.apt_pkg.DepCache(v.cache)
pkg = v.cache[target_pkg]
c=d.get_candidate_ver(pkg)
x=v.source.find_index(c.file_list[0][0])
r=virtapt.apt_pkg.PackageRecords(v.cache)
r.lookup(c.file_list[0])
uri = x.archive_uri(r.filename)
if not x.is_trusted:
return "", uri
return r.sha1_hash, uri
else:
url = "%s://%s/%s" % (prj.text("mirror/primary_proto"),
prj.text("mirror/primary_host"),
prj.text("mirror/primary_path") )
pkg = get_url ( arch, suite, target_pkg, url )
if pkg:
return "", pkg
for n in prj.node("mirror/url-list"):
url = n.text("binary")
urla = url.split()
pkg = get_url ( arch, suite, target_pkg,
urla[0].replace("BUILDHOST", "localhost") )
if pkg:
return "", pkg
return "", ""
def get_dsc_size( fname ):
if not virtapt_imported:
return 0
tf = TagFile( fname )
sz = os.path.getsize(fname)
for sect in tf:
if sect.has_key('Files'):
files = sect['Files'].split('\n')
files = [ f.strip().split(' ') for f in files ]
for f in files:
sz += int(f[1])
return sz
def copy_kinitrd( prj, target_dir, defs, arch="default" ):
try:
sha1, uri = get_initrd_uri(prj, defs, arch)
except KeyError:
raise NoKinitrdException ('no elbe-bootstrap package available')
return
except SystemError:
raise NoKinitrdException ('a configured mirror is not reachable')
return
except CommandError as e:
raise NoKinitrdException ("couldn't download elbe-bootstrap package")
return
tmpdir = mkdtemp()
try:
if uri.startswith("file://"):
os.system( 'cp "%s" "%s"' % ( uri[len("file://"):], os.path.join(tmpdir, "pkg.deb") ) )
elif uri.startswith("http://"):
os.system( 'wget -O "%s" "%s"' % ( os.path.join(tmpdir, "pkg.deb"), uri ) )
elif uri.startswith("ftp://"):
os.system( 'wget -O "%s" "%s"' % ( os.path.join(tmpdir, "pkg.deb"), uri ) )
else:
raise NoKinitrdException ('no elbe-bootstrap package available')
except CommandError as e:
raise NoKinitrdException ("couldn't download elbe-bootstrap package")
return
if len(sha1) > 0:
m = hashlib.sha1()
with open (os.path.join(tmpdir, "pkg.deb"), "rb") as f:
buf = f.read(65536)
while len(buf)>0:
m.update( buf )
buf = f.read(65536)
if m.hexdigest() != sha1:
raise NoKinitrdException ('elbe-bootstrap failed to verify !!!')
else:
print "-----------------------------------------------------"
print "WARNING:"
print "Using untrusted elbe-bootstrap"
print "-----------------------------------------------------"
os.system( 'dpkg -x "%s" "%s"' % ( os.path.join(tmpdir, "pkg.deb"), tmpdir ) )
if prj.has("mirror/cdrom"):
os.system( 'cp "%s" "%s"' % ( os.path.join( tmpdir, 'var', 'lib', 'elbe', 'initrd', 'initrd-cdrom.gz' ), os.path.join(target_dir, "initrd.gz") ) )
else:
os.system( 'cp "%s" "%s"' % ( os.path.join( tmpdir, 'var', 'lib', 'elbe', 'initrd', 'initrd.gz' ), os.path.join(target_dir, "initrd.gz") ) )
os.system( 'cp "%s" "%s"' % ( os.path.join( tmpdir, 'var', 'lib', 'elbe', 'initrd', 'vmlinuz' ), os.path.join(target_dir, "vmlinuz") ) )
os.system( 'rm -r "%s"' % tmpdir )
| gpl-3.0 | 6,152,932,937,203,005,000 | 32.650442 | 154 | 0.575016 | false | 3.417978 | false | false | false |
popgengui/negui | agestrucne/pgchromlocifilemanager.py | 1 | 9254 | '''
Description
This class wraps defs to validate a chromosome
loci table file used by LDNe2 to filter out loci
pairs that share a chromosome.
'''
__filename__ = "pgchromlocifilemanager.py"
__date__ = "20180502"
__author__ = "Ted Cosart<[email protected]>"
'''
This string designates that
there is no chrom loci file,
in the case expected by LDNe2:
'''
NO_CHROM_LOCI_FILE="None"
CHROM_TOTAL_ZERO=0
CHROM_LOCI_FILE_DELIMITER="\t"
#Field order in the file
IDX_CHROM_NAME=0
IDX_LOCI_NAME=1
LDNE_LOCI_PAIRING_SCHEME_IGNORE_CHROM=0
LDNE_LOCI_PAIRING_SCHEME_SAME_CHROM=1
LDNE_LOCI_PAIRING_SCHEME_DIFF_CHROM=2
LOCI_PAIRING_SCHEME_DESCRIPT={ LDNE_LOCI_PAIRING_SCHEME_IGNORE_CHROM:"use all pairs",
LDNE_LOCI_PAIRING_SCHEME_SAME_CHROM:"loci pair p1,p2 must be from the same chromosome",
LDNE_LOCI_PAIRING_SCHEME_DIFF_CHROM:"loci pair p1,p2, must be from different chromosomes" }
import os
class GenepopLociScraper( object ):
'''
This is a convenience class to
segregate the code needed just
to get the limited loci info
needed for the ChromLociFileManager.
'''
def __init__( self, s_genepop_file ):
self.__gpfile=s_genepop_file
self.__get_loci_list()
return
#end __init__
def __get_loci_list( self ):
FIRST_LINE=1
POPLINE="pop"
DELIMITER_WHEN_LOCI_ARE_LISTED_ON_ONE_LINE=","
ls_loci_list=[]
o_file=open( self.__gpfile, 'r' )
i_line_number = 0
s_second_line_entry=None
for s_line in o_file:
i_line_number += 1
if i_line_number==FIRST_LINE:
continue
elif i_line_number == 2:
s_second_line_entry=s_line.strip()
#If second line is not only loci line,
#we continue to build our loci list,
#line by line:
ls_loci_list.append( s_line.strip() )
elif s_line.strip().lower() == POPLINE:
if i_line_number == 3:
#all loci were on line 2,
				#and entered as a list, so we
#reassign our loci_list thusly:
ls_loci_list=s_second_line_entry.split( \
DELIMITER_WHEN_LOCI_ARE_LISTED_ON_ONE_LINE )
#end if first pop line is file's 3rd line, then loci format is list
break
else:
ls_loci_list.append( s_line.strip() )
#end if first line, else second line, else pop line, else loci line
#end for each linn in file
o_file.close()
self.__loci_list=ls_loci_list
return
#end __get_loci_list
@property
def loci_list( self ):
return self.__loci_list
#end property loci_list
#end class GenepopLociScraper
class ChromLociFileManager( object ):
'''
	2018_05_02. This class was created, initially,
	to validate files to be used by LDNe2 to get
	chromosome/loci pairs, for use in filtering
	loci pairs that share a chromosome. We may
want to put it to other uses later.
Note that it also is the single source for
the string that designates that no such
file is to be used, and which chromosome
totals are invalid (see mod-level assignments).
'''
def __init__( self,
s_file_name=NO_CHROM_LOCI_FILE,
ls_genepop_files_that_use_the_file=[],
i_ldne_pairing_scheme=None ):
self.__filename=s_file_name
'''
Note -- no list.copy() def for python2:
'''
self.__genepop_files=[ v_item for v_item
in ls_genepop_files_that_use_the_file ]
self.__total_chromosomes=None
self.__chromloci_table=None
self.__unlisted_loci=[]
self.__loci_pairing_scheme=i_ldne_pairing_scheme
return
#end __init__
def __validate_file( self ):
s_error_message=""
b_is_valid=False
b_is_file=os.path.isfile( self.__filename )
if b_is_file:
self.__get_total_chromosomes()
b_each_loci_paired_with_one_chromosome=\
self.__each_loci_is_assigned_to_exactly_one_chromosome()
b_all_loci_listed=self.__all_genepop_loci_are_listed()
'''
2018_05_07. The only loci pairing violation detected so far,
occurs when the client has a chrom/loci file that contains just one
chromosome, and also requests the loci pairing sheme that requires
pairs l1,l2, from chrom c1,c2, have c1 != c2.
'''
b_pairing_violation=\
self.__loci_pairing_scheme is not None \
and self.__loci_pairing_scheme \
== LDNE_LOCI_PAIRING_SCHEME_DIFF_CHROM \
and self.__total_chromosomes == 1
if not b_each_loci_paired_with_one_chromosome:
s_error_message += "\nAt least one loci is paired with " \
+ "more than one chromosome." \
if not b_all_loci_listed:
s_error_message += "\n" \
+ " in chrom/loci file, " \
+ self.__filename + ", " \
+ "Genepop file(s) has (have) the " \
+ "following loci not " \
+ "assigned to chromosomes: \n" \
+ str( self.__unlisted_loci )
#end if some loci unlisted
if b_pairing_violation:
s_error_message += "\n" \
+ " in chrom/loci file, " \
+ self.__filename + ", " \
+ " the chromosome total, " \
+ str( self.__total_chromosomes ) \
+ ", is incompatible with the " \
+ "loci pairing scheme: " \
+ LOCI_PAIRING_SCHEME_DESCRIPT[ \
self.__loci_pairing_scheme ]
#end if loci pairing violation
else:
s_error_message="\nFile, " + self.__filename + "does not exist."
#end if we have a chrom/loci file else not
if s_error_message != "":
raise Exception( "In ChromLociFileManager instance, " \
+ "def __validate_file, " \
+ "file found to be invalid with message: " \
+ s_error_message )
#end if we noted an error, raise exception
return
#end __validate_file
def __get_chrom_loci_table( self ):
MIN_NUM_FIELDS=2
o_file=open( self.__filename, 'r' )
self.__chromloci_table={}
for s_line in o_file:
ls_fields=s_line.strip().split( CHROM_LOCI_FILE_DELIMITER )
s_chrom=ls_fields[ IDX_CHROM_NAME ]
if len( ls_fields ) < MIN_NUM_FIELDS:
raise Exception( "In ChromLociFileManager, " \
+ "def __get_chrom_loci_table, " \
+ "a file line has fewer than the " \
+ "required " + str( MIN_NUM_FIELDS ) \
+ " fields for a chrom/loci table file. " \
+ "The file line is: \"" + s_line.strip() + "\"" )
#end if too few fields
s_loci_name=ls_fields[ IDX_LOCI_NAME ]
if s_chrom in self.__chromloci_table:
self.__chromloci_table[ s_chrom ].append( s_loci_name )
else:
self.__chromloci_table[ s_chrom ]=[ s_loci_name ]
#end if chrom already in dict, else add
#end for each line in file
o_file.close()
return
#end __get_chrom_loci_table
def __all_genepop_loci_are_listed( self ):
b_all_listed=False
set_loci_listed_in_chrom_loci_file=self.__get_set_loci_list_from_chrom_loci_file()
i_total_unlisted_loci=0
for s_genepop_file in self.__genepop_files:
ls_loci_in_this_gp_file=\
self.__get_loci_list_from_genepop_file( s_genepop_file )
set_loci_in_this_gp_file=set( ls_loci_in_this_gp_file )
if not( set_loci_in_this_gp_file.issubset( set_loci_listed_in_chrom_loci_file ) ):
set_diff=set_loci_in_this_gp_file.difference( set_loci_listed_in_chrom_loci_file )
i_total_unlisted_loci += len( set_diff )
self.__unlisted_loci += list( set_diff )
#end if gp list not a subset of our table's loci
#end for each genepop file
b_all_listed=( i_total_unlisted_loci==0 )
return b_all_listed
#end __all_genepop_loci_are_listed
def __each_loci_is_assigned_to_exactly_one_chromosome( self ):
b_loci_assignments_valid=True
if self.__chromloci_table is None:
self.__get_chrom_loci_table()
#end if not table, make one
ds_chrom_names_by_loci_name={}
for s_chrom in self.__chromloci_table:
ls_loci=self.__chromloci_table[ s_chrom ]
for s_loci in ls_loci:
if s_loci in ds_chrom_names_by_loci_name:
b_loci_assignments_valid=False
break
else:
ds_chrom_names_by_loci_name[ s_loci ]=s_chrom
#end if loci already paired with a chrom
#end for each loci in this chrom's loci list
#end for each chrom
return b_loci_assignments_valid
#end def __each_loci_is_assigned_to_exactly_one_chromosome
def validateFile( self ):
self.__validate_file()
return
#end validateFile
def __get_loci_list_from_genepop_file( self, s_genepop_file ):
o_gp_loci_scraper=GenepopLociScraper( s_genepop_file )
return o_gp_loci_scraper.loci_list
#end __get_loci_list_from_chrom_loci_file
def __get_set_loci_list_from_chrom_loci_file( self ):
ls_loci_list=[]
set_loci_list=None
if self.__chromloci_table is None:
self.__get_chrom_loci_table()
#end if no table, get it
for s_chrom in self.__chromloci_table:
ls_loci_list +=self.__chromloci_table[ s_chrom ]
#end for each chrom, append loci list
set_loci_list=set( ls_loci_list )
return set_loci_list
#end def __get_loci_list_from_chrom_loci_file
def __get_total_chromosomes( self ):
if self.__total_chromosomes is None:
if self.__chromloci_table is None:
self.__get_chrom_loci_table()
#end if no table
self.__total_chromosomes=len( self.__chromloci_table )
#end if total not yet calc'd
return
#end __get_total_chromosomes
#end class ChromLociFileManager
if __name__ == "__main__":
s_test_file="/home/ted/temp/tclf.tsv"
s_gp="/home/ted/temp/gp.gp"
o_clfm=ChromLociFileManager( s_test_file, [ s_gp ] )
o_clfm.validateFile()
pass
#end if main
| agpl-3.0 | -9,070,952,403,891,394,000 | 25.515759 | 96 | 0.654636 | false | 2.694817 | false | false | false |
IQSS/miniverse | dv_apps/slackbot/starterbot.py | 1 | 2042 | import os
import time
from slackclient import SlackClient
# starterbot's ID as an environment variable
BOT_ID = os.environ.get("BOT_ID")
# constants
AT_BOT = "<@" + BOT_ID + ">"
EXAMPLE_COMMAND = "gofish"
# instantiate Slack & Twilio clients
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
def handle_command(command, channel):
"""
Receives commands directed at the bot and determines if they
are valid commands. If so, then acts on the commands. If not,
returns back what it needs for clarification.
"""
response = "Not sure what you mean. Use the *" + EXAMPLE_COMMAND + \
"* command with numbers, delimited by spaces."
if command.startswith(EXAMPLE_COMMAND):
response = "Sure...write some more code then I can do that!"
slack_client.api_call("chat.postMessage", channel=channel,
text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and AT_BOT in output['text']:
# return text after the @ mention, whitespace removed
return output['text'].split(AT_BOT)[1].strip().lower(), \
output['channel']
return None, None
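# For reference (shape assumed from the Slack RTM API, not verified here):
# each element of slack_rtm_output is a dict along the lines of
#
#     {'type': 'message', 'channel': 'C024BE91L',
#      'text': '<@BOT_ID> gofish 1 2', 'user': 'U2147483697'}
#
# so the split on AT_BOT isolates the command text after the mention.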
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
if slack_client.rtm_connect():
print("StarterBot connected and running!")
while True:
command, channel = parse_slack_output(slack_client.rtm_read())
if command and channel:
handle_command(command, channel)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
| mit | -338,846,921,837,846,000 | 36.814815 | 75 | 0.633203 | false | 3.988281 | false | false | false |
globocom/database-as-a-service | dbaas/physical/migrations/0112_auto__add_ip.py | 1 | 36235 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Ip'
db.create_table(u'physical_ip', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('identifier', self.gf('django.db.models.fields.CharField')(max_length=200, null=True, blank=True)),
('address', self.gf('django.db.models.fields.CharField')(max_length=200)),
('instance', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['physical.Instance'], null=True, on_delete=models.SET_NULL, blank=True)),
))
db.send_create_signal(u'physical', ['Ip'])
def backwards(self, orm):
# Deleting model 'Ip'
db.delete_table(u'physical_ip')
models = {
u'account.organization': {
'Meta': {'object_name': 'Organization'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grafana_datasource': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'grafana_hostgroup': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_orgid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'team_organization'", 'on_delete': 'models.PROTECT', 'to': u"orm['account.Organization']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.cloud': {
'Meta': {'object_name': 'Cloud'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'backup_hour': ('django.db.models.fields.IntegerField', [], {}),
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'engine_patch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EnginePatch']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'maintenance_day': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'maintenance_window': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'infra'", 'null': 'True', 'to': u"orm['physical.Pool']"}),
'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ssl_mode': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'major_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'minor_version': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginepatch': {
'Meta': {'object_name': 'EnginePatch'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'patchs'", 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_initial_patch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'patch_path': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'patch_version': ('django.db.models.fields.PositiveIntegerField', [], {}),
'required_disk_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provisioner': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'stage': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environmentgroup': {
'Meta': {'object_name': 'EnvironmentGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'groups'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'root_size_gb': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ssl_expire_at': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.ip': {
'Meta': {'object_name': 'Ip'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'persistense_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_persisted_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.pool': {
'Meta': {'object_name': 'Pool'},
'cluster_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cluster_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cluster_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dbaas_token': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
'domain': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'pools'", 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'project_id': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'rancher_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rancher_token': ('django.db.models.fields.CharField', [], {'max_length': '406'}),
'storageclass': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'pools'", 'symmetrical': 'False', 'to': u"orm['account.Team']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'vpc': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recreate_slave': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'configure_log': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.topologyparametercustomvalue': {
'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.vip': {
'Meta': {'object_name': 'Vip'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'vips'", 'to': u"orm['physical.DatabaseInfra']"}),
'original_vip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Vip']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical'] | bsd-3-clause | 7,484,833,559,263,377,000 | 92.391753 | 239 | 0.560977 | false | 3.55943 | false | false | false |
vlegoff/tsunami | src/primaires/scripting/extensions/selection.py | 1 | 8145 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module contenant la classe Selection, détaillée plus bas."""
from textwrap import dedent
from primaires.interpreteur.editeur.selection import Selection as EdtSelection
from primaires.interpreteur.editeur.selection_objet import SelectionObjet
from primaires.scripting.editeurs.edt_script import EdtScript
from primaires.scripting.extensions.base import Extension
from primaires.scripting.script import Script
class Selection(Extension):
"""Classe représentant le type éditable 'selection'.
Ce type utilise l'éditeur SelectionObjet. Il permet de
sélectionner aucune, un ou plusieurs valeurs.
"""
extension = "selection"
aide = "une liste de zéro, une ou plusieurs possibilités"
nom_scripting = "la sélection d'éditeur"
def __init__(self, structure, nom):
Extension.__init__(self, structure, nom)
self.selection = []
self.script = ScriptSelection(self)
@property
def editeur(self):
"""Retourne le type d'éditeur."""
return SelectionObjet
@property
def arguments(self):
"""Retourne les arguments de l'éditeur."""
evt = self.script["selection"]
if evt.nb_lignes:
evt.executer()
cles = evt.espaces.variables["retour"]
evt = self.script["valeurs"]
if evt.nb_lignes:
evt.executer()
valeurs = evt.espaces.variables["retour"]
else:
valeurs = list(cles)
else:
cles = valeurs = self.selection
selection = dict(zip(cles, valeurs))
return (selection, )
def etendre_editeur(self, presentation):
"""Ëtend l'éditeur en fonction du type de l'extension."""
# Selection
selection = presentation.ajouter_choix("valeurs", "v", EdtSelection,
self, "selection")
selection.parent = presentation
selection.apercu = "{valeur}"
selection.aide_courte = dedent("""
Entrez |ent|ue valeur|ff| pour l'ajouter ou le retirer.
Entrez |cmd|/|ff| pour revenir à la fenêtre parente.
Cet éditeur existe pour laisser le joueur choisir entre
séro, une ou plusieurs valeurs parmi une liste. On parle
de sélection, car le joueur sélectionne certaines
informations. La liste de valeurs peut être très
simple : par exemple, on demande au joueur de choisir les noms
de villes qu'il fréquente régulièrement : le joueur
peut en choisir aucune, une ou plusieurs. La case de la
structure contiendra la liste des valeurs sélectionnées par
le joueur. Dans ce cas, vous pouvez entrer directement les
valeurs possibles pour les ajouter dans la liste des choix
proposés par l'éditeur.
Parfois cependant, on a besoin d'offrir un choix plus complexe.
Par exemple, entrer un ou plusieur noms de joueurs (la liste
des joueurs étant dynamiquement générée, pas statique).
Dans ce cas, on peut utiliser les deux évènements définis
dans le script de cet éditeur : l'évènement 'selection'
doit retourner une liste des choix possibles. Par exemple,
dans ce cas, une liste des noms de joueurs. L'évènement
'valeurs' permet de faire correspondre chaque choix
de la liste avec une valeur de remplacement : dans le
cas qui nous occupe, le joueur rentre le nom du ou des
joueurs, mais le systhème fait la correspondance avec
les joueur (les personnages sont écrits dans la structure, pas la
chaîne de caractères contenant leur nom). Ces scripts sont
donc bien plus puissants qu'une liste statique, mais peuvent
s'avérer complexes à utiliser.
La liste statique définie ici n'est utilisée que si
l'évènement 'selection' est vide.
Si l'évènement 'selection' existe mais que l'évènement
'valeurs' est vide, les chaînes de caractères sont ajoutées
dans la liste (il n'y a pas de remplacement d'effectué).
Valeurs autorisées : {valeur}""".strip("\n"))
# Script
scripts = presentation.ajouter_choix("scripts", "sc", EdtScript,
self.script)
scripts.parent = presentation
class ScriptSelection(Script):
"""Définition des sélection scriptables."""
def init(self):
"""Initialisation du script."""
# Événement selection
evt_selection = self.creer_evenement("selection")
evt_selection.aide_courte = "la liste des choix scriptables"
evt_selection.aide_longue = \
"Cet évènement est appelé pour déterminer les choix possibles " \
"que le joueur dans l'éditeur pourra sélectionner. Une " \
"variable |ent|retour|ff| doit être créée dans cet évènement, " \
"contenant une liste de chaînes. Le joueur dans l'éditeur " \
"pourra choisir aucune, une ou plusieurs des valeurs se " \
"trouvant dans cette liste. L'évènement 'valeurs' permet de " \
"configurer de façon encore plus précise ce qui sera conservé " \
"dans la structure."
# Événement valeurs
evt_valeurs = self.creer_evenement("valeurs")
evt_valeurs.aide_courte = "la liste des valeurs correspondantes"
evt_valeurs.aide_longue = \
"Cet évènement est couplé à l'évènement 'selection' pour " \
"déterminer les choix possibles et leur valeur respective. " \
"Quand le joueur dans l'éditeur entrera l'un des choix " \
"(une des chaînes contenues dans la liste de la variable " \
"|ent|retour|ff| de l'évènement 'selection'), le système " \
"recherchera la même case de la liste contenue dans la " \
"variable |ent|retour|ff| de l'évènement 'valeurs'. Ainsi, " \
"cet évènement doit contenir dans le même ordre que ''selection' " \
"les valeurs correspondantes. Si 'selection' contient une liste " \
"de noms de joueurs, l'évènement 'valeurs' doit contenir " \
"la liste des joueurs correspondants dans le même ordre. " \
"Quand le joueur dans l'éditeur entrera un nom de joueur, " \
"la structure sera modifiée pour contenir le joueur (et " \
"non pas son nom)."
| bsd-3-clause | 178,633,146,605,590,900 | 45.732558 | 80 | 0.663971 | false | 3.377311 | false | false | false |
johnkeates/statsite | sinks/graphite.py | 1 | 5895 | """
Supports flushing metrics to graphite
"""
import re
import sys
import socket
import logging
import pickle
import struct
# Initialize the logger
logging.basicConfig()
SPACES = re.compile(r"\s+")
SLASHES = re.compile(r"\/+")
NON_ALNUM = re.compile(r"[^a-zA-Z_\-0-9\.]")
class GraphiteStore(object):
def __init__(self, host="localhost", port=2003, prefix="statsite.", attempts=3,
protocol='lines', normalize=None):
"""
Implements an interface that allows metrics to be persisted to Graphite.
Raises a :class:`ValueError` on bad arguments.
:Parameters:
- `host` : The hostname of the graphite server.
- `port` : The port of the graphite server
- `prefix` (optional) : A prefix to add to the keys. Defaults to 'statsite.'
- `attempts` (optional) : The number of re-connect retries before failing.
- `normalize` (optional) : If set, attempt to sanitize/normalize keys to be more
generally compliant with graphite/carbon expectations.
"""
# Convert the port to an int since its coming from a configuration file
port = int(port)
attempts = int(attempts)
if port <= 0:
raise ValueError("Port must be positive!")
if attempts < 1:
raise ValueError("Must have at least 1 attempt!")
if protocol not in ["pickle", "lines"]:
raise ValueError("Supported protocols are pickle, lines")
if normalize is not None and normalize not in ("False", "false", "No", "no"):
self.normalize_func = self.normalize_key
else:
self.normalize_func = lambda k: "%s%s" % (self.prefix, k)
self.logger = logging.getLogger("statsite.graphitestore")
self.host = host
self.port = port
self.prefix = prefix
self.attempts = attempts
self.sock = self._create_socket()
self.flush = self.flush_pickle if protocol == "pickle" else self.flush_lines
self.metrics = []
def normalize_key(self, key):
"""
Take a single key string and return the same string with spaces, slashes and
non-alphanumeric characters subbed out and prefixed by self.prefix.
"""
key = SPACES.sub("_", key)
key = SLASHES.sub("-", key)
key = NON_ALNUM.sub("", key)
key = "%s%s" % (self.prefix, key)
return key
def append(self, metric):
"""
Add one metric to queue for sending. Addtionally modify key to be compatible with txstatsd
format.
:Parameters:
- `metric` : A single statsd metric string in the format "key|value|timestamp".
"""
if metric and metric.count("|") == 2:
k, v, ts = metric.split("|")
k = self.normalize_func(k)
self.metrics.append(((k), v, ts))
def flush_lines(self):
"""
Flushes the metrics provided to Graphite.
"""
if not self.metrics:
return
lines = ["%s %s %s" % metric for metric in self.metrics]
data = "\n".join(lines) + "\n"
# Serialize writes to the socket
try:
self._write_metric(data)
except StandardError:
self.logger.exception("Failed to write out the metrics!")
def flush_pickle(self):
"""
Flushes the metrics provided to Graphite.
"""
if not self.metrics:
return
# transform a list of strings into the list of tuples that
# pickle graphite interface supports, in the form of
# (key, (timestamp, value))
# http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-pickle-protocol
metrics_fmt = []
for (k, v, ts) in self.metrics:
metrics_fmt.append((k, (ts, v)))
# do pickle the list of tuples
# add the header the pickle protocol wants
payload = pickle.dumps(metrics_fmt, protocol=2)
header = struct.pack("!L", len(payload))
message = header + payload
try:
self._write_metric(message)
except StandardError:
self.logger.exception("Failed to write out the metrics!")
def close(self):
"""
Closes the connection. The socket will be recreated on the next
flush.
"""
try:
if self.sock:
self.sock.close()
except StandardError:
self.logger.warning("Failed to close connection!")
def _create_socket(self):
"""Creates a socket and connects to the graphite server"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((self.host, self.port))
except StandardError:
self.logger.error("Failed to connect!")
sock = None
return sock
def _write_metric(self, metric):
"""Tries to write a string to the socket, reconnecting on any errors"""
for _ in xrange(self.attempts):
if self.sock:
try:
self.sock.sendall(metric)
return
except socket.error:
self.logger.exception("Error while flushing to graphite. Reattempting...")
self.sock = self._create_socket()
self.logger.critical("Failed to flush to Graphite! Gave up after %d attempts.",
self.attempts)
def main():
# Intialize from our arguments
graphite = GraphiteStore(*sys.argv[1:])
# Get all the inputs
while True:
try:
graphite.append(raw_input().strip())
except EOFError:
break
# Flush
graphite.logger.info("Outputting %d metrics", len(graphite.metrics))
graphite.flush()
graphite.close()
if __name__ == "__main__":
main()
| bsd-3-clause | -8,307,928,939,181,115,000 | 31.39011 | 98 | 0.575403 | false | 4.253247 | false | false | false |
itielshwartz/BackendApi | lib/googleapiclient/schema.py | 1 | 10198 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema processing for discovery based APIs
Schemas holds an APIs discovery schemas. It can return those schema as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
from __future__ import absolute_import
import six
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = '[email protected] (Joe Gregorio)'
import copy
from oauth2client import util
class Schemas(object):
"""Schemas for an API."""
def __init__(self, discovery):
"""Constructor.
Args:
discovery: object, Deserialized discovery document from which we pull
out the named schema.
"""
self.schemas = discovery.get('schemas', {})
# Cache of pretty printed schemas.
self.pretty = {}
@util.positional(2)
def _prettyPrintByName(self, name, seen=None, dent=0):
"""Get pretty printed object prototype from the schema name.
Args:
name: string, Name of schema in the discovery document.
seen: list of string, Names of schema already seen. Used to handle
recursive definitions.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
if seen is None:
seen = []
if name in seen:
# Do not fall into an infinite loop over recursive definitions.
return '# Object with schema name: %s' % name
seen.append(name)
if name not in self.pretty:
self.pretty[name] = _SchemaToStruct(self.schemas[name],
seen, dent=dent).to_str(self._prettyPrintByName)
seen.pop()
return self.pretty[name]
def prettyPrintByName(self, name):
"""Get pretty printed object prototype from the schema name.
Args:
name: string, Name of schema in the discovery document.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
# Return with trailing comma and newline removed.
return self._prettyPrintByName(name, seen=[], dent=1)[:-2]
@util.positional(2)
def _prettyPrintSchema(self, schema, seen=None, dent=0):
"""Get pretty printed object prototype of schema.
Args:
schema: object, Parsed JSON schema.
seen: list of string, Names of schema already seen. Used to handle
recursive definitions.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
if seen is None:
seen = []
return _SchemaToStruct(schema, seen, dent=dent).to_str(self._prettyPrintByName)
def prettyPrintSchema(self, schema):
"""Get pretty printed object prototype of schema.
Args:
schema: object, Parsed JSON schema.
Returns:
string, A string that contains a prototype object with
comments that conforms to the given schema.
"""
# Return with trailing comma and newline removed.
return self._prettyPrintSchema(schema, dent=1)[:-2]
def get(self, name):
"""Get deserialized JSON schema from the schema name.
Args:
name: string, Schema name.
"""
return self.schemas[name]
class _SchemaToStruct(object):
"""Convert schema to a prototype object."""
@util.positional(3)
def __init__(self, schema, seen, dent=0):
"""Constructor.
Args:
schema: object, Parsed JSON schema.
seen: list, List of names of schema already seen while parsing. Used to
handle recursive definitions.
dent: int, Initial indentation depth.
"""
# The result of this parsing kept as list of strings.
self.value = []
# The final value of the parsing.
self.string = None
# The parsed JSON schema.
self.schema = schema
# Indentation level.
self.dent = dent
# Method that when called returns a prototype object for the schema with
# the given name.
self.from_cache = None
# List of names of schema already seen while parsing.
self.seen = seen
def emit(self, text):
"""Add text as a line to the output.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text, '\n'])
def emitBegin(self, text):
"""Add text to the output, but with no line terminator.
Args:
text: string, Text to output.
"""
self.value.extend([" " * self.dent, text])
def emitEnd(self, text, comment):
"""Add text and comment to the output with line terminator.
Args:
text: string, Text to output.
comment: string, Python comment.
"""
if comment:
divider = '\n' + ' ' * (self.dent + 2) + '# '
lines = comment.splitlines()
lines = [x.rstrip() for x in lines]
comment = divider.join(lines)
self.value.extend([text, ' # ', comment, '\n'])
else:
self.value.extend([text, '\n'])
def indent(self):
"""Increase indentation level."""
self.dent += 1
def undent(self):
"""Decrease indentation level."""
self.dent -= 1
def _to_str_impl(self, schema):
"""Prototype object based on the schema, in Python code with comments.
Args:
schema: object, Parsed JSON schema file.
Returns:
Prototype object based on the schema, in Python code with comments.
"""
stype = schema.get('type')
if stype == 'object':
self.emitEnd('{', schema.get('description', ''))
self.indent()
if 'properties' in schema:
for pname, pschema in six.iteritems(schema.get('properties', {})):
self.emitBegin('"%s": ' % pname)
self._to_str_impl(pschema)
elif 'additionalProperties' in schema:
self.emitBegin('"a_key": ')
self._to_str_impl(schema['additionalProperties'])
self.undent()
self.emit('},')
elif '$ref' in schema:
schemaName = schema['$ref']
description = schema.get('description', '')
s = self.from_cache(schemaName, seen=self.seen)
parts = s.splitlines()
self.emitEnd(parts[0], description)
for line in parts[1:]:
self.emit(line.rstrip())
elif stype == 'boolean':
value = schema.get('default', 'True or False')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'string':
value = schema.get('default', 'A String')
self.emitEnd('"%s",' % str(value), schema.get('description', ''))
elif stype == 'integer':
value = schema.get('default', '42')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'number':
value = schema.get('default', '3.14')
self.emitEnd('%s,' % str(value), schema.get('description', ''))
elif stype == 'null':
self.emitEnd('None,', schema.get('description', ''))
elif stype == 'any':
self.emitEnd('"",', schema.get('description', ''))
elif stype == 'array':
self.emitEnd('[', schema.get('description'))
self.indent()
self.emitBegin('')
self._to_str_impl(schema['items'])
self.undent()
self.emit('],')
else:
self.emit('Unknown type! %s' % stype)
self.emitEnd('', '')
self.string = ''.join(self.value)
return self.string
def to_str(self, from_cache):
"""Prototype object based on the schema, in Python code with comments.
Args:
from_cache: callable(name, seen), Callable that retrieves an object
prototype for a schema with the given name. Seen is a list of schema
names already seen as we recursively descend the schema definition.
Returns:
Prototype object based on the schema, in Python code with comments.
The lines of the code will all be properly indented.
"""
self.from_cache = from_cache
return self._to_str_impl(self.schema)
| apache-2.0 | -364,215,873,647,700,700 | 31.58147 | 96 | 0.581683 | false | 4.334042 | false | false | false |
macourteau/scripts | chromium/sublime/find_owners.py | 1 | 2139 | """Sublime Text plugin to find the Chromium OWNERS for the current file.
In a Chromium checkout, this will search for the closest OWNERS file and list
its contents. Select an entry to copy to the clipboard. You can also open the
displayed OWNERS file, or walk up the directory tree to the next OWNERS file.
"""
import os
import sublime
import sublime_plugin
class FindOwnersCommand(sublime_plugin.WindowCommand):
"""Implements the Find Owners command."""
def run(self):
self.find_owners(self.window.active_view().file_name())
def find_owners(self, start_path):
current_directory = start_path
while True:
new_directory = os.path.dirname(current_directory)
if new_directory == current_directory:
sublime.error_message('No OWNERS file found for "%s".'% start_path)
return
current_directory = new_directory
current_owners_file_path = os.path.join(current_directory, 'OWNERS')
if os.path.exists(current_owners_file_path):
self.last_directory = current_directory
self.owners_file_path = current_owners_file_path
with open(self.owners_file_path, 'r') as owners_file:
sublime.status_message('Found OWNERS file: "%s".' %
self.owners_file_path)
data = owners_file.read()
self.lines = data.strip().split('\n')
self.lines.insert(0, '[Show parent OWNERS file]')
self.lines.insert(1, '[Open this OWNERS file]')
self.lines.insert(2, '----- (select owner below to copy) -----')
self.window.show_quick_panel(self.lines,
self.on_select,
sublime.MONOSPACE_FONT)
return
def on_select(self, index):
# Show parent OWNERS file.
if index == 0:
self.find_owners(self.last_directory)
# Open this OWNERS file.
elif index == 1:
self.window.open_file(self.owners_file_path)
# Copy this line to clipboard.
elif index > 2:
sublime.set_clipboard(self.lines[index])
sublime.status_message('Copied "%s" to clipboard.' % self.lines[index])
| mit | -6,741,106,143,749,502,000 | 38.611111 | 77 | 0.636746 | false | 3.726481 | false | false | false |
toidi/hadoop-yarn-api-python-client | tests/test_history_server.py | 1 | 3978 | # -*- coding: utf-8 -*-
from mock import patch
from tests import TestCase
from yarn_api_client.history_server import HistoryServer
from yarn_api_client.errors import IllegalArgumentError
@patch('yarn_api_client.history_server.HistoryServer.request')
class HistoryServerTestCase(TestCase):
def setUp(self):
self.hs = HistoryServer('localhost')
@patch('yarn_api_client.history_server.get_jobhistory_endpoint')
def test__init__(self, get_config_mock, request_mock):
get_config_mock.return_value = None
HistoryServer()
get_config_mock.assert_called_with()
def test_application_information(self, request_mock):
self.hs.application_information()
request_mock.assert_called_with('/ws/v1/history/info')
def test_jobs(self, request_mock):
self.hs.jobs()
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs', params={})
self.hs.jobs(state='NEW', user='root', queue='high', limit=100,
started_time_begin=1, started_time_end=2,
finished_time_begin=3, finished_time_end=4)
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs',
params={"queue": 'high',
"state": 'NEW',
"user": 'root',
"limit": 100,
"startedTimeBegin": 1,
"startedTimeEnd": 2,
"finishedTimeBegin": 3,
"finishedTimeEnd": 4})
with self.assertRaises(IllegalArgumentError):
self.hs.jobs(state='ololo')
def test_job(self, request_mock):
self.hs.job('job_100500')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_100500')
def test_job_attempts(self, request_mock):
self.hs.job_attempts('job_1')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_1/jobattempts')
def test_job_counters(self, request_mock):
self.hs.job_counters('job_2')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/counters')
def test_job_conf(self, request_mock):
self.hs.job_conf('job_2')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/conf')
def test_job_tasks(self, request_mock):
self.hs.job_tasks('job_2')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks', params={})
self.hs.job_tasks('job_2', job_type='m')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks', params={"type": 'm'})
with self.assertRaises(IllegalArgumentError):
self.hs.job_tasks('job_2', job_type='ololo')
def test_job_task(self, request_mock):
self.hs.job_task('job_2', 'task_3')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3')
def test_task_counters(self, request_mock):
self.hs.task_counters('job_2', 'task_3')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3/counters')
def test_task_attempts(self, request_mock):
self.hs.task_attempts('job_2', 'task_3')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3/attempts')
def test_task_attempt(self, request_mock):
self.hs.task_attempt('job_2', 'task_3', 'attempt_4')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3/attempts/attempt_4')
def test_task_attempt_counters(self, request_mock):
self.hs.task_attempt_counters('job_2', 'task_3', 'attempt_4')
request_mock.assert_called_with('/ws/v1/history/mapreduce/jobs/job_2/tasks/task_3/attempts/attempt_4/counters')
| bsd-3-clause | 2,893,879,876,168,801,000 | 44.204545 | 119 | 0.602815 | false | 3.551786 | true | false | false |
kannon92/psi4 | psi4/share/psi4/scripts/vmd_cube.py | 1 | 12482 | #!/usr/bin/env python
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2016 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
# Francesco Evangelista
# Emory University
from __future__ import print_function
import argparse
import sys
import re
import subprocess
import os
import datetime
from os import listdir, environ
from os.path import isfile, join
vmd_cube_help = """vmd_cube is a script to render cube files with vmd.
To generate cube files with Psi4, add the command cubeprop(wfn) at the end
of your input file, where *wfn* is a Wavefunction object that may be
retrieved from any calculation and used following the pattern "ene, wave =
energy('pbe', return_wfn=True)\\n cubeprop(wave)"."""
vmd_exe = ""
vmd_script_name = "vmd_mo_script.vmd"
vmd_template = """#
# VMD script to plot MOs from cube files
#
# Load the molecule and change the atom style
mol load cube PARAM_CUBEFILE.cube
mol modcolor 0 PARAM_CUBENUM Element
mol modstyle 0 PARAM_CUBENUM CPK 0.400000 0.40000 30.000000 16.000000
# Define the material
material change ambient Opaque 0.310000
material change diffuse Opaque 0.720000
material change specular Opaque 0.500000
material change shininess Opaque 0.480000
material change opacity Opaque 1.000000
material change outline Opaque 0.000000
material change outlinewidth Opaque 0.000000
material change transmode Opaque 0.000000
material change specular Opaque 0.750000
material change ambient EdgyShiny 0.310000
material change diffuse EdgyShiny 0.720000
material change shininess EdgyShiny 1.0000
material change opacity EdgyShiny PARAM_OPACITY
# Customize atom colors
color Element C silver
color Element H white
# Rotate and translate the molecule
rotate x by PARAM_RX
rotate y by PARAM_RY
rotate z by PARAM_RZ
translate by PARAM_TX PARAM_TY PARAM_TZ
scale by PARAM_SCALE
# Eliminate the axis and perfect the view
axes location Off
display projection Orthographic
display depthcue off
color Display Background white"""
vmd_template_surface = """#
# Add the surfaces
mol color ColorID PARAM_SURF1ID
mol representation Isosurface PARAM_ISOVALUE1 0 0 0 1 1
mol selection all
mol material EdgyShiny
mol addrep PARAM_CUBENUM
mol color ColorID PARAM_SURF2ID
mol representation Isosurface PARAM_ISOVALUE2 0 0 0 1 1
mol selection all
mol material EdgyShiny
mol addrep PARAM_CUBENUM
# Render
render TachyonInternal PARAM_CUBEFILE.tga
mol delete PARAM_CUBENUM
"""
vmd_template_rotate = """
light 1 off
light 0 rot y 30.0
light 0 rot x -30.0
"""
default_path = os.getcwd()
# Default parameters
options = {"SURF1ID" : [None,"Surface1 Color Id"],
"SURF2ID" : [None,"Surface2 Color Id"],
"ISOVALUE1" : [None,"Isosurface1 Value"],
"ISOVALUE2" : [None,"Isosurface2 Value"],
"RX" : [None,"X-axis Rotation"],
"RY" : [None,"Y-axis Rotation"],
"RZ" : [None,"Z-axis Rotation"],
"TX" : [None,"X-axis Translation"],
"TY" : [None,"Y-axis Translation"],
"TZ" : [None,"Z-axis Translation"],
"OPACITY" : [None,"Opacity"],
"CUBEDIR" : [None,"Cubefile Directory"],
"SCALE" : [None,"Scaling Factor"],
"MONTAGE" : [None,"Montage"],
"FONTSIZE" : [None,"Font size"],
"IMAGESIZE" : [None,"Image size"],
"VMDPATH" : [None,"VMD Path"]}
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def multigsub(subs,str):
for k,v in subs.items():
str = re.sub(k,v,str)
return str
def find_vmd(options):
if environ['VMDPATH']:
vmdpath = environ['VMDPATH']
vmdpath = multigsub({" " : r"\ "},vmdpath)
options["VMDPATH"][0] = vmdpath
else:
print("Please set the VMDPATH environmental variable to the path of VMD.")
exit(1)
def save_setup_command(argv):
file_name = join(default_path, 'vmd_cube_command')
f = open(file_name, 'w')
f.write('# setup command was executed '+datetime.datetime.now().strftime("%d-%B-%Y %H:%M:%S"+"\n"))
f.write(" ".join(argv[:])+"\n")
f.close()
def read_options(options):
parser = argparse.ArgumentParser(description=vmd_cube_help)
parser.add_argument('data', metavar='<cubefile dir>', type=str, nargs='?',default=".",
help='The directory containing the cube files.')
parser.add_argument('--color1', metavar='<integer>', type=int, nargs='?',default=3,
help='the color ID of surface 1 (integer, default = 3)')
parser.add_argument('--color2', metavar='<integer>', type=int, nargs='?',default=23,
help='the color ID of surface 2 (integer, default = 23)')
parser.add_argument('--iso', metavar='<isovalue>', type=float, nargs='?',default=0.05,
help='the isosurface value (float, default = 0.05)')
parser.add_argument('--rx', metavar='<angle>', type=float, nargs='?',default=30.0,
help='the x-axis rotation angle (float, default = 30.0)')
parser.add_argument('--ry', metavar='<angle>', type=float, nargs='?',default=40.0,
help='the y-axis rotation angle (float, default = 40.0)')
parser.add_argument('--rz', metavar='<angle>', type=float, nargs='?',default=15.0,
help='the z-axis rotation angle (float, default = 15.0)')
parser.add_argument('--tx', metavar='<angle>', type=float, nargs='?',default=0.0,
help='the x-axis translation (float, default = 0.0)')
parser.add_argument('--ty', metavar='<angle>', type=float, nargs='?',default=0.0,
help='the y-axis translation (float, default = 0.0)')
parser.add_argument('--tz', metavar='<angle>', type=float, nargs='?',default=0.0,
help='the z-axis translation (float, default = 0.0)')
parser.add_argument('--opacity', metavar='<opacity>', type=float, nargs='?',default=1.0,
help='opacity of the isosurface (float, default = 1.0)')
parser.add_argument('--scale', metavar='<factor>', type=float, nargs='?',default=1.0,
help='the scaling factor (float, default = 1.0)')
parser.add_argument('--montage', const=True, default=False, nargs='?',
help='call montage to combine images. (string, default = false)')
parser.add_argument('--imagesize', metavar='<integer>', type=int, nargs='?',default=250,
help='the size of each image (integer, default = 250)')
parser.add_argument('--fontsize', metavar='<integer>', type=int, nargs='?',default=20,
help='the font size (integer, default = 20)')
args = parser.parse_args()
options["CUBEDIR"][0] = str(args.data)
options["SURF1ID"][0] = str(args.color1)
options["SURF2ID"][0] = str(args.color2)
options["ISOVALUE1"][0] = str(args.iso)
options["ISOVALUE2"][0] = str(-args.iso)
options["RX"][0] = str(args.rx)
options["RY"][0] = str(args.ry)
options["RZ"][0] = str(args.rz)
options["TX"][0] = str(args.tx)
options["TY"][0] = str(args.ty)
options["TZ"][0] = str(args.tz)
options["OPACITY"][0] = str(args.opacity)
options["SCALE"][0] = str(args.scale)
options["MONTAGE"][0] = str(args.montage)
options["FONTSIZE"][0] = str(args.fontsize)
options["IMAGESIZE"][0] = str(args.imagesize)
print("Parameters:")
for k,v in options.items():
print(" %-20s %s" % (v[1],v[0]))
def find_cubes(options):
# Find all the cube files in a given directory
dir = options["CUBEDIR"][0]
sorted_files = []
for f in listdir(options["CUBEDIR"][0]):
if ".cube" in f:
sorted_files.append(f)
return sorted(sorted_files)
def write_and_run_vmd_script(options,cube_files):
vmd_script = open(vmd_script_name,"w+")
vmd_script.write(vmd_template_rotate)
# Define a map that contains all the values of the VMD parameters
replacement_map = {}
for k,v in options.items():
key = "PARAM_" + k.upper()
replacement_map[key] = v[0]
for n,f in enumerate(cube_files):
replacement_map["PARAM_CUBENUM"] = "%03d" % n
replacement_map["PARAM_CUBEFILE"] = options["CUBEDIR"][0] + "/" + f[:-5]
vmd_script_surface = multigsub(replacement_map,vmd_template_surface)
vmd_script_head = multigsub(replacement_map,vmd_template)
vmd_script.write(vmd_script_head + "\n" + vmd_script_surface)
vmd_script.write("quit")
vmd_script.close()
# Call VMD
FNULL = open(os.devnull, 'w')
subprocess.call(("%s -dispdev text -e %s" % (options["VMDPATH"][0],vmd_script_name)),stdout=FNULL, shell=True)
def call_montage(options,cube_files):
if options["MONTAGE"][0] == 'True':
# Optionally, combine all figures into one image using montage
montage_exe = which("montage")
if montage_exe:
alpha_mos = []
beta_mos = []
densities = []
basis_functions = []
for f in cube_files:
tga_file = f[:-5] + ".tga"
if "Psi_a" in f:
alpha_mos.append(tga_file)
if "Psi_b" in f:
beta_mos.append(tga_file)
if "D" in f:
densities.append(tga_file)
if "Phi" in f:
basis_functions.append(tga_file)
# Sort the MOs
sorted_mos = []
for set in [alpha_mos,beta_mos]:
sorted_set = []
for s in set:
s_split = s.split('_')
sorted_set.append((int(s_split[2]),"Psi_a_%s_%s" % (s_split[2],s_split[3])))
sorted_set = sorted(sorted_set)
sorted_mos.append([s[1] for s in sorted_set])
os.chdir(options["CUBEDIR"][0])
for f in sorted_mos[0]:
f_split = f.split('_')
label = "%s\ \(%s\)" % (f_split[3][:-4],f_split[2])
subprocess.call(("montage -pointsize %s -label %s %s -geometry '%sx%s+0+0>' %s" %
(options["FONTSIZE"][0],label,f,options["IMAGESIZE"][0],options["IMAGESIZE"][0],f)), shell=True)
if len(alpha_mos) > 0:
subprocess.call(("%s %s -geometry +2+2 AlphaMOs.tga" % (montage_exe," ".join(sorted_mos[0]))), shell=True)
if len(beta_mos) > 0:
subprocess.call(("%s %s -geometry +2+2 BetaMOs.tga" % (montage_exe," ".join(sorted_mos[1]))), shell=True)
if len(densities) > 0:
subprocess.call(("%s %s -geometry +2+2 Densities.tga" % (montage_exe," ".join(densities))), shell=True)
if len(basis_functions) > 0:
subprocess.call(("%s %s -geometry +2+2 BasisFunctions.tga" % (montage_exe," ".join(basis_functions))), shell=True)
def main(argv):
read_options(options)
find_vmd(options)
save_setup_command(argv)
cube_files = find_cubes(options)
write_and_run_vmd_script(options,cube_files)
call_montage(options,cube_files)
if __name__ == '__main__':
main(sys.argv)
| gpl-2.0 | 6,798,562,547,059,358,000 | 34.971182 | 130 | 0.608476 | false | 3.417853 | false | false | false |
gstarnberger/paasta | paasta_tools/check_marathon_services_replication.py | 1 | 17359 | #!/usr/bin/env python
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage: ./check_marathon_services_replication.py [options]
This is a script that checks the number of HAProxy backends via Synapse against
the expected amount that should've been deployed via Marathon in a mesos cluster.
Basically, the script checks smartstack.yaml for listed namespaces, and then queries
Synapse for the number of available backends for that namespace. It then goes through
the Marathon service configuration file for that cluster, and sees how many instances
are expected to be available for that namespace based on the number of instances deployed
on that namespace.
After retrieving that information, a fraction of available instances is calculated
(available/expected), and then compared against a threshold. The default threshold is
50, meaning if less than 50% of a service's backends are available, the script sends
CRITICAL. If replication_threshold is defined in the yelpsoa config for a service
instance then it will be used instead.
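For example, with 10 expected instances and only 4 available backends, the
ratio is 40%, which is below the default threshold of 50, so the script sends
CRITICAL.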
"""
import argparse
import logging
from datetime import datetime
from datetime import timedelta
import pysensu_yelp
from paasta_tools import marathon_tools
from paasta_tools import mesos_tools
from paasta_tools import monitoring_tools
from paasta_tools.marathon_tools import format_job_id
from paasta_tools.monitoring import replication_utils
from paasta_tools.utils import _log
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import datetime_from_utc_to_local
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import is_under_replicated
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import NoDeploymentsAvailable
log = logging.getLogger(__name__)
def send_event(service, namespace, cluster, soa_dir, status, output):
"""Send an event to sensu via pysensu_yelp with the given information.
:param service: The service name the event is about
:param namespace: The namespace of the service the event is about
:param soa_dir: The service directory to read monitoring information from
:param status: The status to emit for this event
:param output: The output to emit for this event"""
    # service and namespace together form a job id like "mumble.main"
monitoring_overrides = marathon_tools.load_marathon_service_config(
service, namespace, cluster).get_monitoring()
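    # Default the alert delay to 2 minutes when the service hasn't configured
    # one, and re-check every minute regardless.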
if 'alert_after' not in monitoring_overrides:
monitoring_overrides['alert_after'] = '2m'
monitoring_overrides['check_every'] = '1m'
monitoring_overrides['runbook'] = monitoring_tools.get_runbook(monitoring_overrides, service, soa_dir=soa_dir)
check_name = 'check_marathon_services_replication.%s' % compose_job_id(service, namespace)
monitoring_tools.send_event(service, check_name, monitoring_overrides, status, output, soa_dir)
_log(
service=service,
line='Replication: %s' % output,
component='monitoring',
level='debug',
cluster=cluster,
instance=namespace,
)
def parse_args():
epilog = "PERCENTAGE is an integer value representing the percentage of available to expected instances"
parser = argparse.ArgumentParser(epilog=epilog)
parser.add_argument('-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR",
default=marathon_tools.DEFAULT_SOA_DIR,
help="define a different soa config directory")
parser.add_argument('-v', '--verbose', action='store_true',
dest="verbose", default=False)
options = parser.parse_args()
return options
def check_smartstack_replication_for_instance(
service,
instance,
cluster,
soa_dir,
expected_count,
system_paasta_config,
):
"""Check a set of namespaces to see if their number of available backends is too low,
emitting events to Sensu based on the fraction available and the thresholds defined in
the corresponding yelpsoa config.
:param service: A string like example_service
    :param instance: The instance of the service to check, like "main" or "canary"
:param cluster: name of the cluster
:param soa_dir: The SOA configuration directory to read from
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
"""
namespace = marathon_tools.read_namespace_for_service_instance(service, instance, soa_dir=soa_dir)
if namespace != instance:
log.debug("Instance %s is announced under namespace: %s. "
"Not checking replication for it" % (instance, namespace))
return
full_name = compose_job_id(service, instance)
job_config = marathon_tools.load_marathon_service_config(service, instance, cluster)
crit_threshold = job_config.get_replication_crit_percentage()
monitoring_blacklist = job_config.get_monitoring_blacklist()
log.info('Checking instance %s in smartstack', full_name)
smartstack_replication_info = load_smartstack_info_for_service(
service=service,
namespace=namespace,
soa_dir=soa_dir,
blacklist=monitoring_blacklist,
system_paasta_config=system_paasta_config,
)
log.debug('Got smartstack replication info for %s: %s' % (full_name, smartstack_replication_info))
if len(smartstack_replication_info) == 0:
status = pysensu_yelp.Status.CRITICAL
output = ('Service %s has no Smartstack replication info. Make sure the discover key in your smartstack.yaml '
'is valid!\n') % full_name
log.error(output)
else:
expected_count_per_location = int(expected_count / len(smartstack_replication_info))
output = ''
under_replication_per_location = []
for location, available_backends in sorted(smartstack_replication_info.iteritems()):
num_available_in_location = available_backends.get(full_name, 0)
under_replicated, ratio = is_under_replicated(
num_available_in_location, expected_count_per_location, crit_threshold)
if under_replicated:
output += '- Service %s has %d out of %d expected instances in %s (CRITICAL: %d%%)\n' % (
full_name, num_available_in_location, expected_count_per_location, location, ratio)
else:
output += '- Service %s has %d out of %d expected instances in %s (OK: %d%%)\n' % (
full_name, num_available_in_location, expected_count_per_location, location, ratio)
under_replication_per_location.append(under_replicated)
if any(under_replication_per_location):
status = pysensu_yelp.Status.CRITICAL
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that a SmartStack powered loadbalancer (haproxy)\n"
" doesn't have enough healthy backends. Not having enough healthy backends\n"
" means that clients of that service will get 503s (http) or connection refused\n"
" (tcp) when trying to connect to it.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply not have enough copies or it could simply be\n"
" unhealthy in that location. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
"\n"
" * Widen SmartStack discovery settings\n"
" * Increase the instance count\n"
"\n"
) % {
'service': service,
'instance': instance,
'cluster': cluster,
}
log.error(output)
else:
status = pysensu_yelp.Status.OK
log.info(output)
send_event(service=service, namespace=instance, cluster=cluster, soa_dir=soa_dir, status=status, output=output)
def get_healthy_marathon_instances_for_short_app_id(client, app_id):
tasks = client.list_tasks()
tasks_for_app = [task for task in tasks if task.app_id.startswith('/%s' % app_id)]
one_minute_ago = datetime.now() - timedelta(minutes=1)
healthy_tasks = []
for task in tasks_for_app:
if marathon_tools.is_task_healthy(task, default_healthy=True) \
and task.started_at is not None \
and datetime_from_utc_to_local(task.started_at) < one_minute_ago:
healthy_tasks.append(task)
return len(healthy_tasks)
def check_healthy_marathon_tasks_for_service_instance(client, service, instance, cluster,
soa_dir, expected_count):
app_id = format_job_id(service, instance)
log.info("Checking %s in marathon as it is not in smartstack" % app_id)
num_healthy_tasks = get_healthy_marathon_instances_for_short_app_id(client, app_id)
send_event_if_under_replication(
service=service,
instance=instance,
cluster=cluster,
expected_count=expected_count,
num_available=num_healthy_tasks,
soa_dir=soa_dir,
)
def send_event_if_under_replication(
service,
instance,
cluster,
expected_count,
num_available,
soa_dir,
):
full_name = compose_job_id(service, instance)
job_config = marathon_tools.load_marathon_service_config(service, instance, cluster)
crit_threshold = job_config.get_replication_crit_percentage()
output = ('Service %s has %d out of %d expected instances available!\n' +
'(threshold: %d%%)') % (full_name, num_available, expected_count, crit_threshold)
under_replicated, _ = is_under_replicated(num_available, expected_count, crit_threshold)
if under_replicated:
output += (
"\n\n"
"What this alert means:\n"
"\n"
" This replication alert means that the service PaaSTA can't keep the\n"
" requested number of copies up and healthy in the cluster.\n"
"\n"
"Reasons this might be happening:\n"
"\n"
" The service may simply unhealthy. There also may not be enough resources\n"
" in the cluster to support the requested instance count.\n"
"\n"
"Things you can do:\n"
"\n"
" * Increase the instance count\n"
" * Fix the cause of the unhealthy service. Try running:\n"
"\n"
" paasta status -s %(service)s -i %(instance)s -c %(cluster)s -vv\n"
) % {
'service': service,
'instance': instance,
'cluster': cluster,
}
log.error(output)
status = pysensu_yelp.Status.CRITICAL
else:
log.info(output)
status = pysensu_yelp.Status.OK
send_event(
service=service,
namespace=instance,
cluster=cluster,
soa_dir=soa_dir,
status=status,
output=output)
def check_service_replication(client, service, instance, cluster, soa_dir, system_paasta_config):
"""Checks a service's replication levels based on how the service's replication
should be monitored. (smartstack or mesos)
:param service: Service name, like "example_service"
:param instance: Instance name, like "main" or "canary"
:param cluster: name of the cluster
:param soa_dir: The SOA configuration directory to read from
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
"""
job_id = compose_job_id(service, instance)
try:
expected_count = marathon_tools.get_expected_instance_count_for_namespace(service, instance, soa_dir=soa_dir)
except NoDeploymentsAvailable:
log.debug('deployments.json missing for %s. Skipping replication monitoring.' % job_id)
return
if expected_count is None:
return
log.info("Expecting %d total tasks for %s" % (expected_count, job_id))
proxy_port = marathon_tools.get_proxy_port_for_instance(service, instance, soa_dir=soa_dir)
if proxy_port is not None:
check_smartstack_replication_for_instance(
service=service,
instance=instance,
cluster=cluster,
soa_dir=soa_dir,
expected_count=expected_count,
system_paasta_config=system_paasta_config,
)
else:
check_healthy_marathon_tasks_for_service_instance(
client=client,
service=service,
instance=instance,
cluster=cluster,
soa_dir=soa_dir,
expected_count=expected_count,
)
def load_smartstack_info_for_service(service, namespace, soa_dir, blacklist, system_paasta_config):
"""Retrives number of available backends for given services
:param service_instances: A list of tuples of (service, instance)
:param namespaces: list of Smartstack namespaces
:param blacklist: A list of blacklisted location tuples in the form (location, value)
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
:returns: a dictionary of the form
::
{
'location_type': {
'unique_location_name': {
                'service.instance': <# of available backends>
},
'other_unique_location_name': ...
}
}
"""
service_namespace_config = marathon_tools.load_service_namespace_config(service, namespace,
soa_dir=soa_dir)
discover_location_type = service_namespace_config.get_discover()
return get_smartstack_replication_for_attribute(
attribute=discover_location_type,
service=service,
namespace=namespace,
blacklist=blacklist,
system_paasta_config=system_paasta_config,
)
def get_smartstack_replication_for_attribute(attribute, service, namespace, blacklist, system_paasta_config):
"""Loads smartstack replication from a host with the specified attribute
:param attribute: a Mesos attribute
:param service: A service name, like 'example_service'
:param namespace: A particular smartstack namespace to inspect, like 'main'
:param blacklist: A list of blacklisted location tuples in the form of (location, value)
:param system_paasta_config: A SystemPaastaConfig object representing the system configuration.
:returns: a dictionary of the form {'<unique_attribute_value>': <smartstack replication hash>}
        (the dictionary will contain keys for all unique attribute values)
"""
replication_info = {}
unique_values = mesos_tools.get_mesos_slaves_grouped_by_attribute(attribute=attribute, blacklist=blacklist)
full_name = compose_job_id(service, namespace)
for value, hosts in unique_values.iteritems():
# arbitrarily choose the first host with a given attribute to query for replication stats
synapse_host = hosts[0]
repl_info = replication_utils.get_replication_for_services(
synapse_host=synapse_host,
synapse_port=system_paasta_config.get_synapse_port(),
synapse_haproxy_url_format=system_paasta_config.get_synapse_haproxy_url_format(),
services=[full_name],
)
replication_info[value] = repl_info
return replication_info
def main():
args = parse_args()
soa_dir = args.soa_dir
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
service_instances = get_services_for_cluster(
cluster=cluster, instance_type='marathon', soa_dir=args.soa_dir)
config = marathon_tools.load_marathon_config()
client = marathon_tools.get_marathon_client(config.get_url(), config.get_username(), config.get_password())
for service, instance in service_instances:
check_service_replication(
client=client,
service=service,
instance=instance,
cluster=cluster,
soa_dir=soa_dir,
system_paasta_config=system_paasta_config,
)
if __name__ == "__main__":
main()
| apache-2.0 | -9,141,361,211,709,925,000 | 41.23601 | 118 | 0.656029 | false | 3.981422 | true | false | false |
lcrees/twoq | twoq/tests/auto/queuing.py | 1 | 2927 | # -*- coding: utf-8 -*-
'''auto queuing call chain test mixins'''
class AQMixin(object):
###########################################################################
## queue manipulation #####################################################
###########################################################################
def test_repr(self):
from stuf.six import strings
self.assertTrue(isinstance(
self.qclass([1, 2, 3, 4, 5, 6]).__repr__(), strings,
))
def test_ro(self):
self.assertListEqual(
self.qclass([1, 2, 3, 4, 5, 6]).ro().peek(), [1, 2, 3, 4, 5, 6],
)
def test_extend(self):
self.assertEqual(
self.qclass().extend([1, 2, 3, 4, 5, 6]).outsync().end(),
[1, 2, 3, 4, 5, 6],
)
def test_outextend(self):
self.assertEqual(
self.qclass().outextend([1, 2, 3, 4, 5, 6]).end(),
[1, 2, 3, 4, 5, 6],
)
def test_extendleft(self):
self.assertListEqual(
self.qclass().extendleft([1, 2, 3, 4, 5, 6]).outsync().end(),
[6, 5, 4, 3, 2, 1]
)
def test_append(self):
autoq = self.qclass().append('foo').outsync()
self.assertEqual(autoq.end(), 'foo')
def test_appendleft(self):
autoq = self.qclass().appendleft('foo').outsync()
self.assertEqual(autoq.end(), 'foo')
def test_inclear(self):
self.assertEqual(len(list(self.qclass([1, 2, 5, 6]).inclear())), 0)
def test_outclear(self):
self.assertEqual(
len(list(self.qclass([1, 2, 5, 6]).outclear().outgoing)), 0
)
###########################################################################
## queue balancing ########################################################
###########################################################################
def test_insync(self):
q = self.qclass([1, 2, 3, 4, 5, 6]).outshift().inclear().shift()
self.assertListEqual(list(q.incoming), list(q.outgoing))
def test_inshift(self):
q = self.qclass([1, 2, 3, 4, 5, 6]).outshift().sync()
self.assertListEqual(list(q.incoming), list(q.outgoing))
def test_outsync(self):
q = self.qclass([1, 2, 3, 4, 5, 6]).outshift()
self.assertListEqual(list(q.incoming), list(q.outgoing))
def test_outshift(self):
q = self.qclass([1, 2, 3, 4, 5, 6]).outsync()
self.assertListEqual(list(q.incoming), list(q.outgoing))
##########################################################################
# queue information ######################################################
##########################################################################
def test_results(self):
self.assertListEqual(
list(self.qclass(1, 2, 3, 4, 5, 6).outsync().results()),
[1, 2, 3, 4, 5, 6],
)
| bsd-3-clause | 2,572,901,103,623,996,000 | 33.845238 | 79 | 0.413393 | false | 3.982313 | true | false | false |
shekkizh/TensorflowProjects | ImageArt/ImageColoring.py | 1 | 7092 | __author__ = 'Charlie'
"""Image coloring by fully convolutional networks - incomplete """
import numpy as np
import tensorflow as tf
import os, sys, inspect
from datetime import datetime
import scipy.misc as misc
lib_path = os.path.realpath(
os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..")))
if lib_path not in sys.path:
sys.path.insert(0, lib_path)
import TensorflowUtils as utils
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("data_dir", "Data_zoo/CIFAR10_data/", """Path to the CIFAR10 data""")
tf.flags.DEFINE_string("mode", "train", "Network mode train/ test")
tf.flags.DEFINE_string("test_image_path", "", "Path to test image - read only if mode is test")
tf.flags.DEFINE_integer("batch_size", "128", "train batch size")
tf.flags.DEFINE_string("logs_dir", "logs/ImageColoring_logs/", """Path to save logs and checkpoint if needed""")
DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
LEARNING_RATE = 1e-3
MAX_ITERATIONS = 100001
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 20000
IMAGE_SIZE = 32
def read_cifar10(filename_queue):
class CIFAR10Record(object):
pass
result = CIFAR10Record()
label_bytes = 1
result.height = IMAGE_SIZE
result.width = IMAGE_SIZE
result.depth = 3
image_bytes = result.height * result.width * result.depth
record_bytes = label_bytes + image_bytes
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
result.key, value = reader.read(filename_queue)
record_bytes = tf.decode_raw(value, tf.uint8)
depth_major = tf.cast(tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),
[result.depth, result.height, result.width]), tf.float32)
image = tf.transpose(depth_major, [1, 2, 0])
# extended_image = tf.reshape(image, (result.height, result.width, result.depth))
result.color_image = image
print result.color_image.get_shape()
print "Converting image to gray scale"
    result.gray_image = (0.21 * result.color_image[:, :, 2] +
                         0.72 * result.color_image[:, :, 1] +
                         0.07 * result.color_image[:, :, 0])
result.gray_image = tf.expand_dims(result.gray_image, 2)
print result.gray_image.get_shape()
return result
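# Format note: each CIFAR-10 binary record read above is one label byte
# followed by 32*32*3 = 3072 image bytes, hence record_bytes = 3073; the
# pixel block is stored channel-major, which is why depth_major is
# transposed to height x width x depth.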
def get_image(image_dir):
image = misc.imread(image_dir)
image = np.ndarray.reshape(image.astype(np.float32), ((1,) + image.shape))
return image
def inputs():
data_dir = os.path.join(FLAGS.data_dir, 'cifar-10-batches-bin')
filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i) for i in xrange(1, 6)]
for f in filenames:
if not tf.gfile.Exists(f):
raise ValueError('Failed to find file: ' + f)
filename_queue = tf.train.string_input_producer(filenames)
read_input = read_cifar10(filename_queue)
num_preprocess_threads = 8
min_queue_examples = int(0.4 * NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN)
print "Shuffling"
input_gray, input_colored = tf.train.shuffle_batch([read_input.gray_image, read_input.color_image],
batch_size=FLAGS.batch_size,
num_threads=num_preprocess_threads,
capacity=min_queue_examples + 3 * FLAGS.batch_size,
min_after_dequeue=min_queue_examples)
input_gray = (input_gray - 128) / 255.0
input_colored = (input_colored - 128) / 255.0
return input_gray, input_colored
def inference(image):
W1 = utils.weight_variable_xavier_initialized([9, 9, 1, 32])
b1 = utils.bias_variable([32])
tf.histogram_summary("W1", W1)
tf.histogram_summary("b1", b1)
h_conv1 = tf.nn.relu(utils.conv2d_basic(image, W1, b1))
W2 = utils.weight_variable_xavier_initialized([3, 3, 32, 64])
b2 = utils.bias_variable([64])
tf.histogram_summary("W2", W2)
tf.histogram_summary("b2", b2)
h_conv2 = tf.nn.relu(utils.conv2d_strided(h_conv1, W2, b2))
W3 = utils.weight_variable_xavier_initialized([3, 3, 64, 128])
b3 = utils.bias_variable([128])
tf.histogram_summary("W3", W3)
tf.histogram_summary("b3", b3)
h_conv3 = tf.nn.relu(utils.conv2d_strided(h_conv2, W3, b3))
# upstrides
W4 = utils.weight_variable_xavier_initialized([3, 3, 64, 128])
b4 = utils.bias_variable([64])
tf.histogram_summary("W4", W4)
tf.histogram_summary("b4", b4)
h_conv4 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv3, W4, b4))
W5 = utils.weight_variable_xavier_initialized([3, 3, 32, 64])
b5 = utils.bias_variable([32])
tf.histogram_summary("W5", W5)
tf.histogram_summary("b5", b5)
h_conv5 = tf.nn.relu(utils.conv2d_transpose_strided(h_conv4, W5, b5))
W6 = utils.weight_variable_xavier_initialized([9, 9, 32, 3])
b6 = utils.bias_variable([3])
tf.histogram_summary("W6", W6)
tf.histogram_summary("b6", b6)
pred_image = tf.nn.tanh(utils.conv2d_basic(h_conv5, W6, b6))
return pred_image
def loss(pred, colored):
rmse = tf.sqrt(2 * tf.nn.l2_loss(tf.sub(colored, pred))) / FLAGS.batch_size
tf.scalar_summary("RMSE", rmse)
return rmse
def train(loss_val, step):
learning_rate = tf.train.exponential_decay(LEARNING_RATE, step, 0.4 * MAX_ITERATIONS, 0.99)
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss_val, global_step=step)
return train_op
def main(argv=None):
utils.maybe_download_and_extract(FLAGS.data_dir, DATA_URL, is_tarfile=True)
print "Setting up model..."
global_step = tf.Variable(0,trainable=False)
gray, color = inputs()
pred = 255 * inference(gray) + 128
tf.image_summary("Gray", gray, max_images=1)
tf.image_summary("Ground_truth", color, max_images=1)
tf.image_summary("Prediction", pred, max_images=1)
image_loss = loss(pred, color)
train_op = train(image_loss, global_step)
summary_op = tf.merge_all_summaries()
with tf.Session() as sess:
print "Setting up summary writer, queue, saver..."
sess.run(tf.initialize_all_variables())
summary_writer = tf.train.SummaryWriter(FLAGS.logs_dir, sess.graph)
saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
if ckpt and ckpt.model_checkpoint_path:
print "Restoring model from checkpoint..."
saver.restore(sess, ckpt.model_checkpoint_path)
tf.train.start_queue_runners(sess)
for step in xrange(MAX_ITERATIONS):
if step % 400 == 0:
loss_val, summary_str = sess.run([image_loss, summary_op])
print "Step %d, Loss: %g" % (step, loss_val)
summary_writer.add_summary(summary_str, global_step=step)
if step % 1000 == 0:
saver.save(sess, FLAGS.logs_dir + "model.ckpt", global_step=step)
print "%s" % datetime.now()
sess.run(train_op)
if __name__ == "__main__":
tf.app.run()
| mit | -4,695,333,736,710,493,000 | 36.925134 | 112 | 0.628173 | false | 3.187416 | false | false | false |
tbabej/freeipa | ipalib/pkcs10.py | 1 | 9170 | # Authors:
# Rob Crittenden <[email protected]>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import sys
import base64
import nss.nss as nss
from pyasn1.type import univ, char, namedtype, tag
from pyasn1.codec.der import decoder
import six
if six.PY3:
unicode = str
PEM = 0
DER = 1
SAN_DNSNAME = 'DNS name'
SAN_RFC822NAME = 'RFC822 Name'
SAN_OTHERNAME_UPN = 'Other Name (OID.1.3.6.1.4.1.311.20.2.3)'
SAN_OTHERNAME_KRB5PRINCIPALNAME = 'Other Name (OID.1.3.6.1.5.2.2)'
def get_subject(csr, datatype=PEM):
"""
Given a CSR return the subject value.
This returns an nss.DN object.
"""
request = load_certificate_request(csr, datatype)
try:
return request.subject
finally:
del request
def get_extensions(csr, datatype=PEM):
"""
Given a CSR return OIDs of certificate extensions.
The return value is a tuple of strings
"""
request = load_certificate_request(csr, datatype)
# Work around a bug in python-nss where nss.oid_dotted_decimal
# errors on unrecognised OIDs
#
# https://bugzilla.redhat.com/show_bug.cgi?id=1246729
#
def get_prefixed_oid_str(ext):
"""Returns a string like 'OID.1.2...'."""
if ext.oid_tag == 0:
return repr(ext)
else:
return nss.oid_dotted_decimal(ext.oid)
return tuple(get_prefixed_oid_str(ext)[4:]
for ext in request.extensions)
class _PrincipalName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('name-type', univ.Integer().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType('name-string', univ.SequenceOf(char.GeneralString()).subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
),
)
class _KRB5PrincipalName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('realm', char.GeneralString().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType('principalName', _PrincipalName().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
),
)
def _decode_krb5principalname(data):
principal = decoder.decode(data, asn1Spec=_KRB5PrincipalName())[0]
realm = (str(principal['realm']).replace('\\', '\\\\')
.replace('@', '\\@'))
name = principal['principalName']['name-string']
name = '/'.join(str(n).replace('\\', '\\\\')
.replace('/', '\\/')
.replace('@', '\\@') for n in name)
name = '%s@%s' % (name, realm)
return name
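# Illustrative example (hypothetical values): a KRB5PrincipalName with realm
# 'EXAMPLE.COM' and name-string ['HTTP', 'www.example.com'] decodes to
# 'HTTP/[email protected]'; '/', '@' and '\' occurring inside
# components are backslash-escaped.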
class _AnotherName(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type-id', univ.ObjectIdentifier()),
namedtype.NamedType('value', univ.Any().subtype(
explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
)
class _GeneralName(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('otherName', _AnotherName().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
),
namedtype.NamedType('rfc822Name', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
),
namedtype.NamedType('dNSName', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))
),
namedtype.NamedType('x400Address', univ.Sequence().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
),
namedtype.NamedType('directoryName', univ.Choice().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))
),
namedtype.NamedType('ediPartyName', univ.Sequence().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))
),
namedtype.NamedType('uniformResourceIdentifier', char.IA5String().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))
),
namedtype.NamedType('iPAddress', univ.OctetString().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))
),
namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))
),
)
class _SubjectAltName(univ.SequenceOf):
componentType = _GeneralName()
def get_subjectaltname(csr, datatype=PEM):
"""
Given a CSR return the subjectaltname value, if any.
    The return value is a tuple of (name type, name) pairs, or None
"""
request = load_certificate_request(csr, datatype)
for extension in request.extensions:
if extension.oid_tag == nss.SEC_OID_X509_SUBJECT_ALT_NAME:
break
else:
return None
del request
nss_names = nss.x509_alt_name(extension.value, nss.AsObject)
asn1_names = decoder.decode(extension.value.data,
asn1Spec=_SubjectAltName())[0]
names = []
for nss_name, asn1_name in zip(nss_names, asn1_names):
name_type = nss_name.type_string
if name_type == SAN_OTHERNAME_KRB5PRINCIPALNAME:
name = _decode_krb5principalname(asn1_name['otherName']['value'])
else:
name = nss_name.name
names.append((name_type, name))
return tuple(names)
# Unfortunately, NSS can only parse the extension request attribute, so
# we have to parse friendly name ourselves (see RFC 2986)
class _Attribute(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('type', univ.ObjectIdentifier()),
namedtype.NamedType('values', univ.Set()),
)
class _Attributes(univ.SetOf):
componentType = _Attribute()
class _CertificationRequestInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', univ.Integer()),
namedtype.NamedType('subject', univ.Sequence()),
namedtype.NamedType('subjectPublicKeyInfo', univ.Sequence()),
namedtype.OptionalNamedType('attributes', _Attributes().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class _CertificationRequest(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certificationRequestInfo',
_CertificationRequestInfo()),
namedtype.NamedType('signatureAlgorithm', univ.Sequence()),
namedtype.NamedType('signatureValue', univ.BitString()),
)
_FRIENDLYNAME = univ.ObjectIdentifier('1.2.840.113549.1.9.20')
def get_friendlyname(csr, datatype=PEM):
"""
Given a CSR return the value of the friendlyname attribute, if any.
The return value is a string.
"""
if datatype == PEM:
csr = strip_header(csr)
csr = base64.b64decode(csr)
csr = decoder.decode(csr, asn1Spec=_CertificationRequest())[0]
for attribute in csr['certificationRequestInfo']['attributes']:
if attribute['type'] == _FRIENDLYNAME:
return unicode(attribute['values'][0])
return None
def strip_header(csr):
"""
Remove the header and footer from a CSR.
"""
headerlen = 40
s = csr.find("-----BEGIN NEW CERTIFICATE REQUEST-----")
if s == -1:
headerlen = 36
s = csr.find("-----BEGIN CERTIFICATE REQUEST-----")
if s >= 0:
e = csr.find("-----END")
csr = csr[s+headerlen:e]
return csr
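# Minimal usage sketch (assumes `pem` holds a PEM-encoded CSR):
#     body = strip_header(pem)        # base64 body without BEGIN/END markers
#     der = base64.b64decode(body)    # raw DER bytes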
def load_certificate_request(csr, datatype=PEM):
"""
Given a base64-encoded certificate request, with or without the
header/footer, return a request object.
"""
if datatype == PEM:
csr = strip_header(csr)
csr = base64.b64decode(csr)
# A fail-safe so we can always read a CSR. python-nss/NSS will segfault
# otherwise
if not nss.nss_is_initialized():
nss.nss_init_nodb()
return nss.CertificateRequest(csr)
if __name__ == '__main__':
nss.nss_init_nodb()
# Read PEM request from stdin and print out its components
csrlines = sys.stdin.readlines()
csr = ''.join(csrlines)
print(load_certificate_request(csr))
print(get_subject(csr))
print(get_subjectaltname(csr))
print(get_friendlyname(csr))
| gpl-3.0 | -1,926,462,383,126,963,000 | 33.603774 | 89 | 0.648637 | false | 3.833612 | false | false | false |
tshirtman/ultimate-smash-friends | usf/screens/configure.py | 1 | 2357 | ################################################################################
# copyright 2009 Gabriel Pettier <[email protected]> #
# #
# This file is part of Ultimate Smash Friends. #
# #
# Ultimate Smash Friends is free software: you can redistribute it and/or #
# modify it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# Ultimate Smash Friends is distributed in the hope that it will be useful, but#
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or#
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along with #
# Ultimate Smash Friends. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
'''
The Base Configuration screen, show buttons to other configuration screens.
'''
from usf.screens.screen import Screen
from usf.widgets.box import VBox
from usf.widgets.button import Button
from usf.translation import _
class Configure(Screen):
def init(self):
self.add(VBox())
self.name = _("configure")
#I18N:option screen
self.widget.add(Button(_('Audio')))
self.widget.add(Button(_('Display')))
self.widget.add(Button(_('Keyboard')))
self.widget.add(Button(_('Back')), margin=100)
def callback(self, action):
if action.text == _('Audio'):
return {'goto': 'sound'}
if action.text == _('Display'):
return {'goto': 'display'}
if action.text == _('Keyboard'):
return {'goto': 'keyboard'}
if action.text == _('Back'):
return {'goto': 'back'}
| gpl-3.0 | -8,758,575,220,416,393,000 | 42.648148 | 80 | 0.474332 | false | 5.025586 | false | false | false |
nickmilon/mongoUtils | mongoUtils/importsExports.py | 1 | 8999 | """Classes used to import/export data to mongoDB
"""
from Hellas.Thebes import format_header
xlrd = None # reserved to import xlrd on demand
def _xlrd_on_demand():
global xlrd
if xlrd is None:
try:
import xlrd
except ImportError:
print ("this module requires xlrd library please install (pip install xlrd")
raise
return xlrd
def import_workbook(workbook, db, fields=None, ws_options={'dt_python': True}, stats_every=1000):
"""save all workbook's sheets to a db
consider using :class:`~ImportXls` class instead which is more flexible but imports only a single sheet
:Parameters: see :class:`~ImportXls` class
:Example:
>>> from pymongo import MongoClient
>>> from mongoUtils import _PATH_TO_DATA
>>> db = MongoClient().test
>>> res = import_workbook(_PATH_TO_DATA + "example_workbook.xlsx", db)
>>> res
[{'rows': 368, 'db': 'test', 'collection': 'weather'}, {'rows': 1007, 'db': 'test', 'collection': 'locations'}]
"""
_xlrd_on_demand()
workbook = xlrd.open_workbook(workbook, on_demand=True)
return [ImportXls(workbook, i, db, fields=fields, ws_options=ws_options, stats_every=stats_every)()
for i in range(0, workbook.nsheets)]
class Import(object):
"""generic class for importing into a mongoDB collection, successors should use/extend this class
:Parameters:
        - collection: a pymongo collection object that will be used for output
- drop_collection: (defaults to True)
- True drops output collection on init before writing to it
- False appends to output collection
- stats_every: int print import stats every stats_every rows or 0 to cancel stats (defaults to 10000)
"""
format_stats = "|{db:16s}|{collection:16s}|{rows:15,d}|"
format_stats_header = format_header(format_stats)
def __init__(self, collection, drop_collection=True, stats_every=10000):
if drop_collection:
collection.database.drop_collection(collection.name)
self.info = {'db': collection.database.name, 'collection': collection.name, 'rows': 0}
self.stats_every = stats_every
self.collection = collection
def import_to_collection(self):
"""successors should implement this"""
raise NotImplementedError
def _import_to_collection_before(self):
"""successors can call this or implement their's"""
if self.stats_every > 0:
print(self.format_stats_header)
def _import_to_collection_after(self):
"""successors can call this or implement their's"""
if self.stats_every > 0:
self.print_stats()
def print_stats(self):
print(self.format_stats.format(**self.info))
def __call__(self):
return self.import_to_collection()
class ImportXls(Import):
"""save an an xls sheet to a collection
`see <https://github.com/python-excel/xlrd>`_
:Parameters:
- workbook: path to a workbook or an xlrd workbook object
- sheet: name of a work sheet in workbook or an int (sheet number in workbook)
- db: a pymongo database object
- coll_name: str output collection name or None to create name from sheet name (defaults to None)
    - row_start: int or None; starting row, or None to start from the first row (defaults to None)
    - row_end: int or None; ending row, or None to end at the last row (defaults to None)
- fields:
- a list with field names
- or True (to treat first row as field names)
- or None (for auto creating field names i.e: [fld_1, fld_2, etc]
- or a function that:
- takes one argument (a list of row values)
- returns a dict (if this dict contains a key '_id' this value will be used for _id)
- >>> lambda x: {'coordinates': [x[0] , x[1]]}
- ws_options: (optional) a dictionary specifying how to treat cell values
- dt_python : bool convert dates to python datetime
            - integers_only : round float values to int (helpful because all int values are represented as floats in sheets)
- negatives_to_0 : treat all negative numbers as 0's
- drop_collection: (defaults to True)
- True drops output collection on init before writing to it
- False appends to output collection
- stats_every: int print import stats every stats_every rows or 0 to cancel stats (defaults to 10000)
:Example:
>>> from pymongo import MongoClient
>>> from mongoUtils import _PATH_TO_DATA
>>> db = MongoClient().test
>>> res = ImportXls(_PATH_TO_DATA + "example_workbook.xlsx", 0, db)()
>>> res
{'rows': 367, 'db': u'test', 'collection': u'weather'}
"""
def __init__(self,
workbook, sheet,
db, coll_name=None,
row_start=None, row_end=None,
fields=True,
ws_options={'dt_python': True, 'integers_only': False, 'negatives_to_0': False},
stats_every=10000,
drop_collection=True):
_xlrd_on_demand()
if not isinstance(workbook, xlrd.book.Book):
workbook = xlrd.open_workbook(workbook, on_demand=True)
self.workbook = workbook
self.sheet = workbook.sheet_by_index(sheet) if isinstance(sheet, int) else workbook.sheet_by_name(sheet)
self._ws_options = {}
self.ws_options_set(ws_options)
coll_name = self.fix_name(self.sheet.name) if coll_name is None else coll_name
if row_start is None:
row_start = 1 if fields is True else 0
self.row_start = row_start
self.row_end = row_end
collection = db[coll_name]
super(ImportXls, self).__init__(collection, drop_collection=drop_collection, stats_every=stats_every)
self.auto_field_names(fields)
@property
def ws_options(self):
return self._ws_options
def ws_options_set(self, options_dict):
self._ws_options.update(options_dict)
def fix_name(self, name, cnt=0):
if name == '':
return 'fld_{}'.format(cnt)
else:
return name.replace(' ', '_').replace('.', '_').replace('$', '_')
def auto_field_names(self, fields):
row0_values = self.sheet.row_values(0)
if fields is True:
self._fields_or_fun = [self.fix_name(fn, cnt) for cnt, fn in enumerate(row0_values)]
elif fields is None:
self._fields_or_fun = ['fld_{}'.format(i) for i in range(len(row0_values))]
elif isinstance(fields, list):
self._fields_or_fun = [self.fix_name(fn, cnt) for cnt, fn in enumerate(fields)]
else: # then it has to be function
self._fields_or_fun = fields
return self._fields_or_fun
def row_to_doc(self, valueslist, _id=None):
if isinstance(self._fields_or_fun, list):
doc = dict(list(zip(self._fields_or_fun, valueslist)))
else:
doc = self._fields_or_fun(valueslist)
if _id is not None and doc.get('_id') is None:
doc['_id'] = _id
return doc
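    # Illustrative sketch of a custom `fields` function (mirrors the lambda
    # example in the class docstring): it receives the raw row values and
    # returns the document to insert, e.g.
    #     ImportXls(wb, 0, db, fields=lambda x: {'coordinates': [x[0], x[1]]})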
def ws_convert_cell(self, cl):
"""
:Parameters:
- cl an xlrd cell object
"""
# XL_CELL_BLANK XL_CELL_BOOLEAN XL_CELL_NUMBER XL_CELL_TEXT
tp = cl.ctype
vl = cl.value
if tp == xlrd.XL_CELL_NUMBER: # number
if self._ws_options.get('integers_only') is True:
if vl % 1 == 0:
vl = int(vl + 0.49999) # kind of round
if vl < 0 and self._ws_options.get('negatives_to_0'):
vl = 0
elif tp == xlrd.XL_CELL_DATE and self._ws_options.get('dt_python') is True:
vl = xlrd.xldate.xldate_as_datetime(vl, self.sheet.book.datemode)
return vl
def import_to_collection(self):
super(ImportXls, self)._import_to_collection_before()
outlist = []
for i in range(self.row_start, self.row_end or self.sheet.nrows):
self.info['rows'] += 1
row_values = [self.ws_convert_cell(cl) for cl in self.sheet.row(i)]
outlist.append(self.row_to_doc(row_values, i))
if self.stats_every and i % self.stats_every == 0:
self.print_stats()
if len(outlist) == 200:
try:
self.collection.insert_many(outlist)
outlist = []
except Exception:
print (outlist)
raise
if len(outlist) > 0:
self.collection.insert_many(outlist)
super(ImportXls, self)._import_to_collection_after()
return self.info
| apache-2.0 | -6,477,179,523,395,941,000 | 40.662037 | 119 | 0.595066 | false | 3.862232 | false | false | false |
Leberwurscht/OfflineDict | buildindex.py | 1 | 1491 | #!/usr/bin/python
# -*- coding: utf8 -*-
import sys, re
filename = sys.argv[1]
tokensize = int(sys.argv[2])
numbersize = int(sys.argv[3])
numbersize2 = int(sys.argv[4])
def normalize(s):
r = s.lower()
r = r.replace(u'ä',u'a');
r = r.replace(u'ö',u'o');
r = r.replace(u'ü',u'u');
r = r.replace(u'Ä',u'A');
r = r.replace(u'Ö',u'O');
r = r.replace(u'Ü',u'U');
r = r.replace(u'ß',u'ss');
r = r.replace(u'ñ',u'n');
r = r.replace(u'á',u'a');
r = r.replace(u'é',u'e');
r = r.replace(u'í',u'i');
r = r.replace(u'ó',u'o');
r = r.replace(u'ú',u'u');
r = r.replace(u'Á',u'A');
r = r.replace(u'É',u'E');
r = r.replace(u'Í',u'I');
r = r.replace(u'Ó',u'O');
r = r.replace(u'Ú',u'U');
return r.encode("utf8")
pos = 0
for line in open(filename):
linelength = len(line)
if line.strip() and not line[0]=="#":
length = len(line)
line = unicode(line, 'utf8')
        i = line.rindex('\t')
line = line[0:i]
red = re.sub(r'\[.*?\]|\{.*?\}','',line,flags=re.UNICODE).strip()
tokens = re.split(r'\W', red, flags=re.UNICODE)
for token in tokens:
ntoken = normalize(token)
if len(ntoken)>tokensize: raise Exception("increase tokensize")
if pos>10**numbersize-1: raise Exception("increase numbersize")
if length>10**numbersize2-1: raise Exception("increase numbersize2")
if ntoken: print ("%-"+str(tokensize)+"s %"+str(numbersize)+"d %"+str(numbersize2)+"d") % (ntoken, pos, length)
pos += linelength
| mpl-2.0 | 982,324,380,487,193,000 | 28.46 | 117 | 0.570944 | false | 2.442786 | false | false | false |
isohybrid/dotfile | vim/bundle/git:--github.com-klen-python-mode/pylibs/logilab/astng/scoped_nodes.py | 1 | 34414 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:[email protected]
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""This module contains the classes for "scoped" node, i.e. which are opening a
new local scope in the language definition : Module, Class, Function (and
Lambda, GenExpr, DictComp and SetComp to some extent).
"""
from __future__ import with_statement
__docformat__ = "restructuredtext en"
import sys
from itertools import chain
from logilab.common.compat import builtins
from logilab.common.decorators import cached
from logilab.astng import BUILTINS_MODULE
from logilab.astng.exceptions import NotFoundError, NoDefault, \
ASTNGBuildingException, InferenceError
from logilab.astng.node_classes import Const, DelName, DelAttr, \
Dict, From, List, Name, Pass, Raise, Return, Tuple, Yield, \
are_exclusive, LookupMixIn, const_factory as cf, unpack_infer
from logilab.astng.bases import NodeNG, InferenceContext, Instance,\
YES, Generator, UnboundMethod, BoundMethod, _infer_stmts, copy_context, \
BUILTINS_NAME
from logilab.astng.mixins import FilterStmtsMixin
from logilab.astng.bases import Statement
from logilab.astng.manager import ASTNGManager
def remove_nodes(func, cls):
def wrapper(*args, **kwargs):
nodes = [n for n in func(*args, **kwargs) if not isinstance(n, cls)]
if not nodes:
raise NotFoundError()
return nodes
return wrapper
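# Illustrative note: remove_nodes is applied below as
#     getattr = remove_nodes(getattr, DelName)
# so that name bindings removed by a `del` statement are filtered out of
# getattr() results, and NotFoundError is raised if only deletions remain.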
def function_to_method(n, klass):
if isinstance(n, Function):
if n.type == 'classmethod':
return BoundMethod(n, klass)
if n.type != 'staticmethod':
return UnboundMethod(n)
return n
def std_special_attributes(self, name, add_locals=True):
if add_locals:
locals = self.locals
else:
locals = {}
if name == '__name__':
return [cf(self.name)] + locals.get(name, [])
if name == '__doc__':
return [cf(self.doc)] + locals.get(name, [])
if name == '__dict__':
return [Dict()] + locals.get(name, [])
raise NotFoundError(name)
MANAGER = ASTNGManager()
def builtin_lookup(name):
"""lookup a name into the builtin module
return the list of matching statements and the astng for the builtin
module
"""
builtin_astng = MANAGER.astng_from_module(builtins)
if name == '__dict__':
return builtin_astng, ()
try:
stmts = builtin_astng.locals[name]
except KeyError:
stmts = ()
return builtin_astng, stmts
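# Minimal usage sketch (illustrative, assumes the builtins astng builds):
#     astng, stmts = builtin_lookup('len')
#     # `astng` is the Module node for builtins, `stmts` the nodes binding 'len'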
# TODO move this Mixin to mixins.py; problem: 'Function' in _scope_lookup
class LocalsDictNodeNG(LookupMixIn, NodeNG):
""" this class provides locals handling common to Module, Function
and Class nodes, including a dict like interface for direct access
to locals information
"""
# attributes below are set by the builder module or by raw factories
# dictionary of locals with name as key and node defining the local as
# value
def qname(self):
"""return the 'qualified' name of the node, eg module.name,
module.class.name ...
"""
if self.parent is None:
return self.name
return '%s.%s' % (self.parent.frame().qname(), self.name)
def frame(self):
"""return the first parent frame node (i.e. Module, Function or Class)
"""
return self
def scope(self):
"""return the first node defining a new scope (i.e. Module,
Function, Class, Lambda but also GenExpr, DictComp and SetComp)
"""
return self
def _scope_lookup(self, node, name, offset=0):
"""XXX method for interfacing the scope lookup"""
try:
stmts = node._filter_stmts(self.locals[name], self, offset)
except KeyError:
stmts = ()
if stmts:
return self, stmts
if self.parent: # i.e. not Module
# nested scope: if parent scope is a function, that's fine
# else jump to the module
pscope = self.parent.scope()
if not pscope.is_function:
pscope = pscope.root()
return pscope.scope_lookup(node, name)
return builtin_lookup(name) # Module
def set_local(self, name, stmt):
"""define <name> in locals (<stmt> is the node defining the name)
if the node is a Module node (i.e. has globals), add the name to
globals
if the name is already defined, ignore it
"""
#assert not stmt in self.locals.get(name, ()), (self, stmt)
self.locals.setdefault(name, []).append(stmt)
__setitem__ = set_local
def _append_node(self, child):
"""append a child, linking it in the tree"""
self.body.append(child)
child.parent = self
def add_local_node(self, child_node, name=None):
"""append a child which should alter locals to the given node"""
if name != '__class__':
            # adding the __class__ node as a child would cause infinite recursion later!
self._append_node(child_node)
self.set_local(name or child_node.name, child_node)
def __getitem__(self, item):
"""method from the `dict` interface returning the first node
associated with the given name in the locals dictionary
:type item: str
:param item: the name of the locally defined object
:raises KeyError: if the name is not defined
"""
return self.locals[item][0]
def __iter__(self):
"""method from the `dict` interface returning an iterator on
`self.keys()`
"""
return iter(self.keys())
def keys(self):
"""method from the `dict` interface returning a tuple containing
locally defined names
"""
return self.locals.keys()
def values(self):
"""method from the `dict` interface returning a tuple containing
locally defined nodes which are instance of `Function` or `Class`
"""
return [self[key] for key in self.keys()]
def items(self):
"""method from the `dict` interface returning a list of tuple
containing each locally defined name with its associated node,
which is an instance of `Function` or `Class`
"""
return zip(self.keys(), self.values())
def __contains__(self, name):
return name in self.locals
has_key = __contains__
# Module #####################################################################
class Module(LocalsDictNodeNG):
_astng_fields = ('body',)
fromlineno = 0
lineno = 0
# attributes below are set by the builder module or by raw factories
# the file from which as been extracted the astng representation. It may
# be None if the representation has been built from a built-in module
file = None
# the module name
name = None
# boolean for astng built from source (i.e. ast)
pure_python = None
# boolean for package module
package = None
# dictionary of globals with name as key and node defining the global
# as value
globals = None
# names of python special attributes (handled by getattr impl.)
special_attributes = set(('__name__', '__doc__', '__file__', '__path__',
'__dict__'))
# names of module attributes available through the global scope
scope_attrs = set(('__name__', '__doc__', '__file__', '__path__'))
def __init__(self, name, doc, pure_python=True):
self.name = name
self.doc = doc
self.pure_python = pure_python
self.locals = self.globals = {}
self.body = []
def block_range(self, lineno):
"""return block line numbers.
        start from the beginning, whatever the given lineno
"""
return self.fromlineno, self.tolineno
def scope_lookup(self, node, name, offset=0):
if name in self.scope_attrs and not name in self.locals:
try:
return self, self.getattr(name)
except NotFoundError:
return self, ()
return self._scope_lookup(node, name, offset)
def pytype(self):
return '%s.module' % BUILTINS_MODULE
def display_type(self):
return 'Module'
def getattr(self, name, context=None, ignore_locals=False):
if name in self.special_attributes:
if name == '__file__':
return [cf(self.file)] + self.locals.get(name, [])
if name == '__path__' and self.package:
return [List()] + self.locals.get(name, [])
return std_special_attributes(self, name)
if not ignore_locals and name in self.locals:
return self.locals[name]
if self.package:
try:
return [self.import_module(name, relative_only=True)]
except ASTNGBuildingException:
raise NotFoundError(name)
except Exception:# XXX pylint tests never pass here; do we need it?
import traceback
traceback.print_exc()
raise NotFoundError(name)
getattr = remove_nodes(getattr, DelName)
def igetattr(self, name, context=None):
"""inferred getattr"""
# set lookup name since this is necessary to infer on import nodes for
# instance
context = copy_context(context)
context.lookupname = name
try:
return _infer_stmts(self.getattr(name, context), context, frame=self)
except NotFoundError:
raise InferenceError(name)
def fully_defined(self):
"""return True if this module has been built from a .py file
and so contains a complete representation including the code
"""
return self.file is not None and self.file.endswith('.py')
def statement(self):
"""return the first parent node marked as statement node
consider a module as a statement...
"""
return self
def previous_sibling(self):
"""module has no sibling"""
return
def next_sibling(self):
"""module has no sibling"""
return
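    # NB: the (2, 8) bound below looks odd but works: it is true for every
    # Python 2.x release (there was never a 2.8) and false for any 3.x,
    # where absolute imports are always enabled.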
if sys.version_info < (2, 8):
def absolute_import_activated(self):
for stmt in self.locals.get('absolute_import', ()):
if isinstance(stmt, From) and stmt.modname == '__future__':
return True
return False
else:
absolute_import_activated = lambda self: True
def import_module(self, modname, relative_only=False, level=None):
"""import the given module considering self as context"""
if relative_only and level is None:
level = 0
absmodname = self.relative_to_absolute_name(modname, level)
try:
return MANAGER.astng_from_module_name(absmodname)
except ASTNGBuildingException:
# we only want to import a sub module or package of this module,
# skip here
if relative_only:
raise
return MANAGER.astng_from_module_name(modname)
def relative_to_absolute_name(self, modname, level):
"""return the absolute module name for a relative import.
The relative import can be implicit or explicit.
"""
        # XXX this returns nonsense when called on an absolute import
# like 'pylint.checkers.logilab.astng.utils'
# XXX doesn't return absolute name if self.name isn't absolute name
if self.absolute_import_activated() and level is None:
return modname
if level:
if self.package:
level = level - 1
package_name = self.name.rsplit('.', level)[0]
elif self.package:
package_name = self.name
else:
package_name = self.name.rsplit('.', 1)[0]
if package_name:
if not modname:
return package_name
return '%s.%s' % (package_name, modname)
return modname
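    # Worked example (illustrative): in module 'pkg.sub' (not a package),
    # `from .x import y` arrives with modname='x', level=1; package_name
    # becomes 'pkg' and the result is 'pkg.x'. With absolute imports active
    # and level=None, modname is returned unchanged.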
def wildcard_import_names(self):
"""return the list of imported names when this module is 'wildcard
imported'
It doesn't include the '__builtins__' name which is added by the
current CPython implementation of wildcard imports.
"""
# take advantage of a living module if it exists
try:
living = sys.modules[self.name]
except KeyError:
pass
else:
try:
return living.__all__
except AttributeError:
return [name for name in living.__dict__.keys()
if not name.startswith('_')]
# else lookup the astng
#
# We separate the different steps of lookup in try/excepts
# to avoid catching too many Exceptions
# However, we can not analyse dynamically constructed __all__
try:
all = self['__all__']
except KeyError:
return [name for name in self.keys() if not name.startswith('_')]
try:
explicit = all.assigned_stmts().next()
except InferenceError:
return [name for name in self.keys() if not name.startswith('_')]
except AttributeError:
# not an assignment node
# XXX infer?
return [name for name in self.keys() if not name.startswith('_')]
try:
            # should be a Tuple/List of constant strings / a single string is not allowed
return [const.value for const in explicit.elts]
except AttributeError:
return [name for name in self.keys() if not name.startswith('_')]
class ComprehensionScope(LocalsDictNodeNG):
def frame(self):
return self.parent.frame()
scope_lookup = LocalsDictNodeNG._scope_lookup
class GenExpr(ComprehensionScope):
_astng_fields = ('elt', 'generators')
def __init__(self):
self.locals = {}
self.elt = None
self.generators = []
class DictComp(ComprehensionScope):
_astng_fields = ('key', 'value', 'generators')
def __init__(self):
self.locals = {}
self.key = None
self.value = None
self.generators = []
class SetComp(ComprehensionScope):
_astng_fields = ('elt', 'generators')
def __init__(self):
self.locals = {}
self.elt = None
self.generators = []
class _ListComp(NodeNG):
"""class representing a ListComp node"""
_astng_fields = ('elt', 'generators')
elt = None
generators = None
if sys.version_info >= (3, 0):
class ListComp(_ListComp, ComprehensionScope):
"""class representing a ListComp node"""
def __init__(self):
self.locals = {}
else:
class ListComp(_ListComp):
"""class representing a ListComp node"""
# Function ###################################################################
class Lambda(LocalsDictNodeNG, FilterStmtsMixin):
_astng_fields = ('args', 'body',)
# function's type, 'function' | 'method' | 'staticmethod' | 'classmethod'
type = 'function'
def __init__(self):
self.locals = {}
self.args = []
self.body = []
def pytype(self):
if 'method' in self.type:
return '%s.instancemethod' % BUILTINS_MODULE
return '%s.function' % BUILTINS_MODULE
def display_type(self):
if 'method' in self.type:
return 'Method'
return 'Function'
def callable(self):
return True
def argnames(self):
"""return a list of argument names"""
if self.args.args: # maybe None with builtin functions
names = _rec_get_names(self.args.args)
else:
names = []
if self.args.vararg:
names.append(self.args.vararg)
if self.args.kwarg:
names.append(self.args.kwarg)
return names
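    # Illustrative example: for `lambda a, (b, c), *args, **kw: None` (the
    # py2 tuple-unpacking form) argnames() returns
    # ['a', 'b', 'c', 'args', 'kw'].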
def infer_call_result(self, caller, context=None):
"""infer what a function is returning when called"""
return self.body.infer(context)
def scope_lookup(self, node, name, offset=0):
if node in self.args.defaults:
frame = self.parent.frame()
# line offset to avoid that def func(f=func) resolve the default
# value to the defined function
offset = -1
else:
# check this is not used in function decorators
frame = self
return frame._scope_lookup(node, name, offset)
class Function(Statement, Lambda):
_astng_fields = ('decorators', 'args', 'body')
special_attributes = set(('__name__', '__doc__', '__dict__'))
is_function = True
# attributes below are set by the builder module or by raw factories
blockstart_tolineno = None
decorators = None
def __init__(self, name, doc):
self.locals = {}
self.args = []
self.body = []
self.decorators = None
self.name = name
self.doc = doc
self.extra_decorators = []
self.instance_attrs = {}
def set_line_info(self, lastchild):
self.fromlineno = self.lineno
        # lineno is the line number of the first decorator; we want the def statement lineno
if self.decorators is not None:
self.fromlineno += len(self.decorators.nodes)
self.tolineno = lastchild.tolineno
self.blockstart_tolineno = self.args.tolineno
def block_range(self, lineno):
"""return block line numbers.
start from the "def" position whatever the given lineno
"""
return self.fromlineno, self.tolineno
def getattr(self, name, context=None):
"""this method doesn't look in the instance_attrs dictionary since it's
done by an Instance proxy at inference time.
"""
if name == '__module__':
return [cf(self.root().qname())]
if name in self.instance_attrs:
return self.instance_attrs[name]
return std_special_attributes(self, name, False)
def is_method(self):
"""return true if the function node should be considered as a method"""
# check we are defined in a Class, because this is usually expected
# (e.g. pylint...) when is_method() return True
return self.type != 'function' and isinstance(self.parent.frame(), Class)
def decoratornames(self):
"""return a list of decorator qualified names"""
result = set()
decoratornodes = []
if self.decorators is not None:
decoratornodes += self.decorators.nodes
decoratornodes += self.extra_decorators
for decnode in decoratornodes:
for infnode in decnode.infer():
result.add(infnode.qname())
return result
decoratornames = cached(decoratornames)
def is_bound(self):
"""return true if the function is bound to an Instance or a class"""
return self.type == 'classmethod'
def is_abstract(self, pass_is_abstract=True):
"""return true if the method is abstract
        It's considered as abstract if the only statement is a raise of
        NotImplementedError, or, if pass_is_abstract, a pass statement
"""
for child_node in self.body:
if isinstance(child_node, Raise):
if child_node.raises_not_implemented():
return True
if pass_is_abstract and isinstance(child_node, Pass):
return True
return False
# empty function is the same as function with a single "pass" statement
if pass_is_abstract:
return True
def is_generator(self):
"""return true if this is a generator function"""
# XXX should be flagged, not computed
try:
return self.nodes_of_class(Yield, skip_klass=Function).next()
except StopIteration:
return False
def infer_call_result(self, caller, context=None):
"""infer what a function is returning when called"""
if self.is_generator():
yield Generator(self)
return
returns = self.nodes_of_class(Return, skip_klass=Function)
for returnnode in returns:
if returnnode.value is None:
yield Const(None)
else:
try:
for infered in returnnode.value.infer(context):
yield infered
except InferenceError:
yield YES
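# Hedged usage sketch (added for illustration; builder wiring assumed):
#
#     from logilab.astng.builder import ASTNGBuilder
#     astng = ASTNGBuilder().string_build('def f():\n    return 42\n')
#     func = astng['f']
#     list(func.infer_call_result(caller=None))   # -> [Const(42)]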
def _rec_get_names(args, names=None):
"""return a list of all argument names"""
if names is None:
names = []
for arg in args:
if isinstance(arg, Tuple):
_rec_get_names(arg.elts, names)
else:
names.append(arg.name)
return names
# Class ######################################################################
def _class_type(klass, ancestors=None):
"""return a Class node type to differ metaclass, interface and exception
from 'regular' classes
"""
# XXX we have to store ancestors in case we have a ancestor loop
if klass._type is not None:
return klass._type
if klass.name == 'type':
klass._type = 'metaclass'
elif klass.name.endswith('Interface'):
klass._type = 'interface'
elif klass.name.endswith('Exception'):
klass._type = 'exception'
else:
if ancestors is None:
ancestors = set()
if klass in ancestors:
# XXX we are in loop ancestors, and have found no type
klass._type = 'class'
return 'class'
ancestors.add(klass)
# print >> sys.stderr, '_class_type', repr(klass)
for base in klass.ancestors(recurs=False):
if _class_type(base, ancestors) != 'class':
klass._type = base.type
break
if klass._type is None:
klass._type = 'class'
return klass._type
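# Illustrative note (added): the naming heuristic above means that, e.g.,
#
#     class DummyInterface(object): pass       # typed 'interface'
#     class ParsingException(Exception): pass  # typed 'exception'
#
# are classified from their name suffix alone, before any ancestor walk.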
def _iface_hdlr(iface_node):
"""a handler function used by interfaces to handle suspicious
interface nodes
"""
return True
class Class(Statement, LocalsDictNodeNG, FilterStmtsMixin):
# some of the attributes below are set by the builder module or
# by a raw factories
# a dictionary of class instances attributes
_astng_fields = ('decorators', 'bases', 'body') # name
decorators = None
special_attributes = set(('__name__', '__doc__', '__dict__', '__module__',
'__bases__', '__mro__', '__subclasses__'))
blockstart_tolineno = None
_type = None
type = property(_class_type,
doc="class'type, possible values are 'class' | "
"'metaclass' | 'interface' | 'exception'")
def __init__(self, name, doc):
self.instance_attrs = {}
self.locals = {}
self.bases = []
self.body = []
self.name = name
self.doc = doc
def _newstyle_impl(self, context=None):
if context is None:
context = InferenceContext()
if self._newstyle is not None:
return self._newstyle
for base in self.ancestors(recurs=False, context=context):
if base._newstyle_impl(context):
self._newstyle = True
break
if self._newstyle is None:
self._newstyle = False
return self._newstyle
_newstyle = None
newstyle = property(_newstyle_impl,
doc="boolean indicating if it's a new style class"
"or not")
def set_line_info(self, lastchild):
self.fromlineno = self.lineno
self.blockstart_tolineno = self.bases and self.bases[-1].tolineno or self.fromlineno
if lastchild is not None:
self.tolineno = lastchild.tolineno
# else this is a class with only a docstring, then tolineno is (should be) already ok
def block_range(self, lineno):
"""return block line numbers.
start from the "class" position whatever the given lineno
"""
return self.fromlineno, self.tolineno
def pytype(self):
if self.newstyle:
return '%s.type' % BUILTINS_MODULE
return '%s.classobj' % BUILTINS_MODULE
def display_type(self):
return 'Class'
def callable(self):
return True
def infer_call_result(self, caller, context=None):
"""infer what a class is returning when called"""
yield Instance(self)
def scope_lookup(self, node, name, offset=0):
if node in self.bases:
frame = self.parent.frame()
            # line offset so that class A(A) does not resolve the ancestor
            # to the class being defined
offset = -1
else:
frame = self
return frame._scope_lookup(node, name, offset)
    # list of parent classes as a list of strings (i.e. names as they appear
    # in the class definition) XXX bw compat
def basenames(self):
return [bnode.as_string() for bnode in self.bases]
basenames = property(basenames)
def ancestors(self, recurs=True, context=None):
"""return an iterator on the node base classes in a prefixed
depth first order
:param recurs:
boolean indicating if it should recurse or return direct
ancestors only
"""
# FIXME: should be possible to choose the resolution order
# XXX inference make infinite loops possible here (see BaseTransformer
# manipulation in the builder module for instance)
yielded = set([self])
if context is None:
context = InferenceContext()
for stmt in self.bases:
with context.restore_path():
try:
for baseobj in stmt.infer(context):
if not isinstance(baseobj, Class):
# duh ?
continue
if baseobj in yielded:
continue # cf xxx above
yielded.add(baseobj)
yield baseobj
if recurs:
for grandpa in baseobj.ancestors(True, context):
if grandpa in yielded:
continue # cf xxx above
yielded.add(grandpa)
yield grandpa
except InferenceError:
# XXX log error ?
continue
def local_attr_ancestors(self, name, context=None):
"""return an iterator on astng representation of parent classes
which have <name> defined in their locals
"""
for astng in self.ancestors(context=context):
if name in astng:
yield astng
def instance_attr_ancestors(self, name, context=None):
"""return an iterator on astng representation of parent classes
which have <name> defined in their instance attribute dictionary
"""
for astng in self.ancestors(context=context):
if name in astng.instance_attrs:
yield astng
def has_base(self, node):
return node in self.bases
def local_attr(self, name, context=None):
"""return the list of assign node associated to name in this class
locals or in its parents
:raises `NotFoundError`:
            if no attribute with this name has been found in this class or
its parent classes
"""
try:
return self.locals[name]
except KeyError:
            # get it from the first parent implementing it if any
for class_node in self.local_attr_ancestors(name, context):
return class_node.locals[name]
raise NotFoundError(name)
local_attr = remove_nodes(local_attr, DelAttr)
def instance_attr(self, name, context=None):
"""return the astng nodes associated to name in this class instance
attributes dictionary and in its parents
:raises `NotFoundError`:
            if no attribute with this name has been found in this class or
its parent classes
"""
values = self.instance_attrs.get(name, [])
# get all values from parents
for class_node in self.instance_attr_ancestors(name, context):
values += class_node.instance_attrs[name]
if not values:
raise NotFoundError(name)
return values
instance_attr = remove_nodes(instance_attr, DelAttr)
def instanciate_class(self):
"""return Instance of Class node, else return self"""
return Instance(self)
def getattr(self, name, context=None):
"""this method doesn't look in the instance_attrs dictionary since it's
done by an Instance proxy at inference time.
It may return a YES object if the attribute has not been actually
found but a __getattr__ or __getattribute__ method is defined
"""
values = self.locals.get(name, [])
if name in self.special_attributes:
if name == '__module__':
return [cf(self.root().qname())] + values
# FIXME : what is expected by passing the list of ancestors to cf:
# you can just do [cf(tuple())] + values without breaking any test
# this is ticket http://www.logilab.org/ticket/52785
if name == '__bases__':
return [cf(tuple(self.ancestors(recurs=False, context=context)))] + values
# XXX need proper meta class handling + MRO implementation
if name == '__mro__' and self.newstyle:
# XXX mro is read-only but that's not our job to detect that
return [cf(tuple(self.ancestors(recurs=True, context=context)))] + values
return std_special_attributes(self, name)
# don't modify the list in self.locals!
values = list(values)
for classnode in self.ancestors(recurs=True, context=context):
values += classnode.locals.get(name, [])
if not values:
raise NotFoundError(name)
return values
def igetattr(self, name, context=None):
"""inferred getattr, need special treatment in class to handle
descriptors
"""
# set lookup name since this is necessary to infer on import nodes for
# instance
context = copy_context(context)
context.lookupname = name
try:
for infered in _infer_stmts(self.getattr(name, context), context,
frame=self):
# yield YES object instead of descriptors when necessary
if not isinstance(infered, Const) and isinstance(infered, Instance):
try:
infered._proxied.getattr('__get__', context)
except NotFoundError:
yield infered
else:
yield YES
else:
yield function_to_method(infered, self)
except NotFoundError:
if not name.startswith('__') and self.has_dynamic_getattr(context):
# class handle some dynamic attributes, return a YES object
yield YES
else:
raise InferenceError(name)
def has_dynamic_getattr(self, context=None):
"""return True if the class has a custom __getattr__ or
__getattribute__ method
"""
# need to explicitly handle optparse.Values (setattr is not detected)
if self.name == 'Values' and self.root().name == 'optparse':
return True
try:
self.getattr('__getattr__', context)
return True
except NotFoundError:
#if self.newstyle: XXX cause an infinite recursion error
try:
getattribute = self.getattr('__getattribute__', context)[0]
if getattribute.root().name != BUILTINS_NAME:
# class has a custom __getattribute__ defined
return True
except NotFoundError:
pass
return False
def methods(self):
"""return an iterator on all methods defined in the class and
its ancestors
"""
done = {}
for astng in chain(iter((self,)), self.ancestors()):
for meth in astng.mymethods():
if meth.name in done:
continue
done[meth.name] = None
yield meth
def mymethods(self):
"""return an iterator on all methods defined in the class"""
for member in self.values():
if isinstance(member, Function):
yield member
def interfaces(self, herited=True, handler_func=_iface_hdlr):
"""return an iterator on interfaces implemented by the given
class node
"""
# FIXME: what if __implements__ = (MyIFace, MyParent.__implements__)...
try:
implements = Instance(self).getattr('__implements__')[0]
except NotFoundError:
return
if not herited and not implements.frame() is self:
return
found = set()
missing = False
for iface in unpack_infer(implements):
if iface is YES:
missing = True
continue
if not iface in found and handler_func(iface):
found.add(iface)
yield iface
if missing:
raise InferenceError()
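# Hedged end-to-end sketch (added; names assumed from the builder module):
#
#     from logilab.astng.builder import ASTNGBuilder
#     astng = ASTNGBuilder().string_build(
#         'class Base(object):\n    attr = 1\nclass Sub(Base):\n    pass\n')
#     sub = astng['Sub']
#     [a.name for a in sub.ancestors()]   # ['Base', 'object'] for new-style
#     sub.getattr('attr')                 # resolved through the ancestor walk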
| bsd-2-clause | -297,131,739,150,811,800 | 34.40535 | 93 | 0.586012 | false | 4.453734 | false | false | false |
botswana-harvard/tshilo-dikotla | td_infant/models/infant_birth_data.py | 1 | 2659 | from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models
from edc_constants.choices import YES_NO, GENDER
from .infant_crf_model import InfantCrfModel
class InfantBirthData(InfantCrfModel):
""" A model completed by the user on the infant's birth exam. """
infant_gender = models.CharField(
max_length=6,
choices=GENDER,
verbose_name="What is the gender of the infant?",
help_text="")
weight_kg = models.DecimalField(
max_digits=3,
decimal_places=2,
verbose_name="What was the infant's birth weight? ",
help_text="Measured in Kilograms (kg)")
infant_length = models.DecimalField(
max_digits=4,
decimal_places=2,
validators=[MinValueValidator(0), MaxValueValidator(90)],
verbose_name="What was the infant's length at birth? ",
help_text="Measured in centimeters, (cm)")
head_circumference = models.DecimalField(
max_digits=4,
decimal_places=2,
validators=[MinValueValidator(0), MaxValueValidator(41)],
verbose_name="What was the head circumference in centimeters? ",
help_text="Measured in centimeters, (cm)")
apgar_score = models.CharField(
max_length=3,
choices=YES_NO,
verbose_name="Was Apgar Score performed? ",
help_text="If 'No' go to question 10. Otherwise continue")
apgar_score_min_1 = models.IntegerField(
verbose_name="At 1 minute: ",
help_text="",
blank=True,
null=True,
validators=[MaxValueValidator(10),
MinValueValidator(0)])
apgar_score_min_5 = models.IntegerField(
verbose_name="At 5 minutes: ",
help_text="",
blank=True,
null=True,
validators=[MaxValueValidator(10),
MinValueValidator(0)])
apgar_score_min_10 = models.IntegerField(
verbose_name="At 10 minutes: ",
help_text="",
blank=True,
null=True,
validators=[MaxValueValidator(10),
MinValueValidator(0)])
congenital_anomalities = models.CharField(
max_length=3,
choices=YES_NO,
verbose_name="Were any congenital anomalies identified? ",
help_text="If 'Yes' please complete the Congenital Anomalies Form",)
other_birth_info = models.TextField(
max_length=250,
verbose_name="Other birth information ",
blank=True,
null=True)
class Meta:
app_label = 'td_infant'
verbose_name = "Infant Birth: Data"
verbose_name_plural = "Infant Birth: Data"
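# Hedged usage sketch (added; the edc visit scaffolding this CRF model
# normally requires, including the assumed infant_visit link, must be in
# place for this to work):
#
#     InfantBirthData.objects.create(
#         infant_visit=visit, infant_gender='F', weight_kg='3.10',
#         infant_length='49.50', head_circumference='34.00',
#         apgar_score='No', congenital_anomalities='No')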
| gpl-2.0 | -4,537,329,226,076,851,700 | 31.036145 | 76 | 0.620158 | false | 3.809456 | false | false | false |
EnriqueSoria/Series-my | series.py | 1 | 8682 | # -*- coding: utf-8 -*-
directorios = \
r'''
D:/Series
'''.split('\n')
'''
Translation for some genres
'''
gen = {
'Crime': u'Crimen',
'Action': u'Acción',
'Drama': u'Drama',
'Comedy': u'Comedia',
'Adventure': u'Aventuras',
'Thriller': u'Thriller'
}
##############################################################################
' HTML '
##############################################################################
html_header = u'''<!DOCTYPE html><html lang="es"><head>
<meta charset="utf-8"><meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="viewport" content="width=device-width, initial-scale=1"> <meta name="description" content=""><meta name="author" content=""><link rel="icon" href="favicon.ico"><title>Series</title>
<link href="css/bootstrap.min.css" rel="stylesheet"><link href="css/jumbotron-narrow.css" rel="stylesheet">
</head><body>
<h1 class="header" align="center">Series<br></h1><div>'''
html_serie_row = '''<div class="row">'''
html_serie = u'''
<!--- Serie --->
<div class="col-xs-4">
<div class="row">
<div class="col-xs-4"><img src="{img}" alt="{titulo}" class="img-thumbnail"></div>
<div class="col-xs-8" align="left">
<h2>{titulo} ({anyo})</h2>
<ul>
<li><b>Genero</b>: {genero}</li>
<li><b>Temporadas</b>: {temporadas}</li>
<li><b>Mas info</b>: {masinfo}</li>
</ul><br>
<p><a class="btn btn-info" data-toggle='collapse' data-target="#{toggle}" aria-expanded="false" aria-controls="{toggle}">Ver capítulos</a></p>
<div class="collapse" id="{toggle}">
<div class="well">
{enlaces}
</div>
</div>
</div>
</div>
</div>
'''
html_serie_finrow = '''</div>'''
html_season = u'''<a href='#'>%s</a>'''
html_footer = u'''<footer class="footer"></footer></div>
<script src="//ajax.googleapis.com/ajax/libs/jquery/2.0.3/jquery.min.js"></script>
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css">
<!-- Optional theme -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap-theme.min.css">
<!-- Latest compiled and minified JavaScript -->
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.1/js/bootstrap.min.js"></script>
</body></html>'''
def series_links(d):
'''
    Return a list of links to the episodes of a season
    of a given series.
    We look for common patterns:
        1x01, S01E03, 101...
'''
path = d[u'path']
    patterns = [
        # Of the form: 1x01, 12x24
'(\d{1,2}x\d\d)',
# S01E01, S12E24
'(S\d\dE\d\d)',
# 101, 1224
'(\d{3,4})']
patterns = [re.compile(regex) for regex in patterns]
capitulos = []
for temporada in [x for x in ls(path) if not '.' in x]:
for capitulo in ls('%s/%s' %(path,temporada)):
print capitulo
# 1x03
p = re.search(patterns[0], capitulo)
if p and len(p.groups()):
cap = p.groups()[0]
capitulos.append( (cap, u'%s/%s/%s' % (utf(path), utf(temporada) , utf(capitulo) )) )
print cap
continue
# S01E03
p = re.search(patterns[1], capitulo)
if p and len(p.groups()):
cap = p.groups()[0]
                cap = u'%s%sx%s%s' % (cap[1] if cap[1] != '0' else '', cap[2], cap[4], cap[5])
capitulos.append( ( cap, u'%s/%s/%s' % (utf(path), utf(temporada) , utf(capitulo)) ))
print cap
continue
# 103
p = re.search(patterns[2], capitulo)
if p and len(p.groups()):
cap = p.groups()[0]
if len(cap)==3: cap = u'%sx%s%s' % (cap[0], cap[1], cap[2])
else: cap = u'%s%sx%s%s' % (cap[0], cap[1], cap[2], cap[3])
capitulos.append( ( cap, u'%s/%s/%s' % (utf(path), utf(temporada) , utf(capitulo) )))
print cap
continue
            # If it contains any number, add it
if re.search('\d', capitulo):
capitulos.append( ( capitulo, u'%s/%s/%s' % (path, temporada, capitulo) ) )
return capitulos
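# Illustrative mapping (sketch) of the three patterns above:
#   'Show 1x03.mkv'   -> ('1x03', <path>)
#   'Show S01E03.mkv' -> ('1x03', <path>)   (reformatted from S01E03)
#   'Show 103.avi'    -> ('1x03', <path>)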
def serie_HTML(d, download=False):
    ''' Return the HTML for a given series '''
return html_serie.format(
img = d[u'img'] if not download else 'imgs/%s.jpg' % download_image(d),
titulo = d[u'name'].decode('utf-8', 'replace'),
anyo = d[u'year'],
genero = gen[d[u'maingenre']],
temporadas = u' '.join( [html_season % idx for idx in xrange(1,d[u'seasons']+1)]),
masinfo = u'',
toggle = d[u'name'].decode('utf-8', 'replace').split(' ')[0],
enlaces = u'<br>'.join( [(u'<a href="file:///%s">%s</a>' % (cap[1], cap[0])) for cap in series_links(d)])
)
##############################################################################
' Helper functions '
##############################################################################
def read(pathFNAME):
'''
    Open a file, read it and return a dictionary.
'''
with open(pathFNAME, 'r', 'utf-8') as fn:
return eval(fn.read())
def paths_de_las_series(orden=lambda (p,d): d[u'name']):
'''
    Walk all the directories and record where the series
    are, in sorted order.
'''
paths = []
for pathBase in [d for d in directorios if d]:
for path in ls(pathBase):
if not '.' in path:
if 'info.json' in ls('%s/%s'%(pathBase, path)):
# Save the path
camino = '%s/%s' % (pathBase, path)
inform = read('%s/info.json' % (camino))
inform[u'path'] = camino
paths.append((camino, inform))
return sorted(paths, key=orden)
utf = lambda x: x.decode('utf-8', 'replace')
def urlify(name):
'''
    Return a string formatted like a URL slug
'''
name = name#.decode('utf-8', 'replace')
for l, ll in zip(u'áàéèíìóòúù:',u'aaeeiioouu_'):
name = name.replace(l,ll)
return (name.encode('ASCII', 'replace')).replace(' ', '-')
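# Example (sketch): urlify(u'árbol: temporada 2') -> 'arbol_-temporada-2'
# (accents flattened, ':' mapped to '_', spaces to '-').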
def download_image(d):
'''
    Download the series poster image
'''
    # File name
fName = urlify(d[u'name'])
    # Check whether it is already downloaded
if ('%s.jpg' % fName) in ls('D:/Series/_web/imgs/'):
pass
else:
call("wget %s -O %s.jpg" % (d[u'poster'][u'large'], fName) )
sleep(2)
mv('%s.jpg' % fName, 'D:/Series/_web/imgs/%s.jpg' % fName)
return fName
##############################################################################
' Main code '
##############################################################################
if __name__=='__main__':
'''
    Main code
'''
from shutil import move as mv
from os import listdir as ls
from time import sleep
from subprocess import call
import re
import codecs
open = codecs.open
    ''' Build the HTML '''
html = html_header
ps = paths_de_las_series()
la, lb, lc = len(ps[0::3]), len(ps[1::3]), len(ps[2::3])
for a, b, c in zip( ps[0::3] , \
ps[1::3] + ([0] if la>lb else []), \
ps[2::3] + ([0] if la>lc else [])):
html += html_serie_row
html += serie_HTML(a[1]) if a else ''
html += serie_HTML(b[1]) if b else ''
html += serie_HTML(c[1]) if c else ''
html += html_serie_finrow
html += html_footer
    ''' Save the HTML '''
location = r'./_web/index.html'
with open(location, 'w', 'utf-8') as f:
f.write(html)
| mit | 3,314,433,357,700,545,500 | 35.70339 | 276 | 0.448857 | false | 3.475923 | false | false | false |
advisory/djangosaml2_tenant | setup.py | 1 | 1850 | # Copyright (C) 2015 Education Advisory Board
# Copyright (C) 2011-2012 Yaco Sistemas
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup, find_packages
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
setup(
name='djangosaml2_tenant',
version='0.22.0',
description='pysaml2 integration for multi-tenant in Django',
long_description='\n\n'.join([read('README'), read('CHANGES')]),
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Application Frameworks",
],
keywords="django,pysaml2,saml2,federated authentication,multi-tenant",
author="Education Advisory Board",
author_email="[email protected]",
url="https://github.com/advisory/djangosaml2_tenant",
license='Apache 2.0',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'pysaml2==2.2.0',
'python-memcached==1.48',
],
)
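# Hedged note (added): a package laid out like this is normally installed
# with `pip install .` from the repository root, which pulls in the pinned
# pysaml2 and python-memcached versions listed above.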
| apache-2.0 | 5,805,203,127,526,979,000 | 33.90566 | 79 | 0.668649 | false | 3.838174 | false | false | false |
Adamssss/projectEuler | Problem 001-150 Python/pb050.py | 1 | 1068 | import math
import time
t1 = time.time()
prime = [2,3,5]
primen = 2
while primen < 547:
b = prime[primen]
t = 1
while (t == 1):
b = b+2
i = 0
t = 0
while (prime[i]*prime[i] < b)and (t == 0):
i=i+1
if (b%prime[i] == 0):
t = 1
if (t == 0):
primen += 1
prime.append(b)
# define a method to check if it is a prime
def isPrime(num):
if num%2 == 0:
return False
i = 3
    while i <= math.sqrt(num):
if num%i == 0:
return False
i += 2
return True
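# Illustrative checks (sketch): isPrime(25) -> False, isPrime(997651) -> True.
# The helper assumes an odd num > 2 (isPrime(2) would wrongly return False).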
# the sum of the first 546 consecutive primes is the greatest such sum below 1 million
def sumOf(start,number):
total = 0
i = 0
while i<number:
total += prime[start+i]
i += 1
return total
# print(sumOf(0,546))
for i in range(0,500):
for j in range(0,i+1):
test = sumOf(j,546-i)
if isPrime(test):
break
if isPrime(test):
print (test)
break
print("time:",time.time()-t1)
| mit | 1,853,741,059,998,883,600 | 17.736842 | 69 | 0.47191 | false | 3.086705 | false | false | false |
angadpc/Alexa-Project- | twilio/rest/api/v2010/account/available_phone_number/local.py | 1 | 16142 | # coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class LocalList(ListResource):
def __init__(self, version, account_sid, country_code):
"""
Initialize the LocalList
:param Version version: Version that contains the resource
:param account_sid: The 34 character string that uniquely identifies your account.
:param country_code: The ISO Country code to lookup phone numbers for.
:returns: twilio.rest.api.v2010.account.available_phone_number.local.LocalList
:rtype: twilio.rest.api.v2010.account.available_phone_number.local.LocalList
"""
super(LocalList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'country_code': country_code,
}
self._uri = '/Accounts/{account_sid}/AvailablePhoneNumbers/{country_code}/Local.json'.format(**self._solution)
def stream(self, area_code=values.unset, contains=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset,
exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, limit=None, page_size=None):
"""
Streams LocalInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode area_code: The area_code
:param unicode contains: The contains
:param bool sms_enabled: The sms_enabled
:param bool mms_enabled: The mms_enabled
:param bool voice_enabled: The voice_enabled
:param bool exclude_all_address_required: The exclude_all_address_required
:param bool exclude_local_address_required: The exclude_local_address_required
:param bool exclude_foreign_address_required: The exclude_foreign_address_required
:param bool beta: The beta
:param unicode near_number: The near_number
:param unicode near_lat_long: The near_lat_long
:param unicode distance: The distance
:param unicode in_postal_code: The in_postal_code
:param unicode in_region: The in_region
:param unicode in_rate_center: The in_rate_center
:param unicode in_lata: The in_lata
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
area_code=area_code,
contains=contains,
sms_enabled=sms_enabled,
mms_enabled=mms_enabled,
voice_enabled=voice_enabled,
exclude_all_address_required=exclude_all_address_required,
exclude_local_address_required=exclude_local_address_required,
exclude_foreign_address_required=exclude_foreign_address_required,
beta=beta,
near_number=near_number,
near_lat_long=near_lat_long,
distance=distance,
in_postal_code=in_postal_code,
in_region=in_region,
in_rate_center=in_rate_center,
in_lata=in_lata,
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, area_code=values.unset, contains=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset, exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, limit=None, page_size=None):
"""
Lists LocalInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode area_code: The area_code
:param unicode contains: The contains
:param bool sms_enabled: The sms_enabled
:param bool mms_enabled: The mms_enabled
:param bool voice_enabled: The voice_enabled
:param bool exclude_all_address_required: The exclude_all_address_required
:param bool exclude_local_address_required: The exclude_local_address_required
:param bool exclude_foreign_address_required: The exclude_foreign_address_required
:param bool beta: The beta
:param unicode near_number: The near_number
:param unicode near_lat_long: The near_lat_long
:param unicode distance: The distance
:param unicode in_postal_code: The in_postal_code
:param unicode in_region: The in_region
:param unicode in_rate_center: The in_rate_center
:param unicode in_lata: The in_lata
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance]
"""
return list(self.stream(
area_code=area_code,
contains=contains,
sms_enabled=sms_enabled,
mms_enabled=mms_enabled,
voice_enabled=voice_enabled,
exclude_all_address_required=exclude_all_address_required,
exclude_local_address_required=exclude_local_address_required,
exclude_foreign_address_required=exclude_foreign_address_required,
beta=beta,
near_number=near_number,
near_lat_long=near_lat_long,
distance=distance,
in_postal_code=in_postal_code,
in_region=in_region,
in_rate_center=in_rate_center,
in_lata=in_lata,
limit=limit,
page_size=page_size,
))
def page(self, area_code=values.unset, contains=values.unset,
sms_enabled=values.unset, mms_enabled=values.unset,
voice_enabled=values.unset, exclude_all_address_required=values.unset,
exclude_local_address_required=values.unset,
exclude_foreign_address_required=values.unset, beta=values.unset,
near_number=values.unset, near_lat_long=values.unset,
distance=values.unset, in_postal_code=values.unset,
in_region=values.unset, in_rate_center=values.unset,
in_lata=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of LocalInstance records from the API.
Request is executed immediately
:param unicode area_code: The area_code
:param unicode contains: The contains
:param bool sms_enabled: The sms_enabled
:param bool mms_enabled: The mms_enabled
:param bool voice_enabled: The voice_enabled
:param bool exclude_all_address_required: The exclude_all_address_required
:param bool exclude_local_address_required: The exclude_local_address_required
:param bool exclude_foreign_address_required: The exclude_foreign_address_required
:param bool beta: The beta
:param unicode near_number: The near_number
:param unicode near_lat_long: The near_lat_long
:param unicode distance: The distance
:param unicode in_postal_code: The in_postal_code
:param unicode in_region: The in_region
:param unicode in_rate_center: The in_rate_center
:param unicode in_lata: The in_lata
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of LocalInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.local.LocalPage
"""
params = values.of({
'AreaCode': area_code,
'Contains': contains,
'SmsEnabled': sms_enabled,
'MmsEnabled': mms_enabled,
'VoiceEnabled': voice_enabled,
'ExcludeAllAddressRequired': exclude_all_address_required,
'ExcludeLocalAddressRequired': exclude_local_address_required,
'ExcludeForeignAddressRequired': exclude_foreign_address_required,
'Beta': beta,
'NearNumber': near_number,
'NearLatLong': near_lat_long,
'Distance': distance,
'InPostalCode': in_postal_code,
'InRegion': in_region,
'InRateCenter': in_rate_center,
'InLata': in_lata,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return LocalPage(self._version, response, self._solution)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.LocalList>'
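# Hedged usage sketch (added; client wiring assumed, not generated code):
#
#     from twilio.rest import Client
#     client = Client(account_sid, auth_token)
#     numbers = client.available_phone_numbers('US').local.list(
#         area_code='415', sms_enabled=True, limit=20)
#     for n in numbers:
#         print(n.phone_number)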
class LocalPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the LocalPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The 34 character string that uniquely identifies your account.
:param country_code: The ISO Country code to lookup phone numbers for.
:returns: twilio.rest.api.v2010.account.available_phone_number.local.LocalPage
:rtype: twilio.rest.api.v2010.account.available_phone_number.local.LocalPage
"""
super(LocalPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of LocalInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance
"""
return LocalInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
country_code=self._solution['country_code'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.LocalPage>'
class LocalInstance(InstanceResource):
def __init__(self, version, payload, account_sid, country_code):
"""
Initialize the LocalInstance
:returns: twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance
:rtype: twilio.rest.api.v2010.account.available_phone_number.local.LocalInstance
"""
super(LocalInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'friendly_name': payload['friendly_name'],
'phone_number': payload['phone_number'],
'lata': payload['lata'],
'rate_center': payload['rate_center'],
'latitude': deserialize.decimal(payload['latitude']),
'longitude': deserialize.decimal(payload['longitude']),
'region': payload['region'],
'postal_code': payload['postal_code'],
'iso_country': payload['iso_country'],
'address_requirements': payload['address_requirements'],
'beta': payload['beta'],
'capabilities': payload['capabilities'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'country_code': country_code,
}
@property
def friendly_name(self):
"""
:returns: The friendly_name
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def phone_number(self):
"""
:returns: The phone_number
:rtype: unicode
"""
return self._properties['phone_number']
@property
def lata(self):
"""
:returns: The lata
:rtype: unicode
"""
return self._properties['lata']
@property
def rate_center(self):
"""
:returns: The rate_center
:rtype: unicode
"""
return self._properties['rate_center']
@property
def latitude(self):
"""
:returns: The latitude
:rtype: unicode
"""
return self._properties['latitude']
@property
def longitude(self):
"""
:returns: The longitude
:rtype: unicode
"""
return self._properties['longitude']
@property
def region(self):
"""
:returns: The region
:rtype: unicode
"""
return self._properties['region']
@property
def postal_code(self):
"""
:returns: The postal_code
:rtype: unicode
"""
return self._properties['postal_code']
@property
def iso_country(self):
"""
:returns: The iso_country
:rtype: unicode
"""
return self._properties['iso_country']
@property
def address_requirements(self):
"""
:returns: The address_requirements
:rtype: unicode
"""
return self._properties['address_requirements']
@property
def beta(self):
"""
:returns: The beta
:rtype: bool
"""
return self._properties['beta']
@property
def capabilities(self):
"""
:returns: The capabilities
:rtype: unicode
"""
return self._properties['capabilities']
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.LocalInstance>'
| mit | -3,366,049,378,486,977,000 | 37.070755 | 118 | 0.610767 | false | 4.276026 | false | false | false |
r8/scrapy-kinopoisk | kinopoisk/pipelines.py | 1 | 2920 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
import sys
import codecs
from slugify import slugify
from time import strptime, strftime
from html2text import html2text
class MarkdownPipeline(object):
"""Scrapy pipeline to save reviews as markdown document"""
def parse_datetime(self, str_datetime):
"""Parse date string in russian"""
dictionary = {u"января": 'Jan', u"февраля": 'Feb', u"марта": 'Mar',
u"апреля": 'Apr', u"мая": 'May', u"июня": 'Jun', u"июля": 'Jul',
u"августа": 'Aug', u"сентября": 'Sep', u"октября": 'Oct',
u"ноября": 'Nov', u"декабря": 'Dec'}
for russian, english in dictionary.items():
str_datetime = str_datetime.replace(russian, english)
return strptime(str_datetime, '%d %b %Y %H:%M')
def fix_typography(self, s):
"""Fix typographic symbols"""
s = s.replace(u'\x97', u'\u2014') # Fix dashes
s = s.replace(u'\x85', u'\u2026') # Fix ellipsis
return s
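    # Illustrative sketch: fix_typography(u'A\x97B\x85') -> u'A\u2014B\u2026',
    # i.e. stray cp1251-style dash/ellipsis code points become proper
    # Unicode punctuation.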
def process_item(self, item, spider):
"""Process and save review item"""
settings = spider.settings
if not os.path.exists(settings['MARKDOWN_OUTPUT']):
os.mkdir(settings['MARKDOWN_OUTPUT'])
file_name = strftime('%Y-%m-%d-', self.parse_datetime(item['review_datetime'][0])) + slugify(item['movie_title'][0]) + '.md'
try:
output_file = codecs.open(settings['MARKDOWN_OUTPUT'] + '/' + file_name, 'w', 'utf-8')
except IOError:
            print 'Error opening target file: %s' % (settings['MARKDOWN_OUTPUT'] + '/' + file_name)
sys.exit(1)
if len(item['review_title']) > 0:
title = item['review_title'][0]
else:
title = item['movie_title'][0]
title = self.fix_typography(title)
output_file.write("%s\n" % title)
output_file.write("%s\n\n" % ('=' * len(title)))
output_file.write("* **User Id:** %s\n" % item['user_id'])
output_file.write("* **Movie Title:** %s\n" % item['movie_title'][0])
output_file.write("* **Movie Original Title:** %s\n" % item['movie_original_title'][0])
output_file.write("* **Movie Link:** [{0}]({0})\n".format(item['movie_link'][0]))
output_file.write("* **Review Date:** %s\n" % item['review_datetime'][0])
output_file.write("* **Review Grade:** %s\n" % item['review_grade'][0])
output_file.write("* **Review Link:** [{0}]({0})\n".format(item['review_link']))
output_file.write("\n")
review_text = html2text(item['review_text'])
review_text = self.fix_typography(review_text)
output_file.write(review_text)
output_file.close()
return item
| gpl-3.0 | -5,104,314,660,100,140,000 | 36.012987 | 132 | 0.579298 | false | 3.220339 | false | false | false |
bsquidwrd/Squid-Bot | gaming/migrations/0007_auto_20161029_2354.py | 1 | 1089 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-10-30 06:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('gaming', '0006_auto_20161029_2347'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='created_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='channel',
name='expire_date',
field=models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True),
),
migrations.AlterField(
model_name='gamesearch',
name='created_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='gamesearch',
name='expire_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| mit | -768,404,492,787,284,200 | 29.25 | 97 | 0.599633 | false | 4.287402 | false | false | false |
hashbang/provisor | provisor/utils.py | 1 | 1845 | def drop_privileges(uid_name='nobody', gid_name='nogroup'):
import grp, pwd, os, resource
if os.getuid() != 0: # not root. #yolo
return
running_uid = pwd.getpwnam(uid_name).pw_uid
running_gid = grp.getgrnam(gid_name).gr_gid
os.setgroups([])
os.setgid(running_gid)
os.setuid(running_uid)
os.umask(0o077)
resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
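# Hedged usage sketch: a daemon that needs root only to bind a privileged
# port could do, roughly (helpers below are hypothetical):
#
#     sock = bind_port_80()                  # needs root
#     drop_privileges('nobody', 'nogroup')   # now unprivileged
#     serve_forever(sock)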
def getch():
import sys, termios, tty
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
def validate_pubkey(value):
import base64
if len(value) > 8192 or len(value) < 80:
raise ValueError("Expected length to be between 80 and 8192 characters")
value = value.replace("\"", "").replace("'", "").replace("\\\"", "")
value = value.split(' ')
types = [ 'ecdsa-sha2-nistp256', 'ecdsa-sha2-nistp384',
'ecdsa-sha2-nistp521', 'ssh-rsa', 'ssh-dss', 'ssh-ed25519' ]
if value[0] not in types:
raise ValueError(
"Expected " + ', '.join(types[:-1]) + ', or ' + types[-1]
)
try:
base64.decodestring(bytes(value[1]))
except TypeError:
raise ValueError("Expected string of base64 encoded data")
return "%s %s" % (value[0], value[1])
def validate_username(value):
import re
from reserved import RESERVED_USERNAMES
# Regexp must be kept in sync with
# https://github.com/hashbang/hashbang.sh/blob/master/src/hashbang.sh#L186-196
if re.compile(r"^[a-z][a-z0-9]{,30}$").match(value) is None:
raise ValueError('Username is invalid')
if value in RESERVED_USERNAMES:
raise ValueError('Username is reserved')
return value
| mit | 5,792,575,811,254,287,000 | 28.758065 | 83 | 0.617344 | false | 3.236842 | false | false | false |
abhijeet-talaulikar/Automatic-Helmet-Detection | K-Fold/Logistic_Regression.py | 1 | 2663 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import *
from timeit import default_timer as timer
from random import randint
from sklearn.feature_selection import *
from sklearn.decomposition import PCA
helmet_data = np.genfromtxt ('helmet.csv', delimiter=",")
face_data = np.genfromtxt ('face.csv', delimiter=",")
data_full = np.concatenate((helmet_data, face_data), 0)
np.random.shuffle(data_full) #shuffle the tuples
#feature reduction (on HOG part)
#gain, j = mutual_info_classif(data_full[:, 8:-1], data_full[:, -1], discrete_features='auto', n_neighbors=3, copy=True, random_state=None), 0
#for i in np.arange(len(gain)):
# if gain[i] <= 0.001:
# data_full = np.delete(data_full, 8+i-j, 1)
# j += 1
#data = np.copy(data_full)
#principal component analysis
pca = PCA(n_components=150)
data = pca.fit_transform(data_full[:, 8:-1])
data = np.concatenate((data_full[:, 0:8], data, np.array([data_full[:, -1]]).T), axis=1)
precision, recall, f1, accuracy, support, fn, roc_auc = 0, 0, 0, 0, 0, 0, 0
colors = ['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange']
k = 10
kf = KFold(n_splits = k)
start = timer()
for train, test in kf.split(data):
X_train, X_test = data[train, 0:-1], data[test, 0:-1]
y_train, y_test = data[train, -1], data[test, -1]
clf = LogisticRegression().fit(X_train, y_train)
y_pred = clf.predict(X_test)
#ROC curve
y_prob = clf.predict_proba(X_test)[:,1]
fpr, tpr, thresholds = roc_curve(y_test, y_prob, pos_label=1)
roc_auc += auc(fpr, tpr)
plt.plot(fpr, tpr, color=colors[randint(0, len(colors)-1)])
precision += precision_score(y_test, y_pred, average = 'macro')
recall += recall_score(y_test, y_pred, average = 'macro')
f1 += f1_score(y_test, y_pred, average = 'macro')
accuracy += accuracy_score(y_test, y_pred)
y = y_test - y_pred
fn += sum(y[y > 0]) / len(y_test)
end = timer()
precision /= k
recall /= k
f1 /= k
accuracy /= k
fn /= k
print("Precision \t: %s" % round(precision, 4))
print("Recall \t\t: %s" % round(recall, 4))
print("F1 \t\t: %s" % round(f1, 4))
print("Accuracy \t: %s" % round(accuracy, 4))
print("False Neg \t: %s%%" % round(fn * 100, 4))
print("Mean AUC \t: %s" % round(roc_auc / k, 4))
print("\nExecution time: %s ms" % round((end - start) * 1000, 4))
#ROC curve
plt.title('Logistic Regression (Mean AUC = %s)' % round(roc_auc / k, 4))
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([-0.05,1.0])
plt.ylim([0.0,1.05])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
| gpl-3.0 | -5,013,241,237,569,052,000 | 32.2875 | 142 | 0.662035 | false | 2.634026 | true | false | false |
cristobaltapia/sajou | sajou/sections.py | 1 | 3525 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Define the classes and methods to work with sections.
"""
import numpy as np
class BeamSection(object):
"""Defines a beam section
Parameters
----------
name: str
name of the section
material: Material instance
material of the section defined as an instance of Material object
data: tuple
properties of the section
type: str
defines the type of cross-section
+-------------------------+------------------------------+
| type | *data* format |
+=========================+==============================+
|'rectangular': |``data=(width, height,)`` |
+-------------------------+------------------------------+
|'circular': |``data=(r, )`` |
+-------------------------+------------------------------+
|'I-section': |``data=(H, h_f, w_web, w_f)`` |
+-------------------------+------------------------------+
|'general': |``data=(A, I_3,)`` |
+-------------------------+------------------------------+
"""
def __init__(self, name, material, data, type='rectangular'):
self._name = name
self._material = material
self._data = data
self._type = type
self._area = 0
self._Iz = 0
self._Iy = 0
self._Jx = 0
self.compute_properties()
def print_properties(self):
"""Prints the properties of the BeamSection instance
:returns: TODO
"""
if self._type == 'rectangular':
props = {'width': self._data[0], 'height': self._data[1]}
else:
props = 'undefined'
return 'Properties: ' + str(props)
def compute_properties(self):
"""Compute all the mechanical properties for the given section
:returns: TODO
"""
# Calculate the area
self._area = self.calc_area()
self._Iz, self._Iy = self.calc_inertia()
def calc_area(self):
"""Calculate the area of the section
:returns: TODO
"""
type = self._type
if type == 'rectangular':
width = self._data[0]
height = self._data[1]
return width * height
elif type == 'general':
return self._data[0]
elif type == 'circular':
radius = self._data[0]
return np.pi * radius**2
def calc_inertia(self):
"""Calculate the moment of inertia of the beam section
:returns: Iz, Iy
"""
type = self._type
if type == 'rectangular':
width = self._data[0]
height = self._data[1]
I_z = width * height**3 / 12.
I_y = height * width**3 / 12.
return I_z, I_y
elif type == 'general':
return self._data[1], 0
def __str__(self):
"""
Returns the printable string for this object
"""
return 'Beam Section: {name}, type: {t}'.format(name=self._name,
t=self._type)
def __repr__(self):
"""
Returns the printable string for this object
"""
return 'Beam Section: {name}, type: {t}'.format(name=self._name,
t=self._type)
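# Hedged usage sketch (added; the material argument is only stored by the
# class, so a placeholder is passed here):
#
#     sec = BeamSection('S1', material=None, data=(0.2, 0.4),
#                       type='rectangular')
#     sec._area   # 0.08 = width * height
#     sec._Iz     # 0.2 * 0.4**3 / 12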
| mit | 9,096,255,454,926,391,000 | 28.621849 | 73 | 0.41844 | false | 4.632063 | false | false | false |
n3wb13/OpenNfrGui-5.0-1 | lib/python/Plugins/Extensions/MediaPortal/additions/fun/geo_de.py | 1 | 4389 | # -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.simpleplayer import SimplePlayer, SimplePlaylist
from Plugins.Extensions.MediaPortal.resources.twagenthelper import twAgentGetPage
STV_Version = "GEO.de v0.95"
STV_siteEncoding = 'iso8859-1'
class GEOdeGenreScreen(MPScreen, ThumbsHelper):
def __init__(self, session):
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/dokuListScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/dokuListScreen.xml"
print path
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"yellow" : self.keyTxtPageUp,
"blue" : self.keyTxtPageDown,
"ok" : self.keyOK,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"0" : self.closeAll,
"left" : self.keyLeft
}, -1)
self['title'] = Label(STV_Version)
self['ContentTitle'] = Label("GEOaudio - Hören und Reisen")
self['F1'] = Label(_("Exit"))
self['F3'] = Label(_("Text-"))
self['F4'] = Label(_("Text+"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.baseUrl = "http://www.geo.de"
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.lastservice = self.session.nav.getCurrentlyPlayingServiceReference()
self.onClose.append(self.restoreLastService)
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.keyLocked = True
stvLink = self.baseUrl + '/GEO/reisen/podcast/reise-podcast-geoaudio-hoeren-und-reisen-5095.html'
print "getPage: ",stvLink
twAgentGetPage(stvLink).addCallback(self.genreData).addErrback(self.dataError)
def genreData(self, data):
print "genreData:"
for m in re.finditer('id:"(.*?)".*?name:"(.*?)".*?mp3:"(.*?)".*?iption:"(.*?)".*?poster: "(.*?)"', data, re.S):
# print "Podcasts found"
id, name, mp3, desc, img = m.groups()
self.filmliste.append(("%s. " % id, decodeHtml2(name), mp3, decodeHtml2(desc),img))
if self.keyLocked:
self.keyLocked = False
if not self.filmliste:
self.filmliste.append(('Keine Podcasts gefunden !','','','',''))
self.ml.setList(map(self.GEOdeListEntry, self.filmliste))
self.th_ThumbsQuery(self.filmliste, 1, 0, 4, None, None, 1, 1, mode=1)
self.showInfos()
def showInfos(self):
stvTitle = self['liste'].getCurrent()[0][1]
stvImage = self['liste'].getCurrent()[0][4]
stvDesc = self['liste'].getCurrent()[0][3]
print stvImage
self['name'].setText(stvTitle)
self['handlung'].setText(stvDesc)
CoverHelper(self['coverArt']).getCover(stvImage)
def keyOK(self):
if self.keyLocked:
return
self.session.open(
GEOdePlayer,
self.filmliste,
playIdx = self['liste'].getSelectedIndex()
)
def restoreLastService(self):
if config.mediaportal.restorelastservice.value == "1" and not config.mediaportal.backgroundtv.value:
self.session.nav.playService(self.lastservice)
class GEOdePlaylist(SimplePlaylist):
def playListEntry(self, entry):
width = self['liste'].instance.size().width()
height = self['liste'].l.getItemSize().height()
self.ml.l.setFont(0, gFont('mediaportal', height - 2 * mp_globals.sizefactor))
res = [entry]
res.append((eListboxPythonMultiContent.TYPE_TEXT, 0, 0, width, height, 0, RT_HALIGN_LEFT | RT_VALIGN_CENTER, entry[0] + entry[1]))
return res
class GEOdePlayer(SimplePlayer):
def __init__(self, session, playList, playIdx):
print "GEOdePlayer:"
SimplePlayer.__init__(self, session, playList, playIdx=playIdx, playAll=True, listTitle="GEOaudio - Hören und Reisen", autoScrSaver=True, ltype='geo.de', playerMode='MP3')
def getVideo(self):
stvLink = self.playList[self.playIdx][2]
stvTitle = "%s%s" % (self.playList[self.playIdx][0], self.playList[self.playIdx][1])
stvImage = self.playList[self.playIdx][4]
self.playStream(stvTitle, stvLink, imgurl=stvImage)
def openPlaylist(self, pl_class=GEOdePlaylist):
SimplePlayer.openPlaylist(self, pl_class) | gpl-2.0 | 2,190,676,951,641,538,800 | 34.088 | 173 | 0.696693 | false | 2.78944 | false | false | false |
osuripple/lets | helpers/aeshelper.py | 1 | 10866 | """
A pure python (slow) implementation of rijndael with a decent interface
To include -
from rijndael import rijndael
To do a key setup -
r = rijndael(key, block_size = 16)
key must be a string of length 16, 24, or 32
blocksize must be 16, 24, or 32. Default is 16
To use -
ciphertext = r.encrypt(plaintext)
plaintext = r.decrypt(ciphertext)
If any strings are of the wrong length a ValueError is thrown
"""
# ported from the Java reference code by Bram Cohen, April 2001
# this code is public domain, unless someone makes
# an intellectual property claim against the reference
# code, in which case it can be made public domain by
# deleting all the comments and renaming all the variables
import copy
import base64
shifts = [[[0, 0], [1, 3], [2, 2], [3, 1]],
[[0, 0], [1, 5], [2, 4], [3, 3]],
[[0, 0], [1, 7], [3, 5], [4, 4]]]
# [keysize][block_size]
num_rounds = {16: {16: 10, 24: 12, 32: 14}, 24: {16: 12, 24: 12, 32: 14}, 32: {16: 14, 24: 14, 32: 14}}
A = [[1, 1, 1, 1, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 0, 0, 0, 1]]
# produce log and alog tables, needed for multiplying in the
# field GF(2^m) (generator = 3)
alog = [1]
for i in range(255):
j = (alog[-1] << 1) ^ alog[-1]
if j & 0x100 != 0:
j ^= 0x11B
alog.append(j)
log = [0] * 256
for i in range(1, 255):
log[alog[i]] = i
# multiply two elements of GF(2^m)
def mul(a, b):
if a == 0 or b == 0:
return 0
return alog[(log[a & 0xFF] + log[b & 0xFF]) % 255]
# substitution box based on F^{-1}(x)
box = [[0] * 8 for i in range(256)]
box[1][7] = 1
for i in range(2, 256):
j = alog[255 - log[i]]
for t in range(8):
box[i][t] = (j >> (7 - t)) & 0x01
B = [0, 1, 1, 0, 0, 0, 1, 1]
# affine transform: box[i] <- B + A*box[i]
cox = [[0] * 8 for i in range(256)]
for i in range(256):
for t in range(8):
cox[i][t] = B[t]
for j in range(8):
cox[i][t] ^= A[t][j] * box[i][j]
# S-boxes and inverse S-boxes
S = [0] * 256
Si = [0] * 256
for i in range(256):
S[i] = cox[i][0] << 7
for t in range(1, 8):
S[i] ^= cox[i][t] << (7-t)
Si[S[i] & 0xFF] = i
# T-boxes
G = [[2, 1, 1, 3],
[3, 2, 1, 1],
[1, 3, 2, 1],
[1, 1, 3, 2]]
AA = [[0] * 8 for i in range(4)]
for i in range(4):
for j in range(4):
AA[i][j] = G[i][j]
AA[i][i+4] = 1
for i in range(4):
pivot = AA[i][i]
if pivot == 0:
t = i + 1
while AA[t][i] == 0 and t < 4:
t += 1
assert t != 4, 'G matrix must be invertible'
for j in range(8):
AA[i][j], AA[t][j] = AA[t][j], AA[i][j]
pivot = AA[i][i]
for j in range(8):
if AA[i][j] != 0:
AA[i][j] = alog[(255 + log[AA[i][j] & 0xFF] - log[pivot & 0xFF]) % 255]
for t in range(4):
if i != t:
for j in range(i+1, 8):
AA[t][j] ^= mul(AA[i][j], AA[t][i])
AA[t][i] = 0
iG = [[0] * 4 for i in range(4)]
for i in range(4):
for j in range(4):
iG[i][j] = AA[i][j + 4]
def mul4(a, bs):
if a == 0:
return 0
r = 0
for b in bs:
r <<= 8
if b != 0:
r |= mul(a, b)
return r
T1 = []
T2 = []
T3 = []
T4 = []
T5 = []
T6 = []
T7 = []
T8 = []
U1 = []
U2 = []
U3 = []
U4 = []
for t in range(256):
s = S[t]
T1.append(mul4(s, G[0]))
T2.append(mul4(s, G[1]))
T3.append(mul4(s, G[2]))
T4.append(mul4(s, G[3]))
s = Si[t]
T5.append(mul4(s, iG[0]))
T6.append(mul4(s, iG[1]))
T7.append(mul4(s, iG[2]))
T8.append(mul4(s, iG[3]))
U1.append(mul4(t, iG[0]))
U2.append(mul4(t, iG[1]))
U3.append(mul4(t, iG[2]))
U4.append(mul4(t, iG[3]))
# round constants
rcon = [1]
r = 1
for t in range(1, 30):
r = mul(2, r)
rcon.append(r)
del A
del AA
del pivot
del B
del G
del box
del log
del alog
del i
del j
del r
del s
del t
del mul
del mul4
del cox
del iG
class rijndael:
def __init__(self, key, block_size = 16):
if block_size != 16 and block_size != 24 and block_size != 32:
raise ValueError('Invalid block size: ' + str(block_size))
if len(key) != 16 and len(key) != 24 and len(key) != 32:
raise ValueError('Invalid key size: ' + str(len(key)))
self.block_size = block_size
ROUNDS = num_rounds[len(key)][block_size]
BC = block_size // 4
# encryption round keys
Ke = [[0] * BC for i in range(ROUNDS + 1)]
# decryption round keys
Kd = [[0] * BC for i in range(ROUNDS + 1)]
ROUND_KEY_COUNT = (ROUNDS + 1) * BC
KC = len(key) // 4
# copy user material bytes into temporary ints
tk = []
for i in range(0, KC):
tk.append((ord(key[i * 4]) << 24) | (ord(key[i * 4 + 1]) << 16) |
(ord(key[i * 4 + 2]) << 8) | ord(key[i * 4 + 3]))
# copy values into round key arrays
t = 0
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t // BC][t % BC] = tk[j]
Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
j += 1
t += 1
tt = 0
rconpointer = 0
while t < ROUND_KEY_COUNT:
# extrapolate using phi (the round key evolution function)
tt = tk[KC - 1]
tk[0] ^= (S[(tt >> 16) & 0xFF] & 0xFF) << 24 ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 16 ^ \
(S[ tt & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) ^ \
(rcon[rconpointer] & 0xFF) << 24
rconpointer += 1
if KC != 8:
for i in range(1, KC):
tk[i] ^= tk[i-1]
else:
for i in range(1, KC // 2):
tk[i] ^= tk[i-1]
tt = tk[KC // 2 - 1]
tk[KC // 2] ^= (S[ tt & 0xFF] & 0xFF) ^ \
(S[(tt >> 8) & 0xFF] & 0xFF) << 8 ^ \
(S[(tt >> 16) & 0xFF] & 0xFF) << 16 ^ \
(S[(tt >> 24) & 0xFF] & 0xFF) << 24
for i in range(KC // 2 + 1, KC):
tk[i] ^= tk[i-1]
# copy values into round key arrays
j = 0
while j < KC and t < ROUND_KEY_COUNT:
Ke[t // BC][t % BC] = tk[j]
Kd[ROUNDS - (t // BC)][t % BC] = tk[j]
j += 1
t += 1
# inverse MixColumn where needed
for r in range(1, ROUNDS):
for j in range(BC):
tt = Kd[r][j]
Kd[r][j] = U1[(tt >> 24) & 0xFF] ^ \
U2[(tt >> 16) & 0xFF] ^ \
U3[(tt >> 8) & 0xFF] ^ \
U4[ tt & 0xFF]
self.Ke = Ke
self.Kd = Kd
def encrypt(self, plaintext):
if len(plaintext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(plaintext)))
Ke = self.Ke
BC = self.block_size // 4
ROUNDS = len(Ke) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][0]
s2 = shifts[SC][2][0]
s3 = shifts[SC][3][0]
a = [0] * BC
# temporary work array
t = []
# plaintext to ints + key
for i in range(BC):
t.append((ord(plaintext[i * 4 ]) << 24 |
ord(plaintext[i * 4 + 1]) << 16 |
ord(plaintext[i * 4 + 2]) << 8 |
ord(plaintext[i * 4 + 3]) ) ^ Ke[0][i])
# apply round transforms
for r in range(1, ROUNDS):
for i in range(BC):
a[i] = (T1[(t[ i ] >> 24) & 0xFF] ^
T2[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T3[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T4[ t[(i + s3) % BC] & 0xFF] ) ^ Ke[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in range(BC):
tt = Ke[ROUNDS][i]
result.append((S[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((S[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((S[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((S[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return ''.join(map(chr, result))
def decrypt(self, ciphertext):
if len(ciphertext) != self.block_size:
raise ValueError('wrong block length, expected ' + str(self.block_size) + ' got ' + str(len(ciphertext)))
Kd = self.Kd
BC = self.block_size // 4
ROUNDS = len(Kd) - 1
if BC == 4:
SC = 0
elif BC == 6:
SC = 1
else:
SC = 2
s1 = shifts[SC][1][1]
s2 = shifts[SC][2][1]
s3 = shifts[SC][3][1]
a = [0] * BC
# temporary work array
t = [0] * BC
# ciphertext to ints + key
for i in range(BC):
t[i] = (ord(ciphertext[i * 4 ]) << 24 |
ord(ciphertext[i * 4 + 1]) << 16 |
ord(ciphertext[i * 4 + 2]) << 8 |
ord(ciphertext[i * 4 + 3]) ) ^ Kd[0][i]
# apply round transforms
for r in range(1, ROUNDS):
for i in range(BC):
a[i] = (T5[(t[ i ] >> 24) & 0xFF] ^
T6[(t[(i + s1) % BC] >> 16) & 0xFF] ^
T7[(t[(i + s2) % BC] >> 8) & 0xFF] ^
T8[ t[(i + s3) % BC] & 0xFF] ) ^ Kd[r][i]
t = copy.copy(a)
# last round is special
result = []
for i in range(BC):
tt = Kd[ROUNDS][i]
result.append((Si[(t[ i ] >> 24) & 0xFF] ^ (tt >> 24)) & 0xFF)
result.append((Si[(t[(i + s1) % BC] >> 16) & 0xFF] ^ (tt >> 16)) & 0xFF)
result.append((Si[(t[(i + s2) % BC] >> 8) & 0xFF] ^ (tt >> 8)) & 0xFF)
result.append((Si[ t[(i + s3) % BC] & 0xFF] ^ tt ) & 0xFF)
return ''.join(map(chr, result))
def encrypt(key, block):
return rijndael(key, len(block)).encrypt(block)
def decrypt(key, block):
return rijndael(key, len(block)).decrypt(block)
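# Example one-shot usage (hypothetical 16-byte key and block):
#   ct = encrypt('0123456789abcdef', 'sixteen byte msg')
#   pt = decrypt('0123456789abcdef', ct)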
class zeropad:
def __init__(self, block_size):
assert 0 < block_size < 256
self.block_size = block_size
def pad(self, pt):
ptlen = len(pt)
padsize = self.block_size - ((ptlen + self.block_size - 1) % self.block_size + 1)
return pt + "\0" * padsize
def unpad(self, ppt):
assert len(ppt) % self.block_size == 0
offset = len(ppt)
if offset == 0:
return ''
end = offset - self.block_size + 1
while offset > end:
offset -= 1
if ppt[offset] != "\0":
return ppt[:offset + 1]
assert False
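# CBC mode: each plaintext block is XORed with the previous ciphertext block (or the IV) before encryption.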
class cbc:
def __init__(self, padding, cipher, iv):
assert padding.block_size == cipher.block_size
assert len(iv) == cipher.block_size
self.padding = padding
self.cipher = cipher
self.iv = iv
def encrypt(self, pt):
ppt = self.padding.pad(pt)
offset = 0
ct = ''
v = self.iv
while offset < len(ppt):
block = ppt[offset:offset + self.cipher.block_size]
block = self.xorblock(block, v)
block = self.cipher.encrypt(block)
ct += block
offset += self.cipher.block_size
v = block
return ct
def decrypt(self, ct):
assert len(ct) % self.cipher.block_size == 0
ppt = ''
offset = 0
v = self.iv
while offset < len(ct):
block = ct[offset:offset + self.cipher.block_size]
decrypted = self.cipher.decrypt(block)
ppt += self.xorblock(decrypted, v)
offset += self.cipher.block_size
v = block
pt = self.padding.unpad(ppt)
return pt
def xorblock(self, b1, b2):
        # sorry, not very Pythonesque
i = 0
r = ''
while i < self.cipher.block_size:
r += chr(ord(b1[i]) ^ ord(b2[i]))
i += 1
return r
def decryptRinjdael(key, iv, data, areBase64 = False):
"""
    Decrypt data encrypted with 32-byte-block Rijndael in CBC mode with zero-byte padding.
    key -- AES key (string)
    iv -- initialization vector (string)
    data -- data to decrypt (string)
    areBase64 -- if True, iv and data are passed in base64
"""
if areBase64:
iv = base64.b64decode(iv).decode("latin_1")
data = base64.b64decode(data).decode("latin_1")
r = rijndael(key, 32)
p = zeropad(32)
c = cbc(p, r, iv)
return str(c.decrypt(data))
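# Example call (hypothetical values; key and iv must both be 32 bytes here):
#   pt = decryptRinjdael('k' * 32, 'i' * 32, data_b64, areBase64=True)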
| agpl-3.0 | 8,741,889,376,303,064,000 | 23.200445 | 108 | 0.534511 | false | 2.264694 | false | false | false |
thinkWhere/Roadnet | street_browser/add.py | 1 | 15108 | # -*- coding: utf-8 -*-
import datetime
from PyQt4.QtSql import QSqlQuery, QSqlQueryModel
from PyQt4.QtGui import QMessageBox, QLineEdit, QComboBox
from PyQt4.QtCore import Qt, QDate
from qgis.core import QgsMapLayerRegistry
from ..generic_functions import SwitchStreetBrowserMode, ZoomSelectCanvas, ipdb_breakpoint
from ..roadnet_dialog import SaveRecordDlg
from edit import EditEsuLink, EditStartEndCoords, UpdateEsuSymbology
from mod_validation import ValidateDescription, ValidateStreetType
__author__ = 'matthew.walsh'
class AddRecord:
"""
Add a new street record to the model
"""
def __init__(self, iface, street_browser, model, mapper, db, params):
self.street_browser = street_browser
self.iface = iface
self.model = model
self.mapper = mapper
self.db = db
self.username = params['UserName']
self.modify = SwitchStreetBrowserMode(self.street_browser)
self.save_dlg = SaveRecordDlg()
self.save_dlg.ui.savePushButton.clicked.connect(self.save_new_record)
self.save_dlg.ui.revertPushButton.clicked.connect(self.cancel_new_record)
self.save_dlg.ui.cancelPushButton.clicked.connect(lambda: self.save_dlg.close())
self.esu_layer = QgsMapLayerRegistry.instance().mapLayersByName('ESU Graphic')[0]
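        # map tblSTREET column indexes to their corresponding form widgets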
self.lineedits = {1: self.street_browser.ui.usrnLineEdit,
8: self.street_browser.ui.startDateDateEdit,
7: self.street_browser.ui.updateDateLineEdit,
2: self.street_browser.ui.versionLineEdit,
6: self.street_browser.ui.entryDateLineEdit,
18: self.street_browser.ui.stateDateLineEdit,
11: self.street_browser.ui.startXLineEdit,
12: self.street_browser.ui.startYLineEdit,
13: self.street_browser.ui.endXLineEdit,
14: self.street_browser.ui.endYLineEdit,
15: self.street_browser.ui.tolLineEdit}
self.combos = {4: self.street_browser.ui.recordTypeComboBox,
20: self.street_browser.ui.localityComboBox,
22: self.street_browser.ui.townComboBox,
21: self.street_browser.ui.countyComboBox,
9: self.street_browser.ui.authorityComboBox,
17: self.street_browser.ui.stateComboBox,
19: self.street_browser.ui.classComboBox}
self.start_idx = None
self.start_desc = None
self.start_tol = None
self.edit_esu = None
self.new_usrn_no = None
self.esu_version = ZoomSelectCanvas(self.iface, self.street_browser, self.db)
def add(self):
"""
        Main method: decide whether to set up for adding or to complete/commit the record
"""
add_text = str(self.street_browser.ui.addPushButton.text())
# Setup blank form
if add_text.lower() == "add":
self.street_browser.ui.editEsuPushButton.clicked.connect(self.create_esu_link)
self.street_browser.ui.editCoordsPushButton.clicked.connect(self.create_start_end_coords)
self.setup_sb_add()
# Completion event
else:
self.save_dlg.setWindowFlags(Qt.Window | Qt.WindowTitleHint | Qt.CustomizeWindowHint)
self.save_dlg.exec_()
def current_desc_tol_idx(self):
"""
Grab the current record index and desc
"""
self.start_idx = self.mapper.currentIndex()
self.start_desc = self.street_browser.ui.descriptionTextEdit.toPlainText()
self.start_tol = self.street_browser.ui.tolLineEdit.text()
def setup_sb_add(self):
"""
Setup the street browser for adding a new record
"""
# Grab current idx's desc, tol
self.current_desc_tol_idx()
n_usrn = self.new_usrn()
self.street_browser.ui.addPushButton.setText("Complete")
self.street_browser.ui.descriptionLabel.setStyleSheet("color : red")
self.modify.edit()
# Clear lineedits
all_lineedits = self.street_browser.findChildren(QLineEdit)
for lineedit in all_lineedits:
lineedit.setText("")
self.clear_xref_and_esu_tables()
self.set_combo_index()
self.set_current_dates()
self.street_browser.ui.tolLineEdit.setStyleSheet("background-color: white")
self.street_browser.ui.tolLineEdit.setReadOnly(False)
self.street_browser.ui.tolLineEdit.setText("10")
self.street_browser.ui.descriptionTextEdit.setText("")
# Set new usrn + version 1
self.street_browser.ui.byLineEdit.setText(self.username)
self.street_browser.ui.usrnLineEdit.setText(str(n_usrn))
self.street_browser.ui.versionLineEdit.setText("1")
# Set the ESU layer to read only
self.esu_layer.setReadOnly(True)
def revert_sb_add(self):
"""
Revert street browser back to read-only mode
"""
self.edit_esu = None
self.modify.read_only()
self.street_browser.ui.tolLineEdit.setReadOnly(True)
self.street_browser.ui.tolLineEdit.setStyleSheet("background-color: rgb(213,234,234)")
self.street_browser.ui.addPushButton.setText("Add")
self.esu_layer.setReadOnly(False)
def clear_xref_and_esu_tables(self):
"""
Blank model clears the xref table
"""
# Set xref to empty model
empty_model = QSqlQueryModel()
self.street_browser.ui.crossReferenceTableView.setModel(empty_model)
# Clear list widget
self.street_browser.ui.linkEsuListWidget.clear()
def set_combo_index(self):
"""
Set the index of the comboboxes
"""
all_combos = self.street_browser.findChildren(QComboBox)
for combo in all_combos:
combo.setCurrentIndex(0)
def set_current_dates(self):
"""
Set date lineedits/date picker to current date
"""
now_date = datetime.datetime.now()
now_formatted = now_date.strftime("%d/%m/%Y")
self.street_browser.ui.updateDateLineEdit.setText(now_formatted)
self.street_browser.ui.entryDateLineEdit.setText(now_formatted)
self.street_browser.ui.stateDateLineEdit.setText(now_formatted)
date_obj = QDate(now_date.year, now_date.month, now_date.day)
self.street_browser.ui.startDateDateEdit.setDate(date_obj)
def cancel_new_record(self):
"""
Revert street browser to read only
"""
self.revert_sb_add()
self.mapper.setCurrentIndex(self.mapper.currentIndex())
self.disconnect_esu_and_coords()
self.save_dlg.close()
def new_usrn(self):
"""
Returns a new usrn (max usrn + 1)
:rtype : int
:return: USRN
"""
query = QSqlQuery("SELECT MAX(usrn) from tblSTREET", self.db)
query.seek(0)
try:
usrn = int(query.value(0)) + 1
except TypeError:
# Throws if there are no USRNs yet. Example for demo db inserted here
# This must be set manually for a new local authority
usrn = 12700001
self.new_usrn_no = usrn
return usrn
def failed_validation_msg(self, mandatory, desc, esu_valid):
# TODO: Attach esu's to error message (see bad_esu = [] in validate_mandatory)
"""
Display appropriate error message for failed validation
        :param mandatory: mandatory-field check passed (bool)
        :param desc: description is unique (bool)
        :param esu_valid: esu links are valid (bool)
"""
err = "Unable to save record:"
errors = []
if not mandatory:
errors.append("All mandatory fields must be complete")
if not desc:
errors.append("Description already exists within this town/locality")
if not esu_valid:
errors.append("Invalid ESU links")
for error in errors:
err = err + "\n" + str(error)
val_fail_msg_box = QMessageBox(QMessageBox.Warning, " ", err, QMessageBox.Ok, None)
val_fail_msg_box.setWindowFlags(Qt.CustomizeWindowHint | Qt.WindowTitleHint)
val_fail_msg_box.exec_()
def save_new_record(self):
"""
Insert new record if all validation is passed
"""
self._strip_whitespace_from_description()
usrn = self.street_browser.ui.usrnLineEdit.text()
mandatory = self.modify.mandatory_field_check()
if self._record_is_type_3_or_4():
unique_desc = True
else:
unique_desc = ValidateDescription(self.street_browser, self.db).validate()
if self.edit_esu:
final_sel = self.edit_esu.get_final_selection()[0]
esu_valid = ValidateStreetType(self.street_browser, self.db).validate(usrn, final_sel)
else:
esu_valid = True
if mandatory and unique_desc and esu_valid:
self.insert_record()
self.revert_sb_add()
self.disconnect_esu_and_coords()
# Update Esu Graphic symbology attribute for all linked Esu's
self.esu_layer = QgsMapLayerRegistry.instance().mapLayersByName('ESU Graphic')[0]
UpdateEsuSymbology(self.db, self.esu_layer).update(usrn)
else:
self.failed_validation_msg(mandatory, unique_desc, esu_valid)
self.save_dlg.close()
def _strip_whitespace_from_description(self):
"""
Strip whitespace from the text in the description field
"""
description = str(self.street_browser.ui.descriptionTextEdit.toPlainText())
description = description.strip()
self.street_browser.ui.descriptionTextEdit.setPlainText(description)
def _record_is_type_3_or_4(self):
"""
        Check the combo box to see if the record is Type 3 or 4
:return boolean:
"""
record_type_combo = self.street_browser.ui.recordTypeComboBox
record_type = int(record_type_combo.itemData(record_type_combo.currentIndex()))
if record_type in (3, 4):
return True
else:
return False
def disconnect_esu_and_coords(self):
try:
self.street_browser.ui.editEsuPushButton.clicked.disconnect()
self.street_browser.ui.editCoordsPushButton.clicked.disconnect()
except TypeError:
pass
def insert_record(self):
"""
Insert a record/row into the model + commit
"""
record = self.model.record()
record.setValue(1, str(self.street_browser.ui.usrnLineEdit.text())) # usrn
record.setValue(3, str(0)) # currency_flag 0
record.setValue(5, str(self.street_browser.ui.descriptionTextEdit.toPlainText()))
record.setValue(23, self.username)
# Set values from lineedits
date_cols = [6, 7, 8, 18]
for idx, lineedit in self.lineedits.iteritems():
txt = str(lineedit.text())
if txt:
# re-format dates for db
if idx in date_cols:
txt = self.database_dates(txt)
record.setValue(idx, txt)
# Set values from comboboxes
for idx, combo in self.combos.iteritems():
combo_idx = combo.currentIndex()
# if combo_idx != 0:
record.setValue(idx, str(combo.itemData(combo_idx)))
# Append record after last current record
self.model.insertRecord(-1, record)
# Commit to db + insert any esu links
self.model.submitAll()
self.commit_esu_link()
self.repopulate_model()
def repopulate_model(self):
"""
Repopulate the model to show the new model
"""
while self.model.canFetchMore():
self.model.fetchMore()
# jump to new record (appended to end)
self.mapper.toLast()
def database_dates(self, date):
"""
Format dates from lineedits for database (yyyymmdd)
:param date: Date string
:return: formattted date string
"""
date_obj = datetime.datetime.strptime(date, "%d/%m/%Y")
db_date = str(date_obj.strftime("%Y%m%d"))
return db_date
def create_esu_link(self):
"""
Add esu links to a street
"""
button = self.street_browser.ui.editEsuPushButton
layer = 'ESU Graphic'
display_attr = 'esu_id'
if self.edit_esu:
previous_unsaved = self.edit_esu.get_final_selection()[0]
self.edit_esu = EditEsuLink(self.iface, button, self.db, street_browser=self.street_browser,
layer_name=layer, dis_attr=display_attr, unsaved=previous_unsaved)
else:
self.edit_esu = EditEsuLink(self.iface, button, self.db, street_browser=self.street_browser,
layer_name=layer, dis_attr=display_attr)
self.edit_esu.show()
def commit_esu_link(self):
"""
        Update existing esu links on edit and handle adding/removing links via editing
"""
usrn = str(self.new_usrn_no)
if self.edit_esu:
# get new set of esu links
esus = self.edit_esu.get_final_selection()
final = esus[0]
else:
# No esu edits made so query for existing esu links
final = self.esu_version.query_esu(usrn)
date = str(datetime.datetime.now().strftime("%Y%m%d"))
try:
for esu in final:
query_str = "SELECT version_no FROM tblESU WHERE esu_id = %s AND currency_flag = 0;" % esu
query = QSqlQuery(query_str, self.db)
seek = query.seek(0)
if seek:
esu_ver = query.value(0)
else:
esu_ver = str(1)
# Create new links
insert_sql = "INSERT INTO lnkESU_STREET (esu_id, usrn, esu_version_no, usrn_version_no, currency_flag," \
" entry_date, update_date) VALUES (%s, %s, %s, 1, 0, %s, %s)" \
% (esu, usrn, esu_ver, date, date)
new_lnk_query = QSqlQuery(insert_sql, self.db)
except TypeError:
# No esu's attached to record
pass
def create_start_end_coords(self):
"""
Create instance of cooord edit class
"""
coord_le = {"start_xref": self.street_browser.ui.startXLineEdit,
"start_yref": self.street_browser.ui.startYLineEdit,
"end_xref": self.street_browser.ui.endXLineEdit,
"end_yref": self.street_browser.ui.endYLineEdit}
button = self.street_browser.ui.editCoordsPushButton
usrn = self.street_browser.ui.usrnLineEdit.text()
coords = EditStartEndCoords(self.iface, coord_le, self.model, self.mapper, button, usrn=usrn, edit=False)
coords.show()
| gpl-2.0 | -7,157,423,312,518,393,000 | 39.943089 | 121 | 0.601403 | false | 3.852116 | false | false | false |
josephyli/py-db-cluster | runDDL.py | 1 | 9526 | import argparse
import os
import pymysql.cursors
import re
import sys
from ConfigParser import SafeConfigParser
from StringIO import StringIO
from pymysql import OperationalError
# returns a list of sql commands as strings
def read_DDL(ddlfilename):
f = open(ddlfilename, 'r')
ddlfile = f.read()
f.close()
temp = filter(None, ddlfile.split(';'))
sql_commands = []
# filter out white space from file input
for c in temp:
if c != "\n":
sql_commands.append(c)
return sql_commands
# returns a dict with all nodes information
# responsible for parsing the config file
def get_node_config(configfilename):
config_dict = {}
if os.path.isfile(configfilename):
with open(configfilename) as stream:
# pass into string & add a header
stream = StringIO("[fakesection]\n" + stream.read())
# read/parse catalog data
cp = SafeConfigParser()
cp.readfp(stream)
config_dict['catalog.driver'] = cp.get('fakesection', 'catalog.driver')
config_dict['catalog.hostname'] = cp.get('fakesection', 'catalog.hostname')
config_dict['catalog.username'] = cp.get('fakesection', 'catalog.username')
config_dict['catalog.passwd'] = cp.get('fakesection', 'catalog.passwd')
config_dict['catalog.database'] = cp.get('fakesection', 'catalog.hostname').rsplit('/', 1)[-1]
# read the number of nodes
numnodes = cp.getint('fakesection', 'numnodes')
config_dict['catalog.numnodes'] = numnodes
# read node data and print out info
for node in range(1, numnodes + 1):
for candidate in ['driver', 'hostname', 'username', 'passwd', 'database']:
# test if candidate exists before adding to dictionary
if cp.has_option('fakesection', "node" + str(node) + "." + candidate):
# print cp.get('fakesection', "node" + str(node) + "." + candidate)
config_dict["node" + str(node) + "." + candidate] = cp.get('fakesection', "node" + str(node) + "." + candidate)
else:
if candidate == "database":
config_dict["node" + str(node) + ".database"] = cp.get('fakesection', "node" + str(node) + ".hostname").rsplit('/', 1)[-1]
else:
print "error: candidate not found"
return config_dict
else:
print("No config file found at", configfilename)
return null
def check_dtables_exists(config_dict):
cat_hn = re.findall( r'[0-9]+(?:\.[0-9]+){3}', config_dict['catalog.hostname'] )[0]
cat_usr = config_dict['catalog.username']
cat_pw = config_dict['catalog.passwd']
cat_dr = config_dict['catalog.driver']
cat_db = config_dict['catalog.database']
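    # check information_schema for an existing dtables table in the catalog database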
sql = "SELECT * FROM information_schema.tables WHERE table_schema = '%s' AND table_name = 'dtables' LIMIT 1;" % cat_db
    res = None
try:
# connect and execute the sql statement
connection = pymysql.connect(host=cat_hn,
user=cat_usr,
password=cat_pw,
db=cat_db,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
print "[SUCCESSFUL CATALOG CONNECTION] <"+connection.host+" - "+connection.db+">", connection
print
with connection.cursor() as cursor:
res = cursor.execute(sql.strip() + ';')
connection.commit()
except pymysql.err.InternalError as d:
print "[FAILED TO CHECK IF CATALOG EXISTS]"
print d
if res:
return True
else:
return False
# stores metadata about the DDL in a catalog database
# using a list of tables that need to be created in the catalog
def update_catalog(config_dict, table_list):
cat_hn = re.findall( r'[0-9]+(?:\.[0-9]+){3}', config_dict['catalog.hostname'] )[0]
cat_usr = config_dict['catalog.username']
cat_pw = config_dict['catalog.passwd']
cat_dr = config_dict['catalog.driver']
cat_db = config_dict['catalog.database']
if check_dtables_exists(config_dict):
sql = []
else:
sql = ["CREATE TABLE IF NOT EXISTS dtables (tname char(32), nodedriver char(64), nodeurl char(128), nodeuser char(16), nodepasswd char(16), partmtd int, nodeid int, partcol char(32), partparam1 char(32), partparam2 char(32));"]
# prepares the sql statement to insert into catalog the tables in each node
for table in table_list:
for i in range(config_dict["catalog.numnodes"]):
hn = config_dict['node'+str(i + 1)+'.hostname']
usr = config_dict['node'+str(i + 1)+'.username']
pw = config_dict['node'+str(i + 1)+'.passwd']
dr = config_dict['node'+str(i + 1)+'.driver']
sql.append("INSERT INTO dtables VALUES (\'%s\', \'%s\', \'%s\', \'%s\',\'%s\', NULL,%d,NULL,NULL,NULL);" % (table,dr,hn,usr,pw,i+1))
try:
# connect and execute the sql statement
connection = pymysql.connect(host=cat_hn,
user=cat_usr,
password=cat_pw,
db=cat_db,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
print "[SUCCESSFUL CATALOG CONNECTION] <"+connection.host+" - "+connection.db+">", connection
print
with connection.cursor() as cursor:
# execute every sql command
for command in sql:
try:
print command
print
cursor.execute(command.strip() + ';')
connection.commit()
except OperationalError, msg:
print "Command skipped: ", msg
except pymysql.err.InternalError as d:
print "[FAILED TO UPDATE CATALOG]"
print d
# returns a list of connections to all nodes
def get_connections(config_dict):
connections = []
for i in range(config_dict["catalog.numnodes"]):
try:
hn = re.findall( r'[0-9]+(?:\.[0-9]+){3}', config_dict['node'+str(i + 1)+'.hostname'] )[0]
usr = config_dict['node'+str(i + 1)+'.username']
pw = config_dict['node'+str(i + 1)+'.passwd']
db = config_dict['node'+str(i + 1)+'.database']
connections.append(pymysql.connect(host=hn,
user=usr,
password=pw,
db=db,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor))
except pymysql.MySQLError as e:
print "[NODE", i + 1, "CONNECTION FAILED]:"
print "hostname:".rjust(12), re.findall( r'[0-9]+(?:\.[0-9]+){3}', config_dict['node'+str(i + 1)+'.hostname'] )[0]
print "username:".rjust(12), config_dict['node'+str(i + 1)+'.username']
print "password:".rjust(12), config_dict['node'+str(i + 1)+'.passwd']
print "database:".rjust(12), config_dict['node'+str(i + 1)+'.database']
print 'Got error {!r}, errno is {}'.format(e, e.args[0])
print
return connections
# runs the list of commands against the list of connections
# later, this will implement multi-threading
def run_commmands_against_nodes(connections, sql_commands):
import time
from threading import Thread
from threading import active_count
# create a list of jobs
list_of_threads = []
for connection in connections:
print "[JOB CREATED] <"+ connection.host+ " - " + connection.db+ ">"
print connection
list_of_threads.append(Thread(target=run_sql_commands_against_node, args=(connection, sql_commands)))
print
# start up all jobs
for t in list_of_threads:
t.start()
# wait for all jobs to complete before moving on
while active_count() > 1:
time.sleep(1)
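# executes all SQL commands on one node's connection (run in a worker thread), then closes it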
def run_sql_commands_against_node(connection, sql_commands):
with connection.cursor() as cursor:
try:
for c in sql_commands:
cursor.execute(c.strip() + ';')
connection.commit()
print "[JOB SUCCESSFUL] <"+connection.host+ " - " + connection.db+ ">"
connection.close()
except pymysql.MySQLError as e:
print "[JOB FAILED] <"+connection.host+ " - " + connection.db+ "> ERROR: {!r}, ERROR NUMBER: {}".format(e, e.args[0])
def print_pretty_dict(idict):
import json
print json.dumps(idict, indent=1)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("configfile", help="Location of Config File, See the README for more information")
parser.add_argument("ddlfile", help="Location of DDL File, See the README for more information")
args = parser.parse_args()
print
print "=" * 80
print
# read configuration and return a dictionary -------------------------------
temp = "PARSING " + str(args.configfile) + "..."
print
print temp.center(80, " ")
nodes_dict = get_node_config(args.configfile)
print_pretty_dict(nodes_dict)
print
print "-" * 80
print
# return a list of connections to all nodes --------------------------------
print "CREATING CONNECTIONS...".center(80, " ")
print
node_connections = get_connections(nodes_dict)
# if no connections were made, terminate the program, comment this out for testing
if len(node_connections) == 0:
print "Terminating due to connection failures..."
sys.exit()
print "# of connections:", str(len(node_connections))
print
for c in node_connections:
print "HOST: " + c.host + " DB: " + c.db + " " + str(c)
print
print "-" * 80
print
# read DDL and return a list of sql commands -------------------------------
print "PARSING SQL COMMANDS...".center(80, " ")
print
sql_commands = read_DDL(args.ddlfile)
# list of tables is used to update catalog with metadata
table_list = []
for command in sql_commands:
if command.split()[0].upper() == "CREATE":
table_list.append((re.split('\s|\(',command)[2]))
print "[SQL COMMANDS]:"
for s in sql_commands:
print s.strip()
print
print "TABLES:"
print table_list
print
print "-" * 80
print
# update catalog ----------------------------------------------------------
print "UPDATING CATALOG...".center(80, " ")
print
update_catalog(nodes_dict,table_list)
print
print "-" * 80
print
# run the commands against the nodes ---------------------------------------
print "EXECUTING SQL COMMANDS ON NODES...".center(80, " ")
print
run_commmands_against_nodes(node_connections, sql_commands)
print
print "=" * 80
print
if __name__ == "__main__":
main()
| gpl-3.0 | 3,556,770,068,061,387,300 | 32.780142 | 229 | 0.655364 | false | 3.196644 | true | false | false |
alibozorgkhan/django-boilerplate | django_boilerplate/settings/base.py | 1 | 4882 | """
Django settings for django_boilerplate project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+!=wvvf$f^jytsaol8_50@)+xw*7m4@v&9=xm!()b(n_731dhm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
LOCAL_APPS = [
'accounts',
'app',
]
EXTERNAL_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
]
INSTALLED_APPS = LOCAL_APPS + EXTERNAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware'
]
ROOT_URLCONF = 'django_boilerplate.urls'
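# template directories: one per local app, plus the project-level directory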
TEMPLATES_DIR = ["{}/templates".format(app) for app in LOCAL_APPS] + ['django_boilerplate/templates']
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': TEMPLATES_DIR,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_boilerplate.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'assets/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Social Auth
AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.linkedin.LinkedinOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'index'
SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get('SOCIAL_AUTH_FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('SOCIAL_AUTH_FACEBOOK_SECRET')
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id,name,email',
}
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET')
SOCIAL_AUTH_LINKEDIN_OAUTH2_KEY = os.environ.get('SOCIAL_AUTH_LINKEDIN_OAUTH2_KEY')
SOCIAL_AUTH_LINKEDIN_OAUTH2_SECRET = os.environ.get('SOCIAL_AUTH_LINKEDIN_OAUTH2_SECRET')
SOCIAL_AUTH_LINKEDIN_OAUTH2_SCOPE = ['r_basicprofile', 'r_emailaddress']
SOCIAL_AUTH_LINKEDIN_OAUTH2_FIELD_SELECTORS = ['email-address']
SOCIAL_AUTH_LINKEDIN_OAUTH2_OAUTH2_EXTRA_DATA = [('id', 'id'),
('firstName', 'first_name'),
('lastName', 'last_name'),
('emailAddress', 'email_address')]
| mit | 2,478,633,743,751,343,000 | 28.409639 | 101 | 0.681073 | false | 3.364576 | false | false | false |
djangraw/PsychoPyParadigms | Reading/DistractionTask_eyelink_d6.py | 1 | 30141 | #!/usr/bin/env python2
"""Display multi-page text with simultaneous auditory distractions, recording eye position data using the EyeLink eye tracker."""
# DistractionTask_eyelink_d6.py
# Created 3/16/15 by DJ based on VidLecTask.py
# Updated 3/31/15 by DJ - renamed from ReadingTask_dict_d2.py.
# Updated 4/1-16/15 by DJ - incorporated eyelink fully, renamed ReadingTask_eyelink_d1.py.
# Updated 4/16/15 by DJ - removed questions, added randomized thought probes and automatic pick up where you left off.
# Updated 4/17/15 by DJ - removed Eyelink again to have same behavioral version
# Updated 6/29/15 by DJ - removed random session length ranges and probe times - page ranges specified in params.
# Updated 7/7/15 by DJ - Renamed from ReadingImageTask_dict_d4, added audio.
# Updated 7/15/15 by DJ - added sound time limits
# Updated 7/20/15 by DJ - switched to premade sound files, switched back to eyelink version, debugged
# Updated 7/24/15 by DJ - added quiz files list, imagePrefix list, readingQuiz list and audioQuiz list
# Updated 7/28/15 by DJ - made sounds play on page-by-page basis, sound is randomized,
# Updated 8/18/15 by DJ - added serial port (and changed name from _behavior to _serial), but haven't tried it yet.
# Updated 8/21/15 by DJ - tested in 3T-C and debugged as necessary
# Updated 9/17/15 by DJ - added logging of each message sent
# Updated 10/22/15 by DJ - added saving
# Updated 10/29/15 by DJ - cleaned up slightly, edited PromptTools to ask subjects not to skip around.
# Updated 11/11/15 by DJ - added additional calibration parameters (changed name to _d6)
# Updated 11/12/15 by DJ - switched to 1024x768 (max res of rear projector)
# Updated 12/2/15 by DJ - adapted serial version back to EyeLink version
# Import packages
from psychopy import core, gui, data, event, sound, logging #, visual # visual causes a bug in the guis, so I moved it down.
from psychopy.tools.filetools import fromFile, toFile
import time as ts, numpy as np
import AppKit, os # for monitor size detection, files
import PromptTools
import random
"""
# Import SMI libraries
import serial
from LibSmi_PsychoPy import LibSmi_PsychoPy
"""
#"""
# Import eyelink libraries
from pylink import *
from EyeLinkCoreGraphicsPsychoPy import EyeLinkCoreGraphicsPsychoPy
#"""
# ====================== #
# ===== PARAMETERS ===== #
# ====================== #
# Save the parameters declared below?
saveParams = True
newParamsFilename = 'DistractionParams_eyelink_d6.pickle'
expInfoFilename = 'lastDistractionInfo_eyelink_d6.pickle'
# Declare primary task parameters.
params = {
# FOR INITIAL PILOTS
'imagePrefixList': ['Greeks_Lec02_stretch_gray','Greeks_Lec02_stretch_gray','Greeks_Lec02_stretch_gray','Greeks_Lec02_stretch_gray','Greeks_Lec07_stretch_gray','Greeks_Lec07_stretch_gray'],
'startPageList': [1,31,61,91,1,31], # page where each session should start
'endPageList': [30,60,90,120,30,60], # inclusive
'readingQuizList':['Lecture02Questions_d4_read1.txt','Lecture02Questions_d4_read2.txt','Lecture02Questions_d4_read3.txt','Lecture02Questions_d4_read4.txt','Lecture07Questions_d3_read1.txt','Lecture07Questions_d3_read2.txt',],
'soundFileList': ['Lecture10_40min.wav']*6,
# 'imagePrefixList': ['Greeks_Lec07_stretch_gray','Greeks_Lec07_stretch_gray','Greeks_Lec10_stretch_gray','Greeks_Lec10_stretch_gray','Greeks_Lec02_stretch_gray','Greeks_Lec02_stretch_gray'],
# 'startPageList': [1,31,1,31,61,91], # page where each session should start
# 'endPageList': [30,60,30,60,90,120], # inclusive
# 'soundFileList': ['Lecture02_40min.wav']*6,
# 'readingQuizList':['Lecture07Questions_d3_read1.txt','Lecture07Questions_d3_read2.txt','Lecture10Questions_d4_read1.txt','Lecture10Questions_d4_read2.txt','Lecture02Questions_d4_read3.txt','Lecture02Questions_d4_read4.txt'],
# 'soundFileList': ['Lecture02_40min.wav']*6,
'promptTypeList': ['AttendReading','AttendBoth_short','AttendReading_short','AttendBoth_short','AttendBoth_short','AttendReading_short'],
'soundQuizList':['BLANK.txt']*6,
'quizPromptList':['TestReading_box']*6,
'probSoundList':[0.5]*6,
# REST OF PARAMS
'skipPrompts': False, # go right to the scanner-wait page
'maxPageTime': 14, # max time the subject is allowed to read each page (in seconds)
'pageFadeDur': 3, # for the last pageFadeDur seconds, the text will fade to white.
'IPI': 2, # time between when one page disappears and the next appears (in seconds)
'probSound': 0.5, # probability that sound will be played on any given page
'IBI': 1, # time between end of block/probe and beginning of next block (in seconds)
'tStartup': 2, # pause time before starting first page
'probeDur': 60, # max time subjects have to answer a Probe Q
'keyEndsProbe': True, # will a keypress end the probe?
'pageKey': 'b',#'space', # key to turn page
'respKeys': ['g','r','b','y'], # keys to be used for responses (clockwise from 9:00) - "DIAMOND" RESPONSE BOX
'wanderKey': 'z', # key to be used to indicate mind-wandering
'triggerKey': 't', # key from scanner that says scan is starting
# declare image and question files
'imageDir': 'ReadingImages/',
'imagePrefix': '', # images must start with this and end with _page<number>.jpg
'soundDir': 'sounds/',
'soundFile': '', # fill in later
'promptType': '', # fill in later
'soundVolume': 0.5,
'whiteNoiseFile': 'Lecture10_40min_phasescrambled.wav', #'WhiteNoise-7m30s.wav', # this plays when the lecture doesn't.
'pageRange': [1, 1], # pages (starting from 1) at which reading should start and stop in each block
'textDir': 'questions/', # directory containing questions and probes
'probesFile': 'BLANK.txt', #'ReadingProbes_d2.txt', #'ReadingProbes_behavior.txt', #
'readingQuiz':'', # fill in later
'soundQuiz':'', # fill in later
'quizPrompt':'', # fill in later
'questionOrder':[], # fill in later
# declare other stimulus parameters
'fullScreen': True, # run in full screen mode?
'screenToShow': 1, # display on primary screen (0) or secondary (1)?
'screenColor':(128,128,128), # in rgb255 space
'imageSize': (960,709), # (FOR 1024x768 SCREEN) # in pixels... set to None for exact size of screen #(1201,945), # (FOR 1280x1024 SCREEN)
'fixCrossSize': 10, # size of cross, in pixels
'fixCrossPos': (-480,354), # (x,y) pos of fixation cross displayed before each page (for drift correction) #[-600, 472],
'usePhotodiode': False, # add sync square in corner of screen
#"""
'isEyeLinkConnected': False # is there an EyeLink tracker connected via ethernet?
}
#"""
"""
# declare serial port & calibration parameters for SMI (remove bracket and add comma to lines just above)
'portName': '/dev/tty.usbserial',
'portBaud': 115200,
'calNPoints': 13, # number of points in the calibration (and validation)The number of points to be used for the validation (standard=9)
'calAutoAccept': False, # Let SMI pick when to accept a point (True [default]) or accept manually (False).
'calGoFast': False, # Go quickly from point to point (True) or slower and more precise (False [default]).
'calCheckLevel': 3 #calibration check level (0=none,1=weak,2=medium,3=strong [default])
}
"""
# save parameters
if saveParams:
print("Opening save dialog:")
dlgResult = gui.fileSaveDlg(prompt='Save Params...',initFilePath = os.getcwd() + '/params', initFileName = newParamsFilename,
allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
newParamsFilename = dlgResult
print("dlgResult: %s"%dlgResult)
if newParamsFilename is None: # keep going, but don't save
saveParams = False
print("Didn't save params.")
else:
toFile(newParamsFilename, params)# save it!
print("Saved params to %s."%newParamsFilename)
# toFile(newParamsFilename, params)
# print("saved params to %s."%newParamsFilename)
# ========================== #
# ===== SET UP LOGGING ===== #
# ========================== #
try:#try to get a previous parameters file
expInfo = fromFile(expInfoFilename)
expInfo['session'] +=1 # automatically increment session number
expInfo['paramsFile'] = [expInfo['paramsFile'],'Load...']
except:#if not there then use a default set
expInfo = {'subject':'1', 'session':1, 'skipPrompts':False, 'tSound':0.0, 'paramsFile':['DEFAULT','Load...']}
# overwrite if you just saved a new parameter set
if saveParams:
expInfo['paramsFile'] = [newParamsFilename,'Load...']
dateStr = ts.strftime("%b_%d_%H%M", ts.localtime()) # add the current time
#present a dialogue to change params
dlg = gui.DlgFromDict(expInfo, title='Distraction task', order=['subject','session','skipPrompts','paramsFile'])
if not dlg.OK:
core.quit()#the user hit cancel so exit
# find parameter file
if expInfo['paramsFile'] == 'Load...':
dlgResult = gui.fileOpenDlg(prompt='Select parameters file',tryFilePath=os.getcwd(),
allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
expInfo['paramsFile'] = dlgResult[0]
# load parameter file
if expInfo['paramsFile'] not in ['DEFAULT', None]: # otherwise, just use defaults.
# load params file
params = fromFile(expInfo['paramsFile'])
# GET NEW START AND STOP PAGES
params['pageRange'][0] = params['startPageList'][expInfo['session']-1] # use session-1 as index of list
params['pageRange'][1] = params['endPageList'][expInfo['session']-1] # use session-1 as index of list
# GET SOUND FILE AND OTHER SESSION-DEPENDENT INFO
params['soundFile'] = params['soundFileList'][expInfo['session']-1]
params['promptType'] = params['promptTypeList'][expInfo['session']-1]
params['imagePrefix'] = params['imagePrefixList'][expInfo['session']-1]
params['readingQuiz'] = params['readingQuizList'][expInfo['session']-1]
params['soundQuiz'] = params['soundQuizList'][expInfo['session']-1]
params['quizPrompt'] = params['quizPromptList'][expInfo['session']-1]
params['probSound'] = params['probSoundList'][expInfo['session']-1]
tSound = expInfo['tSound']
# transfer skipPrompts
params['skipPrompts'] = expInfo['skipPrompts']
# read questions and answers from text files
[questions_reading,options_reading,answers_reading] = PromptTools.ParseQuestionFile(params['textDir']+params['readingQuiz'])
print('%d questions loaded from %s'%(len(questions_reading),params['readingQuiz']))
[questions_sound,options_sound,answers_sound] = PromptTools.ParseQuestionFile(params['textDir']+params['soundQuiz'])
print('%d questions loaded from %s'%(len(questions_sound),params['soundQuiz']))
# append the two
questions_all = questions_reading + questions_sound
options_all = options_reading + options_sound
answers_all = answers_reading + answers_sound
# shuffle the order
newOrder = range(0,len(questions_all))
random.shuffle(newOrder)
questions_all = [questions_all[i] for i in newOrder]
options_all = [options_all[i] for i in newOrder]
answers_all = [answers_all[i] for i in newOrder]
params['questionOrder'] = newOrder
# ========================== #
# ===== GET SCREEN RES ===== #
# ========================== #
# kluge for secondary monitor
if params['fullScreen']:
screens = AppKit.NSScreen.screens()
screenRes = (int(screens[params['screenToShow']].frame().size.width), int(screens[params['screenToShow']].frame().size.height))
# screenRes = (1920, 1200)
if params['screenToShow']>0:
params['fullScreen'] = False
else:
screenRes = (1024,768)
# save screen size to params struct
params['screenSize'] = screenRes
# adjust image size if one was not entered.
if params['imageSize'] is None:
params['imageSize'] = (screenRes[0], screenRes[1])
# ========================== #
# ===== LOG PARAMETERS ===== #
# ========================== #
# print params to Output
print 'params = {'
for key in sorted(params.keys()):
print " '%s': %s"%(key,params[key]) # print each value as-is (no quotes)
print '}'
#make a log file to save parameter/event data
filename = 'DistractionTask-%s-%d-%s'%(expInfo['subject'], expInfo['session'], dateStr) #'Sart-' + expInfo['subject'] + '-' + expInfo['session'] + '-' + dateStr
logging.LogFile((filename+'.log'), level=logging.INFO)#, mode='w') # w=overwrite
logging.log(level=logging.INFO, msg='---START PARAMETERS---')
logging.log(level=logging.INFO, msg='filename: %s'%filename)
logging.log(level=logging.INFO, msg='subject: %s'%expInfo['subject'])
logging.log(level=logging.INFO, msg='session: %s'%expInfo['session'])
logging.log(level=logging.INFO, msg='date: %s'%dateStr)
logging.log(level=logging.INFO, msg='tSound: %s'%expInfo['tSound'])
for key in sorted(params.keys()): # in alphabetical order
logging.log(level=logging.INFO, msg='%s: %s'%(key,params[key]))
logging.log(level=logging.INFO, msg='---END PARAMETERS---')
# ========================== #
# ===== SET UP TRACKER ===== #
# ========================== #
"""
# Set up SMI's serial port by declaring LibSmi object
myTracker = LibSmi_PsychoPy(experiment='DistractionTask_serial_d4',port=params['portName'], baudrate=params['portBaud'], useSound=True, w=screenRes[0], h=screenRes[1], bgcolor=params['screenColor'],fullScreen=params['fullScreen'],screenToShow=params['screenToShow'])
print "Port %s isOpen = %d"%(myTracker.tracker.name,myTracker.tracker.isOpen())
"""
#"""
# Set up EyeLink tracker
# Declare constants
LEFT_EYE = 0
RIGHT_EYE = 1
BINOCULAR = 2
# Set up tracker
if params['isEyeLinkConnected']:
eyelinktracker = EyeLink()
else:
eyelinktracker = EyeLink(None)
# Check for successful connection
if not eyelinktracker:
print('=== ERROR: Eyelink() returned None.')
core.quit()
#Initialize the graphics
genv = EyeLinkCoreGraphicsPsychoPy(screenRes[0],screenRes[1],eyelinktracker,bgcolor=params['screenColor'])
openGraphicsEx(genv)
#Opens the EDF file.
edfFileName = filename + '.EDF'
edfHostFileName = 'TEST.EDF'
getEYELINK().openDataFile(edfHostFileName)
pylink.flushGetkeyQueue(); # used to be below openDataFile
getEYELINK().setOfflineMode();
#Gets the display surface and sends a mesage to EDF file;
screenRes = genv.win.size
getEYELINK().sendCommand("screen_pixel_coords = 0 0 %d %d" %(screenRes[0] - 1, screenRes[1] - 1))
getEYELINK().sendMessage("DISPLAY_COORDS 0 0 %d %d" %(screenRes[0] - 1, screenRes[1] - 1))
# send software version
tracker_software_ver = 0
eyelink_ver = getEYELINK().getTrackerVersion()
if eyelink_ver == 3:
tvstr = getEYELINK().getTrackerVersionString()
vindex = tvstr.find("EYELINK CL")
tracker_software_ver = int(float(tvstr[(vindex + len("EYELINK CL")):].strip()))
if eyelink_ver>=2:
getEYELINK().sendCommand("select_parser_configuration 0")
if eyelink_ver == 2: #turn off scenelink camera stuff
getEYELINK().sendCommand("scene_camera_gazemap = NO")
else:
getEYELINK().sendCommand("saccade_velocity_threshold = 35")
getEYELINK().sendCommand("saccade_acceleration_threshold = 9500")
# set EDF file contents
getEYELINK().sendCommand("file_event_filter = LEFT,RIGHT,FIXATION,SACCADE,BLINK,MESSAGE,BUTTON,INPUT")
if tracker_software_ver>=4:
getEYELINK().sendCommand("file_sample_data = LEFT,RIGHT,GAZE,AREA,GAZERES,STATUS,HTARGET,INPUT")
else:
getEYELINK().sendCommand("file_sample_data = LEFT,RIGHT,GAZE,AREA,GAZERES,STATUS,INPUT")
# set link data (used for gaze cursor)
getEYELINK().sendCommand("link_event_filter = LEFT,RIGHT,FIXATION,SACCADE,BLINK,BUTTON,INPUT")
if tracker_software_ver>=4:
getEYELINK().sendCommand("link_sample_data = LEFT,RIGHT,GAZE,GAZERES,AREA,STATUS,HTARGET,INPUT")
else:
getEYELINK().sendCommand("link_sample_data = LEFT,RIGHT,GAZE,GAZERES,AREA,STATUS,INPUT")
#getEYELINK().sendCommand("button_function 5 'accept_target_fixation'");
#
#eye_used = getEYELINK().eyeAvailable() #determine which eye(s) are available
#if eye_used == RIGHT_EYE:
# getEYELINK().sendMessage("EYE_USED 1 RIGHT")
#elif eye_used == LEFT_EYE:
# getEYELINK().sendMessage("EYE_USED 0 LEFT")
#elif eye_used == BINOCULAR:
# getEYELINK().sendMessage("EYE_USED 2 BOTH")
#else:
# print("ERROR in getting the eye information!")
# Set calibration parameters
#pylink.setCalibrationColors((0, 0, 0), (192, 192, 192)); #Sets the calibration target and background color
#pylink.setTargetSize(int(screenRes[0]/70), int(screenRes[0]/300)); #select best size for calibration target
#pylink.setCalibrationSounds("", "", "");
#pylink.setDriftCorrectSounds("", "off", "off");
# Ensure that the eye(s) selected during calibration is the one that gets used in the experiment.
getEYELINK().sendCommand("select_eye_after_validation = NO")
# Check if we should exit
if (eyelinktracker is not None and (not getEYELINK().isConnected() or getEYELINK().breakPressed())):
CoolDown()
#"""
# ========================== #
# ===== SET UP STIMULI ===== #
# ========================== #
from psychopy import visual
# Initialize deadline for displaying next frame
tNextFlip = [0.0] # put in a list to make it mutable?
#create clocks and window
globalClock = core.Clock()#to keep track of time
trialClock = core.Clock()#to keep track of time
#win = visual.Window(screenRes, fullscr=params['fullScreen'], allowGUI=False, monitor='testMonitor', screen=params['screenToShow'], units='deg', name='win',rgb=[1,1,1])
"""
win = myTracker.win # SMI version
"""
#"""
win = genv.win # eyelink version
#"""
# create stimuli
fCS = params['fixCrossSize'] # size (for brevity)
fCP = params['fixCrossPos'] # position (for brevity)
fixation = visual.ShapeStim(win,lineColor='#000000',lineWidth=3.0,vertices=((fCP[0]-fCS/2,fCP[1]),(fCP[0]+fCS/2,fCP[1]),(fCP[0],fCP[1]),(fCP[0],fCP[1]+fCS/2),(fCP[0],fCP[1]-fCS/2)),units='pix',closeShape=False,name='fixCross');
message1 = visual.TextStim(win, pos=[0,+.5], wrapWidth=1.5, color='#000000', alignHoriz='center', name='topMsg', text="aaa",units='norm')
message2 = visual.TextStim(win, pos=[0,-.5], wrapWidth=1.5, color='#000000', alignHoriz='center', name='bottomMsg', text="bbb",units='norm')
# initialize main text stimulus
imageName = '%s%s/%s_page%d.jpg'%(params['imageDir'],params['imagePrefix'],params['imagePrefix'],1)
textImage = visual.ImageStim(win, pos=[0,0], name='Text',image=imageName, units='pix', size=params['imageSize'])
# initialize photodiode stimulus
squareSize = 0.4
diodeSquare = visual.Rect(win,pos=[squareSize/4-1,squareSize/4-1],lineColor='white',fillColor='black',size=[squareSize,squareSize],units='norm',name='diodeSquare')
# declare probe parameters
[probe_strings, probe_options,_] = PromptTools.ParseQuestionFile(params['textDir']+params['probesFile'])
print('%d probes loaded from %s'%(len(probe_strings),params['probesFile']))
# Look up prompts
[topPrompts,bottomPrompts] = PromptTools.GetPrompts(os.path.basename(__file__),params['promptType'],params)
print('%d prompts loaded from %s'%(len(topPrompts),'PromptTools.py'))
# Look up question prompts
[topQuizPrompts,bottomQuizPrompts] = PromptTools.GetPrompts(os.path.basename(__file__),params['quizPrompt'],params)
print('%d prompts loaded from %s'%(len(topPrompts),'PromptTools.py'))
# declare sound!
# fullSound = sound.Sound(value='%s%s'%(params['soundDir'], params['soundFile']), volume=params['soundVolume'], name='fullSound')
pageSound = sound.Sound(value='%s%s'%(params['soundDir'], params['soundFile']), volume=params['soundVolume'], start=tSound, stop=tSound+params['maxPageTime'], name='pageSound')
whiteNoiseSound = sound.Sound(value='%s%s'%(params['soundDir'], params['whiteNoiseFile']), volume=params['soundVolume'], start=0, stop=params['maxPageTime'], name='whiteNoiseSound')
# ============================ #
# ======= SUBFUNCTIONS ======= #
# ============================ #
# increment time of next window flip
def AddToFlipTime(tIncrement=1.0):
tNextFlip[0] += tIncrement
# print("%1.3f --> %1.3f"%(globalClock.getTime(),tNextFlip[0]))
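# reset the next-flip deadline to the current time (used when a keypress ends a page early)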
def SetFlipTimeToNow():
tNextFlip[0] = globalClock.getTime()
def SendMessage(message):
"""
# send message preceded by SMI code ET_REM (generic remark) and surround multi-word remarks by quotes(?)
myTracker.log(message)
logging.log(level=logging.INFO,msg=message)
# pass
"""
#"""
# Send EyeLink message
if eyelinktracker is None:
print('MSG: %s'%message)
else:
getEYELINK().sendMessage(message)
#"""
def ShowPage(iPage, maxPageTime=float('Inf'), pageFadeDur=0, soundToPlay=None):
print('Showing Page %d'%iPage)
#"""
# Start EyeLink's RealTime mode
pylink.beginRealTimeMode(100)
#"""
# Display text
imageName = '%s%s/%s_page%d.jpg'%(params['imageDir'],params['imagePrefix'],params['imagePrefix'],iPage)
textImage.setImage(imageName)
textImage.opacity = 1
textImage.draw()
while (globalClock.getTime()<tNextFlip[0]):
pass
# win.flip(clearBuffer=False)
# draw & flip
win.logOnFlip(level=logging.EXP, msg='Display Page%d'%iPage)
# win.callOnFlip(SendMessage,'Display Page%d'%iPage)
"""
win.callOnFlip(SendMessage,'DisplayPage%d'%iPage) # SPACE REMOVED FOR SMI
"""
win.callOnFlip(SendMessage,'Display Page%d'%iPage) # Regular for EyeLink
AddToFlipTime(maxPageTime)
# win.callOnFlip(SendPortEvent,mod(page,256))
if params['usePhotodiode']:
diodeSquare.draw()
win.flip()
# erase diode square and re-draw
textImage.draw()
win.flip()
# get time at which page was displayed
pageStartTime = globalClock.getTime()
# Play sound just after window flips
if soundToPlay is not None:
soundToPlay.play()
# Flush the key buffer and mouse movements
event.clearEvents()
# Wait for relevant key press or 'maxPageTime' seconds
fadeTime = tNextFlip[0]-pageFadeDur
respKey = None
while (globalClock.getTime()<tNextFlip[0]) and respKey==None:
newKeys = event.getKeys(keyList=[params['pageKey'],params['wanderKey'],'q','escape'],timeStamped=globalClock)
if len(newKeys)>0:
for thisKey in newKeys:
if thisKey[0] in ['q','escape']:
CoolDown()
elif thisKey[0] == params['pageKey']:
respKey = thisKey
SetFlipTimeToNow() # reset flip time
now = globalClock.getTime()
if now > fadeTime:
textImage.opacity = (tNextFlip[0]-now)/pageFadeDur
textImage.draw()
win.flip()
#"""
# Stop EyeLink's RealTime mode
pylink.endRealTimeMode()
#"""
# Display the fixation cross
if params['IPI']>0:
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Display Fixation')
win.callOnFlip(SendMessage,'DisplayFixation')
if params['usePhotodiode']:
diodeSquare.draw()
win.flip()
# erase diode square and re-draw
fixation.draw()
win.flip()
# return time for which page was shown
pageDur = tNextFlip[0] - pageStartTime
return pageDur
# Handle end of EyeLink session
def CoolDown():
# display cool-down message
message1.setText("That's the end! ")
message2.setText("Press 'q' or 'escape' to end the session.")
win.logOnFlip(level=logging.EXP, msg='Display TheEnd')
win.callOnFlip(SendMessage,'DisplayTheEnd')
message1.draw()
message2.draw()
win.flip()
thisKey = event.waitKeys(keyList=['q','escape'])
"""
# stop recording SMI via serial port
myTracker.stop_recording()
# save result
myTracker.save_data(path=(filename+'.idf'))
# close serial port
myTracker.cleanup()
"""
#"""
# End EyeLink recording: add 100 msec of data to catch final events
pylink.endRealTimeMode()
pumpDelay(100)
getEYELINK().stopRecording()
while getEYELINK().getkey(): # not sure what this is for
pass
# File transfer and cleanup!
getEYELINK().setOfflineMode()
msecDelay(500)
message1.setText("Sending EyeLink File...")
message2.setText("Please Wait.")
win.logOnFlip(level=logging.EXP, msg='Display SendingFile')
message1.draw()
message2.draw()
win.flip()
#Close the file and transfer it to Display PC
getEYELINK().closeDataFile()
getEYELINK().receiveDataFile(edfHostFileName, edfFileName)
getEYELINK().close();
#Close the experiment graphics
pylink.closeGraphics()
#"""
# stop sound
# fullSound.stop()
whiteNoiseSound.stop()
pageSound.stop()
# save experimental info (if we reached here, we didn't have an error)
expInfo['tSound'] = tSound
toFile(expInfoFilename, expInfo) # save params to file for next time
# exit
core.quit()
# =========================== #
# ======= RUN PROMPTS ======= #
# =========================== #
"""
# Run SMI calibration and validation
myTracker.run_calibration(nr_of_pts=params['calNPoints'], auto_accept=params['calAutoAccept'], go_fast=params['calGoFast'], calib_level=params['calCheckLevel'])
"""
#"""
#Do the EyeLink tracker setup at the beginning of the experiment.
getEYELINK().doTrackerSetup()
# START EyeLink RECORDING
error = getEYELINK().startRecording(1, 1, 1, 1)
if error:
print("===WARNING: eyelink startRecording returned %s"%error)
#"""
# display prompts
if not params['skipPrompts']:
PromptTools.RunPrompts(topPrompts,bottomPrompts,win,message1,message2)
# wait for scanner
message1.setText("Waiting for scanner to start...")
message2.setText("(Press '%c' to override.)"%params['triggerKey'].upper())
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='Display WaitingForScanner')
win.callOnFlip(SendMessage,'DisplayWaitingForScanner')
win.flip()
event.waitKeys(keyList=params['triggerKey'])
tStartSession = globalClock.getTime()
AddToFlipTime(tStartSession+params['tStartup'])
"""
# START SMI RECORDING via serial port
myTracker.start_recording(stream=False)
"""
# wait before first stimulus
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Display Fixation')
win.callOnFlip(SendMessage,'DisplayFixation')
win.flip()
# =========================== #
# ===== MAIN EXPERIMENT ===== #
# =========================== #
# set up other stuff
logging.log(level=logging.EXP, msg='---START EXPERIMENT---')
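# a single block of pages per session; the pages shown come from params['pageRange']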
nBlocks = 1
# start sound
#fullSound.play()
# Run trials
for iBlock in range(0,nBlocks): # for each block of pages
# log new block
logging.log(level=logging.EXP, msg='Start Block %d'%iBlock)
# display pages
for iPage in range(params['pageRange'][0],params['pageRange'][1]+1): # +1 to inclue final page
# decide on sound
if random.random()<=params['probSound']:
playSound = True
soundToPlay = pageSound
else:
playSound = False
soundToPlay = whiteNoiseSound
# display text
pageDur = ShowPage(iPage=iPage,maxPageTime=params['maxPageTime'],pageFadeDur=params['pageFadeDur'],soundToPlay=soundToPlay)
# update sound
soundToPlay.stop()
if playSound:
tSound += pageDur #params['maxPageTime']
logging.log(level=logging.INFO, msg='tSound: %.3f'%tSound)
pageSound = sound.Sound(value='%s%s'%(params['soundDir'], params['soundFile']), volume=params['soundVolume'], start=tSound, stop=tSound+params['maxPageTime'], name='pageSound')
if iPage < params['pageRange'][1]:
# pause
AddToFlipTime(params['IPI'])
# Mute Sounds
pageSound.setVolume(0) # mute but don't stop... save stopping for CoolDown!
whiteNoiseSound.setVolume(0) # mute but don't stop... save stopping for CoolDown!
"""
# Pause SMI recording via serial port
myTracker.pause_recording() # save stop command for CoolDown.
"""
# fullSound.setVolume(0)
# run probes
allKeys = PromptTools.RunQuestions(probe_strings,probe_options,win,message1,message2,'Probe',questionDur=params['probeDur'], isEndedByKeypress=params['keyEndsProbe'])
# check for escape keypresses
for thisKey in allKeys:
if len(thisKey)>0 and thisKey[0] in ['q', 'escape']: # check for quit keys
CoolDown()#abort experiment
# tell the subject if the lecture is over.
message1.setText("It's time for some questions! Then, after a short break, we'll continue reading where you left off.")
message2.setText("Press any key to end this recording.")
win.logOnFlip(level=logging.EXP, msg='Display TakeABreak')
win.callOnFlip(SendMessage,'DisplayTakeABreak')
message1.draw()
message2.draw()
# change the screen
win.flip()
thisKey = event.waitKeys() # any keypress will end the session
# ============================ #
# ========= RUN QUIZ ========= #
# ============================ #
# display prompts
if not params['skipPrompts']:
PromptTools.RunPrompts(topQuizPrompts,bottomQuizPrompts,win,message1,message2)
# set up other stuff
logging.log(level=logging.EXP, msg='---START QUIZ---')
# ------- Run the questions ------- #
allKeys = PromptTools.RunQuestions(questions_all,options_all,win,message1,message2,'Question',respKeys=params['respKeys'])
# --------------------------------- #
isResponse = np.zeros(len(allKeys),dtype=bool) # was any response given?
isCorrect = np.zeros(len(allKeys)) # was the response correct?
RT = np.zeros(len(allKeys)) # how long did it take them to press a key?
#print(allKeys)
for iKey in range(0,len(allKeys)):
if len(allKeys[iKey])>0:
isResponse[iKey] = 1
RT[iKey] = allKeys[iKey][1] # keep in seconds
if float(allKeys[iKey][0]) == answers_all[iKey]:
isCorrect[iKey] = 1
#give some performance output to user
print('Performance:')
print('%d/%d = %.2f%% correct' %(np.sum(isCorrect), len(isCorrect), 100*np.average(isCorrect)))
print('RT: mean = %f, std = %f' %(np.average(RT[isResponse]),np.std(RT[isResponse])))
# exit experiment
CoolDown()
| mit | 7,485,308,004,066,379,000 | 41.572034 | 266 | 0.673103 | false | 3.28799 | false | false | false |
stanford-gfx/Horus | Code/HorusApp/app/views.py | 1 | 19731 | from app import server, db, trajectoryAPI
from pylab import *
import flask
from flask import jsonify, request, url_for, redirect, render_template, abort
from flask.ext import restful
import requests
import math, time
import urllib2
import json
import os
from os import path
starting_lat = 0
starting_lng = 0
vehicle_millis = 0
current_lat = 0
current_lng = 0
armed = False
mode = "NOT CONNECTED"
real_elapsed_time = -1
TIMEOUT_MILLIS = 5000
# TEMPLATED HTML ROUTE
@server.route('/')
@server.route('/index')
def index():
shots = db.get_shots()
return render_template('index.html', shots=shots)
# TEMPLATED HTML ROUTE
@server.route('/easing_curve')
def easing_curve():
shots = db.get_shots()
return render_template('easing_curve.html', shots=shots)
# TEXT ROUTE
@server.route('/edit')
def edit():
return render_template('edit.html')
@server.route('/api/get_keyframes.json', methods = ['POST'])
def get_keyframes():
print request.get_json()
return jsonify(request.json)
# Save a shot
@server.route('/api/set_shot', methods = ['POST'])
def set_shot():
parsed_json = request.get_json()
data = request.data
shotname = parsed_json['shotName']
db.set_shot(shotname, data)
return jsonify({
'test':1
})
# Load a shot
@server.route('/api/get_shot', methods = ['GET'])
def get_shot():
shotname = request.args.get('shot')
rev = request.args.get('rev')
if not shotname:
return abort(404)
data = None
revCount = 1
if rev:
data, revCount = db.get_shot(shotname, int(rev))
else:
data, revCount = db.get_shot(shotname)
if data:
return flask.Response(response = data,
status=200,
mimetype="application/json")
else:
abort(404)
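# Hypothetical round trip against the two endpoints above (host, port and
# shot name are illustrative, not part of this module):
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"shotName": "demo", ...}' http://localhost:5000/api/set_shot
#   curl 'http://localhost:5000/api/get_shot?shot=demo&rev=1'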
# checks if name is unique
@server.route('/api/is_name_available', methods = ['GET'])
def is_name_available():
shotname = request.args.get('name')
valid = not db.shot_exists(shotname)
print("shotname: " + shotname + " is free? : %s" % (valid))
data = jsonify({"valid": valid})
return data
@server.route('/api/get_log', methods = ['GET'])
def get_log():
shotname = request.args.get('shot')
if not shotname:
return abort(404)
data = db.get_log(shotname)
if data:
return jsonify(data)
else:
abort(404)
@server.route('/api/get_easing_curve', methods = ['POST'])
def get_easing_curve():
js = request.get_json()
tvals = array(js['t'])
dlist = array(js['d'])
P = c_[dlist]
T = c_[tvals]
C,T,sd = trajectoryAPI.compute_easing_curve(P, T)
data = {
'C':C.tolist(),
'T':T.tolist(),
}
return jsonify(data)
# Get a spline
@server.route('/api/get_spline', methods = ['POST'])
def get_spline():
parsed_json = request.get_json()
#data = request.data
#camera lla, lookat lla
cameraPose_lat_list = parsed_json['cameraPoseLats']
cameraPose_lng_list = parsed_json['cameraPoseLngs']
cameraPose_alt_list = parsed_json['cameraPoseAlts']
lookAt_lat_list = parsed_json['lookAtLats']
lookAt_lng_list = parsed_json['lookAtLngs']
lookAt_alt_list = parsed_json['lookAtAlts']
P_cameraPose = c_[cameraPose_lat_list, cameraPose_lng_list, cameraPose_alt_list]
C_cameraPose,T_cameraPose,sd_cameraPose,dist_cameraPose = trajectoryAPI.compute_spatial_trajectory_and_arc_distance(P_cameraPose, inNED=False)
P_lookAt = c_[lookAt_lat_list, lookAt_lng_list, lookAt_alt_list]
C_lookAt,T_lookAt,sd_lookAt,dist_lookAt = trajectoryAPI.compute_spatial_trajectory_and_arc_distance(P_lookAt, inNED=False)
#P_eval, T_eval, dT = splineutils.evaluate_catmull_rom_spline(C, T, sd, num_samples=200);
data = {
'cameraPoseCoeff': C_cameraPose.tolist(),
'cameraPoseTvals': T_cameraPose.tolist(),
'cameraPoseDist' : dist_cameraPose.tolist(),
'lookAtCoeff': C_lookAt.tolist(),
'lookAtTvals': T_lookAt.tolist(),
'lookAtDist' : dist_lookAt.tolist()
}
return jsonify(data)
# Get a spline
@server.route('/api/get_spline_ned', methods = ['POST'])
def get_spline_ned():
js = request.get_json()
lookAtN = js['lookAtN']
lookAtE = js['lookAtE']
lookAtD = js['lookAtD']
lookFromN = js['lookFromN']
lookFromE = js['lookFromE']
lookFromD = js['lookFromD']
P_lookFromNED = c_[lookFromN, lookFromE, lookFromD]
C_lookFromNED,T_lookFromNED,sd_lookFromNED,dist_lookFromNED = trajectoryAPI.compute_spatial_trajectory_and_arc_distance(P_lookFromNED)
P_lookAtNED = c_[lookAtN, lookAtE, lookAtD]
C_lookAtNED,T_lookAtNED,sd_lookAtNED,dist_lookAtNED = trajectoryAPI.compute_spatial_trajectory_and_arc_distance(P_lookAtNED)
data = {
'C_lookFromNED': C_lookFromNED.tolist(),
'T_lookFromNED': T_lookFromNED.tolist(),
'dist_lookFromNED': dist_lookFromNED.tolist(),
'C_lookAtNED': C_lookAtNED.tolist(),
'T_lookAtNED': T_lookAtNED.tolist(),
'dist_lookAtNED': dist_lookAtNED.tolist()
}
return jsonify(data)
@server.route('/api/reparameterize_spline_ned', methods = ['POST'])
def reparameterize_spline_ned():
js = request.get_json()
lookAtN = js['lookAtN']
lookAtE = js['lookAtE']
lookAtD = js['lookAtD']
lookFromN = js['lookFromN']
lookFromE = js['lookFromE']
lookFromD = js['lookFromD']
P_lookFromNED = c_[lookFromN, lookFromE, lookFromD]
T_lookFromNED = c_[js['lookFromT'], js['lookFromT'], js['lookFromT']]
P_easingLookFrom = c_[array(js['lookFromEasingD'])]
T_easingLookFrom = c_[array(js['lookFromEasingT'])]
P_lookAtNED = c_[lookAtN, lookAtE, lookAtD]
T_lookAtNED = c_[js['lookAtT'], js['lookAtT'], js['lookAtT']]
P_easingLookAt = c_[array(js['lookAtEasingD'])]
T_easingLookAt = c_[array(js['lookAtEasingT'])]
T_linspace_norm_lookAt, T_user_progress_lookAt, P_user_progress_lookAt, ref_llh_lookAt = trajectoryAPI.reparameterize_spline(P_lookAtNED, T_lookAtNED, P_easingLookAt, T_easingLookAt)
T_linspace_norm_cameraPose, T_user_progress_lookFrom, P_user_progress_lookFrom, ref_llh_lookFrom = trajectoryAPI.reparameterize_spline(P_lookFromNED, T_lookFromNED, P_easingLookFrom, T_easingLookFrom)
data = {
'lookAtReparameterizedT': T_user_progress_lookAt.tolist(),
'reparameterizedTime': T_linspace_norm_lookAt.tolist(),
'lookFromReparameterizedT': T_user_progress_lookFrom.tolist(),
}
return jsonify(data)
@server.route('/api/reparameterize_spline', methods = ['POST'])
def reparameterize_spline():
js = request.get_json()
cameraPose_lat_list = js['cameraPoseLats']
cameraPose_lng_list = js['cameraPoseLngs']
cameraPose_alt_list = js['cameraPoseAlts']
lookAt_lat_list = js['lookAtLats']
lookAt_lng_list = js['lookAtLngs']
lookAt_alt_list = js['lookAtAlts']
T_cameraPose = c_[js['cameraPoseTvals'], js['cameraPoseTvals'], js['cameraPoseTvals']]
T_lookAt = c_[js['lookAtTvals'], js['lookAtTvals'], js['lookAtTvals']]
lookAt_easing_tvals = array(js['lookAtEasingT'])
lookAt_easing_dlist = array(js['lookAtEasingD'])
cameraPose_easing_tvals = array(js['cameraPoseEasingT'])
cameraPose_easing_dlist = array(js['cameraPoseEasingD'])
P_easingCameraPose = c_[cameraPose_easing_dlist]
T_easingCameraPose = c_[cameraPose_easing_tvals]
P_easingLookAt = c_[lookAt_easing_dlist]
T_easingLookAt = c_[lookAt_easing_tvals]
P_cameraPose = c_[cameraPose_lat_list, cameraPose_lng_list, cameraPose_alt_list]
P_lookAt = c_[lookAt_lat_list, lookAt_lng_list, lookAt_alt_list]
T_linspace_norm_lookAt, T_user_progress_lookAt, P_user_progress_lookAt, ref_llh_lookAt = trajectoryAPI.reparameterize_spline(P_lookAt, T_lookAt, P_easingLookAt, T_easingLookAt)
T_linspace_norm_cameraPose, T_user_progress_lookFrom, P_user_progress_lookFrom, ref_llh_lookFrom = trajectoryAPI.reparameterize_spline(P_cameraPose, T_cameraPose, P_easingCameraPose, T_easingCameraPose)
data = {
'lookAtReparameterizedT': T_user_progress_lookAt.tolist(),
'reparameterizedTime': T_linspace_norm_lookAt.tolist(),
'lookFromReparameterizedT': T_user_progress_lookFrom.tolist(),
}
return jsonify(data)
@server.route('/api/export_spline_to_quad_representation_ned', methods = ['POST'])
def export_spline_to_quad_representation_ned():
    # FIXME: figure out which of these inputs is failing here
shot = request.args.get('shot', 0)
if not shot:
return
js = request.get_json()
lookAtN = js['lookAtN']
lookAtE = js['lookAtE']
lookAtD = js['lookAtD']
lookFromN = js['lookFromN']
lookFromE = js['lookFromE']
lookFromD = js['lookFromD']
# Exported Values
P_lookFromNED_spline = c_[lookFromN, lookFromE, lookFromD]
T_lookFromNED_spline = c_[js['lookFromT'], js['lookFromT'], js['lookFromT']]
P_lookFromNED_ease = c_[array(js['lookFromEasingD'])]
T_lookFromNED_ease = c_[array(js['lookFromEasingT'])]
P_lookAtNED_spline = c_[lookAtN, lookAtE, lookAtD]
T_lookAtNED_spline = c_[js['lookAtT'], js['lookAtT'], js['lookAtT']]
P_lookAtNED_ease = c_[array(js['lookAtEasingD'])]
T_lookAtNED_ease = c_[array(js['lookAtEasingT'])]
startAltitude = js['startAltitude']
    lastTime = js['lastTime']
    rev = js['rev']
refLLH = array([js['refLLH']['lat'], js['refLLH']['lng'], js['refLLH']['altitude']])
P = np.array([
P_lookFromNED_spline,
T_lookFromNED_spline,
P_lookFromNED_ease,
T_lookFromNED_ease,
P_lookAtNED_spline,
T_lookAtNED_spline,
P_lookAtNED_ease,
T_lookAtNED_ease,
[lastTime],
[startAltitude],
[refLLH]
])
# First Save, for later analysis!!!
millis = int(round(time.time() * 1000))
np.savez(("shot-%s-rev%s-%d" % (shot, rev, millis)),
P_lookFromNED_spline=P_lookFromNED_spline,
T_lookFromNED_spline=T_lookFromNED_spline,
P_lookFromNED_ease=P_lookFromNED_ease,
T_lookFromNED_ease=T_lookFromNED_ease,
P_lookAtNED_spline=P_lookAtNED_spline,
T_lookAtNED_spline=T_lookAtNED_spline,
P_lookAtNED_ease=P_lookAtNED_ease,
T_lookAtNED_ease=T_lookAtNED_ease,
lastTime=[lastTime],
startAltitude=[startAltitude],
refLLH=[refLLH])
export_data = {
"command" : js['command'],
"P_lookFromNED_spline": P_lookFromNED_spline.tolist(),
"T_lookFromNED_spline": T_lookFromNED_spline.tolist(),
"P_lookFromNED_ease": P_lookFromNED_ease.tolist(),
"T_lookFromNED_ease": T_lookFromNED_ease.tolist(),
"P_lookAtNED_spline": P_lookAtNED_spline.tolist(),
"T_lookAtNED_spline": T_lookAtNED_spline.tolist(),
"P_lookAtNED_ease": P_lookAtNED_ease.tolist(),
"T_lookAtNED_ease": T_lookAtNED_ease.tolist(),
"lastTime": [lastTime],
"startAltitude": [startAltitude],
"refLLH": c_[refLLH].tolist()
}
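    # Note: the raw request body (js) is forwarded below; the export_data
    # dict built above is not sent anywhere in this handler.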
req = urllib2.Request("http://localhost:9000", json.dumps(js), {'Content-Type': 'application/json'})
f = urllib2.urlopen(req)
res = f.read()
f.close()
return jsonify({'result':'ok'})
@server.route('/api/export_spline_to_quad_representation', methods = ['POST'])
def export_spline_to_quad_representation():
js = request.get_json()
cameraPose_lat_list = js['cameraPoseLats']
cameraPose_lng_list = js['cameraPoseLngs']
cameraPose_alt_list = js['cameraPoseAlts']
lookAt_lat_list = js['lookAtLats']
lookAt_lng_list = js['lookAtLngs']
lookAt_alt_list = js['lookAtAlts']
lookAt_easing_tvals = array(js['lookAtEasingT'])
lookAt_easing_dlist = array(js['lookAtEasingD'])
cameraPose_easing_tvals = array(js['cameraPoseEasingT'])
cameraPose_easing_dlist = array(js['cameraPoseEasingD'])
# Exported Values
P_lookFrom_spline = c_[cameraPose_lat_list, cameraPose_lng_list, cameraPose_alt_list]
T_lookFrom_spline = c_[js['cameraPoseTvals'], js['cameraPoseTvals'], js['cameraPoseTvals']]
P_lookFrom_ease = c_[cameraPose_easing_dlist]
T_lookFrom_ease = c_[cameraPose_easing_tvals]
P_lookAt_spline = c_[lookAt_lat_list, lookAt_lng_list, lookAt_alt_list]
T_lookAt_spline = c_[js['lookAtTvals'], js['lookAtTvals'], js['lookAtTvals']]
P_lookAt_ease = c_[lookAt_easing_dlist]
T_lookAt_ease = c_[lookAt_easing_tvals]
    lastTime = js['lastTime']
millis = int(round(time.time() * 1000))
np.savez(("shot-%d" % millis),
P_lookFrom_spline=P_lookFrom_spline,
T_lookFrom_spline=T_lookFrom_spline,
P_lookFrom_ease=P_lookFrom_ease,
T_lookFrom_ease=T_lookFrom_ease,
P_lookAt_spline=P_lookAt_spline,
T_lookAt_spline=T_lookAt_spline,
P_lookAt_ease=P_lookAt_ease,
T_lookAt_ease=T_lookAt_ease,
lastTime=[lastTime])
P = np.array([
P_lookFrom_spline,
T_lookFrom_spline,
P_lookFrom_ease,
T_lookFrom_ease,
P_lookAt_spline,
T_lookAt_spline,
P_lookAt_ease,
T_lookAt_ease,
[lastTime]
])
    export_data = {
        "command": js['command'],
        "P_lookFrom_spline": P_lookFrom_spline.tolist(),
        "T_lookFrom_spline": T_lookFrom_spline.tolist(),
        "P_lookFrom_ease": P_lookFrom_ease.tolist(),
        "T_lookFrom_ease": T_lookFrom_ease.tolist(),
        "P_lookAt_spline": P_lookAt_spline.tolist(),
        "T_lookAt_spline": T_lookAt_spline.tolist(),
        "P_lookAt_ease": P_lookAt_ease.tolist(),
        "T_lookAt_ease": T_lookAt_ease.tolist(),
        "lastTime": [lastTime]}
    print export_data
    headers = {'content-type': 'application/json'}
    # jsonify() builds a Flask response object and is not a serializer for an
    # outbound request; numpy arrays are converted with .tolist() above and
    # the dict is serialized with json.dumps() here.
    r = requests.post("http://localhost:9000", data=json.dumps(export_data), headers=headers)
return jsonify({'result':'ok'})
@server.route('/api/calculate_feasibility_ned', methods = ['POST'])
def calculate_feasibility_ned():
js = request.get_json()
lookAtN = js['lookAtN']
lookAtE = js['lookAtE']
lookAtD = js['lookAtD']
lookFromN = js['lookFromN']
lookFromE = js['lookFromE']
lookFromD = js['lookFromD']
# Exported Values
P_lookFromNED_spline = c_[lookFromN, lookFromE, lookFromD]
T_lookFromNED_spline = c_[js['lookFromT'], js['lookFromT'], js['lookFromT']]
P_lookFromNED_ease = c_[array(js['lookFromEasingD'])]
T_lookFromNED_ease = c_[array(js['lookFromEasingT'])]
P_lookAtNED_spline = c_[lookAtN, lookAtE, lookAtD]
T_lookAtNED_spline = c_[js['lookAtT'], js['lookAtT'], js['lookAtT']]
P_lookAtNED_ease = c_[array(js['lookAtEasingD'])]
T_lookAtNED_ease = c_[array(js['lookAtEasingT'])]
refLLH = js['refLLH']
total_time = js['totalShotTime']
# make a call to the trajectoryAPI
u_nominal, p_body_nominal, p_body_dot_nominal, p_body_dot_dot_nominal, theta_body_nominal, phi_body_nominal, theta_cam_nominal, theta_cam_dot_nominal, psi_cam_nominal, phi_cam_nominal, phi_cam_dot_nominal = trajectoryAPI.calculate_feasibility_ned(P_lookFromNED_spline, T_lookFromNED_spline, P_lookAtNED_spline, T_lookAtNED_spline, P_lookFromNED_ease, T_lookFromNED_ease, P_lookAtNED_ease, T_lookAtNED_ease, total_time, refLLH);
data = {
'u_nominal': u_nominal.tolist(),
'p_body_nominal': p_body_nominal.tolist(),
'p_body_dot_nominal': p_body_dot_nominal.tolist(),
'p_body_dot_dot_nominal': p_body_dot_dot_nominal.tolist(),
'theta_body_nominal': theta_body_nominal.tolist(),
'phi_body_nominal': phi_body_nominal.tolist(),
'theta_cam_nominal': theta_cam_nominal.tolist(),
'theta_cam_dot_nominal': theta_cam_dot_nominal.tolist(),
'psi_cam_nominal': psi_cam_nominal.tolist(),
'phi_cam_nominal': phi_cam_nominal.tolist(),
'phi_cam_dot_nominal': phi_cam_dot_nominal.tolist(),
}
return jsonify(data)
@server.route('/api/calculate_feasibility', methods = ['POST'])
def calculate_feasibility():
js = request.get_json()
cameraPose_lat_list = js['cameraPoseLats']
cameraPose_lng_list = js['cameraPoseLngs']
cameraPose_alt_list = js['cameraPoseAlts']
lookAt_lat_list = js['lookAtLats']
lookAt_lng_list = js['lookAtLngs']
lookAt_alt_list = js['lookAtAlts']
T_cameraPose = c_[js['cameraPoseTvals'], js['cameraPoseTvals'], js['cameraPoseTvals']]
T_lookAt = c_[js['lookAtTvals'], js['lookAtTvals'], js['lookAtTvals']]
lookAt_easing_tvals = array(js['lookAtEasingT'])
lookAt_easing_dlist = array(js['lookAtEasingD'])
cameraPose_easing_tvals = array(js['cameraPoseEasingT'])
cameraPose_easing_dlist = array(js['cameraPoseEasingD'])
P_easingCameraPose = c_[cameraPose_easing_dlist]
T_easingCameraPose = c_[cameraPose_easing_tvals]
P_easingLookAt = c_[lookAt_easing_dlist]
T_easingLookAt = c_[lookAt_easing_tvals]
P_cameraPose = c_[cameraPose_lat_list, cameraPose_lng_list, cameraPose_alt_list]
P_lookAt = c_[lookAt_lat_list, lookAt_lng_list, lookAt_alt_list]
total_time = js['totalShotTime']
# make a call to the trajectoryAPI
u_nominal, p_body_nominal, p_body_dot_nominal, p_body_dot_dot_nominal, theta_body_nominal, phi_body_nominal, theta_cam_nominal, theta_cam_dot_nominal, psi_cam_nominal, phi_cam_nominal, phi_cam_dot_nominal = trajectoryAPI.calculate_feasibility(P_cameraPose, T_cameraPose, P_lookAt, T_lookAt, P_easingCameraPose, T_easingCameraPose, P_easingLookAt, T_easingLookAt, total_time)
data = {
'u_nominal': u_nominal.tolist(),
'p_body_nominal': p_body_nominal.tolist(),
'p_body_dot_nominal': p_body_dot_nominal.tolist(),
'p_body_dot_dot_nominal': p_body_dot_dot_nominal.tolist(),
'theta_body_nominal': theta_body_nominal.tolist(),
'phi_body_nominal': phi_body_nominal.tolist(),
'theta_cam_nominal': theta_cam_nominal.tolist(),
'theta_cam_dot_nominal': theta_cam_dot_nominal.tolist(),
'psi_cam_nominal': psi_cam_nominal.tolist(),
'phi_cam_nominal': phi_cam_nominal.tolist(),
'phi_cam_dot_nominal': phi_cam_dot_nominal.tolist(),
}
return jsonify(data)
@server.route('/api/get_fov.kml', methods = ['GET'])
def get_fov():
GoProView = request.args.get('GoProView')
GoProFOV = {'NARROW':64.4, 'MEDIUM':94.4, 'WIDE':118.2}
if GoProView not in GoProFOV:
GoProView = 'WIDE'
fov = GoProFOV[GoProView]
lat = request.args.get('lat') or 37.42726975867168
lng = request.args.get('lng') or -122.16676019825722
altitude = request.args.get('altitude') or 125
heading = request.args.get('heading') or -31.127314342134174
tilt = request.args.get('tilt') or 51.24538395621526
view = {'lng':lng, 'lat':lat, 'altitude':altitude, 'heading': heading, 'tilt': tilt, 'fov':fov}
return render_template('fov.kml', view=view)
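# Hypothetical example request (omitted parameters fall back to the defaults
# above):
#   curl 'http://localhost:5000/api/get_fov.kml?GoProView=MEDIUM&lat=37.42&lng=-122.16'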
@server.route('/api/set_vehicle_location', methods = ['GET'])
def set_vehicle_location():
global starting_lat
global starting_lng
global vehicle_millis
global current_lat
global current_lng
global mode
global armed
vehicle_millis = int(round(time.time() * 1000))
armed = (request.args.get('armed') == 'True')
mode = request.args.get('mode')
if armed:
current_lat = request.args.get('lat', 0)
current_lng = request.args.get('lng', 0)
else:
starting_lat = request.args.get('lat', 0)
starting_lng = request.args.get('lng', 0)
return "OK"
@server.route('/api/get_vehicle_pos', methods= ['GET'])
def get_vehicle_pos():
    global starting_lat
    global starting_lng
    global vehicle_millis
    global current_lat
    global current_lng
    global mode
    global armed
current_millis = int(round(time.time() * 1000))
success = "success"
if current_millis - vehicle_millis > TIMEOUT_MILLIS:
mode = "NOT CONNECTED"
armed = False
starting_lat = starting_lng = 0
success = 'no data'
data = {'status':success, 'starting_lat':starting_lat, 'starting_lng':starting_lng, 'current_lat':current_lat, 'current_lng':current_lng, 'mode':mode}
return jsonify(data)
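# Sample response shape (illustrative values only):
#   {"status": "success", "starting_lat": 37.42, "starting_lng": -122.16,
#    "current_lat": 37.43, "current_lng": -122.17, "mode": "GUIDED"}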
@server.route('/api/set_elapsed_time', methods = ['GET'])
def set_elapsed_time():
global real_elapsed_time
real_elapsed_time = request.args.get('elapsed', -1)
return "OK"
@server.route('/api/get_elapsed_time', methods= ['GET'])
def get_elapsed_time():
data = {'status':'no data'}
if real_elapsed_time != -1:
data = {'status':'success', 'elapsed':real_elapsed_time}
return jsonify(data)
| bsd-3-clause | 2,205,061,663,268,043,300 | 33.07772 | 431 | 0.67959 | false | 2.738515 | false | false | false |
fake-name/ReadableWebProxy | amqpstorm/channel0.py | 1 | 6351 | """AMQPStorm Connection.Channel0."""
import logging
import platform
from pamqp import specification
from pamqp.heartbeat import Heartbeat
from amqpstorm import __version__
from amqpstorm.base import AUTH_MECHANISM
from amqpstorm.base import FRAME_MAX
from amqpstorm.base import LOCALE
from amqpstorm.base import MAX_CHANNELS
from amqpstorm.base import Stateful
from amqpstorm.compatibility import try_utf8_decode
from amqpstorm.exception import AMQPConnectionError
LOGGER = logging.getLogger(__name__)
class Channel0(object):
"""Internal Channel0 handler."""
def __init__(self, connection):
super(Channel0, self).__init__()
self.is_blocked = False
self.server_properties = {}
self._connection = connection
self._heartbeat = connection.parameters['heartbeat']
self._parameters = connection.parameters
def on_frame(self, frame_in):
"""Handle frames sent to Channel0.
:param frame_in: Amqp frame.
:return:
"""
LOGGER.debug('Frame Received: %s', frame_in.name)
if frame_in.name == 'Heartbeat':
return
elif frame_in.name == 'Connection.Close':
self._close_connection(frame_in)
elif frame_in.name == 'Connection.CloseOk':
self._close_connection_ok()
elif frame_in.name == 'Connection.Blocked':
self._blocked_connection(frame_in)
elif frame_in.name == 'Connection.Unblocked':
self._unblocked_connection()
elif frame_in.name == 'Connection.OpenOk':
self._set_connection_state(Stateful.OPEN)
elif frame_in.name == 'Connection.Start':
self.server_properties = frame_in.server_properties
self._send_start_ok(frame_in)
elif frame_in.name == 'Connection.Tune':
self._send_tune_ok()
self._send_open_connection()
else:
LOGGER.error('[Channel0] Unhandled Frame: %s', frame_in.name)
def send_close_connection(self):
"""Send Connection Close frame.
:return:
"""
self._write_frame(specification.Connection.Close())
def send_heartbeat(self):
"""Send Heartbeat frame.
:return:
"""
if not self._connection.is_open:
return
self._write_frame(Heartbeat())
def _close_connection(self, frame_in):
"""Connection Close.
:param specification.Connection.Close frame_in: Amqp frame.
:return:
"""
self._set_connection_state(Stateful.CLOSED)
if frame_in.reply_code != 200:
reply_text = try_utf8_decode(frame_in.reply_text)
message = (
'Connection was closed by remote server: %s' % reply_text
)
exception = AMQPConnectionError(message,
reply_code=frame_in.reply_code)
self._connection.exceptions.append(exception)
def _close_connection_ok(self):
"""Connection CloseOk frame received.
:return:
"""
self._set_connection_state(Stateful.CLOSED)
def _blocked_connection(self, frame_in):
"""Connection is Blocked.
:param frame_in:
:return:
"""
self.is_blocked = True
LOGGER.warning(
'Connection is blocked by remote server: %s',
try_utf8_decode(frame_in.reason)
)
def _unblocked_connection(self):
"""Connection is Unblocked.
:return:
"""
self.is_blocked = False
LOGGER.info('Connection is no longer blocked by remote server')
def _plain_credentials(self):
"""AMQP Plain Credentials.
:rtype: str
"""
return '\0%s\0%s' % (self._parameters['username'],
self._parameters['password'])
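    # For example, username 'guest' with password 'guest' serializes to
    # '\x00guest\x00guest', the NUL-delimited form expected by the PLAIN
    # SASL mechanism.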
def _send_start_ok(self, frame_in):
"""Send Start OK frame.
:param specification.Connection.Start frame_in: Amqp frame.
:return:
"""
if 'PLAIN' not in try_utf8_decode(frame_in.mechanisms):
exception = AMQPConnectionError(
'Unsupported Security Mechanism(s): %s' %
frame_in.mechanisms
)
self._connection.exceptions.append(exception)
return
credentials = self._plain_credentials()
start_ok_frame = specification.Connection.StartOk(
mechanism=AUTH_MECHANISM,
client_properties=self._client_properties(),
response=credentials,
locale=LOCALE
)
self._write_frame(start_ok_frame)
def _send_tune_ok(self):
"""Send Tune OK frame.
:return:
"""
tune_ok_frame = specification.Connection.TuneOk(
channel_max=MAX_CHANNELS,
frame_max=FRAME_MAX,
heartbeat=self._heartbeat)
self._write_frame(tune_ok_frame)
def _send_open_connection(self):
"""Send Open Connection frame.
:return:
"""
open_frame = specification.Connection.Open(
virtual_host=self._parameters['virtual_host']
)
self._write_frame(open_frame)
def _set_connection_state(self, state):
"""Set Connection state.
:param state:
:return:
"""
self._connection.set_state(state)
def _write_frame(self, frame_out):
"""Write a pamqp frame from Channel0.
:param frame_out: Amqp frame.
:return:
"""
self._connection.write_frame(0, frame_out)
LOGGER.debug('Frame Sent: %s', frame_out.name)
@staticmethod
def _client_properties():
"""AMQPStorm Client Properties.
:rtype: dict
"""
return {
'product': 'AMQPStorm',
'platform': 'Python %s (%s)' % (platform.python_version(),
platform.python_implementation()),
'capabilities': {
'basic.nack': True,
'connection.blocked': True,
'publisher_confirms': True,
'consumer_cancel_notify': True,
'authentication_failure_close': True,
},
'information': 'See https://github.com/eandersson/amqpstorm',
'version': __version__
}
| bsd-3-clause | -8,175,721,914,685,432,000 | 29.830097 | 78 | 0.571406 | false | 4.167323 | false | false | false |
benjolitz/trollius-redis | trollius_redis/encoders.py | 1 | 2145 | """
The redis protocol only knows about bytes, but we like to have strings inside
Python. This file contains some helper classes for decoding the bytes to
strings and encoding the other way around. We also have a `BytesEncoder`, which
provides raw access to the redis server.
"""
__all__ = (
'BaseEncoder',
'BytesEncoder',
'UTF8Encoder',
)
import six
class BaseEncoder(object):
"""
Abstract base class for all encoders.
"""
#: The native Python type from which we encode, or to which we decode.
native_type = None
def encode_from_native(self, data):
"""
Encodes the native Python type to network bytes.
Usually this will encode a string object to bytes using the UTF-8
encoding. You can either override this function, or set the
`encoding` attribute.
"""
raise NotImplementedError
def decode_to_native(self, data):
"""
Decodes network bytes to a Python native type.
It should always be the reverse operation of `encode_from_native`.
"""
raise NotImplementedError
class BytesEncoder(BaseEncoder):
"""
For raw access to the Redis database.
"""
#: The native Python type from which we encode, or to which we decode.
native_type = six.binary_type
def encode_from_native(self, data):
return data
def decode_to_native(self, data):
return data
class StringEncoder(BaseEncoder):
"""
Abstract base class for all string encoding encoders.
"""
#: Redis keeps all values in binary. Set the encoding to be used to
#: decode/encode Python string values from and to binary.
encoding = None
#: The native Python type from which we encode, or to which we decode.
native_type = six.text_type
def encode_from_native(self, data):
""" string to bytes """
return data.encode(self.encoding)
def decode_to_native(self, data):
""" bytes to string """
return data.decode(self.encoding)
class UTF8Encoder(StringEncoder):
"""
Encode strings to and from utf-8 bytes.
"""
encoding = 'utf-8'
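if __name__ == '__main__':
    # Minimal round-trip sketch (not part of the library API): UTF8Encoder
    # turns native text into UTF-8 bytes and back.
    encoder = UTF8Encoder()
    raw = encoder.encode_from_native(u'caf\xe9')  # -> b'caf\xc3\xa9'
    assert encoder.decode_to_native(raw) == u'caf\xe9'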
| bsd-2-clause | -7,322,729,814,872,177,000 | 26.5 | 79 | 0.653613 | false | 4.333333 | false | false | false |
kHarshit/DAT210x_Microsoft | Module2/assignment3.py | 1 | 1178 | import pandas as pd
# TODO: Load up the dataset, ensuring you set the appropriate header column names
df = pd.read_csv('Datasets/servo.data', header=None,
                 names=['motor', 'screw', 'pgain', 'vgain', 'class'])
print(df.describe())
# TODO: Create a slice that contains all entries having a vgain equal to 5. Then print the length of(# of samples in) that slice:
k = df[df.iloc[:, 3] == 5]
print(k.describe())
print(len(k))
# TODO: Create a slice that contains all entries having a motor equal to E and screw equal
# to E. Then print the length of (# of samples in) that slice:
print(df[(df.iloc[:, 0] == 'E') & (df.iloc[:, 1] == 'E')])
l = df[(df['motor'] == 'E') & (df['screw'] == 'E')]
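# Note: the parentheses around each comparison are required because '&'
# binds more tightly than '==' in Python.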
print(l.describe())
print(len(l))  # the answer should be 6; passing header=None to read_csv() keeps the first data row
# TODO: Create a slice that contains all entries having a pgain equal to 4. Use one of the various methods of finding
# the mean vgain value for the samples in that slice. Once you've found it, print it:
m = df[df.pgain == 4]
print(m.mean())
print(m.vgain.mean())
# TODO: (Bonus) See what happens when you run the .dtypes method on your dataframe!
print(df.dtypes)
| mit | 6,821,107,749,193,167,000 | 34.69697 | 129 | 0.684211 | false | 3.192412 | false | false | false |
upptalk/uppsell | uppsell/migrations/0002_auto__add_unique_store_code__add_field_listing_price__chg_field_listin.py | 1 | 19453 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'Store', fields ['code']
db.create_unique('stores', ['code'])
# Adding field 'Listing.price'
db.add_column('listings', 'price',
self.gf('django.db.models.fields.DecimalField')(default=0.0, max_digits=8, decimal_places=2),
keep_default=False)
# Changing field 'Listing.subtitle'
db.alter_column('listings', 'subtitle', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
# Changing field 'Listing.description'
db.alter_column('listings', 'description', self.gf('django.db.models.fields.CharField')(max_length=10000, null=True))
# Changing field 'Listing.title'
db.alter_column('listings', 'title', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
# Changing field 'Listing.name'
db.alter_column('listings', 'name', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
# Adding field 'Product.provisioning_codes'
db.add_column('products', 'provisioning_codes',
self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Removing unique constraint on 'Store', fields ['code']
db.delete_unique('stores', ['code'])
# Deleting field 'Listing.price'
db.delete_column('listings', 'price')
# Changing field 'Listing.subtitle'
db.alter_column('listings', 'subtitle', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
# Changing field 'Listing.description'
db.alter_column('listings', 'description', self.gf('django.db.models.fields.CharField')(default='', max_length=10000))
# Changing field 'Listing.title'
db.alter_column('listings', 'title', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
# Changing field 'Listing.name'
db.alter_column('listings', 'name', self.gf('django.db.models.fields.CharField')(default='', max_length=200))
# Deleting field 'Product.provisioning_codes'
db.delete_column('products', 'provisioning_codes')
models = {
u'uppsell.address': {
'Meta': {'object_name': 'Address', 'db_table': "'addresses'"},
'city': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_used': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'other': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'province': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'uppsell.cart': {
'Meta': {'object_name': 'Cart', 'db_table': "'carts'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Listing']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Store']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'uppsell.cartitem': {
'Meta': {'object_name': 'CartItem', 'db_table': "'cart_items'"},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Cart']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Listing']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'uppsell.coupon': {
'Meta': {'object_name': 'Coupon', 'db_table': "'coupons'"},
'code': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']", 'null': 'True', 'blank': 'True'}),
'discount_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'discount_pct': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_uses': ('django.db.models.fields.PositiveIntegerField', [], {}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Listing']", 'null': 'True', 'blank': 'True'}),
'product_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.ProductGroup']", 'null': 'True', 'blank': 'True'}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'remaining': ('django.db.models.fields.PositiveIntegerField', [], {}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Store']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'valid_until': ('django.db.models.fields.DateTimeField', [], {})
},
u'uppsell.couponspend': {
'Meta': {'unique_together': "(('customer', 'coupon'),)", 'object_name': 'CouponSpend', 'db_table': "'coupon_spends'"},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Coupon']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'uppsell.customer': {
'Meta': {'object_name': 'Customer', 'db_table': "'customers'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_logged_in_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'uppsell.invoice': {
'Meta': {'object_name': 'Invoice', 'db_table': "'invoices'"},
'billing_address': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'order_shipping_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'order_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'payment_made_ts': ('django.db.models.fields.DateTimeField', [], {}),
'product_id': ('django.db.models.fields.IntegerField', [], {}),
'psp_id': ('django.db.models.fields.IntegerField', [], {}),
'psp_response_code': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'psp_response_text': ('django.db.models.fields.CharField', [], {'max_length': '10000'}),
'psp_type': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shipping_address': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'store_id': ('django.db.models.fields.IntegerField', [], {}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user_email': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user_fullname': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'user_jid': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'user_mobile_msisdn': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'uppsell.linkedaccount': {
'Meta': {'object_name': 'LinkedAccount', 'db_table': "'linked_accounts'"},
'account_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
'linked_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.LinkedAccountType']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'uppsell.linkedaccounttype': {
'Meta': {'object_name': 'LinkedAccountType', 'db_table': "'linked_account_types'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'uppsell.listing': {
'Meta': {'object_name': 'Listing', 'db_table': "'listings'"},
'description': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '8', 'decimal_places': '2'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Product']"}),
'sales_tax_rate': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Store']"}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'uppsell.order': {
'Meta': {'object_name': 'Order', 'db_table': "'orders'"},
'billing_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'billing_address'", 'null': 'True', 'to': u"orm['uppsell.Address']"}),
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Coupon']", 'null': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'customer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Customer']"}),
'fraud_state': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order_shipping_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'order_state': ('django.db.models.fields.CharField', [], {'default': "'init'", 'max_length': '30'}),
'order_total': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'payment_made_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'payment_state': ('django.db.models.fields.CharField', [], {'default': "'init'", 'max_length': '30'}),
'shipping_address': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'shipping_address'", 'null': 'True', 'to': u"orm['uppsell.Address']"}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Store']"}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'uppsell.orderevent': {
'Meta': {'object_name': 'OrderEvent', 'db_table': "'order_events'"},
'action_type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Order']"}),
'state_after': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'state_before': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'uppsell.orderitem': {
'Meta': {'object_name': 'OrderItem', 'db_table': "'order_items'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Order']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.Listing']"}),
'quantity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'uppsell.product': {
'Meta': {'object_name': 'Product', 'db_table': "'products'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '10000'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.ProductGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'provisioning_codes': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'stock_units': ('django.db.models.fields.FloatField', [], {}),
'subtitle': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'uppsell.productcode': {
'Meta': {'object_name': 'ProductCode', 'db_table': "'product_codes'"},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['uppsell.ProductGroup']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'uppsell.productgroup': {
'Meta': {'object_name': 'ProductGroup', 'db_table': "'product_groups'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'uppsell.store': {
'Meta': {'object_name': 'Store', 'db_table': "'stores'"},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'default_currency': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'default_lang': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'sales_tax_rate': ('django.db.models.fields.FloatField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
    complete_apps = ['uppsell']
| mit | 2,520,933,076,126,869,500 | 74.403101 | 189 | 0.55575 | false | 3.556957 | false | false | false |
alexanderfefelov/nav | python/nav/web/status/sections.py | 1 | 29405 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009, 2012 UNINETT AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
# You should have received a copy of the GNU General Public License along with
# NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Status sections.
Used to build up different sections for display.
"""
from datetime import datetime
from django.db.models import Q
from django.core.urlresolvers import reverse
from nav.metrics.templates import metric_prefix_for_device
from nav.models.profiles import StatusPreference, StatusPreferenceCategory
from nav.models.profiles import StatusPreferenceOrganization
from nav.models.event import AlertHistory, AlertHistoryVariable
from nav.models.manage import Netbox, Category, Organization
from nav.models.thresholds import ThresholdRule
from nav.web import servicecheckers
from nav.web.status.forms import SectionForm, NetboxForm
from nav.web.status.forms import NetboxMaintenanceForm, ServiceForm
from nav.web.status.forms import ServiceMaintenanceForm, ModuleForm
from nav.web.status.forms import ThresholdForm, LinkStateForm, SNMPAgentForm
MAINTENANCE_STATE = 'maintenanceState'
BOX_STATE = 'boxState'
SERVICE_STATE = 'serviceState'
MODULE_STATE = 'moduleState'
THRESHOLD_STATE = 'thresholdState'
LINK_STATE = 'linkState'
SNMP_STATE = 'snmpAgentState'
PSU_STATE = 'psuState'
def get_section_model(section_type):
"""Dispatch table"""
dtable = {
StatusPreference.SECTION_NETBOX: NetboxSection,
StatusPreference.SECTION_NETBOX_MAINTENANCE: NetboxMaintenanceSection,
StatusPreference.SECTION_MODULE: ModuleSection,
StatusPreference.SECTION_SERVICE: ServiceSection,
StatusPreference.SECTION_SERVICE_MAINTENANCE: ServiceMaintenanceSection,
StatusPreference.SECTION_THRESHOLD: ThresholdSection,
StatusPreference.SECTION_LINKSTATE: LinkStateSection,
StatusPreference.SECTION_SNMPAGENT: SNMPAgentSection,
StatusPreference.SECTION_PSU: PSUSection,
}
return dtable[section_type]
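# For example, get_section_model(StatusPreference.SECTION_NETBOX) returns the
# NetboxSection class, which the caller then instantiates with its prefs.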
def get_user_sections(account):
'''Fetches all status sections for account in one swoop.
'''
sections = []
preferences = StatusPreference.objects.filter(
account=account
).order_by('position')
# Pre-fetching all categories and organisations
all_cats = Category.objects.values_list('pk', flat=True)
all_orgs = Organization.objects.values_list('pk', flat=True)
categories = {}
organizations = {}
cats = StatusPreferenceCategory.objects.filter(
statuspreference__in=preferences
)
orgs = StatusPreferenceOrganization.objects.filter(
statuspreference__in=preferences
)
    # Build dicts with statuspreference_id as keys.
    for cat in cats:
        if cat.statuspreference_id not in categories:
            categories[cat.statuspreference_id] = []
        categories[cat.statuspreference_id].append(cat.category_id)
    for org in orgs:
        if org.statuspreference_id not in organizations:
            organizations[org.statuspreference_id] = []
        organizations[org.statuspreference_id].append(org.organization_id)
    # Add pre-fetched categories and organisations to section preferences.
    # Adds all categories and organisations if nothing is found in the database.
for pref in preferences:
if pref.id in categories:
pref.fetched_categories = categories[pref.id]
pref.all_categories = False
else:
pref.fetched_categories = all_cats
pref.all_categories = True
if pref.id in organizations:
pref.fetched_organizations = organizations[pref.id]
pref.all_organizations = False
else:
pref.fetched_organizations = all_orgs
pref.all_organizations = True
for pref in preferences:
section_model = get_section_model(pref.type)
section = section_model(prefs=pref)
section.fetch_history()
sections.append(section)
return sections
class _Section(object):
'''Base class for sections.
Attributes:
        columns - list of the column headings displayed for this section,
                  in table order.
history - the query used to look up the history
type_title - readable type name of this section
devicehistory_type - used in links to devicehistory
'''
columns = []
history = []
type_title = ''
devicehistory_type = ''
def __init__(self, prefs=None):
self.prefs = prefs
self.categories = self.prefs.fetched_categories
self.organizations = self.prefs.fetched_organizations
self.states = self.prefs.states.split(',')
for key, title in StatusPreference.SECTION_CHOICES:
if self.prefs.type == key:
self.type_title = title
break
def fetch_history(self):
"""Empty method,- should get overridden in
sub-classes"""
self.history = []
def devicehistory_url(self):
"""Make history urls for this device"""
url = reverse('devicehistory-view')
url += "?eventtype=%s" % self.devicehistory_type
url += "&group_by=datetime"
if not self.prefs.all_organizations:
for org in self.organizations:
url += "&org=%s" % org
if not self.prefs.all_categories:
for cat in self.categories:
url += "&cat=%s" % cat
# If custom orgs and cats, use AND search
if not self.prefs.all_categories and not self.prefs.all_organizations:
url += "&mode=and"
return url
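    # With custom orgs/cats the generated URL looks like (hypothetical
    # values):
    #   <devicehistory-view>?eventtype=a_boxDown&group_by=datetime&org=uninett&cat=GW&mode=and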
@staticmethod
def form_class():
"""Return the chosen form"""
return SectionForm
@staticmethod
def form_data(status_prefs):
"""Insert data in the form for the view"""
data = {
'id': status_prefs.id,
'name': status_prefs.name,
'type': status_prefs.type,
'organizations': list(status_prefs.organizations.values_list(
'id', flat=True)) or [''],
}
data['categories'] = list(status_prefs.categories.values_list(
'id', flat=True)) or ['']
data['states'] = status_prefs.states.split(",")
return data
@classmethod
def form(cls, status_prefs):
"""Get the appropriate form"""
form_model = cls.form_class()
data = cls.form_data(status_prefs)
return form_model(data)
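# Each section subclass customizes the pair above: form_class() picks the
# Django form and form_data() shapes the stored StatusPreference into its
# initial data, so Section.form(prefs) yields a bound form for the edit view.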
class NetboxSection(_Section):
columns = [
'Sysname',
'IP',
        'Start',
        'Duration',
        'History',
'',
]
devicehistory_type = 'a_boxDown'
@staticmethod
def form_class():
return NetboxForm
def fetch_history(self):
maintenance = self._maintenance()
alert_types = self._alerttype()
netbox_history = AlertHistory.objects.select_related(
'netbox'
).filter(
~Q(netbox__in=maintenance),
Q(netbox__up='n') | Q(netbox__up='s'),
alert_type__name__in=alert_types,
end_time__gte=datetime.max,
netbox__category__in=self.categories,
netbox__organization__in=self.organizations,
).extra(
select={'downtime': "date_trunc('second', NOW() - start_time)"}
).order_by('-start_time', 'end_time')
history = []
for h in netbox_history:
row = {'netboxid': h.netbox.id,
'tabrow': (
(
h.netbox.sysname,
reverse('ipdevinfo-details-by-name',
args=[h.netbox.sysname])
),
(h.netbox.ip, None),
(h.start_time, None),
(h.downtime, None),
(
'history',
reverse('devicehistory-view') +
'?netbox=%(id)s&eventtype=a_boxDown&group_by=datetime' % {
'id': h.netbox.id,
}
),
),
}
history.append(row)
self.history = history
def _maintenance(self):
return AlertHistory.objects.filter(
event_type=MAINTENANCE_STATE,
end_time__gte=datetime.max,
netbox__isnull=False,
).values('netbox').query
def _alerttype(self):
states = []
if 'y' in self.states:
states.append('boxUp')
if 'n' in self.states:
states.append('boxDown')
if 's' in self.states:
states.append('boxShadow')
return states
class NetboxMaintenanceSection(_Section):
columns = [
'Sysname',
'IP',
        'Start',
        'Duration',
'',
]
devicehistory_type = 'e_maintenanceState'
@staticmethod
def form_class():
return NetboxMaintenanceForm
def fetch_history(self):
maintenance = self._maintenance()
boxes_down = self._boxes_down()
history = []
for m in maintenance:
# Find out if the box is down as well as on maintenance
down = boxes_down.get(m.alert_history.netbox.id, None)
if m.alert_history.netbox.up == 'y':
down_since = 'Up'
downtime = ''
else:
if down:
down_since = down['start_time']
downtime = down['downtime']
else:
down_since = 'N/A'
downtime = 'N/A'
row = {'netboxid': m.alert_history.netbox.id,
'tabrow': (
(
m.alert_history.netbox.sysname,
reverse('ipdevinfo-details-by-name',
args=[m.alert_history.netbox.sysname])
),
(m.alert_history.netbox.ip, None),
(down_since, None),
(downtime, None),
(
'history',
reverse('devicehistory-view') +
('?netbox=%(id)s&eventtype=e_maintenanceState'
'&group_by=datetime' %
{'id': m.alert_history.netbox.id})
),
),
}
history.append(row)
self.history = history
def _maintenance(self):
return AlertHistoryVariable.objects.select_related(
'alert_history', 'alert_history__netbox'
).filter(
alert_history__netbox__category__in=self.categories,
alert_history__netbox__organization__in=self.organizations,
alert_history__netbox__up__in=self.states,
alert_history__end_time__gte=datetime.max,
alert_history__event_type=MAINTENANCE_STATE,
variable='maint_taskid',
).order_by('-alert_history__start_time')
def _boxes_down(self):
history = AlertHistory.objects.select_related(
'netbox'
).filter(
end_time__gte=datetime.max,
event_type=BOX_STATE,
).extra(
select={'downtime': "date_trunc('second', NOW() - start_time)"}
).order_by('-start_time').values(
'netbox', 'start_time', 'downtime'
)
ret = {}
for h in history:
ret[h['netbox']] = h
return ret
class ServiceSection(_Section):
columns = [
'Sysname',
'Handler',
        'Start',
        'Duration',
'',
]
devicehistory_type = 'e_serviceState'
@staticmethod
def form_class():
return ServiceForm
@staticmethod
def form_data(status_prefs):
data = {
'id': status_prefs.id,
'name': status_prefs.name,
'type': status_prefs.type,
'organizations': list(status_prefs.organizations.values_list(
'id', flat=True)) or [''],
}
data['services'] = status_prefs.services.split(",") or ['']
data['states'] = status_prefs.states.split(",")
return data
def __init__(self, prefs=None):
super(ServiceSection, self).__init__(prefs=prefs)
if self.prefs.services:
self.services = self.prefs.services.split(',')
else:
self.services = [s for s in servicecheckers.get_checkers()]
def fetch_history(self):
maintenance = AlertHistory.objects.filter(
end_time__gte=datetime.max,
event_type=MAINTENANCE_STATE,
).values('netbox').query
services = AlertHistory.objects.select_related(
'netbox'
).filter(
~Q(netbox__in=maintenance),
end_time__gte=datetime.max,
event_type=SERVICE_STATE,
netbox__organization__in=self.organizations,
).extra(
select={
'downtime': "date_trunc('second', NOW() - start_time)",
'handler': 'service.handler',
},
tables=['service'],
where=[
'alerthist.subid = service.serviceid::text',
'service.handler IN %s',
],
params=[tuple(self.services)]
)
history = []
for s in services:
row = {'netboxid': s.netbox.id,
'tabrow': (
(
s.netbox.sysname,
reverse('ipdevinfo-details-by-name', args=[
s.netbox.sysname
])
),
(
s.handler,
reverse('ipdevinfo-service-list-handler', args=[
s.handler
])
),
(s.start_time, None),
(s.downtime, None),
(
'history',
reverse('devicehistory-view') +
('?netbox=%(id)s&eventtype=e_serviceState'
'&group_by=datetime' %
{'id': s.netbox.id})
)
),
}
history.append(row)
self.history = history
def devicehistory_url(self):
url = reverse('devicehistory-view')
url += "?eventtype=%s" % self.devicehistory_type
url += "&group_by=datetime"
if not self.prefs.all_organizations:
            # FIXME: filter on service.
            # Service is joined in on the alerthist.subid field, which is
            # not part of this query.
netboxes = Netbox.objects.filter(
organization__in=self.organizations,
).values('id')
for n in netboxes:
url += "&netbox=%s" % n['id']
return url
class ServiceMaintenanceSection(ServiceSection):
devicehistory_type = 'e_maintenanceState'
@staticmethod
def form_class():
return ServiceMaintenanceForm
def fetch_history(self):
maintenance = AlertHistoryVariable.objects.select_related(
'alert_history', 'alert_history__netbox'
).filter(
alert_history__end_time__gte=datetime.max,
alert_history__event_type=MAINTENANCE_STATE,
variable='maint_taskid',
).extra(
select={
'downtime': "date_trunc('second', NOW() - start_time)",
'handler': 'service.handler',
'up': 'service.up',
},
tables=['service'],
where=['subid = serviceid::text'],
).order_by('-alert_history__start_time')
service_history = AlertHistory.objects.filter(
end_time__gte=datetime.max,
event_type=SERVICE_STATE,
).extra(
select={'downtime': "date_trunc('second', NOW() - start_time)"}
).values('netbox', 'start_time', 'downtime')
service_down = {}
for s in service_history:
service_down[s['netbox']] = s
history = []
for m in maintenance:
down = service_down.get(m.alert_history.netbox.id, None)
if m.up == 'y':
down_since = 'Up'
downtime = ''
else:
if down:
down_since = down['start_time']
downtime = down['downtime']
else:
down_since = 'N/A'
downtime = 'N/A'
row = {'netboxid': m.alert_history.netbox.id,
'tabrow': (
(
m.alert_history.netbox.sysname,
reverse('ipdevinfo-details-by-name',
args=[m.alert_history.netbox.sysname])
),
(m.handler, reverse('ipdevinfo-service-list-handler',
args=[m.handler])),
(down_since, None),
(downtime, None),
(
'history',
reverse('devicehistory-view') +
('?netbox=%(id)s&eventtype=e_maintenanceState'
'&group_by=datetime' %
{'id': m.alert_history.netbox.id})
),
),
}
history.append(row)
self.history = history
class ModuleSection(_Section):
columns = [
'Sysname',
'IP',
'Module',
        'Start',
        'Duration',
'',
]
devicehistory_type = 'a_moduleDown'
@staticmethod
def form_class():
return ModuleForm
def fetch_history(self, module_history=None):
module_history = AlertHistory.objects.select_related(
'netbox', 'device'
).filter(
end_time__gte=datetime.max,
event_type=MODULE_STATE,
alert_type__name='moduleDown',
netbox__organization__in=self.organizations,
netbox__category__in=self.categories,
).extra(
select={
'downtime': "date_trunc('second', NOW() - start_time)",
'module_id': 'module.moduleid',
'module_name': 'module.name',
},
tables=['module'],
where=[
'alerthist.deviceid = module.deviceid',
'module.up IN %s',
],
params=[tuple(self.states)]
).order_by('-start_time') if module_history is None else module_history
history = []
for module in module_history:
row = {'netboxid': module.netbox.id,
'tabrow': (
(
module.netbox.sysname,
reverse('ipdevinfo-details-by-name',
args=[module.netbox.sysname])
),
(module.netbox.ip, None),
(
module.module_name,
reverse('ipdevinfo-module-details', args=[
module.netbox.sysname,
module.module_name
]) if module.module_name else None
),
(module.start_time, None),
(module.downtime, None),
(
'history',
reverse('devicehistory-view') +
'?module=%(id)s&eventtype=a_moduleDown&group_by=datetime' % {
'id': module.module_id,
}
),
),
}
history.append(row)
self.history = history
class ThresholdSection(_Section):
columns = [
'Sysname',
        'Description',
        'Start',
        'Duration',
'',
]
devicehistory_type = 'a_exceededThreshold'
@staticmethod
def form_class():
return ThresholdForm
@staticmethod
def form_data(status_prefs):
data = {
'id': status_prefs.id,
'name': status_prefs.name,
'type': status_prefs.type,
'organizations': list(status_prefs.organizations.values_list(
'id', flat=True)) or [''],
'categories': list(status_prefs.categories.values_list(
'id', flat=True)) or ['']
}
return data
def fetch_history(self):
thresholds = AlertHistory.objects.select_related(
'netbox'
).filter(
end_time__gte=datetime.max,
event_type=THRESHOLD_STATE,
alert_type__name='exceededThreshold',
netbox__organization__in=self.organizations,
netbox__category__in=self.categories,
).extra(
select={
'downtime': "date_trunc('second', NOW() - start_time)",
},
).order_by('-start_time')
history = []
for alert in thresholds:
description = self._description_from_alert(alert)
row = {'netboxid': alert.netbox.id,
'tabrow': (
(alert.netbox.sysname,
reverse('ipdevinfo-details-by-name',
args=[alert.netbox.sysname])),
(description, None),
(alert.start_time, None),
(alert.downtime, None),
('history',
reverse('devicehistory-view') +
'?netbox=%(id)s&eventtype=a_exceededThreshold'
'&group_by=datetime' % {
'id': alert.netbox.id,
}),
),
}
history.append(row)
self.history = history
@staticmethod
def _description_from_alert(alert):
try:
ruleid, metric = alert.subid.split(':', 1)
except ValueError:
description = None
else:
try:
rule = ThresholdRule.objects.get(id=ruleid)
except ThresholdRule.DoesNotExist:
limit = ''
else:
limit = rule.alert
prefix = metric_prefix_for_device(alert.netbox.sysname)
if metric.startswith(prefix):
metric = metric[len(prefix)+1:]
description = "{0} {1}".format(metric, limit)
return description
class LinkStateSection(_Section):
columns = [
'Sysname',
'IP',
'Interface',
        'Start',
        'Duration',
        'History',
'',
]
devicehistory_type = 'a_linkDown'
@staticmethod
def form_class():
return LinkStateForm
def fetch_history(self):
netbox_history = AlertHistory.objects.select_related(
'netbox'
).filter(
event_type=LINK_STATE,
end_time__gte=datetime.max,
netbox__category__in=self.categories,
netbox__organization__in=self.organizations,
).extra(
select={
'downtime': "date_trunc('second', NOW() - start_time)",
'interfaceid': 'interface.interfaceid',
'ifname': 'interface.ifname',
},
where=['subid = interfaceid::text'],
tables=['interface']
).order_by('-start_time', 'end_time')
history = []
for h in netbox_history:
row = {
'netboxid': h.netbox.id,
'alerthistid': h.id,
'tabrow': (
(
h.netbox.sysname,
reverse('ipdevinfo-details-by-name',
args=[h.netbox.sysname])
),
(h.netbox.ip, None),
(
h.ifname,
reverse('ipdevinfo-interface-details',
args=[h.netbox.sysname, h.interfaceid])
),
(h.start_time, None),
(h.downtime, None),
('history', reverse('devicehistory-view') +
'?netbox=%(id)s&eventtype=a_linkDown&group_by=datetime' % {
'id': h.netbox.id, }
),
),
}
history.append(row)
self.history = history
class SNMPAgentSection(_Section):
columns = [
'Sysname',
'IP',
        'Start',
        'Duration',
'',
]
devicehistory_type = 'a_snmpAgentDown'
@staticmethod
def form_class():
return SNMPAgentForm
@staticmethod
def form_data(status_prefs):
data = {
'id': status_prefs.id,
'name': status_prefs.name,
'type': status_prefs.type,
'organizations': list(status_prefs.organizations.values_list(
'id', flat=True)) or [''],
}
data['categories'] = list(status_prefs.categories.values_list(
'id', flat=True)) or ['']
return data
def fetch_history(self):
netbox_history = AlertHistory.objects.select_related(
'netbox'
).filter(
event_type=SNMP_STATE,
end_time__gte=datetime.max,
netbox__category__in=self.categories,
netbox__organization__in=self.organizations,
).extra(
select={
'downtime': "date_trunc('second', NOW() - start_time)",
}
).order_by('-start_time', 'end_time')
history = []
for h in netbox_history:
row = {'netboxid': h.netbox.id,
'tabrow': (
(
h.netbox.sysname,
reverse('ipdevinfo-details-by-name',
args=[h.netbox.sysname])
),
(h.netbox.ip, None),
(h.start_time, None),
(h.downtime, None),
(
'history',
reverse('devicehistory-view') +
('?netbox=%(id)s&eventtype=a_snmpAgentDown'
'&group_by=datetime' % {'id': h.netbox.id})
),
),
}
history.append(row)
self.history = history
class PSUSection(_Section):
columns = [
'Sysname',
'IP',
'PSU',
        'Start',
        'Duration',
'',
]
devicehistory_type = 'a_psuNotOK'
@staticmethod
def form_class():
return ModuleForm
def fetch_history(self, psu_history=None):
psu_history = AlertHistory.objects.select_related(
'netbox', 'device'
).filter(
end_time__gte=datetime.max,
event_type=PSU_STATE,
alert_type__name='psuNotOK',
netbox__organization__in=self.organizations,
netbox__category__in=self.categories,
).extra(
select={
'downtime': "date_trunc('second', NOW() - start_time)",
'powersupply_id': 'powersupply_or_fan.powersupplyid',
'powersupply_name': 'powersupply_or_fan.name',
},
tables=['powersupply_or_fan'],
where=[
'alerthist.subid = powersupply_or_fan.powersupplyid::TEXT',
],
).order_by('-start_time') if psu_history is None else psu_history
self.history = [self._psu_to_table_row(psu) for psu in psu_history]
@staticmethod
def _psu_to_table_row(psu):
return {'netboxid': psu.netbox.id,
'tabrow': (
(psu.netbox.sysname,
reverse('ipdevinfo-details-by-name', args=[psu.netbox.sysname])),
(psu.netbox.ip, None),
(psu.powersupply_name, None),
(psu.start_time, None),
(psu.downtime, None),
('history',
(reverse('devicehistory-view') + '?powersupply=%s'
'&eventtype=a_psuNotOK'
'&group_by=datetime' %
psu.powersupply_id)),
)}
| gpl-2.0 | -2,737,846,190,334,382,000 | 32.294185 | 85 | 0.508648 | false | 4.241575 | false | false | false |
projectarkc/arkc-server-gae | fetchfrom/goagent.py | 1 | 9320 | #!/usr/bin/env python
# coding:utf-8
__version__ = '3.2.0'
__password__ = ''
__hostsdeny__ = () # __hostsdeny__ = ('.youtube.com', '.youku.com')
import os
import re
import time
import struct
import zlib
import base64
import logging
import urlparse
import httplib
import io
import string
import json
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
from google.appengine.api import urlfetch
from google.appengine.api.taskqueue.taskqueue import MAX_URL_LENGTH
from google.appengine.runtime import apiproxy_errors
URLFETCH_MAX = 2
URLFETCH_MAXSIZE = 4 * 1024 * 1024
URLFETCH_DEFLATE_MAXSIZE = 4 * 1024 * 1024
URLFETCH_TIMEOUT = 30
class NotFoundKey(Exception):
pass
class GAEfail(Exception):
pass
class Nonsense(Exception):
pass
class PermanentFail(Exception):
pass
class TimeoutFail(Exception):
pass
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
def send_error(self, code, message):
self.error_code = code
self.error_message = message
def message_html(title, banner, detail=''):
MESSAGE_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>$title</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Message From FetchServer</b></td></tr>
<tr><td> </td></tr></table>
<blockquote>
<H1>$banner</H1>
$detail
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
return string.Template(MESSAGE_TEMPLATE).substitute(title=title, banner=banner, detail=detail)
try:
from Crypto.Cipher.ARC4 import new as RC4Cipher
except ImportError:
logging.warn('Load Crypto.Cipher.ARC4 Failed, Use Pure Python Instead.')
class RC4Cipher(object):
def __init__(self, key):
x = 0
box = range(256)
for i, y in enumerate(box):
x = (x + y + ord(key[i % len(key)])) & 0xff
box[i], box[x] = box[x], y
self.__box = box
self.__x = 0
self.__y = 0
def encrypt(self, data):
out = []
out_append = out.append
x = self.__x
y = self.__y
box = self.__box
for char in data:
x = (x + 1) & 0xff
y = (y + box[x]) & 0xff
box[x], box[y] = box[y], box[x]
out_append(chr(ord(char) ^ box[(box[x] + box[y]) & 0xff]))
self.__x = x
self.__y = y
return ''.join(out)
def inflate(data):
return zlib.decompress(data, -zlib.MAX_WBITS)
def deflate(data):
return zlib.compress(data)[2:-4]
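# Both helpers operate on raw DEFLATE streams: deflate() strips the zlib
# header and trailing checksum so the output matches the "deflate"
# Content-Encoding, and inflate() reverses it. Round-trip sketch
# (illustrative):
#   assert inflate(deflate('payload')) == 'payload'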
def format_response(status, headers, content):
if content:
headers.pop('content-length', None)
headers['Content-Length'] = str(len(content))
data = 'HTTP/1.1 %d %s\r\n%s\r\n\r\n%s' % (status, httplib.responses.get(
status, 'Unknown'), '\r\n'.join('%s: %s' % (k.title(), v) for k, v in headers.items()), content)
data = deflate(data)
assert len(data) <= 65536
return "%04x" % len(data) + data
def application(headers, body, method, url):
kwargs = {}
    # move the x-urlfetch-* control headers out of the outgoing request and
    # into kwargs; any() is only used to drain the generator
    any(kwargs.__setitem__(x[len('x-urlfetch-'):].lower(), headers.pop(x))
        for x in headers.keys() if x.lower().startswith('x-urlfetch-'))
if 'Content-Encoding' in headers and body:
if headers['Content-Encoding'] == 'deflate':
body = inflate(body)
headers['Content-Length'] = str(len(body))
del headers['Content-Encoding']
# logging.info(
# '%s "%s %s %s" - -', environ['REMOTE_ADDR'], method, url, 'HTTP/1.1')
if __password__ and __password__ != kwargs.get('password', ''):
raise GAEfail
netloc = urlparse.urlparse(url).netloc
if __hostsdeny__ and netloc.endswith(__hostsdeny__):
raise GAEfail
if len(url) > MAX_URL_LENGTH:
raise GAEfail
if netloc.startswith(('127.0.0.', '::1', 'localhost')):
raise GAEfail
fetchmethod = getattr(urlfetch, method, None)
if not fetchmethod:
raise GAEfail
timeout = int(kwargs.get('timeout', URLFETCH_TIMEOUT))
validate_certificate = bool(int(kwargs.get('validate', 0)))
maxsize = int(kwargs.get('maxsize', 0))
# https://www.freebsdchina.org/forum/viewtopic.php?t=54269
accept_encoding = headers.get(
'Accept-Encoding', '') or headers.get('Bccept-Encoding', '')
errors = []
for i in xrange(int(kwargs.get('fetchmax', URLFETCH_MAX))):
try:
response = urlfetch.fetch(url, body, fetchmethod, headers, allow_truncated=False,
follow_redirects=False, deadline=timeout, validate_certificate=validate_certificate)
break
except apiproxy_errors.OverQuotaError as e:
time.sleep(5)
except urlfetch.DeadlineExceededError as e:
errors.append('%r, timeout=%s' % (e, timeout))
logging.error(
'DeadlineExceededError(timeout=%s, url=%r)', timeout, url)
time.sleep(1)
timeout *= 2
except urlfetch.DownloadError as e:
errors.append('%r, timeout=%s' % (e, timeout))
logging.error('DownloadError(timeout=%s, url=%r)', timeout, url)
time.sleep(1)
timeout *= 2
except urlfetch.ResponseTooLargeError as e:
errors.append('%r, timeout=%s' % (e, timeout))
response = e.response
logging.error(
'ResponseTooLargeError(timeout=%s, url=%r) response(%r)', timeout, url, response)
m = re.search(
r'=\s*(\d+)-', headers.get('Range') or headers.get('range') or '')
if m is None:
headers['Range'] = 'bytes=0-%d' % (maxsize or URLFETCH_MAXSIZE)
else:
headers.pop('Range', '')
headers.pop('range', '')
start = int(m.group(1))
headers[
'Range'] = 'bytes=%s-%d' % (start, start + (maxsize or URLFETCH_MAXSIZE))
timeout *= 2
except urlfetch.SSLCertificateError as e:
errors.append('%r, should validate=0 ?' % e)
logging.error('%r, timeout=%s', e, timeout)
except Exception as e:
errors.append(str(e))
if i == 0 and method == 'GET':
timeout *= 2
else:
raise PermanentFail
#logging.debug('url=%r response.status_code=%r response.headers=%r response.content[:1024]=%r', url, response.status_code, dict(response.headers), response.content[:1024])
status_code = int(response.status_code)
data = response.content
response_headers = response.headers
content_type = response_headers.get('content-type', '')
if status_code == 200 and maxsize and len(data) > maxsize and response_headers.get('accept-ranges', '').lower() == 'bytes' and int(response_headers.get('content-length', 0)):
status_code = 206
response_headers[
'Content-Range'] = 'bytes 0-%d/%d' % (maxsize - 1, len(data))
data = data[:maxsize]
if status_code == 200 and 'content-encoding' not in response_headers and 512 < len(data) < URLFETCH_DEFLATE_MAXSIZE and content_type.startswith(('text/', 'application/json', 'application/javascript')):
if 'gzip' in accept_encoding:
response_headers['Content-Encoding'] = 'gzip'
compressobj = zlib.compressobj(
zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0)
dataio = io.BytesIO()
dataio.write('\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff')
dataio.write(compressobj.compress(data))
dataio.write(compressobj.flush())
dataio.write(
struct.pack('<LL', zlib.crc32(data) & 0xFFFFFFFFL, len(data) & 0xFFFFFFFFL))
data = dataio.getvalue()
elif 'deflate' in accept_encoding:
response_headers['Content-Encoding'] = 'deflate'
data = deflate(data)
response_headers['Content-Length'] = str(len(data))
#logging.info("Goagent:: Get %d data and sent.", len(data))
return format_response(status_code, response_headers, '') + data
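# process() expects the POSTed payload to be a serialized HTTP request whose
# body is a JSON object describing the real fetch to perform, e.g.
# (illustrative):
#   {"method": "GET", "url": "http://example.com/",
#    "headers": {"Accept-Encoding": "gzip"}, "body": ""}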
def process(data):
req = HTTPRequest(data)
p = json.loads(''.join(req.rfile.readlines()))
#logging.info("Access URL: " + p["url"])
return application(p["headers"], p["body"], p["method"], p["url"])
| gpl-2.0 | 4,217,351,065,966,016,500 | 33.64684 | 205 | 0.591094 | false | 3.559969 | false | false | false |
kratorius/ads | python/interviewquestions/longest_sequence.py | 1 | 1730 | """
Given a list of distinct numbers, find the longest monotonically increasing
subsequence within that list.
For example:
S = [2, 4, 3, 5, 1, 7, 6, 9, 8] -> [2, 3, 5, 6, 8]
or [2, 4, 5, 7, 8]
or [2, 4, 5, 7, 9]
If there's more than one solution, just return one of them.
"""
import unittest
def longest_sequence(lst):
if not lst:
return []
lengths = [0] * len(lst)
predecessors = [None] * len(lst)
for idx, item in enumerate(lst):
# what's the longest subsequence until this point?
# (whose last item < current item)
max_length = 1
lengths[idx] = 1
predecessors[idx] = None
for i, length in enumerate(lengths[:idx]):
if length >= max_length and lst[i] < item:
max_length = length + 1
lengths[idx] = max_length
predecessors[idx] = i
    # the longest subsequence ends wherever the computed length is greatest
    # (latest index wins ties); tracking the last-updated index instead
    # misses inputs such as [1, 2, 3, 0, 1], whose longest run ends before
    # the final update
    max_idx = max(range(len(lengths)), key=lambda i: (lengths[i], i))
    # proceed backward and rebuild the list
longest = []
while max_idx is not None:
item = lst[max_idx]
longest.append(item)
max_idx = predecessors[max_idx]
return list(reversed(longest))
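# The nested scan above runs in O(n^2) time with O(n) extra space; an
# O(n log n) patience-sorting variant exists but is not shown here.
# Example (illustrative):
#   longest_sequence([2, 4, 3, 5, 1, 7, 6, 9, 8]) -> [2, 4, 5, 7, 8]
# (any one maximal increasing subsequence is a valid answer).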
class LongestSequenceTest(unittest.TestCase):
def test_sequence_find(self):
self.assertEqual([], longest_sequence([]))
self.assertEqual([10], longest_sequence([10]))
self.assertEqual([2, 4, 5, 7, 8], longest_sequence([2, 4, 3, 5, 1, 7, 6, 9, 8]))
self.assertEqual([1, 2, 3], longest_sequence([1, 2, 3, 1, 2, 3, 1, 2, 3]))
self.assertEqual([1, 2, 3], longest_sequence([1, 2, 3]))
self.assertEqual([10, 20, 30], longest_sequence([10, 5, 4, 20, 3, 2, 30]))
| mit | 2,014,271,797,512,256,300 | 29.892857 | 88 | 0.546821 | false | 3.46 | false | false | false |
Yannig/ansible | lib/ansible/plugins/action/net_base.py | 1 | 7262 | # (c) 2015, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.module_utils.network_common import load_provider
from imp import find_module, load_module
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
if self._play_context.connection != 'local':
return dict(
failed=True,
msg='invalid connection specified, expected connection=local, '
'got %s' % self._play_context.connection
)
play_context = copy.deepcopy(self._play_context)
play_context.network_os = self._get_network_os(task_vars)
        # we should be able to streamline this a bit by creating a common
# provider argument spec in module_utils/network_common.py or another
# option is that there isn't a need to push provider into the module
# since the connection is started in the action handler.
f, p, d = find_module('ansible')
f2, p2, d2 = find_module('module_utils', [p])
f3, p3, d3 = find_module(play_context.network_os, [p2])
module = load_module('ansible.module_utils.' + play_context.network_os, f3, p3, d3)
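        # e.g. with network_os 'junos' this resolves and loads
        # ansible.module_utils.junos, whose get_provider_argspec() drives
        # the provider handling below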
self.provider = load_provider(module.get_provider_argspec(), self._task.args)
if play_context.network_os == 'junos':
play_context.connection = 'netconf'
play_context.port = int(self.provider['port'] or self._play_context.port or 830)
else:
play_context.connection = 'network_cli'
play_context.port = int(self.provider['port'] or self._play_context.port or 22)
play_context.remote_addr = self.provider['host'] or self._play_context.remote_addr
play_context.remote_user = self.provider['username'] or self._play_context.connection_user
play_context.password = self.provider['password'] or self._play_context.password
play_context.private_key_file = self.provider['ssh_keyfile'] or self._play_context.private_key_file
play_context.timeout = int(self.provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
if 'authorize' in self.provider.keys():
play_context.become = self.provider['authorize'] or False
play_context.become_pass = self.provider['auth_pass']
socket_path = self._start_connection(play_context)
task_vars['ansible_socket'] = socket_path
if 'fail_on_missing_module' not in self._task.args:
self._task.args['fail_on_missing_module'] = False
result = super(ActionModule, self).run(tmp, task_vars)
module = self._get_implementation_module(play_context.network_os, self._task.action)
if not module:
if self._task.args['fail_on_missing_module']:
result['failed'] = True
else:
result['failed'] = False
result['msg'] = ('Could not find implementation module %s for %s' %
(self._task.action, play_context.network_os))
else:
new_module_args = self._task.args.copy()
# perhaps delete the provider argument here as well since the
# module code doesn't need the information, the connection is
# already started
if 'network_os' in new_module_args:
del new_module_args['network_os']
del new_module_args['fail_on_missing_module']
display.vvvv('Running implementation module %s' % module)
result.update(self._execute_module(module_name=module,
module_args=new_module_args, task_vars=task_vars,
wrap_async=self._task.async))
display.vvvv('Caching network OS %s in facts' % play_context.network_os)
result['ansible_facts'] = {'network_os': play_context.network_os}
return result
def _start_connection(self, play_context):
display.vvv('using connection plugin %s' % play_context.connection, play_context.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent',
play_context, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, play_context.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
# make sure we are in the right cli context which should be
# enable mode and not config module
rc, out, err = connection.exec_command('prompt()')
if str(out).strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
connection.exec_command('exit')
if self._play_context.become_method == 'enable':
self._play_context.become = False
self._play_context.become_method = None
return socket_path
def _get_network_os(self, task_vars):
if ('network_os' in self._task.args and self._task.args['network_os']):
display.vvvv('Getting network OS from task argument')
network_os = self._task.args['network_os']
elif (self._play_context.network_os):
display.vvvv('Getting network OS from inventory')
network_os = self._play_context.network_os
elif ('network_os' in task_vars['ansible_facts'] and
task_vars['ansible_facts']['network_os']):
display.vvvv('Getting network OS from fact')
network_os = task_vars['ansible_facts']['network_os']
else:
# this will be replaced by the call to get_capabilities() on the
# connection
display.vvvv('Getting network OS from net discovery')
network_os = None
return network_os
def _get_implementation_module(self, network_os, platform_agnostic_module):
implementation_module = network_os + '_' + platform_agnostic_module.partition('_')[2]
if implementation_module not in self._shared_loader_obj.module_loader:
implementation_module = None
return implementation_module
| gpl-3.0 | -8,083,114,629,988,565,000 | 43.280488 | 118 | 0.630956 | false | 4.114448 | false | false | false |
F5Networks/f5-common-python | f5/bigiq/cm/device/licensing/pool/initial_activation.py | 1 | 1773 | # coding=utf-8
#
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""BIG-IQ® license pool regkeys.
REST URI
``http://localhost/mgmt/cm/device/licensing/pool/initial-activation``
REST Kind
``cm:device:licensing:pool:initial-activation:*``
"""
from f5.bigiq.resource import Collection
from f5.bigiq.resource import Resource
class Initial_Activations(Collection):
def __init__(self, pool):
super(Initial_Activations, self).__init__(pool)
self._meta_data['required_json_kind'] = \
'cm:device:licensing:pool:initial-activation:initialactivationworkercollectionstate' # NOQA
self._meta_data['allowed_lazy_attributes'] = [Initial_Activation]
self._meta_data['attribute_registry'] = {
'cm:device:licensing:pool:initial-activation:initialactivationworkeritemstate': Initial_Activation # NOQA
}
class Initial_Activation(Resource):
def __init__(self, initial_activations):
super(Initial_Activation, self).__init__(initial_activations)
self._meta_data['required_creation_parameters'] = {'name', 'regKey'}
self._meta_data['required_json_kind'] = \
'cm:device:licensing:pool:initial-activation:initialactivationworkeritemstate'
| apache-2.0 | 1,947,570,729,455,576,800 | 36.702128 | 118 | 0.713883 | false | 3.852174 | false | false | false |
CSD-Public/stonix | src/MacBuild/ramdisk/lib/environment.py | 1 | 30726 | #!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
# ============================================================================#
# Filename $RCSfile: stonix/environment.py,v $
# Description Security Configuration Script
# OS Linux, OS X, Solaris, BSD
# Author Dave Kennel
# Last updated by $Author: $
# Notes Based on CIS Benchmarks, NSA RHEL
# Guidelines, NIST and DISA STIG/Checklist
# Release $Revision: 1.0 $
# Modified Date $Date: 2010/8/24 14:00:00 $
# ============================================================================#
'''
Created on Aug 24, 2010
@author: dkennel
@change: 2014/05/29 - ekkehard j. koch - pep8 and comment updates
'''
#--- Native python libraries
import os
import re
import sys
import socket
import subprocess
import types
import platform
import pwd
import time
class Environment:
'''The Environment class collects commonly used information about the
execution platform and makes it available to the rules.
:version: 1.0
:author: D. Kennel
'''
def __init__(self):
self.operatingsystem = ''
self.osreportstring = ''
self.osfamily = ''
self.hostname = ''
self.ipaddress = ''
self.macaddress = ''
self.osversion = ''
self.numrules = 0
self.euid = os.geteuid()
currpwd = pwd.getpwuid(self.euid)
try:
self.homedir = currpwd[5]
except(IndexError):
self.homedir = '/dev/null'
self.installmode = False
self.verbosemode = False
self.debugmode = False
self.runtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
self.collectinfo()
def setinstallmode(self, installmode):
'''Set the install mode bool value. Should be true if the prog should run
in install mode.
        :param installmode: bool - true to run in install mode
:returns: void
@author: D. Kennel
'''
try:
if type(installmode) is bool:
self.installmode = installmode
except (NameError):
# installmode was undefined
pass
def getinstallmode(self):
'''Return the current value of the install mode bool. Should be true if
the program is to run in install mode.
:returns: bool : installmode
@author: D. Kennel
'''
return self.installmode
def setverbosemode(self, verbosemode):
'''Set the verbose mode bool value. Should be true if the prog should run
in verbose mode.
        :param verbosemode: bool - true to run in verbose mode
:returns: void
@author: D. Kennel
'''
try:
if type(verbosemode) is bool:
self.verbosemode = verbosemode
except (NameError):
# verbosemode was undefined
pass
def getverbosemode(self):
'''Return the current value of the verbose mode bool. Should be true if
the program is to run in verbose mode.
:returns: bool : verbosemode
@author: D. Kennel
'''
return self.verbosemode
def setdebugmode(self, debugmode):
        '''Set the debug mode bool value. Should be true if the prog should run
        in debug mode.
        :param debugmode: bool - true to run in debug mode
:returns: void
@author: D. Kennel
'''
try:
if type(debugmode) is bool:
self.debugmode = debugmode
except (NameError):
# debugmode was undefined
pass
def getdebugmode(self):
'''Return the current value of the debug mode bool. Should be true if the
program is to run in debug mode.
:returns: bool : debugmode
@author: D. Kennel
'''
return self.debugmode
def getostype(self):
'''Return the detailed operating system type.
:returns: string :
@author D. Kennel
'''
return self.operatingsystem
def getosreportstring(self):
'''Return the detailed operating system type with full version info.
:returns: string :
@author D. Kennel
'''
return self.osreportstring
def getosfamily(self):
'''Return the value of self.osfamily which should be linux, darwin,
solaris or freebsd.
:returns: string :
@author: D. Kennel
'''
return self.osfamily
def getosver(self):
'''Return the OS version as a string.
:returns: string :
@author D. Kennel
'''
return self.osversion
def gethostname(self):
'''Return the hostname of the system.
:returns: string
@author: dkennel
'''
return self.hostname
def getipaddress(self):
'''Return the IP address associated with the host name.
:returns: string :
@author D. Kennel
'''
return self.ipaddress
def getmacaddr(self):
'''Return the mac address in native format.
:returns: string :
@author D. Kennel
'''
return self.macaddress
def geteuid(self):
'''Return the effective user ID
:returns: int :
@author D. Kennel
'''
return self.euid
def geteuidhome(self):
'''Returns the home directory of the current effective user ID.
:returns: string
@author: D. Kennel
'''
return self.homedir
def collectinfo(self):
'''Private method to populate data.
:returns: void
@author D. Kennel
'''
# print 'Environment Running discoveros'
self.discoveros()
# print 'Environment running setosfamily'
self.setosfamily()
# print 'Environment running guessnetwork'
self.guessnetwork()
self.collectpaths()
def discoveros(self):
'''Discover the operating system type and version
:returns: void
@author: D. Kennel
'''
# Alternative (better) implementation for Linux
if os.path.exists('/usr/bin/lsb_release'):
proc = subprocess.Popen('/usr/bin/lsb_release -dr',
shell=True, stdout=subprocess.PIPE,
close_fds=True)
description = proc.stdout.readline()
release = proc.stdout.readline()
description = description.split()
# print description
del description[0]
description = " ".join(description)
self.operatingsystem = description
self.osreportstring = description
release = release.split()
release = release[1]
self.osversion = release
elif os.path.exists('/etc/redhat-release'):
relfile = open('/etc/redhat-release')
release = relfile.read()
relfile.close()
release = release.split()
opsys = ''
for element in release:
if re.search('release', element):
break
else:
opsys = opsys + " " + element
self.operatingsystem = opsys
self.osreportstring = opsys
index = 0
for element in release:
if re.search('release', element):
index = index + 1
osver = release[index]
else:
index = index + 1
self.osversion = osver
elif os.path.exists('/etc/gentoo-release'):
relfile = open('/etc/gentoo-release')
release = relfile.read()
relfile.close()
release = release.split()
opsys = ''
for element in release:
if re.search('release', element):
break
else:
opsys = opsys + " " + element
self.operatingsystem = opsys
self.osreportstring = opsys
index = 0
for element in release:
if re.search('release', element):
index = index + 1
osver = release[index]
else:
index = index + 1
self.osversion = osver
elif os.path.exists('/usr/bin/sw_vers'):
proc1 = subprocess.Popen('/usr/bin/sw_vers -productName',
shell=True, stdout=subprocess.PIPE,
close_fds=True)
description = proc1.stdout.readline()
description = description.strip()
proc2 = subprocess.Popen('/usr/bin/sw_vers -productVersion',
shell=True, stdout=subprocess.PIPE,
close_fds=True)
release = proc2.stdout.readline()
release = release.strip()
self.operatingsystem = description
self.osversion = release
proc3 = subprocess.Popen('/usr/bin/sw_vers -buildVersion',
shell=True, stdout=subprocess.PIPE,
close_fds=True)
build = proc3.stdout.readline()
build = build.strip()
opsys = str(description) + ' ' + str(release) + ' ' + str(build)
self.osreportstring = opsys
def setosfamily(self):
'''Private method to detect and set the self.osfamily property. This is a
fuzzy classification of the OS.
'''
uname = sys.platform
if uname == 'linux2':
self.osfamily = 'linux'
elif uname == 'darwin':
self.osfamily = 'darwin'
elif uname == 'sunos5':
self.osfamily = 'solaris'
elif uname == 'freebsd9':
self.osfamily = 'freebsd'
def guessnetwork(self):
'''This private method checks the configured interfaces and tries to
make an educated guess as to the correct network data. self.ipaddress
and self.macaddress will be updated by this method.
'''
# regex to match mac addresses
macre = '(([0-9A-Fa-f]{2}[:-]){5}[0-9A-Fa-f]{2})'
ipaddress = ''
macaddress = '00:00:00:00:00:00'
hostname = socket.getfqdn()
try:
ipdata = socket.gethostbyname_ex(hostname)
iplist = ipdata[2]
try:
iplist.remove('127.0.0.1')
except (ValueError):
# tried to remove loopback when it's not present, continue
pass
if len(iplist) >= 1:
ipaddress = iplist[0]
else:
ipaddress = '127.0.0.1'
except(socket.gaierror):
# If we're here it's because socket.getfqdn did not in fact return
# a valid hostname and gethostbyname errored.
ipaddress = self.getdefaultip()
# In ifconfig output macaddresses are always one line before the ip
# address.
if sys.platform == 'linux2':
cmd = '/sbin/ifconfig'
elif os.path.exists('/usr/sbin/ifconfig'):
cmd = '/usr/sbin/ifconfig -a'
else:
cmd = '/sbin/ifconfig -a'
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, close_fds=True)
netdata = proc.stdout.readlines()
for line in netdata:
# print "processing: " + line
match = re.search(macre, line.decode('utf-8'))
if match is not None:
# print 'Matched MAC address'
macaddress = match.group()
if re.search(ipaddress, line.decode('utf-8')):
# print 'Found ipaddress'
break
self.hostname = hostname
self.ipaddress = ipaddress
self.macaddress = macaddress
def getdefaultip(self):
'''This method will return the ip address of the interface
associated with the current default route.
:returns: string - ipaddress
@author: dkennel
'''
ipaddr = '127.0.0.1'
gateway = ''
if sys.platform == 'linux2':
try:
routecmd = subprocess.Popen('/sbin/route -n', shell=True,
stdout=subprocess.PIPE,
close_fds=True)
routedata = routecmd.stdout.readlines()
except(OSError):
return ipaddr
            for line in routedata:
                line = line.decode('utf-8')
                if re.search('^default', line):
                    line = line.split()
try:
gateway = line[1]
except(IndexError):
return ipaddr
else:
try:
if os.path.exists('/usr/sbin/route'):
cmd = '/usr/sbin/route -n get default'
else:
cmd = '/sbin/route -n get default'
routecmd = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
close_fds=True)
routedata = routecmd.stdout.readlines()
except(OSError):
return ipaddr
for line in routedata:
if re.search('gateway:', line.decode('utf-8')):
line = line.decode('utf-8').split()
try:
gateway = line[1]
except(IndexError):
return ipaddr
if gateway:
iplist = self.getallips()
for level in [1, 2, 3, 4]:
matched = self.matchip(gateway, iplist, level)
if len(matched) == 1:
ipaddr = matched[0]
break
return ipaddr
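    # Summary of the approach: read the default gateway from the routing
    # table, then use matchip() below to pick the local address sharing the
    # most leading octets with it, falling back to 127.0.0.1.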
def matchip(self, target, iplist, level=1):
        '''This method will, when given an IP, try to find a matching ip
        from a list of IP addresses. Matching will work from left to right
        according to the level param. If no match is found
        the loopback address will be returned.
        :param target: string - ip address to match against
        :param iplist: list - ip addresses to search
        :param level: int - number of leading octets that must match
        (Default value = 1)
:returns: list - ipaddresses
@author: dkennel
'''
quad = target.split('.')
if level == 1:
network = quad[0]
elif level == 2:
network = quad[0] + '.' + quad[1]
elif level == 3:
network = quad[0] + '.' + quad[1] + '.' + quad[2]
elif level == 4:
return ['127.0.0.1']
matchlist = []
for addr in iplist:
            if re.search(network, addr):
matchlist.append(addr)
if len(matchlist) == 0:
matchlist.append('127.0.0.1')
return matchlist
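    # Example (illustrative):
    #   matchip('192.168.1.1', ['192.168.1.5', '10.0.0.2'], level=2)
    # returns ['192.168.1.5'], the only address sharing the first two
    # octets with the target.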
def getallips(self):
'''This method returns all ip addresses on all interfaces on the system.
:returns: list of strings
@author: dkennel
'''
iplist = []
if sys.platform == 'linux2':
try:
ifcmd = subprocess.Popen('/sbin/ifconfig', shell=True,
stdout=subprocess.PIPE,
close_fds=True)
ifdata = ifcmd.stdout.readlines()
except(OSError):
return iplist
            for line in ifdata:
                line = line.decode('utf-8')
                if re.search('inet addr:', line):
                    try:
                        line = line.split()
addr = line[1]
addr = addr.split(':')
addr = addr[1]
iplist.append(addr)
except(IndexError):
continue
else:
try:
if os.path.exists('/usr/sbin/ifconfig'):
cmd = '/usr/sbin/ifconfig -a'
else:
cmd = '/sbin/ifconfig -a'
ifcmd = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
close_fds=True)
ifdata = ifcmd.stdout.readlines()
except(OSError):
return iplist
            for line in ifdata:
                line = line.decode('utf-8')
                if re.search('inet ', line):
                    try:
                        line = line.split()
addr = line[1]
iplist.append(addr)
except(IndexError):
continue
return iplist
def get_property_number(self):
'''Find and return the
Property number of the local machine
@author: scmcleni
@author: D. Kennel
:returns: int
'''
propnum = 0
try:
if os.path.exists('/etc/property-number'):
propertynumberfile = open('/etc/property-number', 'r')
propnum = propertynumberfile.readline()
propnum = propnum.strip()
propertynumberfile.close()
if platform.system() == 'Darwin':
pnfetch = '/usr/sbin/nvram asset_id 2>/dev/null'
cmd = subprocess.Popen(pnfetch, shell=True,
stdout=subprocess.PIPE,
close_fds=True)
cmdout = cmd.stdout.readline()
cmdout = cmdout.split()
try:
propnum = cmdout[1]
except(IndexError, KeyError):
propnum = 0
except:
pass
# Failed to obtain property number
return propnum
def get_system_serial_number(self):
'''Find and return the
Serial number of the local machine
@author: dkennel
:returns: string
'''
systemserial = '0'
if os.path.exists('/usr/sbin/system_profiler'):
profilerfetch = '/usr/sbin/system_profiler SPHardwareDataType'
cmd3 = subprocess.Popen(profilerfetch, shell=True,
stdout=subprocess.PIPE,
close_fds=True)
cmd3output = cmd3.stdout.readlines()
            for line in cmd3output:
                line = line.decode('utf-8')
                # parentheses are escaped: system_profiler prints a literal
                # "Serial Number (system):" and bare parens would form a
                # regex group
                if re.search(r'Serial Number \(system\):', line):
                    line = line.split(':')
try:
systemserial = line[1]
except(IndexError, KeyError):
pass
systemserial = systemserial.strip()
return systemserial
def get_sys_uuid(self):
'''Find and return a unique identifier for the system. On most systems
this will be the UUID of the system. On Solaris SPARC this will be
a number that is _hopefully_ unique as that platform doesn't have
UUID numbers.
@author: D. Kennel
:returns: string
'''
uuid = '0'
if os.path.exists('/usr/sbin/smbios'):
smbiosfetch = '/usr/sbin/smbios -t SMB_TYPE_SYSTEM 2>/dev/null'
cmd2 = subprocess.Popen(smbiosfetch, shell=True,
stdout=subprocess.PIPE,
close_fds=True)
cmdoutput = cmd2.stdout.readlines()
for line in cmdoutput:
if re.search('UUID:', line.decode('utf-8')):
line = line.split()
try:
uuid = line[1]
except(IndexError, KeyError):
pass
elif os.path.exists('/usr/sbin/system_profiler'):
profilerfetch = '/usr/sbin/system_profiler SPHardwareDataType'
cmd3 = subprocess.Popen(profilerfetch, shell=True,
stdout=subprocess.PIPE,
close_fds=True)
cmd3output = cmd3.stdout.readlines()
for line in cmd3output:
if re.search('UUID:', line.decode('utf-8')):
line = line.split()
try:
uuid = line[2]
except(IndexError, KeyError):
pass
elif platform.system() == 'SunOS':
fetchhostid = '/usr/bin/hostid'
cmd1 = subprocess.Popen(fetchhostid, shell=True,
stdout=subprocess.PIPE,
close_fds=True)
uuid = cmd1.stdout.readline()
uuid = uuid.strip()
return uuid
def ismobile(self):
'''Returns a bool indicating whether or not the system in question is a
laptop. The is mobile method is used by some rules that have alternate
settings for laptops.
@author: dkennel
        @return: bool - true if system is a laptop
'''
ismobile = False
dmitypes = ['LapTop', 'Portable', 'Notebook', 'Hand Held',
'Sub Notebook']
if os.path.exists('/usr/sbin/system_profiler'):
profilerfetch = '/usr/sbin/system_profiler SPHardwareDataType'
cmd3 = subprocess.Popen(profilerfetch, shell=True,
stdout=subprocess.PIPE,
close_fds=True)
cmd3output = cmd3.stdout.readlines()
for line in cmd3output:
if re.search('Book', line.decode('utf-8')):
ismobile = True
break
return ismobile
def issnitchactive(self):
'''Returns a bool indicating whether or not the little snitch program is
active. Little snitch is a firewall utility used on Mac systems and can
interfere with STONIX operations.
@author: ekkehard
:returns: bool - true if little snitch is running
'''
issnitchactive = False
if self.osfamily == 'darwin':
cmd = 'ps axc -o comm | grep lsd'
littlesnitch = 'lsd'
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE, close_fds=True)
netdata = proc.stdout.readlines()
for line in netdata:
print("processing: " + line.decode('utf-8'))
match = re.search(littlesnitch, line.decode('utf-8'))
if match is not None:
print('LittleSnitch Is Running')
issnitchactive = True
break
return issnitchactive
def collectpaths(self):
'''Determine how stonix is run and return appropriate paths for:
icons
rules
conf
logs
@author: Roy Nielsen
'''
script_path_zero = os.path.realpath(sys.argv[0])
try:
script_path_one = os.path.realpath(sys.argv[1])
except:
script_path_one = ""
self.test_mode = False
#####
# Check which argv variable has the script name -- required to allow
# for using the eclipse debugger.
if re.search("stonix.py$", script_path_zero) or re.search("stonix$", script_path_zero):
#####
# Run normally
self.script_path = os.path.dirname(os.path.realpath(sys.argv[0]))
else:
#####
# Run with Eclipse debugger -- Eclipse debugger will never try to run
# the "stonix" binary blob created by pyinstaller, so don't include
# here.
#print "DEBUG: Environment.collectpaths: unexpected argv[0]: " + str(sys.argv[0])
if re.search("stonix.py$", script_path_one) or re.search("stonixtest.py$", script_path_one):
script = script_path_one.split("/")[-1]
script_path = "/".join(script_path_one.split("/")[:-1])
if re.match("^stonixtest.py$", script) and \
os.path.exists(script_path_one) and \
os.path.exists(os.path.join(script_path, "stonixtest.py")) and \
os.path.exists(os.path.join(script_path, "stonix.py")):
self.test_mode = True
self.script_path = os.path.dirname(os.path.realpath(sys.argv[1]))
else:
print("ERROR: Cannot run using this method")
else:
#print "DEBUG: Cannot find appropriate path, building paths for current directory"
self.script_path = os.getcwd()
#####
# Set the rules & stonix_resources paths
if re.search("stonix.app/Contents/MacOS$", self.script_path):
#####
# Find the stonix.conf file in the stonix.app/Contents/Resources
# directory
macospath = self.script_path
self.resources_path = os.path.join(self.script_path,
"stonix_resources")
self.rules_path = os.path.join(self.resources_path,
"rules")
else:
# ##
# create the self.resources_path
self.resources_path = os.path.join(self.script_path,
"stonix_resources")
# ##
# create the self.rules_path
self.rules_path = os.path.join(self.script_path,
"stonix_resources",
"rules")
#####
# Set the log file path
if self.geteuid() == 0:
self.log_path = '/var/log'
else:
userpath = self.geteuidhome()
self.log_path = os.path.join(userpath, '.stonix')
if userpath == '/dev/null':
self.log_path = '/tmp'
#####
# Set the icon path
self.icon_path = os.path.join(self.resources_path, 'gfx')
#####
# Set the configuration file path
if re.search("stonix.app/Contents/MacOS/stonix$", os.path.realpath(sys.argv[0])):
#####
# Find the stonix.conf file in the stonix.app/Contents/Resources
# directory
macospath = self.script_path
parents = macospath.split("/")
parents.pop()
parents.append("Resources")
resources_dir = "/".join(parents)
self.conf_path = os.path.join(resources_dir, "stonix.conf")
elif os.path.exists(os.path.join(self.script_path, "etc", "stonix.conf")):
self.conf_path = os.path.join(self.script_path, "etc", "stonix.conf")
elif re.search('pydev', script_path_zero) and re.search('stonix_resources', script_path_one):
print("INFO: Called by unit test")
srcpath = script_path_one.split('/')[:-2]
srcpath = '/'.join(srcpath)
self.conf_path = os.path.join(srcpath, 'etc', 'stonix.conf')
print((self.conf_path))
else:
self.conf_path = "/etc/stonix.conf"
def get_test_mode(self):
'''Getter test mode flag
@author: Roy Nielsen
'''
return self.test_mode
def get_script_path(self):
'''Getter for the script path
@author: Roy Nielsen
'''
return self.script_path
def get_icon_path(self):
'''Getter for the icon path
@author: Roy Nielsen
'''
return self.icon_path
def get_rules_path(self):
'''Getter for rules path
@author: Roy Nielsen
'''
return self.rules_path
def get_config_path(self):
'''Getter for conf file path
@author: Roy Nielsen
'''
return self.conf_path
def get_log_path(self):
'''Getter for log path
@author: Roy Nielsen
'''
return self.log_path
def get_resources_path(self):
'''Getter for stonix resources directory
@author: Roy Nielsen
'''
return self.resources_path
def getruntime(self):
'''
        :returns: string - the run start timestamp
        @author: dkennel
'''
return self.runtime
def setnumrules(self, num):
'''Set the number of rules that apply to the system. This information is
used by the log dispatcher in the run metadata.
:param num: int - number of rules that apply to this host
@author: dkennel
'''
if type(num) is not int:
raise TypeError('Number of rules must be an integer')
elif num < 0:
raise ValueError('Number of rules must be a positive integer')
else:
self.numrules = num
def getnumrules(self):
'''
        :returns: int - number of rules that apply to this host
        @author: dkennel
'''
return self.numrules
| gpl-2.0 | 8,765,713,703,017,385,000 | 32.145631 | 104 | 0.499837 | false | 4.573002 | false | false | false |
ajbouh/tfi | src/tfi/main.py | 1 | 13541 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import os.path
import sys
import tempfile
import tfi
import tfi.driver
import tfi.driverconfig
from tfi.resolve.model import _detect_model_file_kind, _model_module_for_kind, _load_model_from_path_fn
from tfi.cli import resolve as _resolve_model
from tfi.tensor.codec import encode as _tfi_tensor_codec_encode
from tfi.format.iterm2 import imgcat as _tfi_format_iterm2_imgcat
def _detect_model_object_kind(model):
klass = model if isinstance(model, type) else type(model)
for c in klass.mro():
if c.__name__ != "Model":
continue
if c.__module__ == "tfi.driver.pytorch":
return "pytorch"
if c.__module__ == "tfi.driver.prophet":
return "prophet"
if c.__module__ == "tfi.driver.tf":
return "tensorflow"
if c.__module__ == "tfi.driver.msp":
return "msp"
if c.__module__ == "tfi.driver.spacy":
return "spacy"
raise Exception("Unknown model type %s" % klass)
def _model_export(path, model):
kind = _detect_model_object_kind(model)
mod = _model_module_for_kind(kind)
return mod.export(path, model)
def _model_publish(f):
from tfi.publish import publish as _publish
kind = _detect_model_file_kind(f)
_publish(kind, f)
class ModelSpecifier(argparse.Action):
def __init__(self,
option_strings,
dest,
**kwargs):
super(ModelSpecifier, self).__init__(
option_strings=option_strings,
dest=dest,
**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
if values is None:
setattr(namespace, self.dest, None)
return
if values:
leading_value, *rest = values
else:
leading_value = None
rest = []
resolution = _resolve_model(leading_value, rest)
setattr(namespace, self.dest, resolution['model'])
setattr(namespace, "%s_module_fn" % self.dest, resolution.get('module_fn', lambda x: None))
setattr(namespace, "%s_can_refresh" % self.dest, resolution.get('can_refresh', None))
setattr(namespace, "%s_refresh_fn" % self.dest, resolution.get('refresh_fn', None))
setattr(namespace, "%s_method_fn" % self.dest, resolution['model_method_fn'])
setattr(namespace, "%s_source" % self.dest, resolution.get('source', None))
setattr(namespace, "%s_source_sha1hex" % self.dest, resolution.get('source_sha1hex', None))
setattr(namespace, "%s_via_python" % self.dest, resolution.get('via_python', None))
setattr(namespace, "%s_raw" % self.dest, resolution.get('leading_value', None))
parser = argparse.ArgumentParser(prog='tfi', add_help=False)
parser.add_argument('--serve', default=False, action='store_true', help='Start REST API on given port')
parser.add_argument('--tracing-host', type=str, default=os.environ.get('JAEGER_HOST', None), help='Jaeger host to submit traces to while serving')
parser.add_argument('--tracing-tags', type=str, default=os.environ.get('JAEGER_TAGS', ''), help='Jaeger tags to include in traces to while serving')
parser.add_argument('--internal-config', type=str, default=os.environ.get("TFI_INTERNAL_CONFIG", ""), help='For internal use.')
parser.add_argument('--publish', default=False, action='store_true', help='Publish model')
parser.add_argument('--bind', type=str, help='Set address:port to serve model on. Default behavior is 127.0.0.1 if available, otherwise 127.0.0.1:0')
parser.add_argument('--bind-default', type=str, default='127.0.0.1:5000')
parser.add_argument('--export', type=str, help='path to export to')
parser.add_argument('--export-doc', type=str, help='path to export doc to')
parser.add_argument('--watch', default=False, action='store_true', help='Watch given model and reload when it changes')
parser.add_argument('--interactive', '-i', default=None, action='store_true', help='Start interactive session')
parser.add_argument('--tf-tensorboard-bind-default', type=str, default='127.0.0.1:6007')
parser.add_argument('--tf-tensorboard-bind', type=str, help='Set address:port to serve TensorBoard on. Default behavior is 127.0.0.1:6007 if available, otherwise 127.0.0.1:0')
parser.add_argument('--tf-logdir',
default=os.path.expanduser('~/.tfi/tf/log/%F_%H-%M-%S/%04i'),
        help='Set TensorFlow log dir to write to. Renders any %% placeholders with strftime, runs TensorBoard from parent dir. %%04i is replaced by a 0-padded run_id count')
parser.add_argument('specifier', type=str, default=None, nargs=argparse.REMAINDER, action=ModelSpecifier, help='fully qualified class name to instantiate')
# TODO(adamb)
# And let's add basic text --doc output.
# Then we'll add support for training a model locally ... (which?)
# Then we'll add support for training a model ELSEWHERE.
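# Example invocations (illustrative; the flags are defined above):
#   tfi -i my_model.py                       # load a model, start a REPL
#   tfi --export model.tfi my_model.py       # export the model to a file
#   tfi --serve --bind 127.0.0.1:5000 model.tfi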
def run(argns, remaining_args):
model = None
module = None
exporting = argns.export is not None or argns.export_doc is not None
serving = argns.serve is not False
publishing = argns.publish is not False
batch = False
if argns.interactive is None:
argns.interactive = not batch and not exporting and not serving and not publishing
def tf_make_logdir_fn(datetime):
import re
base_logdir = datetime.strftime(argns.tf_logdir)
def logdir_fn(run_id=None):
if run_id is None:
return re.sub('(%\d*)i', '', base_logdir)
base_logdir_formatstr = re.sub('(%\d*)i', '\\1d', base_logdir)
return base_logdir_formatstr % run_id
return logdir_fn
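    # e.g. with the default --tf-logdir pattern, logdir_fn(7) renders
    # something like ~/.tfi/tf/log/2018-01-01_12-00-00/0007, while
    # logdir_fn() simply drops the run-id placeholder (timestamp
    # illustrative)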
import tfi
import tfi.driverconfig
tfi.driverconfig.tf.make_logdir_fn = tf_make_logdir_fn
if argns.specifier:
model = argns.specifier
module = argns.specifier_module_fn()
if argns.specifier_method_fn:
result = argns.specifier_method_fn()
accept_mimetypes = {"image/png": _tfi_format_iterm2_imgcat, "text/plain": lambda x: x}
result_val = _tfi_tensor_codec_encode(accept_mimetypes, result)
if result_val is None:
result_val = result
result_str = '%r\n' % (result_val, )
print(result_str)
batch = True
internal_config = argns.internal_config or (model and _detect_model_object_kind(model))
if internal_config == 'tensorflow':
import tensorflow
tensorboard = internal_config == 'tensorflow' and argns.interactive
if tensorboard:
import tfi.driver.tf.tensorboard_server
import threading
tb_logdir = argns.tf_logdir
while '%' in tb_logdir:
tb_logdir = os.path.dirname(tb_logdir)
if argns.tf_tensorboard_bind:
tb_host, tb_port = argns.tf_tensorboard_bind.split(':', 1)
tb_port = int(tb_port)
else:
tb_host, tb_port = argns.tf_tensorboard_bind_default.split(':', 1)
tb_port = int(tb_port)
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind((tb_host, tb_port))
except socket.error as e:
if e.errno == 98:
tb_port = 0
# Use some fancy footwork to delay continuing until TensorBoard has started.
tb_cv = threading.Condition()
def tb_run():
def on_ready_fn(url):
if url:
print('TensorBoard at %s now serving %s' % (url, tb_logdir))
sys.stdout.flush()
with tb_cv:
tb_cv.notify_all()
tfi.driver.tf.tensorboard_server.main(tb_logdir, tb_host=tb_host, tb_port=tb_port, tb_on_ready_fn=on_ready_fn)
with tb_cv:
tb_thread = threading.Thread(target=tb_run, daemon=True)
tb_thread.start()
tb_cv.wait()
if internal_config == 'spacy':
import tfi.driver.spacy
if serving:
segment_js = """
<script>
!function(){var analytics=window.analytics=window.analytics||[];if(!analytics.initialize)if(analytics.invoked)window.console&&console.error&&console.error("Segment snippet included twice.");else{analytics.invoked=!0;analytics.methods=["trackSubmit","trackClick","trackLink","trackForm","pageview","identify","reset","group","track","ready","alias","debug","page","once","off","on"];analytics.factory=function(t){return function(){var e=Array.prototype.slice.call(arguments);e.unshift(t);analytics.push(e);return analytics}};for(var t=0;t<analytics.methods.length;t++){var e=analytics.methods[t];analytics[e]=analytics.factory(e)}analytics.load=function(t){var e=document.createElement("script");e.type="text/javascript";e.async=!0;e.src=("https:"===document.location.protocol?"https://":"http://")+"cdn.segment.com/analytics.js/v1/"+t+"/analytics.min.js";var n=document.getElementsByTagName("script")[0];n.parentNode.insertBefore(e,n)};analytics.SNIPPET_VERSION="4.0.0";
analytics.load("GaappI2dkNZV4PLVdiJ8pHQ7Hofbf6Vz");
analytics.page();
}}();
</script>
"""
segment_js = ""
def on_bind(url):
print("Serving at %s" % url)
tracing_tags = {}
if argns.tracing_tags:
for tag_entry in argns.tracing_tags.split(' '):
tag_k, tag_v = tag_entry.split('=', 1)
tracing_tags[tag_k] = tag_v
if argns.bind:
host, port = argns.bind.split(':')
port = int(port)
else:
host, initial_port = argns.bind_default.split(':')
initial_port = int(initial_port)
port = 0
for possible_port in range(initial_port, initial_port + 32):
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind((host, possible_port))
port = possible_port
break
except socket.error as e:
if e.errno == 98 or e.errno == 48:
pass
if model is None:
from tfi.serve import run_deferred as serve_deferred
serve_deferred(
host=host, port=port, on_bind=on_bind,
load_model_from_path_fn=_load_model_from_path_fn,
extra_scripts=segment_js,
jaeger_host=argns.tracing_host,
jaeger_tags=tracing_tags)
else:
from tfi.serve import run as serve
def model_file_fn():
if argns.specifier_source and not argns.specifier_via_python:
return argns.specifier_source
with tempfile.NamedTemporaryFile(mode='rb', delete=False) as f:
print("Exporting ...", end='', flush=True)
_model_export(f.name, model)
print(" done", flush=True)
return f.name
serve(model,
host=host,
port=port,
on_bind=on_bind,
extra_scripts=segment_js,
jaeger_host=argns.tracing_host,
jaeger_tags=tracing_tags,
model_file_fn=model_file_fn)
if argns.watch:
if not argns.specifier_can_refresh:
print("WARN: Can't watch unrefreshable model.")
else:
import tfi.watch
ar = tfi.watch.AutoRefresher()
def do_refresh():
def refresh_progress(model, ix, total):
print("Refreshing %d/%d: %s" % (ix, total, model))
argns.specifier_refresh_fn(refresh_progress)
ar.watch(argns.specifier_source, argns.specifier_source_sha1hex, do_refresh)
ar.start()
if argns.interactive:
from tfi.repl import run as run_repl
run_repl(
globals=globals(),
locals=None,
history_filename=os.path.expanduser('~/.tfihistory'),
model=model,
module=module)
if argns.export_doc:
tfi.doc.save(argns.export_doc, model)
if argns.export:
if argns.specifier_source and not argns.specifier_via_python:
import shutil
shutil.copyfile(argns.specifier_source, argns.export)
else:
_model_export(argns.export, model)
if argns.publish:
if argns.specifier_source and not argns.specifier_via_python:
with open(argns.specifier_source, 'rb') as f:
# TODO(adamb) Should actually autodetect which environment to use.
url = _model_publish(f)
else:
with tempfile.NamedTemporaryFile(mode='rb') as f:
# TODO(adamb) Should actually autodetect which environment to use.
print("Exporting ...", end='', flush=True)
_model_export(f.name, model)
print(" done", flush=True)
url = _model_publish(f)
print(url)
def cli(args):
argns, remaining_args = parser.parse_known_args(args)
argns.load_model_from_path_fn = _load_model_from_path_fn
run(argns, remaining_args)
def main():
cli(sys.argv[1:])
if __name__ == '__main__':
main() | mit | 230,525,589,146,237,150 | 41.990476 | 972 | 0.600251 | false | 3.714952 | true | false | false |
abramconnelly/genevieve | file_process/tasks.py | 1 | 5314 | """Tasks for analyzing genome/genetic data files"""
# absolute_import prevents conflicts between project celery.py file
# and the celery package.
from __future__ import absolute_import
import bz2
import csv
import gzip
import os
from random import randint
from celery import shared_task
from django.conf import settings
from django.core.files import File
from genomes.models import GenomeAnalysis, GenomeAnalysisVariant
from variants.models import Variant, ClinVarRecord
from .utils import vcf_parsing_tools as vcftools
from .utils.twentythree_and_me import (api23andme_full_gen_data,
api23andme_full_gen_infer_sex,
api23andme_to_vcf)
from .utils.cgivar_to_vcf import convert as convert_cgivar_to_vcf
CLINVAR_FILENAME = "clinvar-latest.vcf"
@shared_task
def analyze_23andme_from_api(access_token, profile_id, user):
genome_data = api23andme_full_gen_data(access_token, profile_id)
sex = api23andme_full_gen_infer_sex(genome_data)
vcf_data = api23andme_to_vcf(genome_data, sex)
targetdir = '/tmp'
filename = '23andme-api-' + profile_id + '.vcf.gz'
if os.path.exists(os.path.join(targetdir, filename)):
inc = 2
while os.path.exists(os.path.join(targetdir, filename)):
filename = '23andme-api-' + profile_id + '-' + str(inc) + '.vcf.gz'
inc += 1
filepath = os.path.join(targetdir, filename)
output_file = gzip.open(filepath, mode='wb')
output_file.writelines(vcf_data)
# Close to ensure it's *really* closed before using File.
output_file.close()
# Reopen as binary so we don't lose compression.
vcf_file = open(filepath)
django_file = File(vcf_file)
new_analysis = GenomeAnalysis(uploadfile=django_file,
user=user, name=filename)
new_analysis.save()
vcf_file.close()
os.remove(filepath)
read_input_genome(analysis_in=new_analysis, genome_format='vcf')
@shared_task
def read_input_genome(analysis_in, genome_format='vcf'):
"""Read genome, VCF or Complete Genomics, and match against ClinVar"""
name = os.path.basename(analysis_in.uploadfile.path)
print genome_format
if genome_format == 'cgivar':
print "Treating as CGI var to be translated"
genome_file = convert_cgivar_to_vcf(
analysis_in.uploadfile.path,
os.path.join(settings.DATA_FILE_ROOT, 'hg19.2bit'))
elif name.endswith('.gz'):
print "reading directly as gzip"
genome_file = gzip.open(analysis_in.uploadfile.path, 'rb')
elif name.endswith('.bz2'):
print 'reading directly as bz2'
genome_file = bz2.BZ2File(analysis_in.uploadfile.path, 'rb')
# GzipFile(mode='rb', compresslevel=9,
# fileobj=analysis_in.uploadfile)
read_vcf(analysis_in, genome_file)
@shared_task
def read_vcf(analysis_in, genome_file):
"""Takes two .vcf files and returns matches"""
clinvar_filepath = os.path.join(settings.DATA_FILE_ROOT, CLINVAR_FILENAME)
clin_file = open(clinvar_filepath, 'r')
# Creates a tmp file to write the .csv
tmp_output_file_path = os.path.join(
'/tmp', 'django_celery_fileprocess-' +
str(randint(10000000, 99999999)) + '-' +
os.path.basename(analysis_in.uploadfile.path))
tmp_output_file = open(tmp_output_file_path, 'w')
csv_out = csv.writer(tmp_output_file)
header = ("Chromosome", "Position", "Name", "Significance", "Frequency",
"Zygosity", "ACC URL")
csv_out.writerow(header)
matched_variants = vcftools.match_to_clinvar(genome_file, clin_file)
for var in matched_variants:
print var
chrom = var[0]
pos = var[1]
ref_allele = var[2]
alt_allele = var[3]
name_acc = var[4]
freq = var[5]
zygosity = var[6]
variant, _ = Variant.objects.get_or_create(chrom=chrom,
pos=pos,
ref_allele=ref_allele,
alt_allele=alt_allele)
if not variant.freq:
variant.freq = freq
variant.save()
genomeanalysisvariant = GenomeAnalysisVariant.objects.create(
genomeanalysis=analysis_in, variant=variant, zyg=zygosity)
genomeanalysisvariant.save()
for spec in name_acc:
# for online report
url = "http://www.ncbi.nlm.nih.gov/clinvar/" + str(spec[0])
name = spec[1]
clnsig = spec[2]
record, _ = ClinVarRecord.objects.get_or_create(
accnum=spec[0], variant=variant, condition=name, clnsig=clnsig)
record.save()
# analysis_in.variants.add(variant)
# for CSV output
data = (chrom, pos, name, clnsig, freq, zygosity, url)
csv_out.writerow(data)
# closes the tmp file
tmp_output_file.close()
# opens the tmp file and creates an output processed file"
csv_filename = os.path.basename(analysis_in.uploadfile.path) + '.csv'
with open(tmp_output_file_path, 'rb') as file_out:
output_file = File(file_out)
analysis_in.processedfile.save(csv_filename, output_file)
| mit | 4,513,550,672,840,500,000 | 37.230216 | 79 | 0.621754 | false | 3.509908 | false | false | false |
centricular/meson | tools/cmake2meson.py | 1 | 10941 | #!/usr/bin/env python3
# Copyright 2014 Jussi Pakkanen
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
import re
class Token:
def __init__(self, tid, value):
self.tid = tid
self.value = value
self.lineno = 0
self.colno = 0
class Statement():
def __init__(self, name, args):
self.name = name
self.args = args
class Lexer:
def __init__(self):
self.token_specification = [
# Need to be sorted longest to shortest.
('ignore', re.compile(r'[ \t]')),
('string', re.compile(r'"([^\\]|(\\.))*?"', re.M)),
('varexp', re.compile(r'\${[-_0-9a-z/A-Z.]+}')),
('id', re.compile('''[,-><${}=+_0-9a-z/A-Z|@.*]+''')),
('eol', re.compile(r'\n')),
('comment', re.compile(r'\#.*')),
('lparen', re.compile(r'\(')),
('rparen', re.compile(r'\)')),
]
def lex(self, code):
lineno = 1
line_start = 0
loc = 0;
col = 0
while(loc < len(code)):
matched = False
for (tid, reg) in self.token_specification:
mo = reg.match(code, loc)
if mo:
col = mo.start()-line_start
matched = True
loc = mo.end()
match_text = mo.group()
if tid == 'ignore':
continue
if tid == 'comment':
yield(Token('comment', match_text))
elif tid == 'lparen':
yield(Token('lparen', '('))
elif tid == 'rparen':
yield(Token('rparen', ')'))
elif tid == 'string':
yield(Token('string', match_text[1:-1]))
elif tid == 'id':
yield(Token('id', match_text))
elif tid == 'eol':
#yield('eol')
lineno += 1
col = 1
line_start = mo.end()
pass
elif tid == 'varexp':
yield(Token('varexp', match_text[2:-1]))
else:
raise RuntimeError('Wharrgarbl')
break
if not matched:
raise RuntimeError('Lexer got confused line %d column %d' % (lineno, col))
class Parser():
def __init__(self, code):
self.stream = Lexer().lex(code)
self.getsym()
def getsym(self):
try:
self.current = next(self.stream)
except StopIteration:
self.current = Token('eof', '')
def accept(self, s):
if self.current.tid == s:
self.getsym()
return True
return False
def expect(self, s):
if self.accept(s):
return True
raise RuntimeError('Expecting %s got %s.' % (s, self.current.tid), self.current.lineno, self.current.colno)
def statement(self):
cur = self.current
if self.accept('comment'):
return Statement('_', [cur.value])
self.accept('id')
self.expect('lparen')
args = self.arguments()
self.expect('rparen')
return Statement(cur.value, args)
def arguments(self):
args = []
if self.accept('lparen'):
args.append(self.arguments())
self.expect('rparen')
arg = self.current
if self.accept('string') or self.accept('varexp') or\
self.accept('id'):
args.append(arg)
rest = self.arguments()
args += rest
return args
def parse(self):
while not self.accept('eof'):
yield(self.statement())
class Converter:
ignored_funcs = {'cmake_minimum_required' : True,
'enable_testing' : True,
'include' : True}
def __init__(self, cmake_root):
self.cmake_root = cmake_root
self.indent_unit = ' '
self.indent_level = 0
self.options = []
def convert_args(self, args, as_array=True):
res = []
if as_array:
start = '['
end = ']'
else:
start = ''
end = ''
for i in args:
if i.tid == 'id':
res.append("'%s'" % i.value)
elif i.tid == 'varexp':
res.append('%s' % i.value)
elif i.tid == 'string':
res.append("'%s'" % i.value)
else:
print(i)
raise RuntimeError('Unknown arg type.')
if len(res) > 1:
return start + ', '.join(res) + end
if len(res) == 1:
return res[0]
return ''
def write_entry(self, outfile, t):
if t.name in Converter.ignored_funcs:
return
preincrement = 0
postincrement = 0
if t.name == '_':
line = t.args[0]
elif t.name == 'add_subdirectory':
line = "subdir('" + t.args[0].value + "')"
elif t.name == 'pkg_search_module' or t.name == 'pkg_search_modules':
varname = t.args[0].value.lower()
mods = ["dependency('%s')" % i.value for i in t.args[1:]]
if len(mods) == 1:
line = '%s = %s' % (varname, mods[0])
else:
line = '%s = [%s]' % (varname, ', '.join(["'%s'" % i for i in mods]))
elif t.name == 'find_package':
line = "%s_dep = dependency('%s')" % (t.args[0].value, t.args[0].value)
elif t.name == 'find_library':
line = "%s = find_library('%s')" % (t.args[0].value.lower(), t.args[0].value)
elif t.name == 'add_executable':
line = '%s_exe = executable(%s)' % (t.args[0].value, self.convert_args(t.args, False))
elif t.name == 'add_library':
if t.args[1].value == 'SHARED':
libcmd = 'shared_library'
args = [t.args[0]] + t.args[2:]
elif t.args[1].value == 'STATIC':
libcmd = 'static_library'
args = [t.args[0]] + t.args[2:]
else:
libcmd = 'static_library'
args = t.args
line = '%s_lib = %s(%s)' % (t.args[0].value, libcmd, self.convert_args(args, False))
elif t.name == 'add_test':
line = 'test(%s)' % self.convert_args(t.args, False)
elif t.name == 'option':
optname = t.args[0].value
description = t.args[1].value
if len(t.args) > 2:
default = t.args[2].value
else:
default = None
self.options.append((optname, description, default))
return
elif t.name == 'project':
pname = t.args[0].value
args = [pname]
for l in t.args[1:]:
l = l.value.lower()
if l == 'cxx':
l = 'cpp'
args.append(l)
args = ["'%s'" % i for i in args]
line = 'project(' + ', '.join(args) + ')'
elif t.name == 'set':
varname = t.args[0].value.lower()
line = '%s = %s\n' % (varname, self.convert_args(t.args[1:]))
elif t.name == 'if':
postincrement = 1
line = 'if %s' % self.convert_args(t.args, False)
elif t.name == 'elseif':
preincrement = -1
postincrement = 1
line = 'elif %s' % self.convert_args(t.args, False)
elif t.name == 'else':
preincrement = -1
postincrement = 1
line = 'else'
elif t.name == 'endif':
preincrement = -1
line = 'endif'
else:
line = '''# %s(%s)''' % (t.name, self.convert_args(t.args))
self.indent_level += preincrement
indent = self.indent_level*self.indent_unit
outfile.write(indent)
outfile.write(line)
if not(line.endswith('\n')):
outfile.write('\n')
self.indent_level += postincrement
def convert(self, subdir=''):
if subdir == '':
subdir = self.cmake_root
cfile = os.path.join(subdir, 'CMakeLists.txt')
try:
with open(cfile) as f:
cmakecode = f.read()
except FileNotFoundError:
print('\nWarning: No CMakeLists.txt in', subdir, '\n')
return
p = Parser(cmakecode)
with open(os.path.join(subdir, 'meson.build'), 'w') as outfile:
for t in p.parse():
if t.name == 'add_subdirectory':
# print('\nRecursing to subdir',
# os.path.join(self.cmake_root, t.args[0].value),
# '\n')
self.convert(os.path.join(subdir, t.args[0].value))
# print('\nReturning to', self.cmake_root, '\n')
self.write_entry(outfile, t)
if subdir == self.cmake_root and len(self.options) > 0:
self.write_options()
def write_options(self):
filename = os.path.join(self.cmake_root, 'meson_options.txt')
with open(filename, 'w') as optfile:
for o in self.options:
(optname, description, default) = o
if default is None:
defaultstr = ''
else:
if default == 'OFF':
typestr = ' type : \'boolean\','
default = 'false'
elif default == 'ON':
default = 'true'
typestr = ' type : \'boolean\','
else:
typestr = ' type : \'string\','
defaultstr = ' value : %s,' % default
line = "option(%r,%s%s description : '%s')\n" % (optname,
typestr,
defaultstr,
description)
optfile.write(line)
if __name__ == '__main__':
if len(sys.argv) != 2:
print(sys.argv[0], '<CMake project root>')
sys.exit(1)
c = Converter(sys.argv[1])
c.convert()
| apache-2.0 | 8,318,193,074,904,967,000 | 35.348837 | 115 | 0.452792 | false | 4.013573 | false | false | false |
adyliu/mysql-connector-python | python2/tests/test_examples.py | 1 | 6610 | # -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Unittests for examples
"""
import sys
import logging
import mysql.connector
import tests
logger = logging.getLogger(tests.LOGGER_NAME)
class TestExamples(tests.MySQLConnectorTests):
def setUp(self):
config = self.getMySQLConfig()
self.cnx = mysql.connector.connect(**config)
def tearDown(self):
self.cnx.close()
def _exec_main(self, example):
try:
return example.main(self.getMySQLConfig())
except StandardError as e:
self.fail(e)
def test_dates(self):
"""examples/dates.py"""
try:
import examples.dates as example
except StandardError as e:
self.fail(e)
output = example.main(self.getMySQLConfig())
exp = [' 1 | 1977-06-14 | 1977-06-14 21:10:00 | 21:10:00 |',
' 2 | None | None | 0:00:00 |',
' 3 | None | None | 0:00:00 |']
self.assertEqual(output, exp)
example.DATA.append(('0000-00-00',None,'00:00:00'),)
self.assertRaises(mysql.connector.errors.IntegrityError,
example.main, self.getMySQLConfig())
def test_engines(self):
"""examples/engines.py"""
try:
import examples.engines as example
except:
self.fail()
output = self._exec_main(example)
# Can't check output as it might be different per MySQL instance
# We check only if MyISAM is present
found = False
for s in output:
if s.find('MyISAM') > -1:
found = True
break
self.assertTrue(found,'MyISAM engine not found in output')
def test_inserts(self):
"""examples/inserts.py"""
try:
import examples.inserts as example
except StandardError as e:
self.fail(e)
output = self._exec_main(example)
exp = [u'1 | Geert | 30\nInfo: c..\n',
u'2 | Jan | 30\nInfo: c..\n', u'3 | Michel | 30\nInfo: c..\n']
self.assertEqual(output,exp,'Output was not correct')
def test_transactions(self):
"""examples/transactions.py"""
db = mysql.connector.connect(**self.getMySQLConfig())
r = self.haveEngine(db,'InnoDB')
db.close()
if not r:
return
try:
import examples.transaction as example
except StandardError as e:
self.fail(e)
output = self._exec_main(example)
exp = ['Inserting data', 'Rolling back transaction',
'No data, all is fine.', 'Data before commit:',
u'4 | Geert', u'5 | Jan', u'6 | Michel', 'Data after commit:',
u'4 | Geert', u'5 | Jan', u'6 | Michel']
self.assertEqual(output,exp,'Output was not correct')
def test_unicode(self):
"""examples/unicode.py"""
try:
import examples.unicode as example
except StandardError as e:
self.fail(e)
output = self._exec_main(example)
exp = ['Unicode string: \xc2\xbfHabla espa\xc3\xb1ol?',
'Unicode string coming from db: \xc2\xbfHabla espa\xc3\xb1ol?']
self.assertEqual(output,exp,'Output was not correct')
def test_warnings(self):
"""examples/warnings.py"""
try:
import examples.warnings as example
except StandardError as e:
self.fail(e)
output = self._exec_main(example)
exp = ["Executing 'SELECT 'abc'+1'",
u"1292: Truncated incorrect DOUBLE value: 'abc'"]
self.assertEqual(output,exp,'Output was not correct')
example.STMT = "SELECT 'abc'"
self.assertRaises(StandardError, example.main, self.getMySQLConfig())
def test_multi_resultsets(self):
"""examples/multi_resultsets.py"""
try:
import examples.multi_resultsets as example
except StandardError as e:
self.fail(e)
output = self._exec_main(example)
exp = ['Inserted 1 row', 'Number of rows: 1', 'Inserted 2 rows',
u'Names in table: Geert Jan Michel']
self.assertEqual(output,exp,'Output was not correct')
def test_microseconds(self):
"""examples/microseconds.py"""
try:
import examples.microseconds as example
except StandardError as e:
self.fail(e)
output = self._exec_main(example)
if self.cnx.get_server_version() < (5,6,4):
exp = "does not support fractional precision for timestamps."
self.assertTrue(output[0].endswith(exp))
else:
exp = [
' 1 | 1 | 0:00:47.510000',
' 1 | 2 | 0:00:47.020000',
' 1 | 3 | 0:00:47.650000',
' 1 | 4 | 0:00:46.060000',
]
self.assertEqual(output, exp, 'Output was not correct')
def test_prepared_statements(self):
"""examples/prepared_statements.py"""
try:
import examples.prepared_statements as example
except StandardError as e:
self.fail(e)
output = self._exec_main(example)
exp = [
'Inserted data',
'1 | Geert',
'2 | Jan',
'3 | Michel',
]
self.assertEqual(output, exp, 'Output was not correct')
| gpl-2.0 | -8,095,006,123,662,199,000 | 34.72973 | 78 | 0.579879 | false | 3.965207 | true | false | false |
clearcare/cc_dynamodb | tests/conftest.py | 1 | 1033 | from decimal import Decimal
import os.path
import pytest
AWS_DYNAMODB_CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'dynamodb.yml')
@pytest.fixture
def fake_config():
import cc_dynamodb
cc_dynamodb.set_config(
table_config=AWS_DYNAMODB_CONFIG_PATH,
aws_access_key_id='<KEY>',
aws_secret_access_key='<SECRET>',
namespace='dev_')
DYNAMODB_FIXTURES = {
'nps_survey': [
{
'agency_id': Decimal('1669'),
'change': "I can't think of any...",
'comments': 'No comment',
'created': '2014-12-19T22:10:42.705243+00:00',
'favorite': 'I like all of ClearCare!',
'profile_id': Decimal('2616346'),
'recommend_score': '9'
},
{
'agency_id': Decimal('1669'),
'change': 'Most of the features, please',
'created': '2014-12-19T22:10:42.705243+00:00',
'profile_id': Decimal('2616347'),
'recommend_score': '3'
},
],
}
| mit | 1,849,978,970,764,546,300 | 25.487179 | 82 | 0.53243 | false | 3.353896 | false | false | false |
felixbb/forseti-security | google/cloud/security/scanner/audit/buckets_rules_engine.py | 1 | 8802 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules engine for Bucket acls"""
from collections import namedtuple
import itertools
import re
# pylint: disable=line-too-long
from google.cloud.security.common.gcp_type import bucket_access_controls as bkt_acls
# pylint: enable=line-too-long
from google.cloud.security.common.util import log_util
from google.cloud.security.scanner.audit import base_rules_engine as bre
from google.cloud.security.scanner.audit import errors as audit_errors
LOGGER = log_util.get_logger(__name__)
# TODO: move this to utils since it's used in more that one engine
def escape_and_globify(pattern_string):
"""Given a pattern string with a glob, create actual regex pattern.
To require > 0 length glob, change the "*" to ".+". This is to handle
strings like "*@company.com". (THe actual regex would probably be
".*@company.com", except that we don't want to match zero-length
usernames before the "@".)
Args:
pattern_string: The pattern string of which to make a regex.
Returns:
The pattern string, escaped except for the "*", which is
transformed into ".+" (match on one or more characters).
"""
return '^{}$'.format(re.escape(pattern_string).replace('\\*', '.+'))
class BucketsRulesEngine(bre.BaseRulesEngine):
"""Rules engine for bucket acls"""
def __init__(self, rules_file_path):
"""Initialize.
Args:
rules_file_path: file location of rules
"""
super(BucketsRulesEngine,
self).__init__(rules_file_path=rules_file_path)
self.rule_book = None
def build_rule_book(self):
"""Build BucketsRuleBook from the rules definition file."""
self.rule_book = BucketsRuleBook(self._load_rule_definitions())
# pylint: disable=arguments-differ
def find_policy_violations(self, buckets_acls,
force_rebuild=False):
"""Determine whether bucket acls violates rules."""
violations = itertools.chain()
if self.rule_book is None or force_rebuild:
self.build_rule_book()
resource_rules = self.rule_book.get_resource_rules()
for rule in resource_rules:
violations = itertools.chain(violations,
rule.\
find_policy_violations(buckets_acls))
return violations
def add_rules(self, rules):
"""Add rules to the rule book."""
if self.rule_book is not None:
self.rule_book.add_rules(rules)
class BucketsRuleBook(bre.BaseRuleBook):
"""The RuleBook for bucket acls resources."""
def __init__(self, rule_defs=None):
"""Initialization.
Args:
rule_defs: rule definitons
"""
super(BucketsRuleBook, self).__init__()
self.resource_rules_map = {}
if not rule_defs:
self.rule_defs = {}
else:
self.rule_defs = rule_defs
self.add_rules(rule_defs)
def add_rules(self, rule_defs):
"""Add rules to the rule book"""
for (i, rule) in enumerate(rule_defs.get('rules', [])):
self.add_rule(rule, i)
def add_rule(self, rule_def, rule_index):
"""Add a rule to the rule book.
Args:
rule_def: A dictionary containing rule definition properties.
rule_index: The index of the rule from the rule definitions.
Assigned automatically when the rule book is built.
Raises:
"""
resources = rule_def.get('resource')
for resource in resources:
resource_ids = resource.get('resource_ids')
if not resource_ids or len(resource_ids) < 1:
raise audit_errors.InvalidRulesSchemaError(
'Missing resource ids in rule {}'.format(rule_index))
bucket = rule_def.get('bucket')
entity = rule_def.get('entity')
email = rule_def.get('email')
domain = rule_def.get('domain')
role = rule_def.get('role')
if (bucket is None) or (entity is None) or (email is None) or\
(domain is None) or (role is None):
raise audit_errors.InvalidRulesSchemaError(
'Faulty rule {}'.format(rule_def.get('name')))
rule_def_resource = bkt_acls.BucketAccessControls(
escape_and_globify(bucket),
escape_and_globify(entity),
escape_and_globify(email),
escape_and_globify(domain),
escape_and_globify(role.upper()))
rule = Rule(rule_name=rule_def.get('name'),
rule_index=rule_index,
rules=rule_def_resource)
resource_rules = self.resource_rules_map.get(rule_index)
if not resource_rules:
self.resource_rules_map[rule_index] = rule
def get_resource_rules(self):
"""Get all the resource rules for (resource, RuleAppliesTo.*).
Args:
resource: The resource to find in the ResourceRules map.
Returns:
A list of ResourceRules.
"""
resource_rules = []
for resource_rule in self.resource_rules_map:
resource_rules.append(self.resource_rules_map[resource_rule])
return resource_rules
class Rule(object):
"""Rule properties from the rule definition file.
Also finds violations.
"""
def __init__(self, rule_name, rule_index, rules):
"""Initialize.
Args:
rule_name: Name of the loaded rule
rule_index: The index of the rule from the rule definitions
rules: The rules from the file
"""
self.rule_name = rule_name
self.rule_index = rule_index
self.rules = rules
def find_policy_violations(self, bucket_acl):
"""Find bucket policy acl violations in the rule book.
Args:
bucket_acl: Bucket ACL resource
Returns:
Returns RuleViolation named tuple
"""
if self.rules.bucket != '^.+$':
bucket_bool = re.match(self.rules.bucket, bucket_acl.bucket)
else:
bucket_bool = True
if self.rules.entity != '^.+$':
entity_bool = re.match(self.rules.entity, bucket_acl.entity)
else:
entity_bool = True
if self.rules.email != '^.+$':
email_bool = re.match(self.rules.email, bucket_acl.email)
else:
email_bool = True
if self.rules.domain != '^.+$':
domain_bool = re.match(self.rules.domain, bucket_acl.domain)
else:
domain_bool = True
if self.rules.role != '^.+$':
role_bool = re.match(self.rules.role, bucket_acl.role)
else:
role_bool = True
should_raise_violation = (
(bucket_bool is not None and bucket_bool) and
(entity_bool is not None and entity_bool) and
(email_bool is not None and email_bool) and
(domain_bool is not None and domain_bool) and
(role_bool is not None and role_bool))
if should_raise_violation:
yield self.RuleViolation(
resource_type='project',
resource_id=bucket_acl.project_number,
rule_name=self.rule_name,
rule_index=self.rule_index,
violation_type='BUCKET_VIOLATION',
role=bucket_acl.role,
entity=bucket_acl.entity,
email=bucket_acl.email,
domain=bucket_acl.domain,
bucket=bucket_acl.bucket)
# Rule violation.
# resource_type: string
# resource_id: string
# rule_name: string
# rule_index: int
# violation_type: BUCKET_VIOLATION
# role: string
# entity: string
# email: string
# domain: string
# bucket: string
RuleViolation = namedtuple('RuleViolation',
['resource_type', 'resource_id', 'rule_name',
'rule_index', 'violation_type', 'role',
'entity', 'email', 'domain', 'bucket'])
| apache-2.0 | -2,150,819,857,089,099,300 | 33.249027 | 84 | 0.586571 | false | 4.149929 | false | false | false |
cyncyncyn/evette | languagefiles/language_irish_1.3.2.py | 1 | 68626 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#Copyright (C) 2007 Adam Spencer - Free Veterinary Management Suite
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
##Contact: [email protected]
####Irish####
def GetDictionary():
dictionary = {}
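##Each value below is a two-element tuple: by the convention shared across
##the Evette language files, index 0 holds the British English string and
##index 1 holds the localised string (in this file the localised column is
##still English, pending an Irish translation). A minimal lookup sketch,
##kept as a comment so this file stays data-only -- the LOCALISED constant
##is an assumption for illustration, not part of the original API:
##    LOCALISED = 1
##    label = GetDictionary()["usernamelabel"][LOCALISED]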
##Misc
dictionary["usernamelabel"] = (
"Username",
"Username"
)
dictionary["passwordlabel"] = (
"Password",
"Password"
)
dictionary["submitlabel"] = (
"Submit",
"Submit"
)
dictionary["totallabel"] = (
"Total",
"Total"
)
dictionary["fromlabel"] = (
"From",
"From"
)
dictionary["tolabel"] = (
"To",
"To"
)
dictionary["pricelabel"] = (
"Price",
"Price"
)
dictionary["descriptionlabel"] = (
"Description",
"Decsription"
)
dictionary["yeslabel"] = (
"Yes",
"Yes"
)
dictionary["nolabel"] = (
"No",
"No"
)
dictionary["editlabel"] = (
"Edit",
"Edit"
)
dictionary["deletelabel"] = (
"Delete",
"Delete"
)
dictionary["searchlabel"] = (
"Search",
"Search"
)
dictionary["resetlabel"] = (
"Reset",
"Reset"
)
dictionary["movelabel"] = (
"Move",
"Move"
)
dictionary["unitlabel"] = (
"Unit",
"Unit"
)
dictionary["onlabel"] = (
"on",
"on"
)
dictionary["namelabel"] = (
"Name",
"Name"
)
dictionary["headertext1"] = (
"The complete FREE veterinary practice management package",
"The complete open-source veterinary practice management package"
)
dictionary["headertext2"] = (
"You can change this header to anything you like by editing",
"You can change this header to anything you like by editing"
)
dictionary["generatedbylabel"] = (
"Generated by",
"Generated by"
)
dictionary["timelabel"] = (
"Time",
"Time"
)
dictionary["operationslabel"] = (
"Operations",
"Operations"
)
dictionary["operatinglabel"] = (
"Operating",
"Operating"
)
dictionary["consultinglabel"] = (
"Consulting",
"Consulting"
)
dictionary["vetlabel"] = (
"Vet",
"Vet"
)
dictionary["animaldetailslabel"] = (
"Animal Details",
"Animal Details"
)
dictionary["ownerdetailslabel"] = (
"Owner Details",
"Owner Details"
)
dictionary["receiptlabel"] = (
"Receipt",
"Receipt"
)
dictionary["problemlabel"] = (
"Problem",
"Problem"
)
dictionary["noteslabel"] = (
"Notes",
"Notes"
)
dictionary["planlabel"] = (
"Plan",
"Plan"
)
dictionary["userdeleted"] = (
"User deleted",
"User Deleted"
)
dictionary["changelog"] = (
"Change Log",
"Change Log"
)
dictionary["positionlabel"] = (
"Position",
"Position"
)
dictionary["datelabel"] = (
"Date",
"Date"
)
dictionary["invalidtimemessage"] = (
"Invalid Time",
"Invalid Time"
)
dictionary["containslabel"] = (
"Contains",
"Contains"
)
dictionary["nextduelabel"] = (
"Next Due",
"Next Due"
)
dictionary["nonelabel"] = (
"None",
"None"
)
##Menus
dictionary["clientmenu"] = (
"&Clients",
"&Clients"
)
dictionary["appointmentsmenu"] = (
"&Appointments",
"&Appointments"
)
dictionary["medicationmenu"] = (
"&Medication",
"&Medication"
)
dictionary["proceduresmenu"] = (
"&Procedures",
"&Procedures"
)
dictionary["lookupsmenu"] = (
"&Lookups",
"&Lookups"
)
dictionary["formsmenu"] = (
"&Forms",
"&Forms"
)
dictionary["staffmenu"] = (
"&Staff",
"&Staff"
)
dictionary["settingsmenu"] = (
"Se&ttings",
"Se&ttings"
)
dictionary["helpmenu"] = (
"&Help",
"&Help"
)
dictionary["entirelabel"] = (
"Entire",
"Entire"
)
dictionary["neuteredlabel"] = (
"Neutered",
"Neutered"
)
##Menu items
dictionary["addclientmenu"] = (
("Add Client", "Create a new client record"),
("Add Client", "Create a new client record")
)
dictionary["findclientmenu"] = (
("Find Clients", "Find client and animal records"),
("Find Clients", "Find client and animal records")
)
dictionary["viewappointmentsmenu"] = (
("Todays Appointments", "View todays appointments"),
("Todays Appointments", "View todays appointments")
)
dictionary["viewoperationsmenu"] = (
("Todays Operations", "View todays operations"),
("Todays Operations", "View todays operations")
)
dictionary["editusersmenu"] = (
("Edit Users", "Add and edit Evette users"),
("Edit Users", "Add and edit Evette users")
)
dictionary["editrotamenu"] = (
("Edit Rota", "Edit the rota"),
("Edit Rota", "Edit the rota")
)
dictionary["editmedicationmenu"] = (
("Edit Medication", "Edit Medication"),
("Edit Medication", "Edit Medication")
)
dictionary["editvaccinationsmenu"] = (
("Edit Vaccinations", "Edit Vaccinations"),
("Edit Vaccinations", "Edit Vaccinations")
)
dictionary["editproceduresmenu"] = (
("Edit Procedures", "Edit Procedures"),
("Edit Procedures", "Edit Procedures")
)
dictionary["editcoloursmenu"] = (
("Edit Colours", "Edit Colours"),
("Edit Colours", "Edit Colours")
)
dictionary["editbreedsmenu"] = (
("Edit Breeds", "Edit Breeds"),
("Edit Breeds", "Edit Breeds")
)
dictionary["editspeciesmenu"] = (
("Edit Species", "Edit Species"),
("Edit Species", "Edit Species")
)
dictionary["editformsmenu"] = (
("Edit Forms", "Edit Forms"),
("Edit Forms", "Edit Forms")
)
dictionary["editsettingsmenu"] = (
("Edit Settings", "Edit settings unique to this practice"),
("Edit Settings", "Edit settings unique to this practice")
)
dictionary["randomdatamenu"] = (
("Random Data", "Generate random sample data to experiment with"),
("Random Data", "Generate random sample data to experiment with")
)
dictionary["resettablesmenu"] = (
("Reset Database", "Completely reset the evette database"),
("Reset Database", "Completely reset the Evette database. Be careful!")
)
dictionary["gethelpmenu"] = (
("Help", "Get help on using Evette"),
("Help", "Get help on using Evette")
)
dictionary["aboutmenu"] = (
("About", "Information about this program"),
("About", "Information about Evette")
)
##Toolbar
dictionary["addclienttoolbar"] = (
(" Add Client ", "Create a new client record"),
(" Add Client ", "Create a new client record")
)
dictionary["findclienttoolbar"] = (
(" Client Search ", "Find clients and animals"),
(" Client Search ", "Find clients and their animals")
)
dictionary["viewappointmentstoolbar"] = (
(" Todays Appointments ", "View todays appointments"),
(" Todays Appointments ", "View todays appointments")
)
dictionary["viewoperationstoolbar"] = (
(" Todays Operations ", "View todays operations"),
(" Todays Operations ", "View todays operations")
)
##Client Panel
dictionary["newclientpagetitle"] = (
"New Client",
"New Client"
)
dictionary["clienttitlelabel"] = (
"Title",
"Title"
)
dictionary["clientforenameslabel"] = (
"First Name",
"First Names"
)
dictionary["clientsurnamelabel"] = (
"Last Name",
"Last Name"
)
dictionary["clientaddresslabel"] = (
"Address",
"Address"
)
dictionary["clientpostcodelabel"] = (
"Post Code",
"Post Code"
)
dictionary["clienthomephonelabel"] = (
"Home Phone",
"Home Phone"
)
dictionary["clientmobilephonelabel"] = (
"Mobile Phone",
"Mobile Phone"
)
dictionary["clientworkphonelabel"] = (
"Work Phone",
"Work Phone"
)
dictionary["clientemailaddresslabel"] = (
"Email",
"Email"
)
dictionary["clientcommentslabel"] = (
"Comments",
"Comments"
)
dictionary["clientanimalslabel"] = (
"Animals",
"Animals"
)
dictionary["clientaddanimaltooltip"] = (
"Create a new animal",
"Create a new animal"
)
dictionary["clienteditanimaltooltip"] = (
"Edit the selected animal record",
"Edit the selected animal record"
)
dictionary["clientdeleteanimaltooltip"] = (
"Delete the selected animal record",
"Delete the selected animal record"
)
dictionary["clientrefreshanimalstooltip"] = (
"Refresh the list of animals",
"Refresh the list of animals"
)
dictionary["clientcreateappointmenttooltip"] = (
"Create an appointment for the selected animal",
"Create an appointment for the selected animal"
)
dictionary["clientbalancelabel"] = (
"Balance",
"Balance"
)
dictionary["clientdetailedbilllabel"] = (
"Detailed Bill",
"Detailed Bill"
)
dictionary["clientsavetooltip"] = (
"Save changes to client record",
"Save changes to client record"
)
dictionary["clientunsavedchangesmessage"] = (
"This client record has unsaved changes, are you sure you want to close?",
"This client record has unsaved changes, are you sure you want to close?"
)
dictionary["clientdeleteanimalmessage"] = (
"Really delete animal?",
"Really delete animal?"
)
dictionary["clientrefreshbilltooltip"] = (
"Refresh bill",
"Refresh bill"
)
dictionary["clientrecentbillitems"] = (
(" Recent Items", "Adjust the date range of the bill items displayed"),
(" Recent Items", "Adjust the date range of the bill items displayed")
)
dictionary["clientcleardetailedbillentriestooltip"] = (
"Unselect the current bill item and clear the price and description entries",
"Unselect the current bill item and clear the price and description entries"
)
dictionary["clientsubmitdetailedbillentriestooltip"] = (
"Submit changes to the selected bill item",
"Submit changes to the selected bill item"
)
dictionary["clientdeletedetailedbillentriestooltip"] = (
"Delete the selected bill item",
"Delete the selected bill item"
)
##Animal Panel
dictionary["newanimalpagetitle"] = (
"New Animal",
"New Animal"
)
dictionary["animalownerlabel"] = (
"Owner",
"Owner"
)
dictionary["animaleditownertooltip"] = (
"Edit this animals owner",
"Edit this animals owner"
)
dictionary["animalnamelabel"] = (
"Name",
"Name"
)
dictionary["animalsexlabel"] = (
"Sex",
"Sex"
)
dictionary["animalspecieslabel"] = (
"Species",
"Species"
)
dictionary["animalbreedlabel"] = (
"Breed",
"Breed"
)
dictionary["animalcolourlabel"] = (
"Colour",
"Colour"
)
dictionary["animaldoblabel"] = (
"DOB",
"DOB"
)
dictionary["animalchipnolabel"] = (
"Chip #",
"Chip #"
)
dictionary["animalcommentslabel"] = (
"Comments",
"Comments"
)
dictionary["animalneuteredtooltip"] = (
"Check if the animal is neutered",
"Check if the animal is neutered"
)
dictionary["animalprintentirerecordtooltip"] = (
"Generate printable output of this entire animal record",
"Generate printable output of this entire animal record"
)
dictionary["animalgenerateformtooltip"] = (
"Generate a form using this animals details",
"Generate a form using this animals details"
)
dictionary["animalappointmentslabel"] = (
"Appointments",
"Appointments"
)
dictionary["animalcreateappointmenttooltip"] = (
"Create an appointment for this animal",
"Create an appointment for this animal"
)
dictionary["animaleditappointmenttooltip"] = (
"Edit the selected appointment",
"Edit the selected appointment"
)
dictionary["animalrefreshappointmentstooltip"] = (
"Refresh the list of appointments",
"Refresh the list of appointments"
)
dictionary["animaldeleteappointmenttooltip"] = (
"Delete the selected appointment",
"Delete the selected appointment"
)
dictionary["animalprintappointmenttooltip"] = (
"Generate printable output for the selected appointment",
"Generate printable output for the selected appointment"
)
dictionary["animalvetformbutton"] = (
("Vet Form", "Edit the vet form for the selected appointment"),
("Vet Form", "Edit the vet form for the selected appointment")
)
dictionary["animalappointmentdetailslabel"] = (
"Appointment Details",
"Appointment Details"
)
dictionary["animalvaccinationslabel"] = (
"Vaccinations",
"Vaccinations"
)
dictionary["animalsavebuttontooltip"] = (
"Save any changes made to this animal record",
"Save any changes made to this animal record"
)
dictionary["animalunsavedchangesmessage"] = (
"This animal record has unsaved changes, are you sure you want to close?",
"This animal record has unsaved changes, are you sure you want to close?"
)
dictionary["animalconfirmdeleteappointmentmessage"] = (
"Really delete appointment?",
"Really delete appointment?"
)
dictionary["animalresetvaccinationentries"] = (
"Reset vaccination entries",
"Reset vaccination entries"
)
dictionary["animalvaccinelabel"] = (
" Vaccine: ",
" Vaccine: "
)
dictionary["animalgivenlabel"] = (
"Given: ",
"Given: "
)
dictionary["animalnextlabel"] = (
" Next: ",
" Next: "
)
dictionary["animaldeletevaccinationtooltip"] = (
"Delete the selected vaccination",
"Delete the selected vaccination"
)
dictionary["animalsubmitvaccinationtooltip"] = (
"Submit this vaccination",
"Submit this vaccination"
)
dictionary["animalconfirmdeletevaccinationmessage"] = (
"Are you sure that you want to delete this vaccination?",
"Are you sure that you want to delete this vaccination?"
)
dictionary["animalvaccinationbatchlabel"] = (
" Batch: ",
" Batch: "
)
##Appointments
dictionary["appointmentappointmentforlabel"] = (
"Appointment for",
"Appointment for"
)
dictionary["appointmentoperationforlabel"] = (
"Operation for",
"Operation for"
)
dictionary["appointmententervettooltip"] = (
"If this appointment is for a specific vet, enter the vet's name here",
"If this appointment is for a specific vet, enter the vet's name here"
)
dictionary["appointmentrefreshtooltip"] = (
"Refresh the list of appointments",
"Refresh the list of appointments"
)
dictionary["appointmentreasonlabel"] = (
"Reason For Appointment",
"Reason For Appointment"
)
dictionary["appointmenttimelabel"] = (
"Appointment time",
"Appointment time"
)
dictionary["appointmentisopcheckbox"] = (
("Operation?", "Check this box if you would like to book an operation"),
("Operation?", "Check this box if you would like to book an operation")
)
dictionary["appointmentsubmittooltip"] = (
"Submit this appointment",
"Submit this appointment"
)
dictionary["appointmentdeletetooltip"] = (
"Delete this appointment",
"Delete this appointment"
)
dictionary["appointmentstatuslabel"] = (
"Status",
"Status"
)
dictionary["appointmentnotarrivedlabel"] = (
"Not Arrived",
"Not Arrived"
)
dictionary["appointmentwaitinglabel"] = (
"Waiting",
"Waiting"
)
dictionary["appointmentwithvetlabel"] = (
"With Vet",
"With Vet"
)
dictionary["appointmentdonelabel"] = (
"Done",
"Done"
)
dictionary["appointmenteditownerbutton"] = (
("Edit Owner", "Edit client record"),
("Edit Owner", "Edit client record")
)
dictionary["appointmenteditanimalbutton"] = (
("Edit Animal", "Edit animal record"),
("Edit Animal", "Edit animal record")
)
dictionary["appointmentappointmentsforlabel"] = (
"Appointments for",
"Appointments for"
)
dictionary["appointmentoperationsforlabel"] = (
"Operations for",
"Operations for"
)
dictionary["appointmenttimetooearlymessage"] = (
"Appointment time is before the practice opens!",
"Appointment time is before the practice opens!"
)
dictionary["appointmenttimetoolatemessage"] = (
"Appointment time is after the practice closes!",
"Appointment time is after the practice closes!"
)
dictionary["appointmentinvalidtimemessage"] = (
"Invalid time - times must be HH:MM!",
"Invalid time - times must be HH:MM!"
)
##Client search panel
dictionary["clientsearchpagetitle"] = (
"Client Search",
"Client Search"
)
dictionary["clientsearchstitlelabel"] = (
"Clients",
"Clients"
)
dictionary["clientsearchsurnamelabel"] = (
"Last Name",
"Last Name"
)
dictionary["clientsearchphonelabel"] = (
"Phone",
"Phone"
)
dictionary["clientsearchaddresslabel"] = (
"Address",
"Address"
)
dictionary["clientsearchpostcodelabel"] = (
"Post Code",
"Zip Code"
)
dictionary["clientsearchemaillabel"] = (
"Email",
"Email"
)
dictionary["clientsearchclearbutton"] = (
("Clear", "Clear all entries"),
("Clear", "Clear all entries")
)
dictionary["clientsearchsearchbutton"] = (
("Search", "Perform the search"),
("Search", "Perform the search")
)
dictionary["clientsearcheditclienttooltip"] = (
"Edit the selected client record",
"Edit the selected client record"
)
dictionary["clientsearchdeleteclienttooltip"] = (
"Delete the selected client record",
"Delete the selected client record"
)
dictionary["clientsearchanimallabel"] = (
"Animals",
"Animals"
)
dictionary["clientsearchanimalnamelabel"] = (
"Name",
"Name"
)
dictionary["clientsearchanimalsexlabel"] = (
"Sex",
"Sex"
)
dictionary["clientsearchanimalspecieslabel"] = (
"Species",
"Species"
)
dictionary["clientsearchanimalbreedlabel"] = (
"Breed",
"Breed"
)
dictionary["clientsearchanimalchipnolabel"] = (
"Chip #",
"Chip #"
)
dictionary["clientsearchanimalcommentslabel"] = (
"Comments",
"Comments"
)
dictionary["clientsearcheditanimaltooltip"] = (
"Edit the selected animal record",
"Edit the selected animal record"
)
dictionary["clientsearchdeleteanimaltooltip"] = (
"Delete the selected animal record",
"Delete the selected animal record"
)
dictionary["clientreceiptchangeloglabel"] = (
"Receipt item - ",
"Receipt item - "
)
dictionary["clientreceiptdeletemessage"] = (
"Really delete this receipt entry?",
"Really delete this receipt entry?"
)
dictionary["clientclearpaymenttooltip"] = (
"Empty the payment entry",
"Empty the payment entry"
)
dictionary["clientpaymentlabel"] = (
"Payment",
"Payment"
)
dictionary["clientsubmitpaymenttooltip"] = (
"Submit Payment",
"Submit Payment"
)
dictionary["clientpaymentinreceiptlabel"] = (
"Payment",
"Payment"
)
##Launch panels
dictionary["launchcreateconffilemessage"] = (
"Conf file not found! Create one now?",
"Configuration file not found! Create one now?"
)
dictionary["launchevettefoldermessage"] = (
"Evette folder not found! Create it now?",
"Evette folder not found! Create it now?"
)
dictionary["launchnodatabaseservermessage"] = (
"Unable to connect to database server! Please check that it is installed and running. Would you like to adjust your local settings?",
"Unable to connect to database server! Please check that it is installed and running. Would you like to adjust your local settings?"
)
dictionary["launchnoevettedatabasemessage"] = (
"Unable to locate evette database! Would you like to create one now?",
"Unable to locate Evette database! Would you like to create one now?"
)
dictionary["launchconffilecreatedmessage"] = (
"Conf file created",
"Configuration file created"
)
dictionary["launchevettefoldercreatedmessage"] = (
"Evette folder created",
"Evette folder created"
)
dictionary["launchdbiplabel"] = (
"DB IP",
"Database IP Address"
)
dictionary["launchdbuserlabel"] = (
"DB User",
"Database User"
)
dictionary["launchdbpasslabel"] = (
"DB Pass",
"Database Password"
)
dictionary["launchunabletocreatedatabasemessage"] = (
"Unable to create database, please check your mysql server config!",
"Unable to create database, please check your MySQL server configuration!"
)
dictionary["launchdatabasecreatedmessage"] = (
"Database created successfully!",
"Database created successfully!"
)
dictionary["launchlogintooltip"] = (
"Log in",
"Log in"
)
##Lookups
dictionary["lookupscolourpagetitle"] = (
"Edit Colour Lookups",
"Edit Colour Lookups"
)
dictionary["lookupsspeciespagetitle"] = (
"Edit Species Lookups",
"Edit Species Lookups"
)
dictionary["lookupsbreedpagetitle"] = (
"Edit Breed Lookups",
"Edit Breed Lookups"
)
dictionary["lookupsrefreshtooltip"] = (
"Refresh the list",
"Refresh the list"
)
dictionary["lookupsdeletetooltip"] = (
"Delete the selected lookup",
"Delete the selected lookup"
)
dictionary["lookupssubmittooltip"] = (
"Submit lookup",
"Submit lookup"
)
dictionary["lookupsduplicatemessage"] = (
"That lookup already exists, it's pointless putting it in again!",
"That lookup already exists, it's pointless putting it in again!"
)
dictionary["lookupsnonamemessage"] = (
"You must give a name for this lookup!",
"You must give a name for this lookup!"
)
dictionary["lookupsdeletemessage"] = (
"Are you sure that you want to delete this lookup?",
"Are you sure that you want to delete this lookup?"
)
##Medication
dictionary["medicationeditmedicationpagetitle"] = (
"Edit Medication",
"Edit Medication"
)
dictionary["medicationrefreshtooltip"] = (
"Refresh Medication List",
"Refresh Medication List"
)
dictionary["medicationdeletetooltip"] = (
"Delete the selected medication",
"Delete the selected medication"
)
dictionary["medicationbatchnolabel"] = (
"Batch #",
"Batch #"
)
dictionary["medicationbatchmovementreporttooltip"] = (
"Generate a report showing all movements of this batch",
"Generate a report showing all movements of this batch"
)
dictionary["medicationstocklisttooltip"] = (
"Print a list of your current stock",
"Print a list of your current stock"
)
dictionary["medicationmovementsoflabel"] = (
"Movements of ",
"Movements of "
)
dictionary["medicationconfirmdeletemessage"] = (
"Are you sure you want to delete ",
"Are you sure you want to delete "
)
dictionary["medicationconfirmoverwritemessage"] = (
"Are you sure you want to overwrite this medication?",
"Are you sure you want to overwrite this medication?"
)
dictionary["medicationmovementsofbatchnumberlabel"] = (
"Movements of Batch Number ",
"Movements of Batch Number "
)
dictionary["medicationexpireslabel"] = (
"Expires",
"Expires"
)
dictionary["medicationrefreshdetailstooltip"] = (
"Refresh the details of this medication",
"Refresh the details of this medication"
)
dictionary["medicationdeletemovementtooltip"] = (
"Delete this medication movement",
"Delete this medication movement"
)
dictionary["movementmovementlabel"] = (
"Movement",
"Movement"
)
dictionary["movementoverwritemovementmessage"] = (
"Are you sure that you want to overwrite this movement?",
"Are you sure that you want to overwrite this movement?"
)
dictionary["movementconfirmdeletemovementmessage"] = (
"Are you sure that you want to delete this movement?",
"Are you sure that you want to delete this movement?"
)
dictionary["movementrefreshmovementsmessage"] = (
"Refresh the details of this medication",
"Refresh the details of this medication"
)
dictionary["movementresetsearchentriestooltip"] = (
"Reset search entries",
"Reset search entries"
)
dictionary["medicationcurrentbatchlabel"] = (
"Current Batch",
"Current Batch"
)
dictionary["medicationunitpricelabel"] = (
"Unit Price",
"Unit Price"
)
##Weekdays
dictionary["monday"] = (
"Monday",
"Monday"
)
dictionary["tuesday"] = (
"Tuesday",
"Tuesday"
)
dictionary["wednesday"] = (
"Wednesday",
"Wednesday"
)
dictionary["thursday"] = (
"Thursday",
"Thursday"
)
dictionary["friday"] = (
"Friday",
"Friday"
)
dictionary["saturday"] = (
"Saturday",
"Saturday"
)
dictionary["sunday"] = (
"Sunday",
"Sunday"
)
##Procedures
dictionary["editprocedurespagetitle"] = (
"Edit Procedures",
"Edit Procedures"
)
dictionary["proceduresrefreshprocedurestooltip"] = (
"Refresh the list of procedures",
"Refresh the list of procedures"
)
dictionary["proceduresdeleteproceduretooltip"] = (
"Delete the selected procedure",
"Delete the selected procedure"
)
dictionary["proceduresunnamedproceduremessage"] = (
"You must give this procedure a name!",
"You must give this procedure a name!"
)
dictionary["proceduresoverwritemessage"] = (
"Are you sure that you want to edit this procedure?",
"Are you sure that you want to edit this procedure?"
)
dictionary["proceduresdeletemessage"] = (
"Are you sure that you want to delete this procedure?",
"Are you sure that you want to delete this procedure?"
)
##Random data
dictionary["randomdatapagetitle"] = (
"Random Data",
"Random Data"
)
dictionary["randomdatanoofclientslabel"] = (
"No of clients",
"Number of clients"
)
dictionary["randomdatanoofanimalslabel"] = (
"No of animals",
"Number of animals"
)
dictionary["randomdatanoofappointmentslabel"] = (
"No of appointments",
"Number of appointments"
)
dictionary["randomdatanoofoperationslabel"] = (
"No of operations",
"Number of operations"
)
dictionary["randomdatanoofmedicationslabel"] = (
"No of medications",
"Number of medications"
)
dictionary["randomdataclientslabel"] = (
"Clients",
"Clients"
)
dictionary["randomdataanimalslabel"] = (
"Animals",
"Animals"
)
dictionary["randomdataappointmentslabel"] = (
"Appointments",
"Appointments"
)
dictionary["randomdataoperationslabel"] = (
"Operations",
"Operations"
)
dictionary["randomdatamedicationlabel"] = (
"Medication",
"Medication"
)
dictionary["randomdatasubmittooltip"] = (
"Create random data",
"Create random data"
)
##Settings Panel
dictionary["settingspracticenamelabel"] = (
"Practice Name",
"Practice Name"
)
dictionary["settingsopenfromlabel"] = (
"Open from",
"Open from"
)
dictionary["settingsopentolabel"] = (
"Open to",
"Open to"
)
dictionary["settingsoperatingtimelabel"] = (
"Operating time",
"Operating time"
)
dictionary["settingshtmlviewerlabel"] = (
"HTML viewer",
"HTML viewer"
)
dictionary["settingsfindhtmlviewertooltip"] = (
"HTML viewer",
"HTML viewer"
)
dictionary["settingslanguagelabel"] = (
"Language",
"American English"
)
##Staff settings
dictionary["editvetformlabel"] = (
"Edit Vet Form",
"Edit Vet Form"
)
dictionary["editfinanceslabel"] = (
"Edit Finances",
"Edit Finances"
)
dictionary["showtoolbarlabel"] = (
"Show Toolbar",
"Show Toolbar"
)
dictionary["viewchangeloglabel"] = (
"View Changelogs",
"View Changelogs"
)
dictionary["editsettingslabel"] = (
"Edit Settings",
"Edit Settings"
)
dictionary["editrotalabel"] = (
"Edit Rota",
"Edit Rota"
)
dictionary["editstaffpagetitle"] = (
"Edit Rota",
"Edit Rota"
)
dictionary["staffmemberlabel"] = (
"Staff Member",
"Staff Member"
)
dictionary["deleteusertooltip"] = (
"Delete the selected user",
"Delete the selected user"
)
dictionary["clientslabel"] = (
"Clients",
"Clients"
)
dictionary["animalslabel"] = (
"Animals",
"Animals"
)
dictionary["appointmentslabel"] = (
"Appointments",
"Appointments"
)
dictionary["medicationlabel"] = (
"Medication",
"Medication"
)
dictionary["procedureslabel"] = (
"Clients",
"Clients"
)
dictionary["lookupslabel"] = (
"Lookups",
"Lookups"
)
dictionary["formslabel"] = (
"Forms",
"Forms"
)
dictionary["userslabel"] = (
"Users",
"Users"
)
dictionary["misclabel"] = (
"Misc",
"Misc"
)
dictionary["tickalllabel"] = (
"Check All",
"Check All"
)
dictionary["tickalltooltip"] = (
"Give the user permission to use ALL areas of the system. Use with care!",
"Give the user permission to use ALL areas of the system. Use with care!"
)
dictionary["useroverwritemessage"] = (
"Are you sure that you want to overwrite this user?",
"Are you sure that you want to overwrite this user?"
)
dictionary["userdeletemessage"] = (
"Are you sure that you want to delete this user?",
"Are you sure that you want to delete this user?"
)
##Edit Rota
dictionary["editrotapagetitle"] = (
"Edit Rota",
"Edit Rota"
)
dictionary["timeonlabel"] = (
"Time On",
"Time On"
)
dictionary["timeofflabel"] = (
"Time Off",
"Time Off"
)
dictionary["operatinglabel"] = (
"Operating",
"Operating"
)
dictionary["staffsummarylabel"] = (
"Staff Summary",
"Staff Summary"
)
dictionary["dayplanlabel"] = (
"Day Plan",
"Day Plan"
)
dictionary["novetnamemessage"] = (
"You must enter a vets name!",
"You must enter a vets name!"
)
dictionary["vetfinishedbeforestartingmessage"] = (
"The vet cannot finish before starting!",
"The vet cannot finish before starting!"
)
dictionary["vettwoplacesatoncemessage"] = (
"This vet cannot be in two places at once!",
"This vet cannot be in two places at once!"
)
##Vaccinations
dictionary["vaccinationseditvaccinationspagetitle"] = (
"Edit Vaccinations",
"Edit Vaccinations"
)
dictionary["vaccinationsvaccinelabel"] = (
"Vaccine",
"Vaccine"
)
dictionary["vaccinationsrefreshvaccinationstooltip"] = (
"Refresh the list of vaccinations",
"Refresh the list of vaccinations"
)
dictionary["vaccinationsdeletevaccinationstooltip"] = (
"Delete the selected vaccination",
"Delete the selected vaccination"
)
dictionary["vaccinationsprintstocklisttooltip"] = (
"Print a list of your current stock",
"Print a list of your current stock"
)
dictionary["vaccinationsconfirmdeletevaccinationmessage"] = (
"Are you sure you want to delete this vaccination?",
"Are you sure you want to delete this vaccination?"
)
dictionary["vaccinationsconfirmoverwritevaccinationmessage"] = (
"Are you sure you want to overwrite this vaccination?",
"Are you sure you want to overwrite this vaccination?"
)
dictionary["vaccinationsrefreshmovementstooltip"] = (
"Refresh the details of this vaccination",
"Refresh the details of this vaccination"
)
dictionary["vaccinationsdeletemovementtooltip"] = (
"Delete this vaccination movement",
"Delete this vaccination movement"
)
dictionary["vaccinationsoverwritemovementmessage"] = (
"Are you sure that you want to edit this movement?",
"Are you sure that you want to edit this movement?"
)
dictionary["vaccinationsdeletemovementmessage"] = (
"Are you sure that you want to delete this movement?",
"Are you sure that you want to delete this movement?"
)
##Vet Form
dictionary["vetformpagetitle"] = (
"Vet Form",
"Vet Form"
)
dictionary["vetformotherappointmentslabel"] = (
"Appointment History",
"Appointment History"
)
dictionary["vetformappointmentdetailslabel"] = (
"Appointment Details",
"Appointment Details"
)
dictionary["vetformmedlabel"] = (
"Med",
"Med"
)
dictionary["vetformvacclabel"] = (
"Vacc",
"Vacc"
)
dictionary["vetformproclabel"] = (
"Proc",
"Proc"
)
dictionary["vetformmanlabel"] = (
"Man",
"Man"
)
dictionary["vetformdeletereceipttooltip"] = (
"Delete the selected item from the receipt",
"Delete the selected item from the receipt"
)
dictionary["vetformdonetooltip"] = (
"Mark this appointment as complete and close",
"Mark this appointment as complete and close"
)
dictionary["vetformsavetooltip"] = (
"Save any changes made to this vet form",
"Save any changes made to this vet form"
)
dictionary["vetformreceiptitemlabel"] = (
"Receipt Item",
"Receipt Item"
)
dictionary["vetformdeletereceiptmessage"] = (
"Are you sure you want to delete this receipt item?",
"Are you sure you want to delete this receipt item?"
)
dictionary["vetformmedicationclearcontainstooltip"] = (
"Clear the \"Contains\" entry",
"Clear the \"Contains\" entry"
)
dictionary["vetformrefreshmedicationtooltip"] = (
"Refresh the medication list",
"Refresh the medication list"
)
dictionary["vetformnoofunitstooltip"] = (
"Enter the number of units that you are dispensing here",
"Enter the number of units that you are dispensing here"
)
dictionary["vetforminstructionslabel"] = (
"Instructions",
"Instructions"
)
dictionary["vetforminstructionstooltip"] = (
"Enter instructions on how to administer this medication here",
"Enter instructions on how to administer this medication here"
)
dictionary["vetformprintlabeltooltip"] = (
"Print a label for this medication",
"Print a label for this medication"
)
dictionary["vetformbatchnotooltip"] = (
"Enter the batch number here",
"Enter the batch number here"
)
dictionary["vetformrefreshvaccinationtooltip"] = (
"Refresh the vaccination list",
"Refresh the vaccination list"
)
dictionary["vetformrefreshprocedurestooltip"] = (
"Refresh the procedures list",
"Refresh the procedures list"
)
dictionary["vetformnodescriptionmessage"] = (
"You must give a description!",
"You must give a description!"
)
##View Appointments
dictionary["viewappointmentspagetitle"] = (
"View Appointments",
"View Appointments"
)
dictionary["viewoperationsspagetitle"] = (
"View Operations",
"View Operations"
)
dictionary["viewappointmentsmarkwithvettooltip"] = (
"Mark this appointment as with the vet",
"Mark this appointment as with the vet"
)
dictionary["viewappointmentschoosevettooltip"] = (
"Choose a vet",
"Choose a vet"
)
dictionary["viewappointmentsvetformtooltip"] = (
"Carry out the vet visit for this appointment",
"Carry out the vet visit for this appointment"
)
dictionary["viewappointmentsmarkarrivedtooltip"] = (
"Mark this appointment as arrived",
"Mark this appointment as arrived"
)
dictionary["viewappointmentsmarkdonetooltip"] = (
"Mark this appointment as done",
"Mark this appointment as done"
)
dictionary["viewappointmentseditclientbuttonlabel"] = (
"Edit Client",
"Edit Client"
)
dictionary["viewappointmentseditclientbuttontooltip"] = (
"Edit this clients record (so they can pay their bill)",
"Edit this clients record (so they can pay their bill)"
)
dictionary["viewappointmentsvetsonlabel"] = (
"Vets On",
"Vets On"
)
dictionary["appointmentsearchpagetitle"] = (
"Appointment Search",
"Appointment Search"
)
dictionary["appointmentsearchmenu"] = (
("Appointment Search", "Find an appointment"),
("Appointment Search", "Find an appointment")
)
dictionary["appointmentsearchanimalnamelabel"] = (
"Animal Name",
"Animal Name"
)
dictionary["reasonlabel"] = (
"Reason",
"Reason"
)
dictionary["viewoperationspagetitle"] = (
"View Operations",
"View Operations"
)
dictionary["dateformat"] = (
"DDMMYYYY",
"DDMMYYYY"
)
dictionary["currency"] = (
"£",
"EUR "
)
dictionary["mailshotmenu"] = (
("Mail Shot", "Compile a list of clients to contact"),
("Mail Shot", "Compile a list of clients to contact")
)
dictionary["mailshotpagetitle"] = (
"Mail Shot",
"Mail Shot"
)
dictionary["anyvaccine"] = (
"Any Vaccine",
"Any Vaccine"
)
dictionary["anyspecies"] = (
"Any Species",
"Any Species"
)
dictionary["deceasedlabel"] = (
"Deceased",
"Deceased"
)
dictionary["causeofdeathlabel"] = (
"Cause of Death",
"Cause of Death"
)
dictionary["includedeceasedlabel"] = (
"Include Deceased",
"Include Deceased"
)
dictionary["createvaccinationappointmentbutton"] = (
("Create Appointment", "Create an appointment for this vaccination"),
("Create Appointment", "Create an appointment for this vaccination")
)
dictionary["generatevaccinationcsvbutton"] = (
("Create CSV File", "Create and save a CSV file to disc. This can be used by most word processors to create mail shots"),
("Create CSV File", "Create and save a CSV file to disc. This can be used by most word processors to create mail shots")
)
dictionary["csvsavedtolabel"] = (
"CSV file saved to",
"CSV file saved to"
)
dictionary["versiontablenotfoundquestion"] = (
"Version table not found, create it now?",
"Version table not found, create it now?"
)
dictionary["versionupdatequestion1"] = (
"You are attempting to run evette",
"ou are attempting to run evette"
)
dictionary["versionupdatequestion2"] = (
"your database is version",
"your database is version"
)
dictionary["versionupdatequestion3"] = (
"Would you like to upgrade your database?",
"Would you like to upgrade your database?"
)
dictionary["resetdatabasequestion"] = (
"Are you sure that you want to reset all tables? ALL DATA WILL BE LOST!",
"Are you sure that you want to reset all tables? ALL DATA WILL BE LOST!"
)
dictionary["alltablesresetmessage"] = (
"All tables have been reset!",
"All tables have been reset!"
)
dictionary["addstafflabel"] = (
"Add staff?",
"Add staff?"
)
dictionary["vetslabel"] = (
"Vets",
"Vets"
)
dictionary["nurseslabel"] = (
"Nurses",
"Nurses"
)
dictionary["otherslabel"] = (
"Others",
"Others"
)
dictionary["nextmonthtooltip"] = (
"Show next month",
"Show next month"
)
dictionary["previousmonthtooltip"] = (
"Show previous month",
"Show previous month"
)
dictionary["backtocalendartooltip"] = (
"Back to calendar",
"Back to calendar"
)
dictionary["addstafftodailyrotatooltip"] = (
"Add a member of staff to this days rota",
"Add a member of staff to this days rota"
)
dictionary["deleterotaitemtooltip"] = (
"Delete this rota entry",
"Delete this rota entry"
)
dictionary["submitrotaitemtooltip"] = (
"Submit this rota entry",
"Submit this rota entry"
)
dictionary["vetpositiontitle"] = (#Note: If a user is given this position, Evette will assume that the user is a vet
"Vet",
"Vet"
)
dictionary["vetnursepositiontitle"] = (#Note: If a user is given this position, Evette will assume that the user is a vet nurse
"Nurse",
"Nurse"
)
dictionary["managerpositiontitle"] = (#Note: If a user is given this position, Evette will assume that the user is a manager
"Manager",
"Manager"
)
dictionary["errorlabel"] = (
"Sorry, the following error has occured",
"Sorry, the following error has occured"
)
dictionary["editdiarytoolbar"] = (
("Edit Diary", "Edit the diary"),
("Edit Diary", "Edit the diary")
)
dictionary["editdiarypagetitle"] = (
"Edit Diary",
"Edit Diary"
)
dictionary["notesuptolabel"] = (
"Up to",
"Up to"
)
dictionary["subjectcontainslabel"] = (
"Subject contains",
"Subject contains"
)
dictionary["notecontainslabel"] = (
"Note contains",
"Note contains"
)
dictionary["showremovedlabel"] = (
"Include removed?",
"Include removed?"
)
dictionary["subjectlabel"] = (
"Subject",
"Subject"
)
dictionary["notelabel"] = (
"Note",
"Note"
)
dictionary["removedlabel"] = (
"Removed",
"Removed"
)
dictionary["linklabel"] = (
"Link",
"Link"
)
dictionary["clientlabel"] = (
"Client",
"Client"
)
dictionary["animallabel"] = (
"Animal",
"Animal"
)
dictionary["opentargetrecordtooltip"] = (
"Open the record linked to this diary note",
"Open the record linked to this diary note"
)
dictionary["diarynotelabel"] = (
"Diary Note",
"Diary Note"
)
dictionary["confirmdeletediarynotemessage"] = (
"Are you sure that you want to delete this diary note?",
"Are you sure that you want to delete this diary note?"
)
dictionary["nolinklabel"] = (
"No Link",
"No Link"
)
dictionary["createassociateddiarynotetooltip"] = (
"Create a diary note associated with this record",
"Create a diary note associated with this record"
)
dictionary["newdiarynotetooltip"] = (
"Create a new diary note",
"Create a new diary note"
)
dictionary["editdiarynotetooltip"] = (
"Edit the selected diary note",
"Edit the selected diary note"
)
dictionary["deletediarynotetooltip"] = (
"Delete the selected diary note",
"Delete the selected diary note"
)
dictionary["refreshdiarytooltip"] = (
"Refresh the list of diary notes",
"Refresh the list of diary notes"
)
dictionary["cleardiarytooltip"] = (
"Clear the diary filters",
"Clear the diary filters"
)
dictionary["clientolderthanservermessage"] = (
"You are trying to run an out-of-date client, please upgrade then try again",
"You are trying to run an out-of-date client, please upgrade then try again"
)
dictionary["adddiarynotes"] = (
"Add to diary",
"Add to diary"
)
dictionary["editdiarynotes"] = (
"Edit diary",
"Edit diary"
)
dictionary["deletediarynotes"] = (
"Delete from diary",
"Delete from diary"
)
dictionary["diarylabel"] = (
"Diary",
"Diary"
)
dictionary["viewlicensemenu"] = (
("View License", "View the license for this software."),
("View License", "View the license for this software.")
)
dictionary["fileaccosiationmenu"] = (
("File Associations", "Edit the external applications associated with attached files"),
("File Associations", "Edit the external applications associated with attached files")
)
dictionary["licenselabel"] = (
"License",
"License"
)
dictionary["aboutlabel"] = (
"About",
"About"
)
dictionary["attachedfileslabel"] = (
"Attached Files",
"Attached Files"
)
dictionary["deleteattachedfileconfirm"] = (
"Are you sure that you want to delete this file?",
"Are you sure that you want to delete this file?"
)
dictionary["addnewmediatooltip"] = (
"Add a new external file to this record",
"Add a new external file to this record"
)
dictionary["replacemediatooltip"] = (
"Update the description of the selected file",
"Update the description of the selected file"
)
dictionary["deletemediatooltip"] = (
"Delete the selected file",
"Delete the selected file"
)
dictionary["savemediatooltip"] = (
"Save the selected file to disk",
"Save the selected file to disk"
)
dictionary["fileassociationspagetitle"] = (
"File Associations",
"File Associations"
)
dictionary["extensionlabel"] = (
"Extension",
"Extension"
)
dictionary["programlabel"] = (
"Program",
"Program"
)
dictionary["fileassociationexistsmessage"] = (
"There is already a program associated with this file extension!",
"There is already a program associated with this file extension!"
)
dictionary["deleteassociationconfirm"] = (
"Are you sure that you want to delete this file association?",
"Are you sure that you want to delete this file association?"
)
dictionary["noprogramassociatedmessage"] = (
"There is no program associated with this file type!",
"There is no program associated with this file type!"
)
dictionary["mediatoolargemessage"] = (
"This file is too large to attach!",
"This file is too large to attach!"
)
############################## 1.1.9 ###############################################
dictionary["weightpanelpagetitle"] = (
"Weight",
"Weight"
)
dictionary["deleteweighttooltip"] = (
"Delete the selected weight",
"Delete the selected weight"
)
dictionary["deleteweightconfirm"] = (
"Are you sure that you want to delete this weight?",
"Are you sure that you want to delete this weight?"
)
dictionary["samelabel"] = (
"Same",
"Same"
)
dictionary["reorderlabel"] = (
"Minimum",
"Minimum"
)
dictionary["runninglowlabel"] = (
"Running Low?",
"Running Low?"
)
dictionary["diarymenu"] = (
"&Diary",
"&Diary"
)
############################## 1.2 ###############################################
dictionary["clientanimalsearchtooltip"] = (
"If you wish to filter the animals by name, enter the name here",
"If you wish to filter the animals by name, enter the name here"
)
dictionary["browseappointmentsmenu"] = (
( "Browse Appointments", "Browse all appointments" ),
( "Browse Appointments", "Browse all appointments" )
)
dictionary["browseappointmentspagetitle"] = (
"Browse Appointments",
"Browse Appointments"
)
dictionary["appointmentlabel"] = (
"appointment",
"appointment"
)
dictionary["januarylabel"] = (
"January",
"January"
)
dictionary["februarylabel"] = (
"February",
"February"
)
dictionary["marchlabel"] = (
"March",
"March"
)
dictionary["aprillabel"] = (
"April",
"April"
)
dictionary["maylabel"] = (
"May",
"May"
)
dictionary["junelabel"] = (
"June",
"June"
)
dictionary["julylabel"] = (
"July",
"July"
)
dictionary["augustlabel"] = (
"August",
"August"
)
dictionary["septemberlabel"] = (
"September",
"September"
)
dictionary["octoberlabel"] = (
"October",
"October"
)
dictionary["novemberlabel"] = (
"November",
"November"
)
dictionary["decemberlabel"] = (
"December",
"December"
)
dictionary["readfileassociationhelpmessage"] = (
"To learn about file associations - visit the help section.",
"To learn about file associations - visit the help section."
)
dictionary["websitelabel"] = (
"Website",
u"Website"
)
dictionary["generateinvoicelabel"] = (
"Generate a printable invoice for this client",
u"Generate a printable invoice for this client"
)
dictionary["animalformsmenu"] = (
("Animal Forms", "Create or edit forms that be generated using an animal's details"),
(u"Animal Forms", u"Create or edit forms that be generated using an animal's details")
)
dictionary["clientformsmenu"] = (
("Client Forms", "Create or edit forms that be generated using an client's details"),
(u"Client Forms", u"Create or edit forms that be generated using an client's details")
)
dictionary["animalformspagetitle"] = (
"Animal Forms",
u"Animal Forms"
)
dictionary["clientformspagetitle"] = (
"Client Forms",
u"Client Forms"
)
dictionary["previewlabel"] = (
"Preview",
u"Preview"
)
dictionary["wordkeyslabel"] = (
"Wordkeys",
u"Wordkeys"
)
dictionary["invoiceformsmenu"] = (
("Invoice Forms", "Edit the invoice templates"),
(u"Invoice Forms", u"Edit the invoice templates")
)
dictionary["editinvoicepagetitle"] = (
"Edit Invoices",
u"Edit Invoices"
)
dictionary["medicationformsmenu"] = (
("Medication Forms", "Edit the medication templates"),
(u"Medication Forms", u"Edit the medication templates")
)
dictionary["editmedicationtformspagetitle"] = (
"Medication Forms",
u"Medication Forms"
)
dictionary["invoicespagetitle"] = (
"Invoices",
u"Invoices"
)
dictionary["newinvoicetooltip"] = (
"Create a new invoice",
u"Create a new invoice"
)
dictionary["editinvoicetooltip"] = (
"Edit the selected invoice",
u"Edit the selected invoice"
)
dictionary["deleteinvoicetooltip"] = (
"Delete the selected invoice",
u"Delete the selected invoice"
)
dictionary["invoiceoverlapmessage"] = (
"Invoices are not allowed to overlap, please adjust the dates",
u"Invoices are not allowed to overlap, please adjust the dates"
)
dictionary["clientgenerateformtooltip"] = (
"Generate a form using this clients details",
u"Generate a form using this clients details"
)
dictionary["randomdatawarningmessage"] = (
"Note: Evette will need close when this process has completed,\nplease start Evette again to see the results.",
u"Note: Evette will need close when this process has completed,\nplease start Evette again to see the results."
)
dictionary["invoiceidlabel"] = (
"Invoice ID",
u"Invoice ID"
)
dictionary["paidlabel"] = (
"paid",
u"paid"
)
dictionary["unpaidlabel"] = (
"unpaid",
u"unpaid"
)
dictionary["invoiceidchoicetooltip"] = (
"Choose an invoice ID to mark an invoice as paid.",
u"Choose an invoice ID to mark an invoice as paid."
)
dictionary["editpaymentinvoicetooltip"] = (
"Edit the amount paid on the selected invoice.",
u"Edit the amount paid on the selected invoice."
)
dictionary["editinvoicepaymenttitle"] = (
"Edit payment",
u"Edit payment"
)
dictionary["editanimaltooltip"] = (
"Edit Animal",
u"Edit Animal"
)
###################1.2.2#####################
dictionary["stocklabel"] = (
"Stock",
u"Stock"
)
dictionary["editstockmenu"] = (
("Edit Stock", "Edit Stock"),
("Edit Stock", "Edit Stock")
)
dictionary["batchsearchmenu"] = (
("Batch Search", "Show movements for a specific batch number"),
("Batch Search", "Show movements for a specific batch number")
)
dictionary["batchbreakdowntooltip"] = (
"View a breakdown of the current stock by batch number",
u"View a breakdown of the current stock by batch number"
)
dictionary["editmovementlabel"] = (
"Edit Movement",
u"Edit Movement"
)
dictionary["createmovementlabel"] = (
"Create Movement",
u"Create Movement"
)
dictionary["consumablelabel"] = (
"Consumable",
u"Consumable"
)
dictionary["shoplabel"] = (
"Shop",
u"Shop"
)
dictionary["procedurelabel"] = (
"Procedure",
u"Procedure"
)
dictionary["manuallabel"] = (
"Manual",
u"Manual"
)
dictionary["prescribemedicationlabel"] = (
"Prescribe Medication",
u"Prescribe Medication"
)
dictionary["quantitylabel"] = (
"Quantity",
u"Quantity"
)
dictionary["quantityerrormessage"] = (
"Invalid quantity",
u"Invalid quantity"
)
dictionary["viewinvoicetooltip"] = (
"View Invoice",
u"View Invoice"
)
dictionary["diagnosislabel"] = (
"Diagnosis",
u"Diagnosis"
)
dictionary["createreceiptitemtooltip"] = (
"Create a receipt item",
u"Create a receipt item"
)
############################## 1.2.3 ###############################################
dictionary["editkennelsmenu"] = (
("Edit Kennels", "Edit kennels available"),
("Edit Kennels", "Edit kennels available")
)
dictionary["viewkennelsmenu"] = (
("View Kennels", "View Kennels"),
("View Kennels", "View Kennels")
)
dictionary["kennelsmenu"] = (
"&Kennels",
"&Kennels"
)
dictionary["kennelblocktitlelabel"] = (
"Kennel Blocks",
"Kennel Blocks"
)
dictionary["kennelstitlelabel"] = (
"Kennels",
"Kennels"
)
dictionary["editkennelblocktitle"] = (
"Edit kennel block",
"Edit kennel block"
)
dictionary["deletekennelblockconfirmation"] = (
"Are you sure that you want to delete this kennel block?",
"Are you sure that you want to delete this kennel block?"
)
dictionary["deletekennelconfirmation"] = (
"Are you sure that you want to delete this kennel?",
"Are you sure that you want to delete this kennel?"
)
dictionary["editkenneltitle"] = (
"Edit kennel",
"Edit kennel"
)
dictionary["stayinglabel"] = (
"Staying",
"Staying"
)
dictionary["occupiedlabel"] = (
"occupied",
"occupied"
)
dictionary["vacantlabel"] = (
"vacant",
"vacant"
)
dictionary["changeownershiptooltip"] = (
"Transfer ownership of this animal",
"Transfer ownership of this animal"
)
dictionary["choosenewownerdialogtitle"] = (
"Choose new owner",
"Choose new owner"
)
dictionary["doubleclicktoselecttooltip"] = (
"Double click to select",
"Double click to select"
)
dictionary["importasmanimaltooltip"] = (
"Create an animal record from an ASM record",
"Create an animal record from an ASM record"
)
dictionary["chooseananimaltitle"] = (
"Choose an animal",
"Choose an animal"
)
dictionary["clientrefnolabel"] = (
"Reference Number",
"Reference Number"
)
dictionary["toomanyresultsmessage"] = (
"Your search produced too many results to display, please narrow down your search",
"Your search produced too many results to display, please narrow down your search"
)
dictionary["idlelabel"] = (
"Idle",
"Idle"
)
dictionary["connectinglabel"] = (
"Connecting",
"Connecting"
)
dictionary["connectedlabel"] = (
"Connected",
"Connected"
)
dictionary["errorlabel"] = (
"Error",
"Error"
)
dictionary["usernamepassworderrormessage"] = (
"Unsuccessful Login",
"Unsuccessful Login"
)
dictionary["successfulloginmessage"] = (
"Successful Login",
"Successful Login"
)
dictionary["creatingevettefolder"] = (
"Creating Evette folder",
"Creating Evette folder"
)
dictionary["evettedatabasecreatedmessage"] = (
"Created Evette database",
"Created Evette database"
)
dictionary["errorcreatingdatabasemessage"] = (
"Error creating Evette database",
"Error creating Evette database"
)
dictionary["asmimportmenu"] = (
("ASM Import", "Import an animal from ASM"),
("ASM Import", "Import an animal from ASM")
)
dictionary["errorobtainingownermessage"] = (
"Unable to find owner",
"Unable to find owner"
)
dictionary["alreadyimportedmessage"] = (
"This animal has already been imported. Would you like to view it?",
"This animal has already been imported. Would you like to view it?"
)
dictionary["addweighttooltip"] = (
"Add Weight",
"Add Weight"
)
dictionary["editweightlabel"] = (
"Edit Weight",
"Edit Weight"
)
dictionary["adduserlabel"] = (
"Add User",
"Add User"
)
dictionary["edituserlabel"] = (
"Edit User",
"Edit User"
)
dictionary["editreasonsmenu"] = (
("Edit Reasons", "Edit common appointment reasons"),
("Edit Reasons", "Edit common appointment reasons")
)
dictionary["lookupsreasonpagetitle"] = (
"Appointment Reason Lookups",
"Appointment Reason Lookups"
)
dictionary["doubleclickforreasonstooltip"] = (
"Double click for a choice of common appointment reasons",
"Double click for a choice of common appointment reasons"
)
dictionary["filemenu"] = (
"File",
"File"
)
dictionary["fileexitmenu"] = (
("Exit", "Exit Evette"),
("Exit", "Exit Evette")
)
dictionary["fileclosewindowsmenu"] = (
("Close All Panels", "Close all open panels"),
("Close All Panels", "Close all open panels")
)
dictionary["confirmcloseallwindowsmessage"] = (
("Are you sure that you want to close all open panels? Any unsaved data will be lost."),
("Are you sure that you want to close all open panels? Any unsaved data will be lost.")
)
dictionary["locationlabel"] = (
"Location",
"Location"
)
dictionary["editprocedurelabel"] = (
"Edit Procedure",
"Edit Procedure"
)
############################## 1.2.4 ###############################################
dictionary["addlookuptooltip"] = (
"Create a new lookup",
u"Create a new lookup"
)
dictionary["malelabel"] = (
"Male",
u"Male"
)
dictionary["femalelabel"] = (
"Female",
u"Female"
)
dictionary["unknownlabel"] = (
"Unknown",
u"Unknown"
)
dictionary["dayslabel"] = (
"days",
u"days"
)
dictionary["weekslabel"] = (
"weeks",
u"weeks"
)
dictionary["monthslabel"] = (
"months",
u"months"
)
dictionary["yearslabel"] = (
"years",
u"years"
)
dictionary["invaliddobtooltip"] = (
"Invalid DOB",
u"Invalid DOB"
)
dictionary["addkennelblocktooltip"] = (
"Create a new kennel block",
u"Create a new kennel block"
)
dictionary["addkenneltooltip"] = (
"Create a new kennel",
u"Create a new kennel"
)
############################## 1.2.5 ###############################################
dictionary["asmclientimportmenu"] = (
("ASM Client Import", "Import a client from ASM"),
(u"ASM Client Import", u"Import a client from ASM")
)
dictionary["chooseclientlabel"] = (
"Choose client",
u"Choose client"
)
dictionary["datectrltooltip"] = (
"Double click to choose from a calendar",
u"Double click to choose from a calendar"
)
dictionary["choosedatetitle"] = (
"Choose a date",
u"Choose a date"
)
dictionary["editappointmentlabel"] = (
"Edit Appointment",
u"Edit Appointment"
)
dictionary["agelabel"] = (
"Age",
u"Age"
)
dictionary["addvaccinationtooltip"] = (
"Add Vaccination",
u"Add Vaccination"
)
dictionary["printtooltip"] = (
"Print",
u"Print"
)
############################## 1.2.6 ###############################################
dictionary["filealteredmessage"] = (
"Another user has altered this file since you opened it. Please close this record and try again.",
u"Another user has altered this file since you opened it. Please close this record and try again."
)
dictionary["asmreflabel"] = (
"ASM Ref",
u"ASM Ref"
)
dictionary["deselectlabel"] = (
"Deselect",
u"Deselect"
)
dictionary["createappointmentlabel"] = (
"Create Appointment",
u"Create Appointment"
)
dictionary["multiplepanellabel"] = (
"Allow multiple panels open",
u"Allow multiple panels open"
)
dictionary["filealteredchoice"] = (
"Another user has altered this file since you opened it. Would you like to force through your changes?",
u"Another user has altered this file since you opened it. Would you like to force through your changes?"
)
dictionary["latelabel"] = (
"Late",
u"Late"
)
dictionary["minslabel"] = (#Abrreviation of minutes - it is advisable to keep this as short as possible.
"mins",
u"mins"
)
dictionary["microchiplabel"] = (
"Microchip",
u"Microchip"
)
dictionary["microchippedlabel"] = (
"Microchip implanted",
u"Microchip implanted"
)
dictionary["costpricelabel"] = (
"Cost Price",
u"Cost Price"
)
dictionary["viewvetnoteslabel"] = (
"View Vet Notes",
u"View Vet Notes"
)
dictionary["appointmentsummarylistboxtooltip"] = (
"Right click to view available vets\nDouble click to choose time slot",
u"Right click to view available vets\nDouble click to choose time slot"
)
############################## 1.2.7 ###############################################
dictionary["shopsalemenuitem"] = (
"Shop Sale",
u"Shop Sale"
)
dictionary["shopitemstitle"] = (
"Shop Items",
u"Shop Items"
)
dictionary["basketlabel"] = (
"Basket",
u"Basket"
)
dictionary["putbacktooltip"] = (
"Put back",
u"Put back"
)
dictionary["addtobaskettooltip"] = (
"Add to basket",
u"Add to basket"
)
dictionary["clientmergetooltip"] = (
"Merge another client into this one",
u"Merge another client into this one"
)
dictionary["clientsmergedmessage"] = (
"Clients merged",
u"Clients merged"
)
dictionary["addlabel"] = (
"Add",
u"Add"
)
dictionary["subtractlabel"] = (
"Subtract",
u"Subtract"
)
dictionary["editmarkupmenu"] = (
("Define Markup Rules", "Define Markup Rules"),
(u"Define Markup Rules", u"Define Markup Rules")
)
dictionary["multiplybylabel"] = (
"Multiply by",
u"Multiply by"
)
dictionary["roundtolabel"] = (
"Round up to",
u"Round up to"
)
dictionary["costpriceentrytooltip"] = (
"This value is not included in your settings, it is here simply to allow you to try your settings out on some real figures.",
u"This value is not included in your settings, it is here simply to allow you to try your settings out on some real figures."
)
dictionary["invalidpricemessage"] = (
"Invalid Price!",
u"Invalid Price!"
)
dictionary["priceinpenniestooltip"] = (
"Please enter price in pennies eg. \"50\" to round to the nearest 50p, \"100\" to round to the nearest pound.",
u"Please enter price in cents eg. \"50\" to round to the nearest 50 cents, \"100\" to round to the nearest euro."
)
dictionary["customerpricelabel"] = (
"Customer Price",
u"Customer Price"
)
dictionary["submitsettingstooltip"] = (
"Submit settings",
u"Submit settings"
)
dictionary["applymarkuptostocktooltip"] = (
"Apply the current markup settings to all stock.",
u"Apply the current markup settings to all stock."
)
dictionary["markupappliedtoallmessage"] = (
"Markup applied to all prices",
u"Markup applied to all prices"
)
dictionary["automarkupconfirmmessage"] = (
"Continuing will alter all of your public prices, are you sure that you want to continue?",
u"Continuing will alter all of your public prices, are you sure that you want to continue?"
)
dictionary["unitpricentrytooltip"] = (
"Type \"a\" to autogenerate a price from markup rules.",
u"Type \"a\" to autogenerate a price from markup rules."
)
dictionary["costpricentrytooltip"] = (
"Type \"c\" for help calculating the cost price.",
u"Type \"c\" for help calculating the cost price."
)
dictionary["calculatecostpricetitle"] = (
"Calculate Cost Price",
u"Calculate Cost Price"
)
dictionary["packpricelabel"] = (
"Price per pack",
u"Price per pack"
)
dictionary["unitsperpacklabel"] = (
"Units per pack",
u"Units per pack"
)
############################## 1.2.8 ###############################################
dictionary["phonenumbertooltip"] = (
"CTRL + P to toggle public availability.",
u"CTRL + P to toggle public availability."
)
dictionary["lostanimallabel"] = (
"Lost Animal",
u"Lost Animal"
)
dictionary["foundanimallabel"] = (
"Found Animal",
u"Found Animal"
)
dictionary["lostandfoundmenu"] = (
("Lost and Found", "View/Edit Lost and Found"),
(u"Lost and Found", u"View/Edit Lost and Found")
)
dictionary["lostlabel"] = (
"Lost",
u"Lost"
)
dictionary["foundlabel"] = (
"Found",
u"Found"
)
dictionary["datelostlabel"] = (
"Date Lost",
u"Date Lost"
)
dictionary["datefoundlabel"] = (
"Date Found",
u"Date Found"
)
dictionary["furlengthlabel"] = (
"Fur Length",
u"Fur Length"
)
dictionary["longlabel"] = (
"Long",
u"Long"
)
dictionary["shortlabel"] = (
"Short",
u"Short"
)
dictionary["fluffylabel"] = (
"Fluffy",
u"Fluffy"
)
dictionary["hairlesslabel"] = (
"Hairless",
u"Hairless"
)
dictionary["sizelabel"] = (
"Size",
u"Size"
)
dictionary["largelabel"] = (
"Large",
u"Large"
)
dictionary["mediumlabel"] = (
"Medium",
u"Medium"
)
dictionary["smalllabel"] = (
"Small",
u"Small"
)
dictionary["juvenilelabel"] = (
"Juvenile",
u"Juvenile"
)
dictionary["adultlabel"] = (
"Adult",
u"Adult"
)
dictionary["elderlylabel"] = (
"Elderly",
u"Elderly"
)
dictionary["temperamentlabel"] = (
"Temperament",
u"Temperament"
)
dictionary["friendlylabel"] = (
"Friendly",
u"Friendly"
)
dictionary["timidlabel"] = (
"Timid",
u"Timid"
)
dictionary["aggressivelabel"] = (
"Aggressive",
u"Aggressive"
)
dictionary["collarlabel"] = (
"Collar",
u"Collar"
)
dictionary["collardescriptiontooltip"] = (
"Collar description",
u"Collar description"
)
dictionary["arealabel"] = (
"Area",
u"Area"
)
dictionary["areatooltip"] = (
"Please put in likely areas by postcode if possible as well as the city/state, separated by spaces.",
u"Please put in likely areas by postcode if possible as well as the city/state, separated by spaces."
)
dictionary["datecompletelabel"] = (
"Date complete",
u"Date complete"
)
dictionary["savetooltip"] = (
"Save",
u"Save"
)
dictionary["contacttooltip"] = (
"Contact",
u"Contact"
)
dictionary["completelabel"] = (
"Complete",
u"Complete"
)
dictionary["idlabel"] = (
"ID",
u"ID"
)
dictionary["rightclickformenutooltip"] = (
"Right click for available options.",
u"Right click for available options."
)
dictionary["lostandfoundsearchtooltip"] = (
"Search for a match",
u"Search for a match"
)
dictionary["searchuptolabel"] = (
"Search ceiling",
u"Search ceiling"
)
dictionary["searchfromlabel"] = (
"Search floor",
u"Search floor"
)
dictionary["alreadyonlostandfoundmessage"] = (
"This animal is already on the lost and found!",
u"This animal is already on the lost and found!"
)
dictionary["includecompletelabel"] = (
"Include complete?",
u"Include complete?"
)
dictionary["closelabel"] = (
"Close",
u"Close"
)
dictionary["scorelabel"] = (
"Score",
u"Score"
)
dictionary["lostandfoundsearchresultspagetitle"] = (
"Lost and Found Search Results",
u"Lost and Found Search Results"
)
dictionary["systemlabel"] = (
"System",
u"System"
)
dictionary["versionlabel"] = (
"Version",
u"Version"
)
############################## 1.3 ###############################################
dictionary["addlostmenu"] = (
("Add Lost", "Add a lost animal"),
(u"Add Lost", u"Add a lost animal")
)
dictionary["addfoundmenu"] = (
("Add Found", "Add a found animal"),
(u"Add Found", u"Add a found animal")
)
dictionary["alllabel"] = (
"All",
u"All"
)
dictionary["refreshlabel"] = (
"Refresh",
u"Refresh"
)
dictionary["filteranimalslabel"] = (
"Filter Animals",
u"Filter Animals"
)
dictionary["markaspaidlabel"] = (
"Mark as paid?",
u"Mark as paid?"
)
############################## 1.3.1 ###############################################
dictionary["asmshelterlabel"] = (
"ASM Shelter",
u"ASM Shelter"
)
dictionary["asmsheltertooltip"] = (
"If you use the Animal Shelter Manager system you can mark a client as \"The Shelter\" allowing you to import animal records from ASM who do not have an owner.",
u"If you use the Animal Shelter Manager system you can mark a client as \"The Shelter\" allowing you to import animal records from ASM who do not have an owner."
)
dictionary["appointmentrefreshlabel"] = (
"Appointment Refresh Interval",
u"Appointment Refresh Interval"
)
############################## 1.3.2 ###############################################
dictionary["asmvaccinationlabel"] = (
"ASM Vaccination",
u"ASM Vaccination"
)
dictionary["asmvaccinationtooltip"] = (
"Choose which ASM vaccine you would like Evette to use when updating animal records.",
u"Choose which ASM vaccine you would like Evette to use when updating animal records."
)
dictionary["asmerrormessage"] = (
"Unable to update ASM record!",
u"Unable to update ASM record!"
)
dictionary["asmsynctooltip"] = (
"Sync with ASM record",
u"Sync with ASM record"
)
dictionary["fieldlabel"] = (
"Field",
u"Field"
)
dictionary["asmsyncbuttontooltip"] = (
"Sync this field on Evette and ASM records",
u"Sync this field on Evette and ASM records"
)
dictionary["synctoasmlabel"] = (
"Sync to ASM",
u"Sync to ASM"
)
dictionary["synctoevettelabel"] = (
"Sync to Evette",
u"Sync to Evette"
)
dictionary["asmconnectionerrormessage"] = (
"Unable to connect to ASM.",
u"Unable to connect to ASM."
)
dictionary["asmdeathreasonlabel"] = (
"Record updated via ASM.",
u"Record updated via ASM."
)
dictionary["evettedeathreasonlabel"] = (
"Record updated via Evette.",
u"Record updated via Evette."
)
dictionary["importnewasmownermenuitem"] = (
"Import new ASM owner",
u"New Language"
)
dictionary["updateownermenuitem"] = (
"Update current owner",
u"Update current owner"
)
dictionary["1.3.2updatemessage"] = (
"Note: when you run the evette client following this upgrade you will need to re-input your database settings.",
u"Note: when you run the evette client following this upgrade you will need to re-input your database settings."
)
dictionary["tabbetweenentriestooltip"] = (
"You can switch between the user and password entries with the TAB key.",
u"You can switch between the user and password entries with the TAB key."
)
dictionary["dischargelabel"] = (
"Discharge",
u"Discharge"
)
dictionary["overnightstaylabel"] = (
"Overnight Stay",
u"Overnight Stay"
)
dictionary["animalstayedmessage"] = (
"This animal has stayed overnight, creating a new vet form.",
u"This animal has stayed overnight, creating a new vet form."
)
dictionary["prescriptionfeelabel"] = (
"Prescription Fee",
u"Prescription Fee"
)
dictionary["ontimelabel"] = (
"On time",
u"On time"
)
dictionary["dnalabel"] = (
"Did not arrive",
u"Did not arrive"
)
dictionary["viewlabel"] = (
"View",
u"View"
)
dictionary["renamelabel"] = (
"Rename",
u"Rename"
)
dictionary["filterlabel"] = (
"Filter",
u"Filter"
)
dictionary["programbrowsertooltip"] = (
"Browse to find an appropriate program.",
u"Browse to find an appropriate program."
)
dictionary["agelabel"] = (
"Age",
u"Age"
)
dictionary["batchbreakdownlabel"] = (
"Batch No Breakdown",
u"Batch No Breakdown"
)
dictionary["returntoshelterlabel"] = (
"Return to shelter",
u"Return to shelter"
)
dictionary["possibleduplicateownermessage"] = (
"This owner may already be known to the system. Would you like to view the list of similar clients?",
u"This owner may already be known to the system. Would you like to view the list of similar clients?"
)
dictionary["asmimportlabel"] = (
"Imported from ASM",
u"Imported from ASM"
)
return dictionary
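
# Illustrative usage sketch (not part of the original file): every value in
# this dictionary is a two-element tuple of translations, so a caller
# presumably selects one by a language index. The helper name `get_label` and
# the 0 = first-language convention are assumptions, not Evette's actual API.
def get_label(dictionary, key, language_index=0):
    """Return the translation stored for `key` in the chosen language column."""
    return dictionary[key][language_index]

# For example, get_label(dictionary, "vetslabel") would yield "Vets", while
# get_label(dictionary, "currency", 1) would yield "EUR ".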
| gpl-2.0 | -7,974,533,037,425,782,000 | 23.938619 | 163 | 0.665987 | false | 3.111585 | false | false | false |
JhonyVilla/blog | pelican-plugins/assets/assets.py | 1 | 2672 | # -*- coding: utf-8 -*-
"""
Asset management plugin for Pelican
===================================
This plugin allows you to use the `webassets`_ module to manage assets such as
CSS and JS files.
The ASSET_URL is set to a relative url to honor Pelican's RELATIVE_URLS
setting. This requires the use of SITEURL in the templates::
<link rel="stylesheet" href="{{ SITEURL }}/{{ ASSET_URL }}">
.. _webassets: https://webassets.readthedocs.org/
"""
from __future__ import unicode_literals
import os
import logging
from pelican import signals
logger = logging.getLogger(__name__)
try:
import webassets
from webassets import Environment
from webassets.ext.jinja2 import AssetsExtension
except ImportError:
webassets = None
def add_jinja2_ext(pelican):
"""Add Webassets to Jinja2 extensions in Pelican settings."""
if 'JINJA_ENVIRONMENT' in pelican.settings: # pelican 3.7+
pelican.settings['JINJA_ENVIRONMENT']['extensions'].append(AssetsExtension)
else:
pelican.settings['JINJA_EXTENSIONS'].append(AssetsExtension)
def create_assets_env(generator):
"""Define the assets environment and pass it to the generator."""
theme_static_dir = generator.settings['THEME_STATIC_DIR']
assets_destination = os.path.join(generator.output_path, theme_static_dir)
generator.env.assets_environment = Environment(
assets_destination, theme_static_dir)
if 'ASSET_CONFIG' in generator.settings:
for item in generator.settings['ASSET_CONFIG']:
generator.env.assets_environment.config[item[0]] = item[1]
if 'ASSET_BUNDLES' in generator.settings:
for name, args, kwargs in generator.settings['ASSET_BUNDLES']:
generator.env.assets_environment.register(name, *args, **kwargs)
if 'ASSET_DEBUG' in generator.settings:
generator.env.assets_environment.debug = generator.settings['ASSET_DEBUG']
elif logging.getLevelName(logger.getEffectiveLevel()) == "DEBUG":
generator.env.assets_environment.debug = True
for path in (generator.settings['THEME_STATIC_PATHS'] +
generator.settings.get('ASSET_SOURCE_PATHS', [])):
full_path = os.path.join(generator.theme, path)
generator.env.assets_environment.append_path(full_path)
def register():
"""Plugin registration."""
if webassets:
signals.initialized.connect(add_jinja2_ext)
signals.generator_init.connect(create_assets_env)
else:
logger.warning('`assets` failed to load dependency `webassets`.'
'`assets` plugin not loaded.')
| gpl-3.0 | -8,804,839,349,497,951,000 | 33.626667 | 83 | 0.666542 | false | 3.906433 | false | false | false |
samuelmaudo/yepes | yepes/contrib/datamigrations/importation_plans/base.py | 1 | 5591 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import collections
import operator
from django.db import transaction
from django.db.models import F, Q
from django.utils.six.moves import reduce
from django.utils.text import camel_case_to_spaces, capfirst
from yepes.contrib.datamigrations.exceptions import (
UnableToCreateError,
UnableToImportError,
UnableToUpdateError,
)
from yepes.utils.iterators import isplit
from yepes.utils.properties import class_property
class ImportationPlan(object):
"""
Base class for data-importation plan implementations.
Subclasses must at least overwrite ``import_batch()``.
"""
inserts_data = True
updates_data = True
@class_property
def name(cls):
name = camel_case_to_spaces(cls.__name__)
if name.endswith('plan'):
name = name[:-5]
if name.endswith('importation'):
name = name[:-12]
return '_'.join(name.split())
@class_property
def verbose_name(cls):
return capfirst(cls.name.replace('_', ' ').strip())
def __init__(self, migration):
self.migration = migration
def check_conditions(self):
if not self.migration.can_import:
raise UnableToImportError
if self.inserts_data and not self.migration.can_create:
raise UnableToCreateError
if self.updates_data and not self.migration.can_update:
raise UnableToUpdateError
def finalize_importation(self):
pass
def import_batch(self, batch):
raise NotImplementedError('Subclasses of ImportationPlan must override import_batch() method')
def prepare_batch(self, batch):
return batch
def prepare_importation(self):
pass
def run(self, data, batch_size=100):
self.check_conditions()
with transaction.atomic():
self.prepare_importation()
for batch in isplit(data, batch_size):
self.import_batch(self.prepare_batch(batch))
self.finalize_importation()
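
# Illustrative subclass sketch (not in the original file): ImportationPlan
# requires import_batch() to be overridden. A minimal insert-only plan could
# look like this; `BulkCreatePlan` is a hypothetical name, and it assumes each
# row in the batch is a dict of field values for self.migration.model.
class BulkCreatePlan(ImportationPlan):

    updates_data = False

    def import_batch(self, batch):
        # Insert every incoming row in a single query.
        model = self.migration.model
        model._base_manager.bulk_create(model(**row) for row in batch)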
class ModelImportationPlan(ImportationPlan):
"""
Base class for data-importation plan implementations.
Subclasses must at least overwrite ``import_batch()``.
"""
def get_existing_keys(self, batch):
key = self.migration.primary_key
if not batch or key is None:
return set()
qs = self.get_existing_queryset(batch)
if not isinstance(key, collections.Iterable):
return set(qs.values_list(key.attname, flat=True).iterator())
else:
key_attrs = [k.attname for k in key]
return set(qs.values_list(*key_attrs).iterator())
def get_existing_objects(self, batch):
key = self.migration.primary_key
if not batch or key is None:
return {}
qs = self.get_existing_queryset(batch)
if not isinstance(key, collections.Iterable):
key_attr = key.attname
return {
getattr(obj, key_attr): obj
for obj
in qs.iterator()
}
else:
key_attrs = [k.attname for k in key]
return {
tuple(getattr(obj, attr) for attr in key_attrs): obj
for obj
in qs.iterator()
}
def get_existing_queryset(self, batch):
key = self.migration.primary_key
model = self.migration.model
manager = model._base_manager
if not batch or key is None:
return manager.none()
if not isinstance(key, collections.Iterable):
key_attr = key.attname
return manager.filter(**{
'{0}__in'.format(key_attr): (
row[key_attr]
for row
in batch
)
})
else:
key_attrs = [k.attname for k in key]
return manager.filter(reduce(operator.or_, (
Q(**{
attr: row[attr]
for attr
in key_attrs
})
for row
in batch
)))
def prepare_batch(self, batch):
m = self.migration
if m.natural_foreign_keys is not None:
for fld in m.natural_foreign_keys:
attr = fld.attname
path = fld.path
rel_field = m.model_fields[fld][-1]
rel_manager = rel_field.model._base_manager
keys = dict(
rel_manager.filter(**{
'{0}__in'.format(rel_field.name): {
row[path]
for row
in batch
}
}).values_list(
rel_field.name,
'pk',
).iterator()
)
if not m.ignore_missing_foreign_keys:
for row in batch:
row[attr] = keys[row.pop(path)]
else:
erroneous_rows = []
for i, row in enumerate(batch):
try:
value = keys[row.pop(path)]
except KeyError:
erroneous_rows.append(i)
else:
row[attr] = value
for i in reversed(erroneous_rows):
del batch[i]
return batch
| bsd-3-clause | 1,023,181,490,690,836,600 | 29.551913 | 102 | 0.519943 | false | 4.613036 | false | false | false |
j-towns/fastar | fastar/test_util.py | 1 | 3377 | from itertools import chain
from random import shuffle
import numpy as np
from jax import numpy as jnp, test_util as jtu
from jax.util import safe_map, safe_zip
from jax.tree_util import tree_multimap, tree_flatten, tree_map
from fastar import lazy_eval, lazy_eval_fixed_point, LazyArray
map = safe_map
zip = safe_zip
def check_shape_and_dtype(expected, actual):
assert expected.shape == actual.shape
assert expected.dtype == actual.dtype
def naive_fixed_point(fun, arg):
arg, arg_prev = fun(arg), arg
while not jnp.all(arg == arg_prev):
arg, arg_prev = fun(arg), arg
return arg
def check_child_counts(arrs):
visited = set()
def _check_child_counts(arrs):
for arr in arrs:
if isinstance(arr, LazyArray) and arr not in visited:
assert type(arr.child_counts) is np.ndarray
assert arr.child_counts.dtype == np.int64
assert np.all(arr.child_counts == 0)
visited.add(arr)
_check_child_counts(arr.eqn.invars)
_check_child_counts(arrs)
def check_state(arrs):
# Make sure none of the elements are in the temporary REQUESTED state
visited = set()
def _check_state(arrs):
for arr in arrs:
if isinstance(arr, LazyArray) and arr not in visited:
assert np.all((arr.state == 0) | (arr.state == 1))
visited.add(arr)
_check_state(arr.eqn.invars)
_check_state(arrs)
def _identity(x):
return x + np.zeros((), x.dtype)
def check_lazy_fun(fun_, *args, atol=None, rtol=None):
def fun(*args):
args = tree_map(_identity, args)
return fun_(*args)
out_expected_flat, out_expected_tree = tree_flatten(fun(*args))
out_flat, out_tree = tree_flatten(lazy_eval(fun, *args))
assert out_expected_tree == out_tree
tree_multimap(check_shape_and_dtype, out_expected_flat, out_flat)
jtu.check_close(out_expected_flat,
[o[:] if o.shape else o[()] for o in out_flat], atol, rtol)
check_child_counts(out_flat)
check_state(out_flat)
out_flat, _ = tree_flatten(lazy_eval(fun, *args))
indices = []
for n, o in enumerate(out_flat):
indices.append([(n, i) for i in np.ndindex(*o.shape)])
indices = list(chain(*indices))
shuffle(indices)
indices = indices[:5]
for n, i in indices:
jtu.check_close(out_flat[n][i], out_expected_flat[n][i], atol, rtol)
assert np.dtype(out_flat[n][i]) == np.dtype(out_expected_flat[n][i])
check_child_counts(out_flat)
check_state(out_flat)
def check_lazy_fixed_point(fun, mock_arg, atol=None, rtol=None):
out_expected_flat, out_expected_tree = tree_flatten(
naive_fixed_point(fun, mock_arg))
out_flat, out_tree = tree_flatten(lazy_eval_fixed_point(fun, mock_arg))
assert out_expected_tree == out_tree
tree_multimap(check_shape_and_dtype, out_expected_flat, out_flat)
jtu.check_close(out_expected_flat, [o[:] for o in out_flat], atol, rtol)
check_child_counts(out_flat)
check_state(out_flat)
out_flat, out_tree = tree_flatten(lazy_eval_fixed_point(fun, mock_arg))
indices = []
for n, o in enumerate(out_flat):
indices.append([(n, i) for i in np.ndindex(*o.shape)])
indices = list(chain(*indices))
shuffle(indices)
indices = indices[:5]
for n, i in indices:
jtu.check_close(out_flat[n][i], out_expected_flat[n][i], atol, rtol)
assert np.dtype(out_flat[n][i]) == np.dtype(out_expected_flat[n][i])
check_child_counts(out_flat)
check_state(out_flat)
| mit | 8,212,361,584,758,945,000 | 34.177083 | 77 | 0.674267 | false | 2.991143 | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.