Dataset schema (one record per source file):

- repo_name: string (length 5–92)
- path: string (length 4–221)
- copies: string (19 values)
- size: string (length 4–6)
- content: string (length 766–896k)
- license: string (15 values)
- hash: int64 (-9,223,277,421,539,062,000 to 9,223,102,107B)
- line_mean: float64 (6.51–99.9)
- line_max: int64 (32–997)
- alpha_frac: float64 (0.25–0.96)
- autogenerated: bool (1 class)
- ratio: float64 (1.5–13.6)
- config_test: bool (2 classes)
- has_no_keywords: bool (2 classes)
- few_assignments: bool (1 class)
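Given this schema, a minimal loading-and-filtering sketch follows (assuming the dump comes from a Hugging Face-style `datasets` preview; the dataset path `user/python-code-corpus` and the `train` split are placeholder assumptions, not the actual source of this table):

```python
# Minimal sketch for filtering a corpus that has the columns above.
# The dataset path is a placeholder assumption; substitute the real one.
from datasets import load_dataset

ds = load_dataset("user/python-code-corpus", split="train")  # hypothetical path

def keep(row):
    # Keep human-written files whose character statistics fall inside the
    # ranges advertised by the schema (autogenerated/alpha_frac/line_max).
    return (not row["autogenerated"]
            and 0.25 <= row["alpha_frac"] <= 0.96
            and row["line_max"] <= 997)

filtered = ds.filter(keep)
print("kept", len(filtered), "of", len(ds), "files")
```
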
repo_name: selentd/pythontools
path: pytools/src/oldsrc/addindex.py
copies: 1 | size: 2335
content:

```python
import datetime
import pymongo
from pymongo.mongo_client import MongoClient

import indexdata


def getIndexEntry(indexData):
    return indexData.getDictionary()


def getIndexDateEntry(indexData):
    return {
        "date": datetime.datetime(indexData.date.year,
                                  indexData.date.month,
                                  indexData.date.day, 0, 0)
    }


def getIndexHistory(source, size=10000000):
    indexHistory = indexdata.IndexHistory(source, size)
    indexHistory.readIndex()
    return indexHistory


def addIndex(source, dbName, indexName):
    #client = MongoClient("192.168.81.147")
    client = MongoClient("127.0.0.1")
    database = client[dbName]
    collection = database[indexName]
    collection.create_index([("date", pymongo.ASCENDING)], name="date", unique=True)
    indexHistory = getIndexHistory(source)
    for indexData in indexHistory.indexHistory:
        indexEntry = getIndexEntry(indexData)
        indexDate = getIndexDateEntry(indexData)
        if collection.find_one(indexDate) == None:
            collection.insert(indexEntry)


def addIndizes():
    '''
    addIndex('../../data/sp500.csv', 'stockdb', 'sp500')
    addIndex('../../data/tecdax.csv', 'stockdb', 'tecdax')
    addIndex('../../data/mdax.csv', 'stockdb', 'mdax')
    addIndex('../../data/nasdaq100.csv', 'stockdb', 'nasdaq100')
    addIndex('../../data/smi.csv', 'stockdb', 'smi')
    addIndex('../../data/tecdax.csv', 'stockdb', 'tecdax')
    '''
    indexList = ['atx', 'brent', 'cac', 'dax', 'dowjones', 'estoxx50',
                 'ftse100', 'ftsemib', 'gold', 'hangseng', 'hscei', 'ibex',
                 'mdax', 'nasdaq100', 'nikkei', 'sdax', 'smi', 'sp500', 'tecdax']
    for index in indexList:
        print '../../data/'+index+'.csv'
        addIndex('../../data/'+index+'.csv', 'stockdb', index)


if __name__ == '__main__':
    addIndizes()
```

license: apache-2.0 | hash: 2,964,568,120,293,750,000 | line_mean: 28.556962 | line_max: 64 | alpha_frac: 0.494647
autogenerated: false | ratio: 3.964346 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: openvenues/address_normalizer
path: address_normalizer/deduping/near_duplicates.py
copies: 1 | size: 6806
content:

```python
import geohash
import logging
import operator

from functools import partial
from itertools import chain, product, combinations, imap

from address_normalizer.deduping.duplicates import *
from address_normalizer.deduping.storage.base import *

from address_normalizer.text.gazetteers import *
from address_normalizer.text.normalize import *

from address_normalizer.models.address import *
from address_normalizer.models.venue import *

near_dupe_registry = {}

# Two lat/longs sharing a geohash prefix of 6 characters are within about 610 meters of each other
DEFAULT_GEOHASH_PRECISION = 6

logger = logging.getLogger('near_dupes')


class NearDupeMeta(type):
    def __init__(cls, name, bases, dict_):
        if 'abstract' not in dict_:
            near_dupe_registry[cls.__entity_type__] = cls
        super(NearDupeMeta, cls).__init__(name, bases, dict_)

dupe_cache = {}


class NearDupe(object):
    abstract = True
    __metaclass__ = NearDupeMeta

    key_generators = ()
    configured = False
    storage = NopStorage()

    @classmethod
    def configure(cls, storage):
        cls.storage = storage

    @classmethod
    def find_dupes(cls, ents):
        if not ents:
            return {}, {}, {}

        entity_dict = {e.guid: e for e in ents}

        clusters = defaultdict(set)
        _ = [clusters[safe_encode(c)].add(ent.guid) for ent in ents for c in cls.gen_keys(ent)]
        clusters = dict(clusters)

        logger.info('{} clusters found'.format(len(clusters)))
        logger.info('Checking for local dupes')

        local_guid_pairs = set()
        local_dupes = {}

        for cluster_id, guids in clusters.iteritems():
            if len(guids) < 2:
                continue
            local_guid_pairs.update(combinations(guids, 2))

        for g1, g2 in local_guid_pairs:
            ent1 = entity_dict[g1]
            ent2 = entity_dict[g2]
            if cls.exact_dupe.is_dupe(ent1, ent2):
                cls.assign_local_dupe(local_dupes, ent1, ent2)

        logger.info('Checking global dupes')

        existing_clusters = defaultdict(list)
        if clusters:
            _ = [existing_clusters[c].append(guid)
                 for c, guid in cls.storage.search(clusters.keys()).iteritems() if guid]

        existing_guids = set()
        existing_ents = {}

        if existing_clusters:
            existing_guids = set.union(*(set(v) for v in existing_clusters.itervalues()))
            existing_ents = {guid: cls.model(json.loads(e))
                             for guid, e in cls.storage.multiget(list(existing_guids)).iteritems() if e}

        global_dupes = {}
        global_guid_pairs = set([(new_guid, existing_guid)
                                 for cluster_id, existing in existing_clusters.iteritems()
                                 for new_guid, existing_guid in product(clusters[cluster_id], existing)])

        for new_guid, existing_guid in global_guid_pairs:
            local_ent = entity_dict[new_guid]
            existing_ent = existing_ents[existing_guid]
            if cls.exact_dupe.is_dupe(existing_ent, local_ent):
                cls.assign_global_dupe(global_dupes, existing_ent, local_ent)

        logger.info('Done with global dupe checking')

        return clusters, local_dupes, global_dupes

    @classmethod
    def check(cls, objects, add=True):
        object_dict = {o.guid: o for o in objects}

        clusters, local_dupes, global_dupes = cls.find_dupes(objects)

        new_clusters = {}
        new_objects = {}

        dupes = local_dupes.copy()
        dupes.update(global_dupes)

        if add:
            for k, guids in clusters.iteritems():
                non_dupes = [g for g in guids if g not in dupes]
                if non_dupes:
                    guid = non_dupes[0]
                    new_clusters[k] = guid
                    new_objects[guid] = object_dict[guid]

            cls.add({guid: json.dumps(obj.to_primitive()) for guid, obj in new_objects.iteritems()})
            cls.add_clusters(new_clusters)

        return [(obj, (dupes.get(obj.guid, obj.guid), obj.guid in dupes)) for obj in objects]

    @classmethod
    def assign_local_dupe(cls, dupes, existing, new):
        guid1 = existing.guid
        guid2 = new.guid

        guid1_existing = dupes.get(guid1)
        guid2_existing = dupes.get(guid2)

        if not guid1_existing and not guid2_existing:
            dupes[guid1] = guid2
        elif guid1_existing:
            dupes[guid2] = guid1_existing
        elif guid2_existing:
            dupes[guid1] = guid2_existing

    @classmethod
    def assign_global_dupe(cls, dupes, existing, new):
        dupes[new.guid] = existing.guid

    @classmethod
    def add(cls, kvs):
        cls.storage.multiput(kvs)

    @classmethod
    def add_clusters(cls, kvs):
        cls.storage.multiput(kvs)


class AddressNearDupe(NearDupe):
    __entity_type__ = Address.entity_type
    model = Address
    exact_dupe = AddressDupe

    geohash_precision = DEFAULT_GEOHASH_PRECISION

    street_gazetteers = list(chain(*[gazette_field_registry[f]
                                     for f in (address_fields.NAME,
                                               address_fields.HOUSE_NUMBER,
                                               address_fields.STREET)]))
    all_gazetteers = list(chain(*gazette_field_registry.values()))

    @classmethod
    def configure(cls, storage, bloom_filter=None, geohash_precision=DEFAULT_GEOHASH_PRECISION):
        cls.storage = storage
        if bloom_filter:
            cls.bloom_filter = bloom_filter
        cls.geohash_precision = geohash_precision

    @classmethod
    def expanded_street_address(cls, address):
        street_address_components = []

        house_number = (address.house_number or '').strip()
        if house_number:
            street_address_components.append(house_number)

        street = (address.street or '').strip()
        if street:
            street_address_components.append(street)

        surface_forms = set()

        if street_address_components:
            street_address = u' '.join(street_address_components)
            # the return value from expand
            return address_phrase_filter.expand_street_address(street_address)

    @classmethod
    def geohash(cls, address):
        geo = geohash.encode(address.latitude, address.longitude, cls.geohash_precision)
        neighbors = geohash.neighbors(geo)
        all_geo = [geo] + neighbors
        return all_geo

    @classmethod
    def gen_keys(cls, address):
        street_surface_forms = cls.expanded_street_address(address)

        if address.latitude and address.longitude:
            all_geo = cls.geohash(address)

            for geo, norm_address in product(all_geo, street_surface_forms):
                key = '|'.join([geo, norm_address])
                yield key


class VenueNearDupe(NearDupe):
    __entity_type__ = Venue.entity_type
    model = Venue
```

license: mit | hash: 7,338,996,294,223,279,000 | line_mean: 31.108491 | line_max: 189 | alpha_frac: 0.618425
autogenerated: false | ratio: 3.725233 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: miyanishi2/caffe-rpc
path: caffe_extractor.py
copies: 1 | size: 1439
content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = 'miyanishi'

import caffe
import numpy as np


class CaffeExtractor():
    def __init__(self, caffe_root=None, feature_layers=["fc6"], gpu=True):
        self.feature_layers = feature_layers
        MODEL_FILE = caffe_root + 'examples/imagenet/imagenet_deploy.prototxt'
        PRETRAINED = caffe_root + 'examples/imagenet/caffe_reference_imagenet_model'
        MEAN_FILE = caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'
        self.net = caffe.Classifier(MODEL_FILE, PRETRAINED,
                                    mean=np.load(MEAN_FILE),
                                    channel_swap=(2, 1, 0),
                                    raw_scale=255,
                                    image_dims=(256, 256))
        #self.net.set_phase_test()
        if gpu:
            self.net.set_mode_gpu()
        else:
            self.net.set_mode_cpu()

        imagenet_labels_filename = caffe_root + 'data/ilsvrc12/synset_words.txt'
        self.labels = np.loadtxt(imagenet_labels_filename, str, delimiter='\t')

    def getImageFeatures(self, image):
        score = self.net.predict([image])
        feature_dic = {layer: np.copy(self.net.blobs[layer].data[4][:, 0, 0])
                       for layer in self.feature_layers}
        return feature_dic

    def getImageLabels(self):
        top_k = self.net.blobs['prob'].data[4].flatten().argsort()[-1:-6:-1]
        labels = self.labels[top_k].tolist()
        return labels
```

license: bsd-2-clause | hash: -3,062,253,772,976,018,400 | line_mean: 36.868421 | line_max: 108 | alpha_frac: 0.587908
autogenerated: false | ratio: 3.323326 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: Donkyhotay/MoonPy
path: twisted/internet/posixbase.py
copies: 1 | size: 14121
content:

```python
# -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Posix reactor base class
"""

import warnings
import socket
import errno
import os

from zope.interface import implements, classImplements

from twisted.python.compat import set
from twisted.internet.interfaces import IReactorUNIX, IReactorUNIXDatagram
from twisted.internet.interfaces import IReactorTCP, IReactorUDP, IReactorSSL, IReactorArbitrary
from twisted.internet.interfaces import IReactorProcess, IReactorMulticast
from twisted.internet.interfaces import IHalfCloseableDescriptor
from twisted.internet import error
from twisted.internet import tcp, udp

from twisted.python import log, failure, util
from twisted.persisted import styles
from twisted.python.runtime import platformType, platform

from twisted.internet.base import ReactorBase, _SignalReactorMixin

try:
    from twisted.internet import ssl
    sslEnabled = True
except ImportError:
    sslEnabled = False

try:
    from twisted.internet import unix
    unixEnabled = True
except ImportError:
    unixEnabled = False

processEnabled = False
if platformType == 'posix':
    from twisted.internet import fdesc
    import process
    processEnabled = True

if platform.isWindows():
    try:
        import win32process
        processEnabled = True
    except ImportError:
        win32process = None


class _Win32Waker(log.Logger, styles.Ephemeral):
    """I am a workaround for the lack of pipes on win32.

    I am a pair of connected sockets which can wake up the main loop
    from another thread.
    """
    disconnected = 0

    def __init__(self, reactor):
        """Initialize.
        """
        self.reactor = reactor
        # Following select_trigger (from asyncore)'s example;
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.setsockopt(socket.IPPROTO_TCP, 1, 1)
        server.bind(('127.0.0.1', 0))
        server.listen(1)
        client.connect(server.getsockname())
        reader, clientaddr = server.accept()
        client.setblocking(0)
        reader.setblocking(0)
        self.r = reader
        self.w = client
        self.fileno = self.r.fileno

    def wakeUp(self):
        """Send a byte to my connection.
        """
        try:
            util.untilConcludes(self.w.send, 'x')
        except socket.error, (err, msg):
            if err != errno.WSAEWOULDBLOCK:
                raise

    def doRead(self):
        """Read some data from my connection.
        """
        try:
            self.r.recv(8192)
        except socket.error:
            pass

    def connectionLost(self, reason):
        self.r.close()
        self.w.close()


class _UnixWaker(log.Logger, styles.Ephemeral):
    """This class provides a simple interface to wake up the event loop.

    This is used by threads or signals to wake up the event loop.
    """
    disconnected = 0

    i = None
    o = None

    def __init__(self, reactor):
        """Initialize.
        """
        self.reactor = reactor
        self.i, self.o = os.pipe()
        fdesc.setNonBlocking(self.i)
        fdesc._setCloseOnExec(self.i)
        fdesc.setNonBlocking(self.o)
        fdesc._setCloseOnExec(self.o)
        self.fileno = lambda: self.i

    def doRead(self):
        """Read some bytes from the pipe.
        """
        fdesc.readFromFD(self.fileno(), lambda data: None)

    def wakeUp(self):
        """Write one byte to the pipe, and flush it.
        """
        # We don't use fdesc.writeToFD since we need to distinguish
        # between EINTR (try again) and EAGAIN (do nothing).
        if self.o is not None:
            try:
                util.untilConcludes(os.write, self.o, 'x')
            except OSError, e:
                if e.errno != errno.EAGAIN:
                    raise

    def connectionLost(self, reason):
        """Close both ends of my pipe.
        """
        if not hasattr(self, "o"):
            return
        for fd in self.i, self.o:
            try:
                os.close(fd)
            except IOError:
                pass
        del self.i, self.o


if platformType == 'posix':
    _Waker = _UnixWaker
elif platformType == 'win32':
    _Waker = _Win32Waker


class PosixReactorBase(_SignalReactorMixin, ReactorBase):
    """
    A basis for reactors that use file descriptors.
    """
    implements(IReactorArbitrary, IReactorTCP, IReactorUDP, IReactorMulticast)

    def __init__(self):
        ReactorBase.__init__(self)
        if self.usingThreads or platformType == "posix":
            self.installWaker()

    def _disconnectSelectable(self, selectable, why, isRead, faildict={
        error.ConnectionDone: failure.Failure(error.ConnectionDone()),
        error.ConnectionLost: failure.Failure(error.ConnectionLost())
        }):
        """
        Utility function for disconnecting a selectable.

        Supports half-close notification, isRead should be boolean indicating
        whether error resulted from doRead().
        """
        self.removeReader(selectable)
        f = faildict.get(why.__class__)
        if f:
            if (isRead and why.__class__ == error.ConnectionDone
                and IHalfCloseableDescriptor.providedBy(selectable)):
                selectable.readConnectionLost(f)
            else:
                self.removeWriter(selectable)
                selectable.connectionLost(f)
        else:
            self.removeWriter(selectable)
            selectable.connectionLost(failure.Failure(why))

    def installWaker(self):
        """
        Install a `waker' to allow threads and signals to wake up the IO thread.

        We use the self-pipe trick (http://cr.yp.to/docs/selfpipe.html) to wake
        the reactor. On Windows we use a pair of sockets.
        """
        if not self.waker:
            self.waker = _Waker(self)
            self._internalReaders.add(self.waker)
            self.addReader(self.waker)

    # IReactorProcess

    def spawnProcess(self, processProtocol, executable, args=(), env={}, path=None,
                     uid=None, gid=None, usePTY=0, childFDs=None):
        args, env = self._checkProcessArgs(args, env)
        if platformType == 'posix':
            if usePTY:
                if childFDs is not None:
                    raise ValueError("Using childFDs is not supported with usePTY=True.")
                return process.PTYProcess(self, executable, args, env, path,
                                          processProtocol, uid, gid, usePTY)
            else:
                return process.Process(self, executable, args, env, path,
                                       processProtocol, uid, gid, childFDs)
        elif platformType == "win32":
            if uid is not None or gid is not None:
                raise ValueError("The uid and gid parameters are not supported on Windows.")
            if usePTY:
                raise ValueError("The usePTY parameter is not supported on Windows.")
            if childFDs:
                raise ValueError("Customizing childFDs is not supported on Windows.")

            if win32process:
                from twisted.internet._dumbwin32proc import Process
                return Process(self, processProtocol, executable, args, env, path)
            else:
                raise NotImplementedError, "spawnProcess not available since pywin32 is not installed."
        else:
            raise NotImplementedError, "spawnProcess only available on Windows or POSIX."

    # IReactorUDP

    def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
        """Connects a given L{DatagramProtocol} to the given numeric UDP port.

        @returns: object conforming to L{IListeningPort}.
        """
        p = udp.Port(port, protocol, interface, maxPacketSize, self)
        p.startListening()
        return p

    def connectUDP(self, remotehost, remoteport, protocol, localport=0,
                   interface='', maxPacketSize=8192):
        """DEPRECATED.

        Connects a L{ConnectedDatagramProtocol} instance to a UDP port.
        """
        warnings.warn("use listenUDP and then transport.connect().",
                      DeprecationWarning, stacklevel=2)
        p = udp.ConnectedPort((remotehost, remoteport), localport,
                              protocol, interface, maxPacketSize, self)
        p.startListening()
        return p

    # IReactorMulticast

    def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192,
                        listenMultiple=False):
        """Connects a given DatagramProtocol to the given numeric UDP port.

        EXPERIMENTAL.

        @returns: object conforming to IListeningPort.
        """
        p = udp.MulticastPort(port, protocol, interface, maxPacketSize, self,
                              listenMultiple)
        p.startListening()
        return p

    # IReactorUNIX

    def connectUNIX(self, address, factory, timeout=30, checkPID=0):
        """@see: twisted.internet.interfaces.IReactorUNIX.connectUNIX
        """
        assert unixEnabled, "UNIX support is not present"
        c = unix.Connector(address, factory, timeout, self, checkPID)
        c.connect()
        return c

    _unspecified = object()

    def _checkMode(self, name, mode):
        """
        Check C{mode} to see if a value was specified for it and emit a
        deprecation warning if so.  Return the default value if none was
        specified, otherwise return C{mode}.
        """
        if mode is not self._unspecified:
            warnings.warn(
                'The mode parameter of %(name)s will be removed.  Do not pass '
                'a value for it.  Set permissions on the containing directory '
                'before calling %(name)s, instead.' % dict(name=name),
                category=DeprecationWarning,
                stacklevel=3)
        else:
            mode = 0666
        return mode

    def listenUNIX(self, address, factory, backlog=50, mode=_unspecified, wantPID=0):
        """
        @see: twisted.internet.interfaces.IReactorUNIX.listenUNIX
        """
        assert unixEnabled, "UNIX support is not present"
        mode = self._checkMode('IReactorUNIX.listenUNIX', mode)
        p = unix.Port(address, factory, backlog, mode, self, wantPID)
        p.startListening()
        return p

    # IReactorUNIXDatagram

    def listenUNIXDatagram(self, address, protocol, maxPacketSize=8192, mode=_unspecified):
        """
        Connects a given L{DatagramProtocol} to the given path.

        EXPERIMENTAL.

        @returns: object conforming to L{IListeningPort}.
        """
        assert unixEnabled, "UNIX support is not present"
        mode = self._checkMode('IReactorUNIXDatagram.listenUNIXDatagram', mode)
        p = unix.DatagramPort(address, protocol, maxPacketSize, mode, self)
        p.startListening()
        return p

    def connectUNIXDatagram(self, address, protocol, maxPacketSize=8192,
                            mode=_unspecified, bindAddress=None):
        """
        Connects a L{ConnectedDatagramProtocol} instance to a path.

        EXPERIMENTAL.
        """
        assert unixEnabled, "UNIX support is not present"
        mode = self._checkMode('IReactorUNIXDatagram.connectUNIXDatagram', mode)
        p = unix.ConnectedDatagramPort(address, protocol, maxPacketSize,
                                       mode, bindAddress, self)
        p.startListening()
        return p

    # IReactorTCP

    def listenTCP(self, port, factory, backlog=50, interface=''):
        """@see: twisted.internet.interfaces.IReactorTCP.listenTCP
        """
        p = tcp.Port(port, factory, backlog, interface, self)
        p.startListening()
        return p

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """@see: twisted.internet.interfaces.IReactorTCP.connectTCP
        """
        c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
        c.connect()
        return c

    # IReactorSSL (sometimes, not implemented)

    def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
        """@see: twisted.internet.interfaces.IReactorSSL.connectSSL
        """
        assert sslEnabled, "SSL support is not present"
        c = ssl.Connector(host, port, factory, contextFactory, timeout, bindAddress, self)
        c.connect()
        return c

    def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
        """@see: twisted.internet.interfaces.IReactorSSL.listenSSL
        """
        assert sslEnabled, "SSL support is not present"
        p = ssl.Port(port, factory, contextFactory, backlog, interface, self)
        p.startListening()
        return p

    # IReactorArbitrary

    def listenWith(self, portType, *args, **kw):
        kw['reactor'] = self
        p = portType(*args, **kw)
        p.startListening()
        return p

    def connectWith(self, connectorType, *args, **kw):
        kw['reactor'] = self
        c = connectorType(*args, **kw)
        c.connect()
        return c

    def _removeAll(self, readers, writers):
        """
        Remove all readers and writers, and list of removed L{IReadDescriptor}s
        and L{IWriteDescriptor}s.

        Meant for calling from subclasses, to implement removeAll, like::

            def removeAll(self):
                return self._removeAll(self._reads, self._writes)

        where C{self._reads} and C{self._writes} are iterables.
        """
        removedReaders = set(readers) - self._internalReaders
        for reader in removedReaders:
            self.removeReader(reader)

        removedWriters = set(writers)
        for writer in removedWriters:
            self.removeWriter(writer)

        return list(removedReaders | removedWriters)


if sslEnabled:
    classImplements(PosixReactorBase, IReactorSSL)
if unixEnabled:
    classImplements(PosixReactorBase, IReactorUNIX, IReactorUNIXDatagram)
if processEnabled:
    classImplements(PosixReactorBase, IReactorProcess)

__all__ = ["PosixReactorBase"]
```

license: gpl-3.0 | hash: 8,697,282,309,824,158,000 | line_mean: 32.147887 | line_max: 108 | alpha_frac: 0.622477
autogenerated: false | ratio: 4.241814 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: unfoldingWord-dev/uwadmin
path: uwadmin/migrations/0005_auto_20150524_1534.py
copies: 1 | size: 1202
content:

```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('uwadmin', '0004_auto_20150318_0034'),
    ]

    operations = [
        migrations.AddField(
            model_name='publishrequest',
            name='approved_at',
            field=models.DateTimeField(default=None, null=True, db_index=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='publishrequest',
            name='source_text',
            field=models.ForeignKey(related_name='source_publish_requests', to='uwadmin.LangCode', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='publishrequest',
            name='source_version',
            field=models.CharField(max_length=10, blank=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='publishrequest',
            name='language',
            field=models.ForeignKey(related_name='publish_requests', to='uwadmin.LangCode'),
            preserve_default=True,
        ),
    ]
```

license: mit | hash: 8,915,345,621,021,246,000 | line_mean: 30.631579 | line_max: 110 | alpha_frac: 0.583195
autogenerated: false | ratio: 4.386861 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: IQSS/miniverse
path: miniverse/settings/local_with_routing.py
copies: 1 | size: 6078
content:

```python
"""
Settings template for running two databases:
    - Existing Dataverse databases (we only read it)
    - Second database for Django core apps + Miniverse apps

Please read through and change the settings where noted
"""
from __future__ import absolute_import
import sys
from os import makedirs, environ
from os.path import join, isdir

from miniverse.testrunners.disable_migrations import DisableMigrations
from miniverse.settings.base import *

# -----------------------------------
# DEBUG
#   - True: Dataverse Key required for API
#   - Includes SQL for many of the API call results
# -----------------------------------
DEBUG = True  # True False

# -----------------------------------
# TIME_ZONE
# -----------------------------------
TIME_ZONE = 'America/New_York'

# -----------------------------------
# Secret key
# -----------------------------------
SECRET_KEY = 'DEV-j94xnz*dj5f@_6-gt@ov)yjbcx0uagb7sv9a0j-(jo)j%m$el%'

# -----------------------------------
# Metrics cache settings
# -----------------------------------
METRICS_CACHE_VIEW = False
METRICS_CACHE_VIEW_TIME = 60 * 60 * 2  # Cache for visualizations
METRICS_CACHE_API_TIME = 60 * 15       # Cache for API endpoints

# -----------------------------------
# For local runs, this directory will include:
#   - static files (after running 'collectstatic')
#   - optional, sqlite db if that's used for the Django apps db
# -----------------------------------
LOCAL_SETUP_DIR = join(PROJECT_ROOT, 'test_setup')
if not isdir(LOCAL_SETUP_DIR):
    makedirs(LOCAL_SETUP_DIR)

# -----------------------------------
# Database routing.
#   e.g. between the Dataverse db and Django db
# -----------------------------------
DATABASE_ROUTERS = ['miniverse.db_routers.db_dataverse_router.DataverseRouter',]

# -----------------------------------
# URL of the Dataverse db being read
# -----------------------------------
#DATAVERSE_INSTALLATION_URL = 'https://demo.dataverse.org'
#DATAVERSE_INSTALLATION_URL = 'https://dataverse.harvard.edu'
DATAVERSE_INSTALLATION_URL = 'http://localhost:8080'

# -----------------------------------
# Database Setup
#   - default -> Create a new db for the django/miniverse specific apps
#       - May be any relational db type: postgres, sqlite, etc
#   - dataverse -> Read-only users for the Dataverse Posgres db
# -----------------------------------
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': join(LOCAL_SETUP_DIR, 'miniverse_default.db3'),
    },
    'dataverse': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'dvndb',      # dvndb_demo, dvn_thedata, dvndb
        'USER': 'postgres',   # Set to a read-only user
        'PASSWORD': '123',
        'HOST': 'localhost',
        'TEST': {
            'MIRROR': 'default',  # For running tests, only create 1 db
        },
    }
}

# -----------------------------------
# Need when running DEBUG = False
# -----------------------------------
ALLOWED_HOSTS = ('127.0.0.1', 'dd7be506.ngrok.io')

# -----------------------------------
# Need to set when RestrictAdminMiddleware is active
# -----------------------------------
INTERNAL_IPS = ('127.0.0.1',)

# -----------------------------------
# Slackbot
# -----------------------------------
SLACK_USERNAME = 'dvbot'
SLACK_BOT_TOKEN = environ.get('SLACK_BOT_TOKEN')
BOT_ID = environ.get('BOT_ID')
SLACK_WEBHOOK_SECRET = environ.get('SLACK_WEBHOOK_SECRET')

# -----------------------------------
# Optional MIDDLEWARE_CLASSES
# -----------------------------------
MIDDLEWARE_CLASSES += [
    # Restrict by IP address
    #'dv_apps.admin_restrict.middleware.RestrictAdminMiddleware',
    # Email about broken 404s
    #'django.middleware.common.BrokenLinkEmailsMiddleware',
]

# -----------------------------------
# cookie name
# -----------------------------------
SESSION_COOKIE_NAME = 'dv_metrics'

# -----------------------------------
# Where static files are collected
# -----------------------------------
STATIC_ROOT = join(LOCAL_SETUP_DIR, 'staticfiles')
if not isdir(STATIC_ROOT):
    makedirs(STATIC_ROOT)

# -----------------------------------
# Django Debug TOOLBAR CONFIGURATION
# -----------------------------------
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
# -----------------------------------
INSTALLED_APPS += (
    'debug_toolbar',
    'django.contrib.admindocs',
)

MIDDLEWARE_CLASSES += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]

DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html

# -----------------------------------
# For running tests:
#   - Only create 1 test database it has to be a Postgres db
#   - Remove the Database routing
#   - Disable migrations. e.g., We don't want to run them
#   - Set a new TEST_RUNNER:
#       - We want to *create* unmanaged tables in the test db
#   - Disable timezone awareness for fixture loading
# -----------------------------------
if 'test' in sys.argv or 'test_coverage' in sys.argv:  # Covers regular testing and django-coverage

    DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
    DATABASES['default']['HOST'] = 'localhost'
    DATABASES['default']['USER'] = 'postgres'
    DATABASES['default']['PASSWORD'] = '123'

    # The custom routers we're using to route certain ORM queries
    # to the remote host conflict with our overridden db settings.
    # Set DATABASE_ROUTERS to an empty list to return to the defaults
    # during the test run.
    DATABASE_ROUTERS = []

    MIGRATION_MODULES = DisableMigrations()

    # Set Django's test runner a custom class that will create
    # 'unmanaged' tables
    TEST_RUNNER = 'miniverse.testrunners.managed_model_test_runner.ManagedModelTestRunner'

    # Disable timezone awareness to False to avoid warnings when loading fixtures
    # e.g. to avoid: RuntimeWarning: (some object) received a naive datetime (2016-08-16
    # 09:25:41.349000) while time zone support is active.
    USE_TZ = False
```

license: mit | hash: 906,195,234,186,292,000 | line_mean: 33.534091 | line_max: 99 | alpha_frac: 0.557585
autogenerated: false | ratio: 3.980354 | config_test: true | has_no_keywords: false | few_assignments: false

repo_name: sanluca/py-acqua
path: setup.py
copies: 1 | size: 1902
content:

```python
# -*- coding: iso-8859-15 -*-
#Copyright (C) 2005, 2008 Py-Acqua
#http://www.pyacqua.net
#email: [email protected]
#
#
#Py-Acqua is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  (at your option) any later version.
#
#Py-Acqua is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with Py-Acqua; if not, write to the Free Software
#  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

import os
import sys
import glob

from distutils.core import setup

###

def moon_walk (root_dir, repl):
    packages, data_files = [], []

    for dirpath, dirnames, filenames in os.walk (root_dir):
        for i, dirname in enumerate (dirnames):
            if dirname.startswith('.'):
                del dirnames[i]
        data_files.append(("share/pyacqua/" + repl + dirpath[len(root_dir):],
                           [os.path.join(dirpath, f) for f in filenames]))

    return data_files

if __name__ != "__main__":
    print moon_walk (sys.argv[1])
else:
    setup (
        name="py-acqua",
        version="1.0",
        description="PyAcqua program",
        author="Francesco Piccinno",
        author_email="[email protected]",
        url="http://pyacqua.altervista.org",
        scripts=["src/acqua.py"],
        package_dir={'pyacqua': 'src'},
        packages=['pyacqua'],
        data_files=moon_walk ("skins", "skins") + moon_walk ("locale", "locale") + [
            #("src", glob.glob ("src/*")),
            ("share/pyacqua/plugins", glob.glob ("plugins/*.py")),
            ("share/pyacqua/pixmaps", glob.glob ("pixmaps/*")),
            ("share/pyacqua/tips", ["src/tip_of_the_day_en.txt", "src/tip_of_the_day.txt"])
        ]
    )
```

license: gpl-2.0 | hash: 8,070,389,984,959,156,000 | line_mean: 32.368421 | line_max: 120 | alpha_frac: 0.679811
autogenerated: false | ratio: 3.05297 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: bhermansyah/DRR-datacenter
path: scripts/misc-boedy1996/glofas_refactor.py
copies: 1 | size: 6276
content:

```python
import os, sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "geonode.settings")

import csv
from django.db import connection, connections
from django.conf import settings
from geodb.models import Glofasintegrated, AfgBasinLvl4GlofasPoint
from netCDF4 import Dataset, num2date
import numpy as np
from django.contrib.gis.geos import Point

def getRefactorData():
    # f_IN = open("/Users/budi/Documents/iMMAP/DRR-datacenter/scripts/misc-boedy1996/Glofas_Baseline_Output_Adjustment_factor.csv", 'rU')
    f_IN = open("/home/ubuntu/Glofas_Baseline_Output_Adjustment_factor.csv", 'rU')
    reader = csv.reader(f_IN)
    first = True
    data = {}
    for row in reader:
        if first:
            first = False
        else:
            lon = row[2]
            lat = row[1]
            # data[lat][lon]['rl2_factor']=row[8]
            data[lat] = {lon: {'rl2_factor': row[8], 'rl5_factor': row[9], 'rl20_factor': row[10]}}
    f_IN.close()
    # print data['67.75']['31.85']
    return data

def calculate_glofas_params(date):
    date_arr = date.split('-')
    filename = getattr(settings, 'GLOFAS_NC_FILES') + date_arr[0] + date_arr[1] + date_arr[2] + "00.nc"
    # print Glofasintegrated.objects.latest('datadate').date
    nc = Dataset(filename, 'r', Format='NETCDF4')

    # get coordinates variables
    lats = nc.variables['lat'][:]
    lons = nc.variables['lon'][:]
    rl2s = nc.variables['rl2'][:]
    rl5s = nc.variables['rl5'][:]
    rl20s = nc.variables['rl20'][:]
    times = nc.variables['time'][:]
    essemble = nc.variables['ensemble'][:]

    # convert date, how to store date only strip away time?
    # print "Converting Dates"
    units = nc.variables['time'].units
    dates = num2date(times[:], units=units, calendar='365_day')

    d = np.array(nc.variables['dis'])

    # header = ['Latitude', 'Longitude', 'rl2', 'rl5', 'rl20', 'rl2_dis_percent', 'rl2_avg_dis_percent', 'rl5_dis_percent', 'rl5_avg_dis_percent', 'rl20_dis_percent', 'rl20_avg_dis_percent']

    times_index = []
    for i, j in enumerate(times):
        times_index.append(i)

    coord_index = 0
    refactor = getRefactorData()

    for lat, lon, rl2, rl5, rl20 in zip(lats, lons, rl2s, rl5s, rl20s):
        # print str(lat), str(lon)
        try:
            # print refactor[str(lat)][str(lon)]
            rl2_temp = rl2 * float(refactor[str(lat)][str(lon)]['rl2_factor'])
            rl5_temp = rl5 * float(refactor[str(lat)][str(lon)]['rl5_factor'])
            rl20_temp = rl20 * float(refactor[str(lat)][str(lon)]['rl20_factor'])
        except:
            rl2_temp = rl2
            rl5_temp = rl5
            rl20_temp = rl20

        rl2 = rl2_temp
        rl5 = rl5_temp
        rl20 = rl20_temp

        # print rl2,rl5,rl20, refactor[str(lat)][str(lon)]['rl2_factor']

        data_in = []
        data_in.append(lat)
        data_in.append(lon)
        data_in.append(rl2)
        data_in.append(rl5)
        data_in.append(rl20)

        rl2_dis_percent = []
        rl5_dis_percent = []
        rl20_dis_percent = []
        rl2_avg_dis = []
        rl5_avg_dis = []
        rl20_avg_dis = []

        for i in times_index:
            data = d[i, :, coord_index]
            dis_data = []
            for l in data:
                dis_data.append(l)
            dis_avg = np.median(dis_data)

            count = sum(1 for x in data if x > rl2)
            percent_rl2 = round(float(count) / float(51) * 100)
            rl2_avg_dis.append(round(float(dis_avg) / float(rl2) * 100))
            rl2_dis_percent.append(percent_rl2)

            count = sum(1 for x in data if x > rl5)
            percent_rl5 = round(float(count) / float(51) * 100)
            rl5_avg_dis.append(round(float(dis_avg) / float(rl5) * 100))
            rl5_dis_percent.append(percent_rl5)

            count = sum(1 for x in data if x > rl20)
            percent_rl20 = round(float(count) / float(51) * 100)
            rl20_avg_dis.append(round(float(dis_avg) / float(rl20) * 100))
            rl20_dis_percent.append(percent_rl20)

            if i >= 19:
                break

        # print rl2_avg_dis
        data_in.append(max(rl2_dis_percent))
        temp_avg_dis = []
        for index, item in enumerate(rl2_dis_percent):
            if item == max(rl2_dis_percent):
                # print index, item
                temp_avg_dis.append(rl2_avg_dis[index])
        data_in.append(max(temp_avg_dis))
        rl2_avg_dis_percent = max(temp_avg_dis)

        data_in.append(max(rl5_dis_percent))
        temp_avg_dis = []
        for index, item in enumerate(rl5_dis_percent):
            if item == max(rl5_dis_percent):
                # print index, item
                temp_avg_dis.append(rl5_avg_dis[index])
        data_in.append(max(temp_avg_dis))
        rl5_avg_dis_percent = max(temp_avg_dis)

        data_in.append(max(rl20_dis_percent))
        temp_avg_dis = []
        for index, item in enumerate(rl20_dis_percent):
            if item == max(rl20_dis_percent):
                # print index, item
                temp_avg_dis.append(rl20_avg_dis[index])
        data_in.append(max(temp_avg_dis))
        rl20_avg_dis_percent = max(temp_avg_dis)

        if coord_index > 2035 and max(rl2_dis_percent) >= 25:
            pnt = Point(round(float(lon), 2), round(float(lat), 2), srid=4326)
            checkdata = AfgBasinLvl4GlofasPoint.objects.filter(geom__intersects=pnt)
            for z in checkdata:
                p = Glofasintegrated(basin_id=z.value, datadate=date, lon=lon, lat=lat,
                                     rl2=rl2, rl5=rl5, rl20=rl20,
                                     rl2_dis_percent=max(rl2_dis_percent),
                                     rl2_avg_dis_percent=rl2_avg_dis_percent,
                                     rl5_dis_percent=max(rl5_dis_percent),
                                     rl5_avg_dis_percent=rl5_avg_dis_percent,
                                     rl20_dis_percent=max(rl20_dis_percent),
                                     rl20_avg_dis_percent=rl20_avg_dis_percent)
                p.save()
                print coord_index, z.value

        coord_index = coord_index + 1
        # print data_in

    # print Glofasintegrated.objects.filter(datadate=date).count()
    # if Glofasintegrated.objects.filter(datadate=date).count() == 0 :
    #     Glofasintegrated(datadate=date).save()

    nc.close()

Glofasintegrated.objects.filter(datadate='2017-04-13').delete()
calculate_glofas_params('2017-04-13')

# px = Glofasintegrated.objects.order_by().values('datadate').distinct()
# for i in px:
#     print str(i['datadate'])
#     Glofasintegrated.objects.filter(datadate=i['datadate']).delete()
#     calculate_glofas_params(str(i['datadate']))
```

license: gpl-3.0 | hash: 6,982,054,730,688,295,000 | line_mean: 33.108696 | line_max: 357 | alpha_frac: 0.605003
autogenerated: false | ratio: 2.949248 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: deavid/bjsonrpc
path: bjsonrpc/main.py
copies: 1 | size: 2824
content:

```python
"""
    bjson/main.py

    Copyright (c) 2010 David Martinez Marti
    All rights reserved.

    Licensed under 3-clause BSD License.
    See LICENSE.txt for the full license text.
"""

import socket

import bjsonrpc.server
import bjsonrpc.connection
import bjsonrpc.handlers

__all__ = [
    "createserver",
    "connect",
]


def createserver(host="127.0.0.1", port=10123,
                 handler_factory=bjsonrpc.handlers.NullHandler,
                 sock=None, http=False):
    """
        Creates a *bjson.server.Server* object linked to a listening socket.

        Parameters:

        **host**
          Address (IP or Host Name) to listen to as in *socket.bind*.
          Use "0.0.0.0" to listen to all address. By default this points
          to 127.0.0.1 to avoid security flaws.

        **port**
          Port number to bind the socket. In Unix, port numbers less
          than 1024 requires special permissions.

        **handler_factory**
          Class to instantiate to publish remote functions.

        **(return value)**
          A *bjson.server.Server* instance or raises an exception.

        Servers are usually created this way::

            import bjsonrpc

            server = bjsonrpc.createserver("0.0.0.0")
            server.serve()

        Check :ref:`bjsonrpc.server` documentation
    """
    if sock is None:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((host, port))
        sock.listen(3)

    return bjsonrpc.server.Server(sock, handler_factory=handler_factory, http=http)


def connect(host="127.0.0.1", port=10123, sock=None,
            handler_factory=bjsonrpc.handlers.NullHandler):
    """
        Creates a *bjson.connection.Connection* object linked to a connected
        socket.

        Parameters:

        **host**
          Address (IP or Host Name) to connect to.

        **port**
          Port number to connect to.

        **handler_factory**
          Class to instantiate to publish remote functions to the server.
          By default this is *NullHandler* which means that no functions
          are executable by the server.

        **(return value)**
          A *bjson.connection.Connection* instance or raises an exception.

        Connections are usually created this way::

            import bjsonrpc

            conn = bjsonrpc.connect("rpc.host.net")
            print conn.call.some_method_in_server_side()

    """
    if sock is None:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host, port))

    return bjsonrpc.connection.Connection(sock, handler_factory=handler_factory)
```

license: bsd-3-clause | hash: 5,525,710,750,083,788,000 | line_mean: 29.042553 | line_max: 83 | alpha_frac: 0.596671
autogenerated: false | ratio: 4.246617 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: Micket/CCBuilder
path: make_cc.py
copies: 1 | size: 8680
content:

```python
#!/usr/bin/env python3

from __future__ import print_function
from __future__ import division

import argparse
import pickle
import time
import CCBuilder as ccb
import CCBuilder_c as ccb_c
import numpy as np
import scipy.special


def uniform_dist(x):
    """ Returns uniform distributions of given range """
    return lambda: np.random.uniform(x[0], x[1])


def weibull_dist(a, mu):
    """ Returns Weibull distributions for given shape parameter and average """
    return lambda: np.random.weibull(a) * mu / scipy.special.gamma(1/a + 1)


def parse_dist(arg):
    # Parses input string for given distribution.
    # Returns a distribution, and the average
    d, params = arg.split(':')
    params = [float(x) for x in params.split(',')]
    if d == 'U':
        return uniform_dist(params), np.mean(params)
    elif d == 'W':
        a, mu = params
        return weibull_dist(a, mu), mu


parser = argparse.ArgumentParser(description='''Generate a WC microstructure.
Grain shape/size supports 2 types of distributions:
Uniform: U:low,high
Weibull: U:a,mu (a=k in some notation, mu=mean)
''', formatter_class=argparse.ArgumentDefaultsHelpFormatter)

# parser.add_argument('-V', dest='verbose', action='store_true', help='Verbose mode.')
parser.add_argument('-f', dest='fname', metavar='basename', required=True, help='Output base filename.')
parser.add_argument('-L', dest='L', metavar='length', required=True, type=float, help='Cell length (volume is L^3)')
parser.add_argument('-m', dest='m', metavar='m', required=True, type=int,
                    help='Grid resolution. Total number of voxels are (m*L)^3')
parser.add_argument('--vol_frac_goal', dest="vol_frac_goal", metavar='v', required=True, type=float,
                    help='Goal for volume fraction WC (excluding overlap)')
parser.add_argument('-s', dest='seed', metavar='s', default=None, type=int,
                    help='Seed for RNG. Given identical parameters, ' +
                         'CCBuilder will generate identical output given a controlled seed.')
parser.add_argument('--stray_cleanup', action='store_true', help='Clean up stray voxels')

group = parser.add_argument_group('WC grain shape')
group.add_argument('-k', dest='k_dist', metavar='type,[params]', default='U:0.4,1.4', help='k distribution')
group.add_argument('-r', dest='r_dist', metavar='type,[params]', default='U:0.1,0.4', help='r distribution')
group.add_argument('-d', dest='d_dist', metavar='type,[params]', default='U:0.5,1.5', help='d distribution')

group = parser.add_argument_group('Packing')
group.add_argument('--use_potential', action='store_true', help='Use repulsive potential.')
group.add_argument('--nr_tries', dest='nr_tries', metavar='n', default=2500, type=int,
                   help='Number of random translations.')
group.add_argument('--delta', dest='delta', metavar='d', type=float,
                   help='Maximum distance for randomized translations.')
group.add_argument('--m_coarse', dest="m_coarse", metavar='mc', default=10,
                   help='Grid resolution during packing.')

group = parser.add_argument_group('Potts simulation')
group.add_argument('--mc_steps', dest="mc_steps", metavar='steps', default=0.05, type=float,
                   help='Monte-Carlo steps (scales with (m*L)^4. Set to zero to turn off.')
group.add_argument('--tau', dest='tau', metavar='t', default=0.5, type=float,
                   help='Ficticious temperature in Potts model.')

options = parser.parse_args()

if options.seed is not None:
    np.random.seed(options.seed)

# Heuristic mapping from actual to goal volume fraction
# vol_frac_goal = (alpha - 2)/(2 * alpha) + 1/alpha * np.sqrt(1 - alpha * np.log(-2*(vol_frac - 1)))

d_eq, d_0 = parse_dist(options.d_dist)
r, r_0 = parse_dist(options.r_dist)
k, k_0 = parse_dist(options.k_dist)

fname = options.fname
# to avoid confusion with types:
m = np.int(options.m)
m_coarse = np.int(options.m_coarse)
L = np.float(options.L)
mc_steps = np.float(options.mc_steps)
vol_frac_goal = np.double(options.vol_frac_goal)
tau = np.double(options.tau)
nr_tries = np.int(options.nr_tries)

delta_x = d_0/float(m)
M = np.int(m * L / d_0)
M_coarse = np.int(m_coarse * L / d_0)

idelta = M
idelta_coarse = M_coarse
if options.delta:
    idelta = np.int(M * options.delta / L)
    idelta_coarse = np.int(M_coarse * options.delta / L)

trunc_triangles = ccb.prepare_triangles(vol_frac_goal, L, r, k, d_eq)

# trunc_triangles = trunc_triangles[:1]
# trunc_triangles[0].rot_matrix = np.eye(3)
# trunc_triangles[0].rot_matrix_tr = np.eye(3)
# trunc_triangles[0].midpoint = np.array([2., 2., 2.])

# Sort triangles w.r.t. volume, so that large triangles are added to the box first (better packing)
trunc_triangles.sort(key=lambda x: x.volume, reverse=True)
print('Prepared', len(trunc_triangles), 'triangles')

if options.use_potential:
    ccb.optimize_midpoints(L, trunc_triangles)

if m_coarse == m:
    grain_ids, overlaps, voxel_indices = ccb_c.populate_voxels(M, L, trunc_triangles, nr_tries, idelta, 1.0)
else:
    if nr_tries > 0:
        # Optimization: Use coarser grid for packing, then insert packed grains into fine grid
        # No need to get the return values, trunc_triangles
        ccb_c.populate_voxels(M_coarse, L, trunc_triangles, nr_tries, idelta_coarse, 1.0)
    grain_ids, overlaps, voxel_indices = ccb_c.populate_voxels(M, L, trunc_triangles, 1, 0, 1.0)

if mc_steps > 0:
    start_time = time.time()

    # Do Potts on coarse grid first for an improved initial guess.
    M_coarseMC = M//2
    grain_ids_coarse, overlaps_coarse, voxel_indices_coarse = ccb_c.populate_voxels(
        M_coarseMC, L, trunc_triangles, 0, 0, 1.0)
    _, gb_voxels_coarse, _ = ccb_c.calc_surface_prop(M_coarseMC, grain_ids_coarse)
    ccb_c.make_mcp_bound(M_coarseMC, grain_ids_coarse, gb_voxels_coarse, overlaps_coarse,
                         voxel_indices_coarse, np.int(mc_steps * M_coarseMC**4), tau)

    # Copy over that solution to the overlap regions of the fine grid as a starting point
    M2 = M**2
    i = np.nonzero(overlaps)[0]
    iz = i // M2
    iy = (i - iz*M2) // M
    ix = i - iz*M2 - iy*M
    cix = ix * M_coarseMC // M
    ciy = iy * M_coarseMC // M
    ciz = iz * M_coarseMC // M
    ci = cix + ciy*M_coarseMC + ciz*M_coarseMC**2
    gid = grain_ids_coarse[ci]
    # Could use a Cython implementation for efficiency.
    for ii, g in zip(i, gid):
        if g != grain_ids[ii] and np.searchsorted(voxel_indices[g-2], ii) < len(voxel_indices[g-2]):
            grain_ids[ii] = g
    # This might change a few voxels to a value that they shouldn't obtain, but it's barely noticeable
    # grain_ids_1[i] = grain_ids_coarse[ci]

    _, gb_voxels, _ = ccb_c.calc_surface_prop(M, grain_ids)

    # and run the full resolution MCP:
    ccb_c.make_mcp_bound(M, grain_ids, gb_voxels, overlaps, voxel_indices, np.int(mc_steps * M ** 4), tau)

    print('Potts model took {} seconds'.format(np.str(time.time() - start_time)))

if options.stray_cleanup:
    start_time = time.time()
    ccb_c.stray_cleanup(M, grain_ids)
    print('Stray voxel cleanup took {} seconds'.format(np.str(time.time() - start_time)))

surface_voxels, gb_voxels, interface_voxels = ccb_c.calc_surface_prop(M, grain_ids)
phases, good_voxels, euler_angles = ccb_c.calc_grain_prop(M, grain_ids, trunc_triangles)

phase_volumes = np.bincount(phases)
vol_frac_WC = phase_volumes[2] / np.float(M ** 3)
vol_frac_Co = 1 - vol_frac_WC
mass_frac_WC = ccb.mass_fraction(vol_frac_WC)

sum_gb_voxels = np.sum(gb_voxels)
contiguity = sum_gb_voxels / np.float(sum_gb_voxels + np.sum(interface_voxels))

print('Contiguity {:5f}, Co volume frac {:.5f}, mass frac {:.5f}'.format(
    contiguity, 1 - vol_frac_WC, ccb.mass_fraction(vol_frac_WC)))

ccb.write_dream3d(fname, 3 * [M], 3 * [delta_x], trunc_triangles, grain_ids,
                  phases, good_voxels, euler_angles, surface_voxels, gb_voxels,
                  interface_voxels, overlaps)

with open(fname + '_trunc_triangles.data', 'wb') as f:
    pickle.dump([t.rot_matrix for t in trunc_triangles], f)

# Saving grain volume data
if False:
    grain_volumes = np.bincount(grain_ids)
    d_eq = ccb.volume_to_eq_d(grain_volumes[2:] * delta_x ** 3)
    # np.savetxt(fname + '_d_orig.txt', [t.d_eq for t in trunc_triangles])
    np.savetxt(fname + '_d.txt', d_eq)

    # Plot initial and final distributions
    import matplotlib.pyplot as plt
    plt.hist(np.array([t.d_eq for t in trunc_triangles]), alpha=0.5, bins=15, normed=True, label='Initial')
    plt.hist(d_eq, alpha=0.5, bins=15, normed=True, label='Final')
    plt.legend(loc='upper right')
    plt.show()
```

license: gpl-3.0 | hash: -3,357,573,291,557,032,400 | line_mean: 41.54902 | line_max: 126 | alpha_frac: 0.65818
autogenerated: false | ratio: 2.923543 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: lyoniionly/django-cobra
path: src/cobra/models/fields/node.py
copies: 1 | size: 3306
content:

```python
from __future__ import absolute_import, print_function

import collections
import logging
import six
import warnings

from django.db import models
from django.db.models.signals import post_delete

from cobra.core.cache import memoize
from cobra.core.compat import pickle
from cobra.core.strings import decompress, compress

from .gzippeddict import GzippedDictField

try:
    from south.modelsinspector import add_introspection_rules
except ImportError:
    pass
else:
    add_introspection_rules([], ["^cobra\.models\.fields\.node\.NodeField"])

__all__ = ('NodeField',)

logger = logging.getLogger('cobra.errors')


class NodeData(collections.MutableMapping):
    def __init__(self, id, data=None):
        self.id = id
        self._node_data = data

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __delitem__(self, key):
        del self.data[key]

    def __iter__(self):
        return iter(self.data)

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        cls_name = type(self).__name__
        if self._node_data:
            return '<%s: id=%s data=%r>' % (cls_name, self.id, repr(self._node_data))
        return '<%s: id=%s>' % (cls_name, self.id,)

    @memoize
    def data(self):
        from cobra import singleton
        if self._node_data is not None:
            return self._node_data
        elif self.id:
            warnings.warn('You should populate node data before accessing it.')
            return singleton.nodestore.get(self.id) or {}
        return {}

    def bind_data(self, data):
        self._node_data = data


class NodeField(GzippedDictField):
    """
    Similar to the gzippedictfield except that it stores a reference
    to an external node.
    """
    def contribute_to_class(self, cls, name):
        super(NodeField, self).contribute_to_class(cls, name)
        post_delete.connect(self.on_delete, sender=self.model, weak=False)

    def on_delete(self, instance, **kwargs):
        from cobra import singleton
        value = getattr(instance, self.name)
        if not value.id:
            return
        singleton.nodestore.delete(value.id)

    def to_python(self, value):
        if isinstance(value, six.string_types) and value:
            try:
                value = pickle.loads(decompress(value))
            except Exception as e:
                logger.exception(e)
                value = {}
        elif not value:
            value = {}

        if 'node_id' in value:
            node_id = value.pop('node_id')
            data = None
        else:
            node_id = None
            data = value

        return NodeData(node_id, data)

    def get_prep_value(self, value):
        from cobra import singleton
        if not value and self.null:
            # save ourselves some storage
            return None

        # TODO(dcramer): we should probably do this more intelligently
        # and manually
        if not value.id:
            value.id = singleton.nodestore.create(value.data)
        else:
            singleton.nodestore.set(value.id, value.data)

        return compress(pickle.dumps({
            'node_id': value.id
        }))
```

license: apache-2.0 | hash: 8,760,018,005,294,236,000 | line_mean: 24.635659 | line_max: 79 | alpha_frac: 0.588627
autogenerated: false | ratio: 3.987937 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: andersgs/dingo
path: dingo/random_forest.py
copies: 1 | size: 2551
content:

```python
'''
Some functions to fit a random forest
'''

import sklearn.ensemble
import pandas
import progressbar

bar = progressbar.ProgressBar()

def test_max_features(max_features):
    if (max_features not in ['sqrt', 'auto', 'log2', None]):
        try:
            max_features = int(max_features)
        except ValueError:
            print("max_features has to be an integer or one of 'sqrt', 'auto', 'log2' or None.")
            raise
    return max_features

def learn(X, y, n_trees=10, criterion='entropy', max_features="sqrt",
          max_depth=None, min_samples_split=2, min_samples_leaf=1,
          min_weight_fraction_leaf=0, max_leaf_nodes=None,
          min_impurity_split=1e-7, bootstrap=False, oob_score=False,
          n_jobs=10, random_state=None, warm_start=False,
          class_weight='balanced_subsample'):
    rf = sklearn.ensemble.RandomForestClassifier(n_estimators=n_trees, \
                                                 criterion=criterion, \
                                                 max_features=max_features, \
                                                 max_depth=max_depth, \
                                                 min_samples_split=min_samples_split, \
                                                 min_samples_leaf=min_samples_leaf, \
                                                 min_weight_fraction_leaf=min_weight_fraction_leaf, \
                                                 max_leaf_nodes=max_leaf_nodes, \
                                                 min_impurity_split=min_impurity_split, \
                                                 bootstrap=bootstrap, \
                                                 oob_score=oob_score, \
                                                 n_jobs=n_jobs, \
                                                 random_state=random_state, \
                                                 warm_start=warm_start, \
                                                 class_weight=class_weight, \
                                                 verbose=1)
    rf.fit(X, y)
    return rf

def importance(rf, kmers):
    importance = rf.estimators_[0].feature_importances_
    for est in bar(rf.estimators_[1:]):
        importance += est.feature_importances_
    importance = importance / rf.n_estimators
    d = {"kmer": kmers, "importance": importance}
    d = pandas.DataFrame(d)
    d = d.sort_values(by="importance", ascending=0)
    d = d.loc[d.importance > 0]
    return d
```

license: bsd-3-clause | hash: -2,556,278,225,202,661,400 | line_mean: 49.019608 | line_max: 347 | alpha_frac: 0.46374
autogenerated: false | ratio: 4.555357 | config_test: false | has_no_keywords: false | few_assignments: false

repo_name: Orav/kbengine
path: kbe/src/lib/python/Tools/demo/life.py
copies: 1 | size: 9249
content:

```python
#!/usr/bin/env python3

"""
A curses-based version of Conway's Game of Life.

An empty board will be displayed, and the following commands are available:
 E : Erase the board
 R : Fill the board randomly
 S : Step for a single generation
 C : Update continuously until a key is struck
 Q : Quit
 Cursor keys :  Move the cursor around the board
 Space or Enter : Toggle the contents of the cursor's position

Contributed by Andrew Kuchling, Mouse support and color by Dafydd Crosby.
"""

import curses
import random


class LifeBoard:
    """Encapsulates a Life board

    Attributes:
    X,Y : horizontal and vertical size of the board
    state : dictionary mapping (x,y) to 0 or 1

    Methods:
    display(update_board) -- If update_board is true, compute the
                             next generation.  Then display the state
                             of the board and refresh the screen.
    erase() -- clear the entire board
    make_random() -- fill the board randomly
    set(y,x) -- set the given cell to Live; doesn't refresh the screen
    toggle(y,x) -- change the given cell from live to dead, or vice
                   versa, and refresh the screen display
    """
    def __init__(self, scr, char=ord('*')):
        """Create a new LifeBoard instance.

        scr -- curses screen object to use for display
        char -- character used to render live cells (default: '*')
        """
        self.state = {}
        self.scr = scr
        Y, X = self.scr.getmaxyx()
        self.X, self.Y = X - 2, Y - 2 - 1
        self.char = char
        self.scr.clear()

        # Draw a border around the board
        border_line = '+' + (self.X * '-') + '+'
        self.scr.addstr(0, 0, border_line)
        self.scr.addstr(self.Y + 1, 0, border_line)
        for y in range(0, self.Y):
            self.scr.addstr(1 + y, 0, '|')
            self.scr.addstr(1 + y, self.X + 1, '|')
        self.scr.refresh()

    def set(self, y, x):
        """Set a cell to the live state"""
        if x < 0 or self.X <= x or y < 0 or self.Y <= y:
            raise ValueError("Coordinates out of range %i,%i" % (y, x))
        self.state[x, y] = 1

    def toggle(self, y, x):
        """Toggle a cell's state between live and dead"""
        if x < 0 or self.X <= x or y < 0 or self.Y <= y:
            raise ValueError("Coordinates out of range %i,%i" % (y, x))
        if (x, y) in self.state:
            del self.state[x, y]
            self.scr.addch(y + 1, x + 1, ' ')
        else:
            self.state[x, y] = 1
            if curses.has_colors():
                # Let's pick a random color!
                self.scr.attrset(curses.color_pair(random.randrange(1, 7)))
            self.scr.addch(y + 1, x + 1, self.char)
            self.scr.attrset(0)
        self.scr.refresh()

    def erase(self):
        """Clear the entire board and update the board display"""
        self.state = {}
        self.display(update_board=False)

    def display(self, update_board=True):
        """Display the whole board, optionally computing one generation"""
        M, N = self.X, self.Y
        if not update_board:
            for i in range(0, M):
                for j in range(0, N):
                    if (i, j) in self.state:
                        self.scr.addch(j + 1, i + 1, self.char)
                    else:
                        self.scr.addch(j + 1, i + 1, ' ')
            self.scr.refresh()
            return

        d = {}
        self.boring = 1
        for i in range(0, M):
            L = range(max(0, i - 1), min(M, i + 2))
            for j in range(0, N):
                s = 0
                live = (i, j) in self.state
                for k in range(max(0, j - 1), min(N, j + 2)):
                    for l in L:
                        if (l, k) in self.state:
                            s += 1
                s -= live
                if s == 3:
                    # Birth
                    d[i, j] = 1
                    if curses.has_colors():
                        # Let's pick a random color!
                        self.scr.attrset(curses.color_pair(
                            random.randrange(1, 7)))
                    self.scr.addch(j + 1, i + 1, self.char)
                    self.scr.attrset(0)
                    if not live:
                        self.boring = 0
                elif s == 2 and live:
                    # Survival
                    d[i, j] = 1
                elif live:
                    # Death
                    self.scr.addch(j + 1, i + 1, ' ')
                    self.boring = 0
        self.state = d
        self.scr.refresh()

    def make_random(self):
        "Fill the board with a random pattern"
        self.state = {}
        for i in range(0, self.X):
            for j in range(0, self.Y):
                if random.random() > 0.5:
                    self.set(j, i)


def erase_menu(stdscr, menu_y):
    "Clear the space where the menu resides"
    stdscr.move(menu_y, 0)
    stdscr.clrtoeol()
    stdscr.move(menu_y + 1, 0)
    stdscr.clrtoeol()


def display_menu(stdscr, menu_y):
    "Display the menu of possible keystroke commands"
    erase_menu(stdscr, menu_y)

    # If color, then light the menu up :-)
    if curses.has_colors():
        stdscr.attrset(curses.color_pair(1))
    stdscr.addstr(menu_y, 4,
                  'Use the cursor keys to move, and space or Enter to toggle a cell.')
    stdscr.addstr(menu_y + 1, 4,
                  'E)rase the board, R)andom fill, S)tep once or C)ontinuously, Q)uit')
    stdscr.attrset(0)


def keyloop(stdscr):
    # Clear the screen and display the menu of keys
    stdscr.clear()
    stdscr_y, stdscr_x = stdscr.getmaxyx()
    menu_y = (stdscr_y - 3) - 1
    display_menu(stdscr, menu_y)

    # If color, then initialize the color pairs
    if curses.has_colors():
        curses.init_pair(1, curses.COLOR_BLUE, 0)
        curses.init_pair(2, curses.COLOR_CYAN, 0)
        curses.init_pair(3, curses.COLOR_GREEN, 0)
        curses.init_pair(4, curses.COLOR_MAGENTA, 0)
        curses.init_pair(5, curses.COLOR_RED, 0)
        curses.init_pair(6, curses.COLOR_YELLOW, 0)
        curses.init_pair(7, curses.COLOR_WHITE, 0)

    # Set up the mask to listen for mouse events
    curses.mousemask(curses.BUTTON1_CLICKED)

    # Allocate a subwindow for the Life board and create the board object
    subwin = stdscr.subwin(stdscr_y - 3, stdscr_x, 0, 0)
    board = LifeBoard(subwin, char=ord('*'))
    board.display(update_board=False)

    # xpos, ypos are the cursor's position
    xpos, ypos = board.X // 2, board.Y // 2

    # Main loop:
    while True:
        stdscr.move(1 + ypos, 1 + xpos)     # Move the cursor
        c = stdscr.getch()                  # Get a keystroke
        if 0 < c < 256:
            c = chr(c)
            if c in ' \n':
                board.toggle(ypos, xpos)
            elif c in 'Cc':
                erase_menu(stdscr, menu_y)
                stdscr.addstr(menu_y, 6,
                              ' Hit any key to stop continuously '
                              'updating the screen.')
                stdscr.refresh()
                # Activate nodelay mode; getch() will return -1
                # if no keystroke is available, instead of waiting.
                stdscr.nodelay(1)
                while True:
                    c = stdscr.getch()
                    if c != -1:
                        break
                    stdscr.addstr(0, 0, '/')
                    stdscr.refresh()
                    board.display()
                    stdscr.addstr(0, 0, '+')
                    stdscr.refresh()

                stdscr.nodelay(0)       # Disable nodelay mode
                display_menu(stdscr, menu_y)

            elif c in 'Ee':
                board.erase()
            elif c in 'Qq':
                break
            elif c in 'Rr':
                board.make_random()
                board.display(update_board=False)
            elif c in 'Ss':
                board.display()
            else:
                # Ignore incorrect keys
                pass
        elif c == curses.KEY_UP and ypos > 0:
            ypos -= 1
        elif c == curses.KEY_DOWN and ypos + 1 < board.Y:
            ypos += 1
        elif c == curses.KEY_LEFT and xpos > 0:
            xpos -= 1
        elif c == curses.KEY_RIGHT and xpos + 1 < board.X:
            xpos += 1
        elif c == curses.KEY_MOUSE:
            mouse_id, mouse_x, mouse_y, mouse_z, button_state = curses.getmouse()
            if (mouse_x > 0 and mouse_x < board.X + 1 and
                    mouse_y > 0 and mouse_y < board.Y + 1):
                xpos = mouse_x - 1
                ypos = mouse_y - 1
                board.toggle(ypos, xpos)
            else:
                # They've clicked outside the board
                curses.flash()
        else:
            # Ignore incorrect keys
            pass


def main(stdscr):
    keyloop(stdscr)    # Enter the main loop

if __name__ == '__main__':
    curses.wrapper(main)
```

license: lgpl-3.0 | hash: -7,308,566,763,295,885,000 | line_mean: 33.301527 | line_max: 81 | alpha_frac: 0.484917
autogenerated: false | ratio: 3.782822 | config_test: false | has_no_keywords: false | few_assignments: false

jwilliamn/handwritten
extraction/FormatModel/CreatePage3Variable.py
1
6106
import pickle
from extraction.FormatModel.VariableDefinitions import *
from extraction.FormatModel.RawVariableDefinitions import *
import json


def jsonDefault(object):
    return object.__dict__


if __name__ == '__main__':
    Page3 = Category('page3', 'pagina 3')
    ############
    for r in range(1, 6):
        str_r = str(r)
        if len(str_r) == 1:
            str_r = '0' + str_r
        P = Category('P' + str_r, 'Persona ' + str_r)

        ap_paterno = Category('apellido_paterno', 'Apellido paterno')
        variable_ap_paterno = Variable('pos_TL_BR', 'posicion final', None)
        ap_paterno.addSubType(variable_ap_paterno)

        ap_materno = Category('apellido_materno', 'Apellido materno')
        variable_ap_materno = Variable('pos_TL_BR', 'posicion final', None)
        ap_materno.addSubType(variable_ap_materno)

        nombres = Category('nombres', 'nombres')
        variable_nombres = Variable('pos_TL_BR', 'posicion final', None)
        nombres.addSubType(variable_nombres)

        fecha_nacimiento = Category('fecha_nacimiento', 'Fecha de nacimiento')
        variable_fecha_nacimiento = Variable('pos_TL_BR', 'posicion final', None)
        fecha_nacimiento.addSubType(variable_fecha_nacimiento)

        edad_anhos = Category('edad_anhos', 'edad_anios')
        variable_edad_anhos = Variable('pos_TL_BR', 'posicion final', None)
        edad_anhos.addSubType(variable_edad_anhos)

        edad_meses = Category('edad_meses', 'edad_meses')
        variable_edad_meses = Variable('pos_TL_BR', 'posicion final', None)
        edad_meses.addSubType(variable_edad_meses)

        tipo_documento = Category('tipo_documento', 'tipo_documento')
        variable_tipo_documento = Variable('pos_TL_BR', 'posicion final', None)
        tipo_documento.addSubType(variable_tipo_documento)

        numero_documento = Category('numero_documento', 'numero_documento')
        variable_numero_documento = Variable('pos_TL_BR', 'posicion final', None)
        numero_documento.addSubType(variable_numero_documento)

        parentesco_jefe_hogar = Category('parentesco_jefe_hogar', 'parentesco_jefe_hogar')
        variable_parentesco_jefe_hogar = Variable('pos_TL_BR', 'posicion final', None)
        parentesco_jefe_hogar.addSubType(variable_parentesco_jefe_hogar)

        num_nucleo_familiar = Category('num_nucleo_familiar', 'num_nucleo_familiar')
        variable_num_nucleo_familiar = Variable('pos_TL_BR', 'posicion final', None)
        num_nucleo_familiar.addSubType(variable_num_nucleo_familiar)

        sexo = Category('sexo', 'sexo')
        variable_sexo = Variable('pos_TL_BR', 'posicion final', None)
        sexo.addSubType(variable_sexo)

        estado_civil = Category('estado_civil', 'estado_civil')
        variable_estado_civil = Variable('pos_TL_BR', 'posicion final', None)
        estado_civil.addSubType(variable_estado_civil)

        tipo_seguro = Category('tipo_seguro', 'tipo_seguro')
        variable_tipo_seguro = Variable('pos_TL_BR', 'posicion final', None)
        tipo_seguro.addSubType(variable_tipo_seguro)

        lengua_materna = Category('lengua_materna', 'lengua_materna')
        variable_lengua_materna = Variable('pos_TL_BR', 'posicion final', None)
        lengua_materna.addSubType(variable_lengua_materna)

        sabe_leer_escribir = Category('sabe_leer_escribir', 'sabe_leer_escribir')
        variable_sabe_leer_escribir = Variable('pos_TL_BR', 'posicion final', None)
        sabe_leer_escribir.addSubType(variable_sabe_leer_escribir)

        nivel_educativo = Category('nivel_educativo', 'nivel_educativo')
        variable_nivel_educativo = Variable('pos_TL_BR', 'posicion final', None)
        nivel_educativo.addSubType(variable_nivel_educativo)

        ultimo_grado_aprobado = Category('ultimo_grado_aprobado', 'ultimo_grado_aprobado')
        variable_ultimo_grado_aprobado = Variable('pos_TL_BR', 'posicion final', None)
        ultimo_grado_aprobado.addSubType(variable_ultimo_grado_aprobado)

        ultimo_mes_era_un = Category('ultimo_mes_era_un', 'ultimo_mes_era_un')
        variable_ultimo_mes_era_un = Variable('pos_TL_BR', 'posicion final', None)
        ultimo_mes_era_un.addSubType(variable_ultimo_mes_era_un)

        sector_desempenho = Category('sector_desempenho', 'sector_desempenho')
        variable_sector_desempenho = Variable('pos_TL_BR', 'posicion final', None)
        sector_desempenho.addSubType(variable_sector_desempenho)

        presenta_discapacidad = Category('presenta_discapacidad', 'presenta_discapacidad')
        variable_presenta_discapacidad = Variable('pos_TL_BR', 'posicion final', None)
        presenta_discapacidad.addSubType(variable_presenta_discapacidad)

        programa_social_beneficiario = Category('programa_social_beneficiario', 'programa_social_beneficiario')
        variable_programa_social_beneficiario = Variable('pos_TL_BR', 'posicion final', None)
        programa_social_beneficiario.addSubType(variable_programa_social_beneficiario)

        #############
        P.addSubType(ap_paterno)
        P.addSubType(ap_materno)
        P.addSubType(nombres)
        P.addSubType(fecha_nacimiento)
        P.addSubType(edad_anhos)
        P.addSubType(edad_meses)
        P.addSubType(tipo_documento)
        P.addSubType(numero_documento)
        P.addSubType(parentesco_jefe_hogar)
        P.addSubType(num_nucleo_familiar)
        P.addSubType(sexo)
        P.addSubType(estado_civil)
        P.addSubType(tipo_seguro)
        P.addSubType(lengua_materna)
        P.addSubType(sabe_leer_escribir)
        P.addSubType(nivel_educativo)
        P.addSubType(ultimo_grado_aprobado)
        P.addSubType(ultimo_mes_era_un)
        P.addSubType(sector_desempenho)
        P.addSubType(presenta_discapacidad)
        P.addSubType(programa_social_beneficiario)

        Page3.addSubType(P)

    with open('pagina3.json', 'w') as output:
        json.dump(Page3, output, default=jsonDefault, indent=4)

    Page3.describe(True)
gpl-3.0
2,086,762,254,357,249,800
44.237037
111
0.655748
false
2.909004
false
false
false
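CreatePage3Variable.py above serializes its Category/Variable tree by handing json.dump a default= fallback that returns __dict__. A self-contained sketch of that technique, with an illustrative class instead of the repo's own:

import json

class Node:
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or []

def json_default(obj):
    # Called by json.dumps for any object it cannot serialize natively;
    # returning __dict__ recurses naturally through nested Node instances.
    return obj.__dict__

tree = Node('root', [Node('leaf-a'), Node('leaf-b')])
print(json.dumps(tree, default=json_default, indent=4))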
huzhifeng/py12306
py12306.py
1
62739
#!/usr/bin/env python # -*- coding: utf-8 -*- # 标准库 import argparse import urllib import time import datetime import sys import re import ConfigParser import random import smtplib from email.mime.text import MIMEText # 第三方库 import requests from huzhifeng import dumpObj, hasKeys # Set default encoding to utf-8 reload(sys) sys.setdefaultencoding('utf-8') requests.packages.urllib3.disable_warnings() # 全局变量 RET_OK = 0 RET_ERR = -1 MAX_TRIES = 3 MAX_DAYS = 60 stations = [] seatMaps = [ ('1', u'硬座'), # 硬座/无座 ('3', u'硬卧'), ('4', u'软卧'), ('7', u'一等软座'), ('8', u'二等软座'), ('9', u'商务座'), ('M', u'一等座'), ('O', u'二等座'), ('B', u'混编硬座'), ('P', u'特等座') ] # 全局函数 def printDelimiter(): print('-' * 64) def getTime(): return time.strftime('%Y-%m-%d %X', time.localtime()) # 2014-01-01 12:00:00 def date2UTC(d): # Convert '2014-01-01' to 'Wed Jan 01 00:00:00 UTC+0800 2014' t = time.strptime(d, '%Y-%m-%d') asc = time.asctime(t) # 'Wed Jan 01 00:00:00 2014' # 'Wed Jan 01 00:00:00 UTC+0800 2014' return (asc[0:-4] + 'UTC+0800 ' + asc[-4:]) def getCardType(key): d = { '1': u'二代身份证', '2': u'一代身份证', 'C': u'港澳通行证', 'G': u'台湾通行证', 'B': u'护照' } return d[key] if key in d else u'未知证件类型' def getTicketType(key): d = { '1': u'成人票', '2': u'儿童票', '3': u'学生票', '4': u'残军票' } return d[key] if key in d else u'未知票种' def getSeatType(key): d = dict(seatMaps) return d[key] if key in d else u'未知席别' def selectSeatType(): key = '1' # 默认硬座 while True: print(u'请选择席别编码(即左边第一个英文字母):') for xb in seatMaps: print(u'%s: %s' % (xb[0], xb[1])) key = raw_input().upper() d = dict(seatMaps) if key in d: return key else: print(u'无效的席别类型!') def checkDate(date): m = re.match(r'^\d{4}-\d{2}-\d{2}$', date) # 2014-01-01 if m: today = datetime.datetime.now() fmt = '%Y-%m-%d' today = datetime.datetime.strptime(today.strftime(fmt), fmt) train_date = datetime.datetime.strptime(m.group(0), fmt) delta = train_date - today if delta.days < 0: print(u'乘车日期%s无效, 只能预订%s以后的车票' % ( train_date.strftime(fmt), today.strftime(fmt))) return False else: return True else: return False def selectDate(): fmt = '%Y-%m-%d' week_days = [u'星期一', u'星期二', u'星期三', u'星期四', u'星期五', u'星期六', u'星期天'] now = datetime.datetime.now() available_date = [(now + datetime.timedelta(days=i)) for i in xrange(MAX_DAYS)] for i in xrange(0, MAX_DAYS, 2): print(u'第%2d天: %s(%s)' % ( i + 1, available_date[i].strftime(fmt), week_days[available_date[i].weekday()])), if i + 1 < MAX_DAYS: print(u'\t\t第%2d天: %s(%s)' % ( i + 2, available_date[i + 1].strftime(fmt), week_days[available_date[i + 1].weekday()])) else: print('') while True: print(u'请选择乘车日期(1~%d)' % (MAX_DAYS)) index = raw_input() if not index.isdigit(): print(u'只能输入数字序号, 请重新选择乘车日期(1~%d)' % (MAX_DAYS)) continue index = int(index) if index < 1 or index > MAX_DAYS: print(u'输入的序号无效, 请重新选择乘车日期(1~%d)' % (MAX_DAYS)) continue index -= 1 train_date = available_date[index].strftime(fmt) return train_date def getStationByName(name): matched_stations = [] for station in stations: if ( station['name'] == name or station['abbr'].find(name.lower()) != -1 or station['pinyin'].find(name.lower()) != -1 or station['pyabbr'].find(name.lower()) != -1): matched_stations.append(station) count = len(matched_stations) if count <= 0: return None elif count == 1: return matched_stations[0] else: for i in xrange(0, count): print(u'%d:\t%s' % (i + 1, matched_stations[i]['name'])) print(u'请选择站点(1~%d)' % (count)) index = raw_input() if not index.isdigit(): print(u'只能输入数字序号(1~%d)' % (count)) return None index = int(index) if index < 1 or index > count: print(u'输入的序号无效(1~%d)' % (count)) 
return None else: return matched_stations[index - 1] def inputStation(): while True: print(u'支持中文, 拼音和拼音缩写(如: 北京,beijing,bj)') name = raw_input().decode('gb2312', 'ignore') station = getStationByName(name) if station: return station else: print(u'站点错误, 没有站点"%s", 请重新输入.' % (name)) def selectTrain(trains): trains_num = len(trains) index = 0 while True: # 必须选择有效的车次 index = raw_input() if not index.isdigit(): print(u'只能输入数字序号,请重新选择车次(1~%d)' % (trains_num)) continue index = int(index) if index < 1 or index > trains_num: print(u'输入的序号无效,请重新选择车次(1~%d)' % (trains_num)) continue if trains[index - 1]['queryLeftNewDTO']['canWebBuy'] != 'Y': print(u'您选择的车次%s没票啦,请重新选择车次' % ( trains[index - 1]['queryLeftNewDTO']['station_train_code'])) continue else: break return index class MyOrder(object): '''docstring for MyOrder''' def __init__( self, username='', password='', train_date='', from_city_name='', to_city_name=''): super(MyOrder, self).__init__() self.username = username # 账号 self.password = password # 密码 self.train_date = train_date # 乘车日期[2014-01-01] today = datetime.datetime.now() self.back_train_date = today.strftime('%Y-%m-%d') # 返程日期[2014-01-01] self.tour_flag = 'dc' # 单程dc/往返wf self.purpose_code = 'ADULT' # 成人票 self.from_city_name = from_city_name # 对应查询界面'出发地'输入框中的内容 self.to_city_name = to_city_name # 对应查询界面'目的地'输入框中的内容 self.from_station_telecode = '' # 出发站编码 self.to_station_telecode = '' # 目的站编码 self.passengers = [] # 乘车人列表,最多5人 self.normal_passengers = [] # 我的联系人列表 self.trains = [] # 列车列表, 查询余票后自动更新 self.current_train_index = 0 # 当前选中的列车索引序号 self.captcha = '' # 图片验证码 self.orderId = '' # 订单流水号 self.canWebBuy = False # 可预订 self.notify = { 'mail_enable': 0, 'mail_username': '', 'mail_password': '', 'mail_server': '', 'mail_to': [], 'dates': [], 'trains': [], 'xb': [], 'focus': {} } def initSession(self): self.session = requests.Session() self.session.headers = { 'Accept': 'application/x-ms-application, image/jpeg, application/xaml+xml, image/gif, image/pjpeg, application/x-ms-xbap, */*', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN', 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C)', 'Referer': 'https://kyfw.12306.cn/otn/index/init', 'Host': 'kyfw.12306.cn', 'Connection': 'Keep-Alive' } def updateHeaders(self, url): d = { 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn/' }, 'https://kyfw.12306.cn/otn/login/init': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn/' }, 'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand&': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn/login/init' }, 'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp&': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc' }, 'https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/login/init', 'Cache-Control': 'no-cache', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' }, 'https://kyfw.12306.cn/otn/login/loginAysnSuggest': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/login/init', 'Cache-Control': 'no-cache', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' }, 'https://kyfw.12306.cn/otn/login/userLogin': { 'method': 
'POST', 'Referer': 'https://kyfw.12306.cn/otn/login/init' }, 'https://kyfw.12306.cn/otn/index/init': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn/login/init' }, 'https://kyfw.12306.cn/otn/leftTicket/init': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/index/init', 'Content-Type': 'application/x-www-form-urlencoded' }, 'https://kyfw.12306.cn/otn/leftTicket/log?': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init', 'x-requested-with': 'XMLHttpRequest', 'Cache-Control': 'no-cache', 'If-Modified-Since': '0' }, 'https://kyfw.12306.cn/otn/leftTicket/query?': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init', 'x-requested-with': 'XMLHttpRequest', 'Cache-Control': 'no-cache', 'If-Modified-Since': '0' }, 'https://kyfw.12306.cn/otn/leftTicket/queryT?': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init', 'x-requested-with': 'XMLHttpRequest', 'Cache-Control': 'no-cache', 'If-Modified-Since': '0' }, 'https://kyfw.12306.cn/otn/login/checkUser': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init', 'Cache-Control': 'no-cache', 'If-Modified-Since': '0', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' }, 'https://kyfw.12306.cn/otn/leftTicket/submitOrderRequest': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/', 'Cache-Control': 'no-cache', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' }, 'https://kyfw.12306.cn/otn/confirmPassenger/initDc': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/leftTicket/init', 'Content-Type': 'application/x-www-form-urlencoded', 'Cache-Control': 'no-cache' }, 'https://kyfw.12306.cn/otn/confirmPassenger/getPassengerDTOs': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc', 'Cache-Control': 'no-cache', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' }, 'https://kyfw.12306.cn/otn/confirmPassenger/checkOrderInfo': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc', 'Cache-Control': 'no-cache', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' }, 'https://kyfw.12306.cn/otn/confirmPassenger/getQueueCount': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc', 'Cache-Control': 'no-cache', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' }, 'https://kyfw.12306.cn/otn/confirmPassenger/confirmSingleForQueue': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc', 'Cache-Control': 'no-cache', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' }, 'https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime?': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc', 'x-requested-with': 'XMLHttpRequest' }, 'https://kyfw.12306.cn/otn/confirmPassenger/resultOrderForDcQueue': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc', 'Cache-Control': 'no-cache', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' }, 'https://kyfw.12306.cn/otn//payOrder/init?': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc', 'Cache-Control': 'no-cache', 'Content-Type': 
'application/x-www-form-urlencoded' }, 'https://kyfw.12306.cn/otn/queryOrder/initNoComplete': { 'method': 'GET', 'Referer': 'https://kyfw.12306.cn/otn//payOrder/init?random=1417862054369' }, 'https://kyfw.12306.cn/otn/queryOrder/queryMyOrderNoComplete': { 'method': 'POST', 'Referer': 'https://kyfw.12306.cn/otn/queryOrder/initNoComplete', 'Cache-Control': 'no-cache', 'x-requested-with': 'XMLHttpRequest', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8' } } l = [ 'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand&', 'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp&', 'https://kyfw.12306.cn/otn/leftTicket/log?', 'https://kyfw.12306.cn/otn/leftTicket/query?', 'https://kyfw.12306.cn/otn/leftTicket/queryT?', 'https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime?', 'https://kyfw.12306.cn/otn//payOrder/init?' ] for s in l: if url.find(s) == 0: url = s if not url in d: print(u'未知 url: %s' % url) return RET_ERR self.session.headers.update({'Referer': d[url]['Referer']}) keys = [ 'Referer', 'Cache-Control', 'x-requested-with', 'Content-Type' ] for key in keys: if key in d[url]: self.session.headers.update({key: d[url][key]}) else: self.session.headers.update({key: None}) def get(self, url): self.updateHeaders(url) tries = 0 while tries < MAX_TRIES: tries += 1 try: r = self.session.get(url, verify=False, timeout=16) except requests.exceptions.ConnectionError as e: print('ConnectionError(%s): e=%s' % (url, e)) continue except requests.exceptions.Timeout as e: print('Timeout(%s): e=%s' % (url, e)) continue except requests.exceptions.TooManyRedirects as e: print('TooManyRedirects(%s): e=%s' % (url, e)) continue except requests.exceptions.HTTPError as e: print('HTTPError(%s): e=%s' % (url, e)) continue except requests.exceptions.RequestException as e: print('RequestException(%s): e=%s' % (url, e)) continue except: print('Unknown exception(%s)' % (url)) continue if r.status_code != 200: print('Request %s failed %d times, status_code=%d' % ( url, tries, r.status_code)) else: return r else: return None def post(self, url, payload): self.updateHeaders(url) if url == 'https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn': if payload.find('REPEAT_SUBMIT_TOKEN') != -1: self.session.headers.update({'Referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc'}) else: self.session.headers.update({'Referer': 'https://kyfw.12306.cn/otn/login/init'}) tries = 0 while tries < MAX_TRIES: tries += 1 try: r = self.session.post(url, data=payload, verify=False, timeout=16) except requests.exceptions.ConnectionError as e: print('ConnectionError(%s): e=%s' % (url, e)) continue except requests.exceptions.Timeout as e: print('Timeout(%s): e=%s' % (url, e)) continue except requests.exceptions.TooManyRedirects as e: print('TooManyRedirects(%s): e=%s' % (url, e)) continue except requests.exceptions.HTTPError as e: print('HTTPError(%s): e=%s' % (url, e)) continue except requests.exceptions.RequestException as e: print('RequestException(%s): e=%s' % (url, e)) continue except: print('Unknown exception(%s)' % (url)) continue if r.status_code != 200: print('Request %s failed %d times, status_code=%d' % ( url, tries, r.status_code)) else: return r else: return None def getCaptcha(self, url): self.updateHeaders(url) r = self.session.get(url, verify=False, stream=True, timeout=16) with open('captcha.gif', 'wb') as fd: for chunk in r.iter_content(): fd.write(chunk) print(u'请输入4位图片验证码(回车刷新验证码):') captcha = raw_input() if len(captcha) == 4: return 
captcha elif len(captcha) != 0: print(u'%s是无效的图片验证码, 必须是4位' % (captcha)) return None else: return 1 # 刷新 def initStation(self): url = 'https://kyfw.12306.cn/otn/resources/js/framework/station_name.js' r = self.get(url) if not r: print(u'站点数据库初始化失败, 请求异常') return None data = r.text station_list = data.split('@') if len(station_list) < 1: print(u'站点数据库初始化失败, 数据异常') return None station_list = station_list[1:] for station in station_list: items = station.split('|') # bji|北京|BJP|beijing|bj|2 if len(items) < 5: print(u'忽略无效站点: %s' % (items)) continue stations.append({'abbr': items[0], 'name': items[1], 'telecode': items[2], 'pinyin': items[3], 'pyabbr': items[4]}) return stations def readConfig(self, config_file='config.ini'): cp = ConfigParser.ConfigParser() try: cp.readfp(open(config_file, 'r')) except IOError as e: print(u'打开配置文件"%s"失败啦, 请先创建或者拷贝一份配置文件config.ini' % (config_file)) raw_input('Press any key to continue') sys.exit() self.username = cp.get('login', 'username') self.password = cp.get('login', 'password') self.train_date = cp.get('train', 'date') self.from_city_name = cp.get('train', 'from') self.to_city_name = cp.get('train', 'to') self.notify['mail_enable'] = int(cp.get('notify', 'mail_enable')) self.notify['mail_username'] = cp.get('notify', 'mail_username') self.notify['mail_password'] = cp.get('notify', 'mail_password') self.notify['mail_server'] = cp.get('notify', 'mail_server') self.notify['mail_to'] = cp.get('notify', 'mail_to').split(',') self.notify['dates'] = cp.get('notify', 'dates').split(',') self.notify['trains'] = cp.get('notify', 'trains').split(',') self.notify['xb'] = cp.get('notify', 'xb').split(',') for t in self.notify['trains']: self.notify['focus'][t] = self.notify['xb'] # 检查出发站 station = getStationByName(self.from_city_name) if not station: print(u'出发站错误, 请重新输入') station = inputStation() self.from_city_name = station['name'] self.from_station_telecode = station['telecode'] # 检查目的站 station = getStationByName(self.to_city_name) if not station: print(u'目的站错误,请重新输入') station = inputStation() self.to_city_name = station['name'] self.to_station_telecode = station['telecode'] # 检查乘车日期 if not checkDate(self.train_date): print(u'乘车日期无效, 请重新选择') self.train_date = selectDate() # 分析乘客信息 self.passengers = [] index = 1 passenger_sections = ['passenger%d' % (i) for i in xrange(1, 6)] sections = cp.sections() for section in passenger_sections: if section in sections: passenger = {} passenger['index'] = index passenger['name'] = cp.get(section, 'name') # 必选参数 passenger['cardtype'] = cp.get( section, 'cardtype') if cp.has_option( section, 'cardtype') else '1' # 证件类型:可选参数,默认值1,即二代身份证 passenger['id'] = cp.get(section, 'id') # 必选参数 passenger['phone'] = cp.get( section, 'phone') if cp.has_option( section, 'phone') else '13800138000' # 手机号码 passenger['seattype'] = cp.get( section, 'seattype') if cp.has_option( section, 'seattype') else '1' # 席别:可选参数, 默认值1, 即硬座 passenger['tickettype'] = cp.get( section, 'tickettype') if cp.has_option( section, 'tickettype') else '1' # 票种:可选参数, 默认值1, 即成人票 self.passengers.append(passenger) index += 1 def printConfig(self): printDelimiter() print(u'订票信息:\n%s\t%s\t%s--->%s' % ( self.username, self.train_date, self.from_city_name, self.to_city_name)) printDelimiter() th = [u'序号', u'姓名', u'证件类型', u'证件号码', u'席别', u'票种'] print(u'%s\t%s\t%s\t%s\t%s\t%s' % ( th[0].ljust(2), th[1].ljust(4), th[2].ljust(5), th[3].ljust(12), th[4].ljust(2), th[5].ljust(3))) for p in self.passengers: print(u'%s\t%s\t%s\t%s\t%s\t%s' % ( p['index'], p['name'].decode('utf-8', 
'ignore').ljust(4), getCardType(p['cardtype']).ljust(5), p['id'].ljust(20), getSeatType(p['seattype']).ljust(2), getTicketType(p['tickettype']).ljust(3))) def checkRandCodeAnsyn(self, module): d = { 'login': { # 登陆验证码 'rand': 'sjrand', 'referer': 'https://kyfw.12306.cn/otn/login/init' }, 'passenger': { # 订单验证码 'rand': 'randp', 'referer': 'https://kyfw.12306.cn/otn/confirmPassenger/initDc' } } if not module in d: print(u'无效的 module: %s' % (module)) return RET_ERR tries = 0 while tries < MAX_TRIES: tries += 1 url = 'https://kyfw.12306.cn/otn/passcodeNew/getPassCodeNew?module=%s&rand=%s&' % (module, d[module]['rand']) if tries > 1: url = '%s%1.16f' % (url, random.random()) print(u'正在等待验证码...') self.captcha = self.getCaptcha(url) if not self.captcha: continue if self.captcha == 1: # 刷新不计数 tries -= 1 continue url = 'https://kyfw.12306.cn/otn/passcodeNew/checkRandCodeAnsyn' parameters = [ ('randCode', self.captcha), ('rand', d[module]['rand']) ] if module == 'login': parameters.append(('randCode_validate', '')) else: parameters.append(('_json_att', '')) parameters.append(('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken)) payload = urllib.urlencode(parameters) print(u'正在校验验证码...') r = self.post(url, payload) if not r: print(u'校验验证码异常') continue # {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"result":"1","msg":"randCodeRight"},"messages":[],"validateMessages":{}} obj = r.json() if ( hasKeys(obj, ['status', 'httpstatus', 'data']) and hasKeys(obj['data'], ['result', 'msg']) and (obj['data']['result'] == '1')): print(u'校验验证码成功') return RET_OK else: print(u'校验验证码失败') dumpObj(obj) continue else: return RET_ERR def login(self): url = 'https://kyfw.12306.cn/otn/login/init' r = self.get(url) if not r: print(u'登录失败, 请求异常') return RET_ERR if self.session.cookies: cookies = requests.utils.dict_from_cookiejar(self.session.cookies) if cookies['JSESSIONID']: self.jsessionid = cookies['JSESSIONID'] if self.checkRandCodeAnsyn('login') == RET_ERR: return RET_ERR print(u'正在登录...') url = 'https://kyfw.12306.cn/otn/login/loginAysnSuggest' parameters = [ ('loginUserDTO.user_name', self.username), ('userDTO.password', self.password), ('randCode', self.captcha), ('randCode_validate', ''), #('ODg3NzQ0', 'OTIyNmFhNmQwNmI5ZmQ2OA%3D%3D'), ('myversion', 'undefined') ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'登录失败, 请求异常') return RET_ERR # {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"loginCheck":"Y"},"messages":[],"validateMessages":{}} obj = r.json() if ( hasKeys(obj, ['status', 'httpstatus', 'data']) and hasKeys(obj['data'], ['loginCheck']) and (obj['data']['loginCheck'] == 'Y')): print(u'登陆成功^_^') url = 'https://kyfw.12306.cn/otn/login/userLogin' parameters = [ ('_json_att', ''), ] payload = urllib.urlencode(parameters) r = self.post(url, payload) return RET_OK else: print(u'登陆失败啦!重新登陆...') dumpObj(obj) return RET_ERR def getPassengerDTOs(self): url = 'https://kyfw.12306.cn/otn/confirmPassenger/getPassengerDTOs' parameters = [ ('', ''), ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'获取乘客信息异常') return RET_ERR obj = r.json() if ( hasKeys(obj, ['status', 'httpstatus', 'data']) and hasKeys(obj['data'], ['normal_passengers']) and obj['data']['normal_passengers']): self.normal_passengers = obj['data']['normal_passengers'] return RET_OK else: print(u'获取乘客信息失败') if hasKeys(obj, ['messages']): dumpObj(obj['messages']) if hasKeys(obj, ['data']) and hasKeys(obj['data'], 
['exMsg']): dumpObj(obj['data']['exMsg']) return RET_ERR def selectPassengers(self, prompt): if prompt == 1: print(u'是否重新选择乘客?(如需选择请输入y或者yes, 默认使用配置文件提供的乘客信息)') act = raw_input() act = act.lower() if act != 'y' and act != 'yes': self.printConfig() return RET_OK if not (self.normal_passengers and len(self.normal_passengers)): tries = 0 while tries < MAX_TRIES: tries += 1 if self.getPassengerDTOs() == RET_OK: break else: print(u'获取乘客信息失败次数太多, 使用配置文件提供的乘客信息') return RET_ERR num = len(self.normal_passengers) for i in xrange(0, num): p = self.normal_passengers[i] print(u'%d.%s \t' % (i + 1, p['passenger_name'])), if (i + 1) % 5 == 0: print('') while True: print(u'\n请选择乘车人(最多选择5个, 以逗号隔开, 如:1,2,3,4,5, 直接回车不选择, 使用配置文件中的乘客信息)') buf = raw_input() if not buf: return RET_ERR pattern = re.compile(r'^[0-9,]*\d$') # 只能输入数字和逗号, 并且必须以数字结束 if pattern.match(buf): break else: print(u'输入格式错误, 只能输入数字和逗号, 并且必须以数字结束, 如:1,2,3,4,5') ids = buf.split(',') if not (ids and 1 <= len(ids) <= 5): return RET_ERR seattype = selectSeatType() ids = [int(id) for id in ids] del self.passengers[:] for id in ids: if id < 1 or id > num: print(u'不存在的联系人, 忽略') else: passenger = {} id = id - 1 passenger['index'] = len(self.passengers) + 1 passenger['name'] = self.normal_passengers[id]['passenger_name'] passenger['cardtype'] = self.normal_passengers[id]['passenger_id_type_code'] passenger['id'] = self.normal_passengers[id]['passenger_id_no'] passenger['phone'] = self.normal_passengers[id]['mobile_no'] passenger['seattype'] = seattype passenger['tickettype'] = self.normal_passengers[id]['passenger_type'] self.passengers.append(passenger) self.printConfig() return RET_OK def queryTickets(self): self.canWebBuy = False url = 'https://kyfw.12306.cn/otn/leftTicket/init' parameters = [ ('_json_att', ''), ('leftTicketDTO.from_station_name', self.from_city_name), ('leftTicketDTO.to_station_name', self.to_city_name), ('leftTicketDTO.from_station', self.from_station_telecode), ('leftTicketDTO.to_station', self.to_station_telecode), ('leftTicketDTO.train_date', self.train_date), ('back_train_date', self.back_train_date), ('purpose_codes', self.purpose_code), ('pre_step_flag', 'index') ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'查询车票异常') url = 'https://kyfw.12306.cn/otn/leftTicket/log?' parameters = [ ('leftTicketDTO.train_date', self.train_date), ('leftTicketDTO.from_station', self.from_station_telecode), ('leftTicketDTO.to_station', self.to_station_telecode), ('purpose_codes', self.purpose_code), ] url += urllib.urlencode(parameters) r = self.get(url) if not r: print(u'查询车票异常') url = 'https://kyfw.12306.cn/otn/leftTicket/queryT?' 
parameters = [ ('leftTicketDTO.train_date', self.train_date), ('leftTicketDTO.from_station', self.from_station_telecode), ('leftTicketDTO.to_station', self.to_station_telecode), ('purpose_codes', self.purpose_code), ] url += urllib.urlencode(parameters) r = self.get(url) if not r: print(u'查询车票异常') return RET_ERR obj = r.json() if (hasKeys(obj, ['status', 'httpstatus', 'data']) and len(obj['data'])): self.trains = obj['data'] return RET_OK else: print(u'查询车票失败') if hasKeys(obj, ['messages']): dumpObj(obj['messages']) return RET_ERR def sendMailNotification(self): print(u'正在发送邮件提醒...') me = u'订票提醒<%s>' % (self.notify['mail_username']) msg = MIMEText( self.notify['mail_content'], _subtype='plain', _charset='gb2312') msg['Subject'] = u'余票信息' msg['From'] = me msg['To'] = ';'.join(self.notify['mail_to']) try: server = smtplib.SMTP() server.connect(self.notify['mail_server']) server.login( self.notify['mail_username'], self.notify['mail_password']) server.sendmail(me, self.notify['mail_to'], msg.as_string()) server.close() print(u'发送邮件提醒成功') return True except Exception as e: print(u'发送邮件提醒失败, %s' % str(e)) return False def printTrains(self): printDelimiter() print(u'余票查询结果如下:') print(u"%s\t%s--->%s\n'有':票源充足 '无':票已售完 '*':未到起售时间 '--':无此席别" % ( self.train_date, self.from_city_name, self.to_city_name)) printDelimiter() print(u'序号/车次\t乘车站\t目的站\t一等\t二等\t软卧\t硬卧\t硬座\t无座') seatTypeCode = { 'swz': '商务座', 'tz': '特等座', 'zy': '一等座', 'ze': '二等座', 'gr': '高级软卧', 'rw': '软卧', 'yw': '硬卧', 'rz': '软座', 'yz': '硬座', 'wz': '无座', 'qt': '其它', } # TODO 余票数量和票价 https://kyfw.12306.cn/otn/leftTicket/queryTicketPrice?train_no=770000K77505&from_station_no=09&to_station_no=13&seat_types=1431&train_date=2014-01-01 # yp_info=4022300000301440004610078033421007800536 代表 # 4022300000 软卧0 # 3014400046 硬卧46 # 1007803342 无座342 # 1007800536 硬座536 index = 1 self.notify['mail_content'] = '' for train in self.trains: t = train['queryLeftNewDTO'] status = '售完' if t['canWebBuy'] == 'N' else '预定' i = 0 ypInfo = { 'wz': { # 无座 'price': 0, 'left': 0 }, 'yz': { # 硬座 'price': 0, 'left': 0 }, 'yw': { # 硬卧 'price': 0, 'left': 0 }, 'rw': { # 软卧 'price': 0, 'left': 0 }, } # 分析票价和余票数量 while i < (len(t['yp_info']) / 10): tmp = t['yp_info'][i * 10:(i + 1) * 10] price = int(tmp[1:5]) left = int(tmp[-3:]) if tmp[0] == '1': if tmp[6] == '3': ypInfo['wz']['price'] = price ypInfo['wz']['left'] = left else: ypInfo['yz']['price'] = price ypInfo['yz']['left'] = left elif tmp[0] == '3': ypInfo['yw']['price'] = price ypInfo['yw']['left'] = left elif tmp[0] == '4': ypInfo['rw']['price'] = price ypInfo['rw']['left'] = left i = i + 1 yz_price = u'硬座%s' % ( ypInfo['yz']['price']) if ypInfo['yz']['price'] else '' yw_price = u'硬卧%s' % ( ypInfo['yw']['price']) if ypInfo['yw']['price'] else '' print(u'(%d) %s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % ( index, t['station_train_code'], t['from_station_name'][0:3], # 最多保留3个中文 t['to_station_name'][0:3], # 最多保留3个中文 t['zy_num'], t['ze_num'], ypInfo['rw']['left'] if ypInfo['rw']['left'] else t['rw_num'], ypInfo['yw']['left'] if ypInfo['yw']['left'] else t['yw_num'], #t['rz_num'], ypInfo['yz']['left'] if ypInfo['yz']['left'] else t['yz_num'], ypInfo['wz']['left'] if ypInfo['wz']['left'] else t['wz_num'], #yz_price, #yw_price )) if t['canWebBuy'] == 'Y': self.canWebBuy = True index += 1 if self.notify['mail_enable'] == 1 and t['canWebBuy'] == 'Y': msg = '' prefix = u'[%s]车次%s[%s/%s->%s/%s, 历时%s]现在有票啦\n' % ( t['start_train_date'], t['station_train_code'], t['from_station_name'], t['start_time'], t['to_station_name'], t['arrive_time'], 
t['lishi']) if 'all' in self.notify['focus']: # 任意车次 if self.notify['focus']['all'][0] == 'all': # 任意席位 msg = prefix else: # 指定席位 for seat in self.notify['focus']['all']: if seat in ypInfo and ypInfo[seat]['left']: msg += u'座位类型:%s, 剩余车票数量:%s, 票价:%s \n' % ( seat if seat not in seatTypeCode else seatTypeCode[seat], ypInfo[seat]['left'], ypInfo[seat]['price']) if msg: msg = prefix + msg + u'\n' elif t['station_train_code'] in self.notify['focus']: # 指定车次 # 任意席位 if self.notify['focus'][t['station_train_code']][0] == 'all': msg = prefix else: # 指定席位 for seat in self.notify['focus'][t['station_train_code']]: if seat in ypInfo and ypInfo[seat]['left']: msg += u'座位类型:%s, 剩余车票数量:%s, 票价:%s \n' % ( seat if seat not in seatTypeCode else seatTypeCode[seat], ypInfo[seat]['left'], ypInfo[seat]['price']) if msg: msg = prefix + msg + u'\n' self.notify['mail_content'] += msg printDelimiter() if self.notify['mail_enable'] == 1: if self.notify['mail_content']: self.sendMailNotification() return RET_OK else: length = len(self.notify['dates']) if length > 1: self.train_date = self.notify['dates'][ random.randint( 0, length - 1)] return RET_ERR else: return RET_OK # -1->重新查询/0->退出程序/1~len->车次序号 def selectAction(self): ret = -1 self.current_train_index = 0 trains_num = len(self.trains) print(u'您可以选择:') if self.canWebBuy: print(u'1~%d.选择车次开始订票' % (trains_num)) print(u'p.更换乘车人') print(u's.更改席别') print(u'd.更改乘车日期') print(u'f.更改出发站') print(u't.更改目的站') print(u'a.同时更改乘车日期,出发站和目的站') print(u'u.查询未完成订单') print(u'c.查看订票信息') print(u'r.刷票模式') print(u'n.普通模式') print(u'q.退出') print(u'刷新车票请直接回车') printDelimiter() select = raw_input() select = select.lower() if select.isdigit(): if not self.canWebBuy: print(u'没有可预订的车次, 请刷新车票或者更改查询条件') return -1 index = int(select) if index < 1 or index > trains_num: print(u'输入的序号无效,请重新选择车次(1~%d)' % (trains_num)) index = selectTrain(self.trains) if self.trains[index - 1]['queryLeftNewDTO']['canWebBuy'] != 'Y': print(u'您选择的车次%s没票啦,请重新选择车次' % (self.trains[index - 1]['queryLeftNewDTO']['station_train_code'])) index = selectTrain(self.trains) ret = index self.current_train_index = index - 1 elif select == 'p': self.selectPassengers(0) elif select == 's': seattype = selectSeatType() for p in self.passengers: p['seattype'] = seattype self.printConfig() elif select == 'd': self.train_date = selectDate() elif select == 'f': print(u'请输入出发站:') station = inputStation() self.from_city_name = station['name'] self.from_station_telecode = station['telecode'] elif select == 't': print(u'请输入目的站:') station = inputStation() self.to_city_name = station['name'] self.to_station_telecode = station['telecode'] elif select == 'a': self.train_date = selectDate() print(u'请输入出发站:') station = inputStation() self.from_city_name = station['name'] self.from_station_telecode = station['telecode'] print(u'请输入目的站:') station = inputStation() self.to_city_name = station['name'] self.to_station_telecode = station['telecode'] elif select == 'u': ret = self.queryMyOrderNotComplete() ret = self.selectAction() elif select == 'c': ret = self.printConfig() ret = self.selectAction() elif select == 'r': self.notify['mail_enable'] = 1 ret = -1 elif select == 'n': self.notify['mail_enable'] = 0 ret = -1 elif select == 'q': ret = 0 return ret def initOrder(self): url = 'https://kyfw.12306.cn/otn/login/checkUser' parameters = [ ('_json_att', ''), ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'初始化订单异常') print(u'准备下单喽') url = 'https://kyfw.12306.cn/otn/leftTicket/submitOrderRequest' parameters = [ 
#('ODA4NzIx', 'MTU0MTczYmQ2N2I3MjJkOA%3D%3D'), ('myversion', 'undefined'), ('secretStr', self.trains[self.current_train_index]['secretStr']), ('train_date', self.train_date), ('back_train_date', self.back_train_date), ('tour_flag', self.tour_flag), ('purpose_codes', self.purpose_code), ('query_from_station_name', self.from_city_name), ('query_to_station_name', self.to_city_name), ('undefined', '') ] # TODO 注意:此处post不需要做urlencode, 比较奇怪, 不能用urllib.urlencode(parameters) payload = '' length = len(parameters) for i in range(0, length): payload += parameters[i][0] + '=' + parameters[i][1] if i < (length - 1): payload += '&' r = self.post(url, payload) if not r: print(u'下单异常') return RET_ERR # {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"messages":[],"validateMessages":{}} obj = r.json() if not (hasKeys(obj, ['status', 'httpstatus']) and obj['status']): print(u'下单失败啦') dumpObj(obj) return RET_ERR print(u'订单初始化...') self.session.close() # TODO url = 'https://kyfw.12306.cn/otn/confirmPassenger/initDc' parameters = [ ('_json_att', ''), ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'订单初始化异常') return RET_ERR data = r.text s = data.find('globalRepeatSubmitToken') # TODO e = data.find('global_lang') if s == -1 or e == -1: print(u'找不到 globalRepeatSubmitToken') return RET_ERR buf = data[s:e] s = buf.find("'") e = buf.find("';") if s == -1 or e == -1: print(u'很遗憾, 找不到 globalRepeatSubmitToken') return RET_ERR self.repeatSubmitToken = buf[s + 1:e] s = data.find('key_check_isChange') e = data.find('leftDetails') if s == -1 or e == -1: print(u'找不到 key_check_isChange') return RET_ERR self.keyCheckIsChange = data[s + len('key_check_isChange') + 3:e - 3] return RET_OK def checkOrderInfo(self): if self.checkRandCodeAnsyn('passenger') == RET_ERR: return RET_ERR passengerTicketStr = '' oldPassengerStr = '' passenger_seat_detail = '0' # TODO [0->随机][1->下铺][2->中铺][3->上铺] for p in self.passengers: if p['index'] != 1: passengerTicketStr += 'N_' oldPassengerStr += '1_' passengerTicketStr += '%s,%s,%s,%s,%s,%s,%s,' % ( p['seattype'], passenger_seat_detail, p['tickettype'], p['name'], p['cardtype'], p['id'], p['phone']) oldPassengerStr += '%s,%s,%s,' % ( p['name'], p['cardtype'], p['id']) passengerTicketStr += 'N' oldPassengerStr += '1_' self.passengerTicketStr = passengerTicketStr self.oldPassengerStr = oldPassengerStr print(u'检查订单...') url = 'https://kyfw.12306.cn/otn/confirmPassenger/checkOrderInfo' parameters = [ ('cancel_flag', '2'), # TODO ('bed_level_order_num', '000000000000000000000000000000'), # TODO ('passengerTicketStr', self.passengerTicketStr), ('oldPassengerStr', self.oldPassengerStr), ('tour_flag', self.tour_flag), ('randCode', self.captcha), #('NzA4MTc1', 'NmYyYzZkYWY2OWZkNzg2YQ%3D%3D'), # TODO ('_json_att', ''), ('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken), ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'检查订单异常') return RET_ERR # {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"submitStatus":true},"messages":[],"validateMessages":{}} obj = r.json() if ( hasKeys(obj, ['status', 'httpstatus', 'data']) and hasKeys(obj['data'], ['submitStatus']) and obj['status'] and obj['data']['submitStatus']): print(u'检查订单成功') return RET_OK else: print(u'检查订单失败') dumpObj(obj) return RET_ERR def getQueueCount(self): print(u'查询排队情况...') url = 'https://kyfw.12306.cn/otn/confirmPassenger/getQueueCount' t = self.trains[self.current_train_index]['queryLeftNewDTO'] parameters = [ 
('train_date', date2UTC(self.train_date)), ('train_no', t['train_no']), ('stationTrainCode', t['station_train_code']), ('seatType', '1'), # TODO ('fromStationTelecode', t['from_station_telecode']), ('toStationTelecode', t['to_station_telecode']), ('leftTicket', t['yp_info']), ('purpose_codes', '00'), # TODO ('_json_att', ''), ('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken) ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'查询排队情况异常') return RET_ERR # {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"count":"0","ticket":"100985109710098535003021350212","op_2":"false","countT":"0","op_1":"false"},"messages":[],"validateMessages":{}} obj = r.json() if not ( hasKeys(obj, ['status', 'httpstatus', 'data']) and hasKeys(obj['data'], ['op_1', 'op_2']) and obj['status']): print(u'查询排队情况失败') dumpObj(obj) return RET_ERR if obj['data']['op_1'] != 'false': print(u'已有人先于您提交相同的购票需求, 到处理您的需求时可能已无票, 建议根据当前余票确定是否排队.') if obj['data']['op_2'] != 'false': print(u'目前排队人数已经超过余票张数,请您选择其他席别或车次,特此提醒。') if 'ticket' in obj['data']: print(u'排队详情:%s' % (obj['data']['ticket'])) # TODO return RET_OK def confirmSingleForQueue(self): print(u'提交订单排队...') url = 'https://kyfw.12306.cn/otn/confirmPassenger/confirmSingleForQueue' t = self.trains[self.current_train_index]['queryLeftNewDTO'] parameters = [ ('passengerTicketStr', self.passengerTicketStr), ('oldPassengerStr', self.oldPassengerStr), ('randCode', self.captcha), ('purpose_codes', '00'), # TODO ('key_check_isChange', self.keyCheckIsChange), ('leftTicketStr', t['yp_info']), ('train_location', t['location_code']), ('_json_att', ''), ('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken), ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'提交订单排队异常') return RET_ERR # {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"submitStatus":true},"messages":[],"validateMessages":{}} obj = r.json() if ( hasKeys(obj, ['status', 'httpstatus', 'data']) and hasKeys(obj['data'], ['submitStatus']) and obj['status'] and obj['data']['submitStatus']): print(u'订单排队中...') return RET_OK else: print(u'提交订单排队失败') dumpObj(obj) return RET_ERR def queryOrderWaitTime(self): print(u'等待订单流水号...') url = 'https://kyfw.12306.cn/otn/confirmPassenger/queryOrderWaitTime?random=%13d&tourFlag=dc&_json_att=&REPEAT_SUBMIT_TOKEN=%s' % ( random.randint(1000000000000, 1999999999999), self.repeatSubmitToken) r = self.get(url) if not r: print(u'等待订单流水号异常') return RET_ERR # {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"queryOrderWaitTimeStatus":true,"count":0,"waitTime":4,"requestId":5944637152210732219,"waitCount":2,"tourFlag":"dc","orderId":null},"messages":[],"validateMessages":{}} # {"validateMessagesShowId":"_validatorMessage","status":true,"httpstatus":200,"data":{"queryOrderWaitTimeStatus":true,"count":0,"waitTime":-1,"requestId":5944637152210732219,"waitCount":0,"tourFlag":"dc","orderId":"E739900792"},"messages":[],"validateMessages":{}} obj = r.json() if not ( hasKeys(obj, ['status', 'httpstatus', 'data']) and hasKeys(obj['data'], ['orderId']) and obj['status'] and obj['data']['orderId']): print(u'等待订单流水号失败') dumpObj(obj) return RET_ERR self.orderId = obj['data']['orderId'] if (self.orderId and self.orderId != 'null'): print(u'订单流水号为:') print(self.orderId) return RET_OK else: print(u'等待订单流水号失败') return RET_ERR def payOrder(self): print(u'等待订票结果...') url = 'https://kyfw.12306.cn/otn/confirmPassenger/resultOrderForDcQueue' parameters = [ 
('orderSequence_no', self.orderId), ('_json_att', ''), ('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken), ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'等待订票结果异常') return RET_ERR # {'validateMessagesShowId':'_validatorMessage','status':true,'httpstatus':200,'data':{'submitStatus':true},'messages':[],'validateMessages':{}} # {'validateMessagesShowId':'_validatorMessage','status':true,'httpstatus':200,'data':{'errMsg':'获取订单信息失败,请查看未完成订单,继续支付!','submitStatus':false},'messages':[],'validateMessages':{}} obj = r.json() if not ( hasKeys(obj, ['status', 'httpstatus', 'data']) and hasKeys(obj['data'], ['submitStatus']) and obj['status'] and obj['data']['submitStatus']): print(u'等待订票结果失败') dumpObj(obj) return RET_ERR url = 'https://kyfw.12306.cn/otn//payOrder/init?random=%13d' % ( random.randint(1000000000000, 1999999999999)) parameters = [ ('_json_att', ''), ('REPEAT_SUBMIT_TOKEN', self.repeatSubmitToken), ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'请求异常') return RET_ERR data = r.text if data.find(u'席位已锁定') != -1: print(u'订票成功^_^请在45分钟内完成网上支付,否则系统将自动取消') return RET_OK else: return RET_ERR def queryMyOrderNotComplete(self): print(u'正在查询未完成订单...') url = 'https://kyfw.12306.cn/otn/queryOrder/queryMyOrderNoComplete' parameters = [ ('_json_att', ''), ] payload = urllib.urlencode(parameters) r = self.post(url, payload) if not r: print(u'查询未完成订单异常') return RET_ERR obj = r.json() if not (hasKeys(obj, ['status', 'httpstatus', 'data']) and obj['status']): print(u'查询未完成订单失败') dumpObj(obj) return RET_ERR if (hasKeys(obj['data'], ['orderDBList']) and len(obj['data']['orderDBList'])): print(u'查询到有未完成订单,请先处理') return RET_OK if ( hasKeys(obj['data'], ['orderCacheDTO']) and obj['data']['orderCacheDTO'] and hasKeys(obj['data']['orderCacheDTO'], ['status'])): if obj['data']['orderCacheDTO']['status'] == 0: print(u'查询到cache有未完成订单,请先处理') return RET_OK else: if (hasKeys(obj['data']['orderCacheDTO'], ['message'])): dumpObj(obj['data']['orderCacheDTO']['message']) return RET_ERR def main(): print(getTime()) parser = argparse.ArgumentParser() parser.add_argument('-c', '--config', help='Specify config file') parser.add_argument('-u', '--username', help='Specify username to login') parser.add_argument('-p', '--password', help='Specify password to login') parser.add_argument('-d', '--date', help='Specify train date, 2014-01-01') parser.add_argument('-m', '--mail', help='Send email notification') args = parser.parse_args() order = MyOrder() order.initSession() order.initStation() if args.config: order.readConfig(args.config) # 使用指定的配置文件 else: order.readConfig() # 使用默认的配置文件config.ini if args.username: order.username = args.username # 使用指定的账号代替配置文件中的账号 if args.password: order.password = args.password # 使用指定的密码代替配置文件中的密码 if args.date: if checkDate(args.date): order.train_date = args.date # 使用指定的乘车日期代替配置文件中的乘车日期 else: print(u'乘车日期无效, 请重新选择') order.train_date = selectDate() if args.mail: # 有票时自动发送邮件通知 order.notify['mail_enable'] = 1 if args.mail == '1' else 0 tries = 0 while tries < MAX_TRIES: tries += 1 if order.login() == RET_OK: break else: print(u'失败次数太多,自动退出程序') sys.exit() order.selectPassengers(1) while True: time.sleep(1) # 查询车票 if order.queryTickets() != RET_OK: continue # 显示查询结果 if order.printTrains() != RET_OK: continue # 选择菜单列举的动作之一 action = order.selectAction() if action == -1: continue elif action == 0: break # 订单初始化 if order.initOrder() != RET_OK: continue # 检查订单信息 if order.checkOrderInfo() != RET_OK: continue # 查询排队和余票情况 # if 
order.getQueueCount() != RET_OK: # continue # 提交订单到队里中 tries = 0 while tries < 2: tries += 1 if order.confirmSingleForQueue() == RET_OK: break # 获取orderId tries = 0 while tries < 2: tries += 1 if order.queryOrderWaitTime() == RET_OK: break # 正式提交订单 if order.payOrder() == RET_OK: break # 访问未完成订单页面检查是否订票成功 if order.queryMyOrderNotComplete() == RET_OK: print(u'订票成功^_^请在45分钟内完成网上支付,否则系统将自动取消') break print(getTime()) raw_input('Press any key to continue') if __name__ == '__main__': main() # EOF
gpl-2.0
-631,110,503,267,683,200
37.075581
273
0.491627
false
3.173478
false
false
false
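The py12306.py record above guards every HTTP call with a bounded retry loop over the requests exception hierarchy. A minimal standalone sketch of that pattern (the helper name is made up; only documented requests APIs are used):

import requests

MAX_TRIES = 3

def get_with_retries(session, url, tries=MAX_TRIES, timeout=16):
    """Return a response with status 200, or None after `tries` failures."""
    for attempt in range(1, tries + 1):
        try:
            r = session.get(url, timeout=timeout)
        except requests.exceptions.RequestException as e:
            # Base class covering ConnectionError, Timeout, TooManyRedirects,
            # HTTPError and the other cases the original handles one by one.
            print('attempt %d failed: %s' % (attempt, e))
            continue
        if r.status_code == 200:
            return r
        print('attempt %d: status %d' % (attempt, r.status_code))
    return None

# Usage:
# resp = get_with_retries(requests.Session(), 'https://example.com/')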
mjs/juju
acceptancetests/schedule_hetero_control.py
1
3284
#!/usr/bin/env python
from __future__ import print_function

from argparse import ArgumentParser
import json
import os
import re

from jenkins import Jenkins

from jujuci import (
    add_credential_args,
    get_credentials,
)
from utility import (
    find_candidates,
    get_candidates_path,
)


def get_args(argv=None):
    parser = ArgumentParser()
    parser.add_argument(
        'root_dir', help='Directory containing releases and candidates dir')
    parser.add_argument(
        '--all', action='store_true', default=False,
        help='Schedule all candidates for client-server testing.')
    add_credential_args(parser)
    args = parser.parse_args(argv)
    return args, get_credentials(args)


def get_releases(root):
    release_path = os.path.join(root, 'old-juju')
    released_pattern = re.compile('^\d+\.\d+\.\d+[^~]*$')
    for entry in os.listdir(release_path):
        if not os.path.isdir(os.path.join(release_path, entry)):
            continue
        if released_pattern.match(entry):
            yield entry


def get_candidate_info(candidate_path):
    """ Return candidate version and revision build number. """
    with open(os.path.join(candidate_path, 'buildvars.json')) as fp:
        build_vars = json.load(fp)
    return build_vars['version'], build_vars['revision_build']


def calculate_jobs(root, schedule_all=False):
    releases = list(get_releases(root))
    candidates_path = get_candidates_path(root)
    for candidate_path in find_candidates(root, schedule_all):
        parent, candidate = os.path.split(candidate_path)
        if candidate.startswith('1.26'):
            # 1.26 was renamed to 2.0 because it is not compatible with 1.x
            continue
        if parent != candidates_path:
            raise ValueError('Wrong path')
        candidate_version, revision_build = get_candidate_info(candidate_path)
        for release in releases:
            # Releases with the same major number must be compatible.
            if release[:2] != candidate[:2]:
                continue
            for client_os in ('ubuntu', 'osx', 'windows'):
                yield {
                    'old_version': release,  # Client
                    'candidate': candidate_version,  # Server
                    'new_to_old': 'true',
                    'candidate_path': candidate,
                    'client_os': client_os,
                    'revision_build': revision_build,
                }
                yield {
                    'old_version': release,  # Server
                    'candidate': candidate_version,  # Client
                    'new_to_old': 'false',
                    'candidate_path': candidate,
                    'client_os': client_os,
                    'revision_build': revision_build,
                }


def build_jobs(credentials, root, jobs):
    jenkins = Jenkins('http://juju-ci.vapour.ws:8080', *credentials)
    os_str = {"ubuntu": "", "osx": "-osx", "windows": "-windows"}
    for job in jobs:
        jenkins.build_job(
            'compatibility-control{}'.format(os_str[job['client_os']]), job)


def main():
    args, credentials = get_args()
    build_jobs(
        credentials, args.root_dir, calculate_jobs(args.root_dir, args.all))


if __name__ == '__main__':
    main()
agpl-3.0
8,702,900,788,134,910,000
32.510204
78
0.58648
false
4.03936
false
false
false
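schedule_hetero_control.py above derives its Jenkins job matrix by pairing each release with each candidate in both client/server directions. The core pairing logic, reduced to pure Python with illustrative data:

def pairings(releases, candidate):
    # One job per direction (old client/new server and the reverse) per OS,
    # mirroring the two yields in calculate_jobs() above.
    for release in releases:
        if release[:2] != candidate[:2]:
            # First two characters of the version string must match:
            # same major series is assumed compatible.
            continue
        for client_os in ('ubuntu', 'osx', 'windows'):
            yield {'old': release, 'new': candidate,
                   'new_to_old': 'true', 'os': client_os}
            yield {'old': release, 'new': candidate,
                   'new_to_old': 'false', 'os': client_os}

for job in pairings(['1.25.6', '2.0.1'], '2.0.2'):
    print(job)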
NicovincX2/Python-3.5
Algèbre/Opération/scalar_product.py
1
1933
# -*- coding: utf-8 -*-

import os

# Imports for the unqualified names used below; the original script assumed
# they were already in scope (e.g. via a pylab-style interactive session)
from numpy import zeros, cross, dot, arccos, pi
from numpy.random import rand, randn
from numpy.linalg import norm
from matplotlib.pyplot import figure, legend

import seaborn
seaborn.set()
colors = seaborn.color_palette()

import utils

# For 3D plotting we need to import some extra stuff
from mpl_toolkits.mplot3d import Axes3D

# First create two random vectors in 3 dimensional space
v1 = rand(3, 1)
v2 = rand(3, 1)

# And scale them to unit length
v1 = v1 / norm(v1)
v2 = v2 / norm(v2)

# Plot the vectors
o = zeros(3)  # origin

# We'll use the object oriented plotting interface
f = figure(figsize=(8, 8))
ax = f.add_subplot(111, projection="3d", axisbg="white")

ax.plot(*[[o[i], v1[i]] for i in range(3)], linewidth=3, label="vector1")
ax.plot(*[[o[i], v2[i]] for i in range(3)], linewidth=3, label="vector2")

for axisl in ["x", "y", "z"]:
    getattr(ax, "set_%slabel" % axisl)(axisl)  # Here's a fun trick

legend()

f = figure(figsize=(8, 8))
ax = f.add_subplot(111, projection="3d", axisbg="white")

ax.plot(*[[o[i], v1[i]] for i in range(3)], linewidth=3, label="vector1")
ax.plot(*[[o[i], v2[i]] for i in range(3)], linewidth=3, label="vector2")

for axisl in ["x", "y", "z"]:
    getattr(ax, "set_%slabel" % axisl)(axisl)  # Here's a fun trick

legend()

for i in range(100):
    # generate a point that is a weighted sum of the 2 vectors
    w1 = randn(1)
    w2 = randn(1)
    point = w1 * v1 + w2 * v2
    ax.plot(*point, marker=".", color="k")

# We can find a vector that is orthogonal to the plane defined by v1 and v2
# by taking the vector cross product. See the wikipedia page for a
# definition of cross product

# Must be right shape for cross()
v3 = cross(v1.reshape(1, 3), v2.reshape(1, 3)).squeeze()
ax.plot(*[[o[i], v3[i]] for i in range(3)], linewidth=3,
        label="orthogonal vector")
legend()

print(v3[0] * v1[0] + v3[1] * v1[1] + v3[2] * v1[2])
print(dot(v3, v1))

theta = arccos(dot(v2.T, v1)).squeeze()

# and radians can be converted to degrees
theta_deg = theta * (180 / pi)
print(theta, theta_deg)

os.system("pause")
gpl-3.0
-667,495,029,262,324,100
28.287879
75
0.644594
false
2.647945
false
false
false
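As a quick check of the orthogonality claim in scalar_product.py above: the cross product is perpendicular to both inputs, so both dot products come out at zero up to floating-point error. A self-contained NumPy sketch:

import numpy as np

rng = np.random.default_rng(0)
v1 = rng.random(3)
v2 = rng.random(3)
v1 /= np.linalg.norm(v1)
v2 /= np.linalg.norm(v2)

v3 = np.cross(v1, v2)
# Both dot products are ~0 (within floating-point error)
print(np.dot(v3, v1), np.dot(v3, v2))

# Angle between v1 and v2, in radians and degrees; clip guards arccos
# against values a hair outside [-1, 1] from rounding
theta = np.arccos(np.clip(np.dot(v1, v2), -1.0, 1.0))
print(theta, np.degrees(theta))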
nuobit/odoo-addons
connector_sage/models/payroll_sage_payslip_line_payroll/binding.py
1
1439
# Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)

from odoo import models, fields

from odoo.addons.queue_job.job import job


class PayslipLinePayrollBinding(models.Model):
    _name = 'sage.payroll.sage.payslip.line.payroll'
    _inherit = 'sage.payroll.sage.payslip.line'

    _sql_constraints = [
        ('uniq',
         'unique(sage_codigo_empresa, sage_codigo_convenio, sage_fecha_registro_cv, '
         'sage_ano, sage_mesd, sage_tipo_proceso, '
         'sage_codigo_empleado, sage_codigo_concepto_nom)',
         'Payroll Payslip with same ID on Sage already exists.'),
    ]

    @job(default_channel='root.sage')
    def import_payslip_lines(self, payslip_id, backend_record):
        """ Prepare the import of payslip from Sage """
        filters = {
            'CodigoEmpresa': backend_record.sage_company_id,
            'CodigoConvenio': payslip_id.labour_agreement_id.code,
            'FechaRegistroCV': fields.Date.from_string(
                payslip_id.labour_agreement_id.registration_date_cv),
            'Año': payslip_id.year,
            'MesD': ('between', (payslip_id.month_from, payslip_id.month_to)),
            'TipoProceso': payslip_id.process,
        }

        self.env['sage.payroll.sage.payslip.line.payroll'].import_batch(
            backend=backend_record, filters=filters)

        return True
agpl-3.0
8,891,023,557,051,853,000
36.842105
108
0.649513
false
3.139738
false
false
false
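binding.py above relies on Odoo's _sql_constraints hook to enforce uniqueness at the database level. The general shape of that declaration, with a made-up model, runnable only inside an Odoo addon:

from odoo import models, fields

class LibraryBook(models.Model):
    _name = 'library.book'

    isbn = fields.Char(required=True)

    # Each entry is (constraint_name, SQL definition, user-facing message);
    # Odoo creates the constraint on the backing PostgreSQL table.
    _sql_constraints = [
        ('isbn_uniq', 'unique(isbn)',
         'A book with this ISBN already exists.'),
    ]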
cuihaoleo/PyTest
PyTest.py
1
7675
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
#    PyTest.py
#    This file is part of PyTest.
#
#    PyTest
#    An OI (Olympiad in Informatics) judge backend written in Python
#    Copyright (C) 2011 CUI Hao
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#    Author: 崔灏 (CUI Hao)
#    Email: [email protected]
##

import cmd
import os
import shlex
import pickle
from PlayerClass import PyTest_Player
from ProblemClass import PyTest_Problem
from ProblemConfig import Cfg2Prob

class PyTest_Cmd (cmd.Cmd):

    def __init__ (self):
        cmd.Cmd.__init__(self)
        self.prompt = "(PyTest) "
        self.Name = "Unnamed"
        self.Players = {}
        self.Problems = {}
        self.do_EOF = self.do_quit

    def AddProb (self, cfg):
        try:
            prob = Cfg2Prob(cfg)
        except Exception as exp:
            print("Cannot add problem %s: error while importing" % cfg)
            print(exp)
        else:
            if prob.Name in self.Problems.keys():
                print("Cannot add problem %s: a problem with the same name already exists" % prob.Name)
            else:
                self.Problems[prob.Name] = prob
                print("Added problem %s" % prob.Name)

    def AddPlayer (self, path):
        try:
            player = PyTest_Player(path)
        except Exception as exp:
            print("Cannot add player %s: error while importing" % path)
            print(exp)
        else:
            if player.Name in self.Players.keys():
                print("Cannot add player %s: an object with the same name already exists" % player.Name)
            else:
                self.Players[player.Name] = player
                print("Added player %s" % player.Name)

    def DelProb (self, name):
        try:
            del self.Problems[name]
        except KeyError:
            print("Cannot delete problem %s: no such problem" % name)
        else:
            print("Deleted problem %s" % name)

    def DelPlayer (self, name):
        try:
            del self.Players[name]
        except KeyError:
            print("Cannot delete player %s: no such object" % name)
        else:
            print("Deleted player %s" % name)

    def Testit (self, pl, pr):
        try:
            player = self.Players[pl]
        except KeyError:
            print("Unknown player %s" % pl)
            return
        try:
            prob = self.Problems[pr]
        except KeyError:
            print("Unknown problem %s" % pr)
            return
        player.Do(prob)

    def help_quit (self):
        print("quit")
        print("Quit the program")

    def do_quit (self, line):
        exit()

    def help_name (self):
        print("name [@name]")
        print("Set the judge session name; if no name is given, show the current one")

    def do_name (self, name):
        # was self.Judge.Name in the original, but no Judge attribute exists;
        # the session name lives directly on self.Name
        if len(name.strip()) == 0:
            print(self.Name)
        else:
            self.Name = name

    def help_addprob (self):
        print("addprob @config1 [@config2 [...]]")
        print("Add problems")

    def do_addprob (self, line):
        for path in shlex.split(line):
            self.AddProb(path)

    def help_delprob (self):
        print("delprob @problem1 [@problem2 [...]]")
        print("Delete problems")

    def do_delprob (self, line):
        for name in shlex.split(line):
            self.DelProb(name)

    def help_prob (self):
        print("prob")
        print("List all problems")

    def do_prob (self, line):
        for p in self.Problems:
            print("%s: %s" % (p, self.Problems[p].CfgFile))

    def help_add (self):
        print("add @dir1 [@dir2 [...]]")
        print("Add players")

    def do_add (self, line):
        for path in shlex.split(line):
            self.AddPlayer(path)

    def help_addall (self):
        print("addall @dir1 [@dir2 [...]]")
        print("Add every folder inside the given directories as a player")

    def do_addall (self, line):
        for path in shlex.split(line):
            try:
                paths = next(os.walk(path))[1]
            except StopIteration:
                continue
            for f in paths:
                self.AddPlayer(os.path.join(path, f))

    def help_del (self):
        print("del @player1 [@player2 [...]]")
        print("Delete players")

    def do_del (self, line):
        for name in shlex.split(line):
            self.DelPlayer(name)

    def help_player (self):
        print("player")
        print("List all players")

    def do_player (self, line):
        for p in self.Players:
            print("%s: %s" % (p, self.Players[p].Path))

    def help_rec (self):
        print("rec @player @problem")
        print("Show detailed judging records")

    def do_rec (self, line):
        arg = shlex.split(line)
        if len(arg) == 2:
            pl, pr = arg
        else:
            return
        try:
            li = self.Players[pl].Record[pr]
        except KeyError:
            print("No such record")
            return
        for idx in li:
            print()
            print("[Test #%s]" % idx)
            for dic in li[idx]:
                print("<File %s>" % dic.get("file", ""))
                print("Message: %s" % dic.get("msg", ""))
                print("Score: %s" % dic.get("score", ""))

    def help_print (self):
        print("Evaluate and print a Python expression")

    def do_print (self, line):
        try:
            print(eval(line))
        except Exception as err:
            print(err)

    def help_test (self):
        print("Start judging")

    def do_test (self, line):
        arg = shlex.split(line)
        if len(arg) == 2:
            # was a bare Testit(*arg) in the original, which raised NameError
            self.Testit(*arg)
        elif len(arg) == 0:
            pls = input("Players to test (default: all): ").split()
            prs = input("Problems to test (default: all): ").split()
            if len(pls) == 0:
                pls = self.Players.keys()
            if len(prs) == 0:
                prs = self.Problems.keys()
            for player in pls:
                for prob in prs:
                    self.Testit(player, prob)
                    print()

    def help_save (self):
        print("Save this judging session")

    def do_save (self, line):
        path = shlex.split(line)[0]
        if os.path.lexists(path):
            while True:
                ch = input("File already exists. Overwrite (Y/N)? ")
                if ch in ("y", "Y"):
                    break
                elif ch in ("n", "N"):
                    return
        f = open(path, "wb")
        pickle.dump(self.Name, f, pickle.HIGHEST_PROTOCOL)
        pickle.dump(self.Players, f, pickle.HIGHEST_PROTOCOL)
        pickle.dump(self.Problems, f, pickle.HIGHEST_PROTOCOL)
        f.close()

    def help_load (self):
        print("Load a saved session")

    def do_load (self, line):
        path = shlex.split(line)[0]
        try:
            f = open(path, "rb")
        except IOError as err:
            print(err)
            return
        self.Name = pickle.load(f)
        self.Players = pickle.load(f)
        self.Problems = pickle.load(f)

if __name__ == '__main__':
    pytest = PyTest_Cmd()
    pytest.cmdloop()
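
# Editor's illustrative session (the config and directory paths below are
# hypothetical, not part of the PyTest distribution):
#
#   (PyTest) addprob problems/aplusb.cfg
#   (PyTest) addall contestants/
#   (PyTest) test
#   (PyTest) rec alice aplusb
#   (PyTest) save contest.pkl
#   (PyTest) quit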
gpl-3.0
1,755,466,251,019,039,200
26
71
0.508379
false
3.098168
true
false
false
geggo/pyface
pyface/workbench/i_workbench_window_layout.py
3
10821
#------------------------------------------------------------------------------ # Copyright (c) 2005, Enthought, Inc. # All rights reserved. # # This software is provided without warranty under the terms of the BSD # license included in enthought/LICENSE.txt and may be redistributed only # under the conditions described in the aforementioned license. The license # is also available online at http://www.enthought.com/licenses/BSD.txt # Thanks for using Enthought open source! # # Author: Enthought, Inc. # Description: <Enthought pyface package component> #------------------------------------------------------------------------------ """ The workbench window layout interface. """ # Enthought library imports. from traits.api import Event, HasTraits, Instance, Interface, Str from traits.api import provides # Local imports. from .i_editor import IEditor from .i_view import IView class IWorkbenchWindowLayout(Interface): """ The workbench window layout interface. Window layouts are responsible for creating and managing the internal structure of a workbench window (it knows how to add and remove views and editors etc). """ # The Id of the editor area. # FIXME v3: This is toolkit specific. editor_area_id = Str # The workbench window that this is the layout for. window = Instance('pyface.workbench.api.WorkbenchWindow') #### Events #### # Fired when an editor is about to be opened (or restored). editor_opening = Event(IEditor) # Fired when an editor has been opened (or restored). editor_opened = Event(IEditor) # Fired when an editor is about to be closed. editor_closing = Event(IEditor) # Fired when an editor has been closed. editor_closed = Event(IEditor) # Fired when a view is about to be opened (or restored). view_opening = Event(IView) # Fired when a view has been opened (or restored). view_opened = Event(IView) # Fired when a view is about to be closed (*not* hidden!). view_closing = Event(IView) # Fired when a view has been closed (*not* hidden!). view_closed = Event(IView) # FIXME v3: The "just for convenience" returns are a really bad idea. # # Why? They allow the call to be used on the LHS of an expression... # Because they have nothing to do with what the call is supposed to be # doing, they are unlikely to be used (because they are so unexpected and # inconsistently implemented), and only serve to replace two shorter lines # of code with one long one, arguably making code more difficult to read. def activate_editor(self, editor): """ Activate an editor. Returns the editor (just for convenience). """ def activate_view(self, view): """ Activate a view. Returns the view (just for convenience). """ def add_editor(self, editor, title): """ Add an editor. Returns the editor (just for convenience). """ def add_view(self, view, position=None, relative_to=None, size=(-1, -1)): """ Add a view. Returns the view (just for convenience). """ def close_editor(self, editor): """ Close an editor. Returns the editor (just for convenience). """ def close_view(self, view): """ Close a view. FIXME v3: Currently views are never 'closed' in the same sense as an editor is closed. When we close an editor, we destroy its control. When we close a view, we merely hide its control. I'm not sure if this is a good idea or not. It came about after discussion with Dave P. and he mentioned that some views might find it hard to persist enough state that they can be re-created exactly as they were when they are shown again. Returns the view (just for convenience). """ def close(self): """ Close the entire window layout. 
FIXME v3: Should this be called 'destroy'? """ def create_initial_layout(self, parent): """ Create the initial window layout. Returns the layout. """ def contains_view(self, view): """ Return True if the view exists in the window layout. Note that this returns True even if the view is hidden. """ def hide_editor_area(self): """ Hide the editor area. """ def hide_view(self, view): """ Hide a view. Returns the view (just for convenience). """ def refresh(self): """ Refresh the window layout to reflect any changes. """ def reset_editors(self): """ Activate the first editor in every group. """ def reset_views(self): """ Activate the first view in every region. """ def show_editor_area(self): """ Show the editor area. """ def show_view(self, view): """ Show a view. """ #### Methods for saving and restoring the layout ########################## def get_view_memento(self): """ Returns the state of the views. """ def set_view_memento(self, memento): """ Restores the state of the views. """ def get_editor_memento(self): """ Returns the state of the editors. """ def set_editor_memento(self, memento): """ Restores the state of the editors. """ def get_toolkit_memento(self): """ Return any toolkit-specific data that should be part of the memento. """ def set_toolkit_memento(self, memento): """ Restores any toolkit-specific data. """ @provides(IWorkbenchWindowLayout) class MWorkbenchWindowLayout(HasTraits): """ Mixin containing common code for toolkit-specific implementations. """ #### 'IWorkbenchWindowLayout' interface ################################### # The Id of the editor area. # FIXME v3: This is toolkit specific. editor_area_id = Str # The workbench window that this is the layout for. window = Instance('pyface.workbench.api.WorkbenchWindow') #### Events #### # Fired when an editor is about to be opened (or restored). editor_opening = Event(IEditor) # Fired when an editor has been opened (or restored). editor_opened = Event(IEditor) # Fired when an editor is about to be closed. editor_closing = Event(IEditor) # Fired when an editor has been closed. editor_closed = Event(IEditor) # Fired when a view is about to be opened (or restored). view_opening = Event(IView) # Fired when a view has been opened (or restored). view_opened = Event(IView) # Fired when a view is about to be closed (*not* hidden!). view_closing = Event(IView) # Fired when a view has been closed (*not* hidden!). view_closed = Event(IView) ########################################################################### # 'IWorkbenchWindowLayout' interface. ########################################################################### def activate_editor(self, editor): """ Activate an editor. """ raise NotImplementedError def activate_view(self, view): """ Activate a view. """ raise NotImplementedError def add_editor(self, editor, title): """ Add an editor. """ raise NotImplementedError def add_view(self, view, position=None, relative_to=None, size=(-1, -1)): """ Add a view. """ raise NotImplementedError def close_editor(self, editor): """ Close an editor. """ raise NotImplementedError def close_view(self, view): """ Close a view. """ raise NotImplementedError def close(self): """ Close the entire window layout. """ raise NotImplementedError def create_initial_layout(self, parent): """ Create the initial window layout. """ raise NotImplementedError def contains_view(self, view): """ Return True if the view exists in the window layout. """ raise NotImplementedError def hide_editor_area(self): """ Hide the editor area. 
""" raise NotImplementedError def hide_view(self, view): """ Hide a view. """ raise NotImplementedError def refresh(self): """ Refresh the window layout to reflect any changes. """ raise NotImplementedError def reset_editors(self): """ Activate the first editor in every group. """ raise NotImplementedError def reset_views(self): """ Activate the first view in every region. """ raise NotImplementedError def show_editor_area(self): """ Show the editor area. """ raise NotImplementedError def show_view(self, view): """ Show a view. """ raise NotImplementedError #### Methods for saving and restoring the layout ########################## def get_view_memento(self): """ Returns the state of the views. """ raise NotImplementedError def set_view_memento(self, memento): """ Restores the state of the views. """ raise NotImplementedError def get_editor_memento(self): """ Returns the state of the editors. """ raise NotImplementedError def set_editor_memento(self, memento): """ Restores the state of the editors. """ raise NotImplementedError def get_toolkit_memento(self): """ Return any toolkit-specific data that should be part of the memento. """ return None def set_toolkit_memento(self, memento): """ Restores any toolkit-specific data. """ return ########################################################################### # Protected 'MWorkbenchWindowLayout' interface. ########################################################################### def _get_editor_references(self): """ Returns a reference to every editor. """ editor_manager = self.window.editor_manager editor_references = {} for editor in self.window.editors: # Create the editor reference. # # If the editor manager returns 'None' instead of a resource # reference then this editor will not appear the next time the # workbench starts up. This is useful for things like text files # that have an editor but have NEVER been saved. editor_reference = editor_manager.get_editor_memento(editor) if editor_reference is not None: editor_references[editor.id] = editor_reference return editor_references #### EOF ######################################################################
bsd-3-clause
-4,267,509,263,042,674,000
26.675192
80
0.590796
false
4.516277
false
false
false
lmregus/Portfolio
python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/output/color_depth.py
1
1497
from __future__ import unicode_literals from prompt_toolkit.utils import is_windows import os __all__ = [ 'ColorDepth', ] class ColorDepth(object): """ Possible color depth values for the output. """ #: One color only. DEPTH_1_BIT = 'DEPTH_1_BIT' #: ANSI Colors. DEPTH_4_BIT = 'DEPTH_4_BIT' #: The default. DEPTH_8_BIT = 'DEPTH_8_BIT' #: 24 bit True color. DEPTH_24_BIT = 'DEPTH_24_BIT' # Aliases. MONOCHROME = DEPTH_1_BIT ANSI_COLORS_ONLY = DEPTH_4_BIT DEFAULT = DEPTH_8_BIT TRUE_COLOR = DEPTH_24_BIT _ALL = [DEPTH_1_BIT, DEPTH_4_BIT, DEPTH_8_BIT, DEPTH_24_BIT] @classmethod def default(cls, term=''): """ If the user doesn't specify a color depth, use this as a default. """ if term in ('linux', 'eterm-color'): return cls.DEPTH_4_BIT # For now, always use 4 bit color on Windows 10 by default, even when # vt100 escape sequences with ENABLE_VIRTUAL_TERMINAL_PROCESSING are # supported. We don't have a reliable way yet to know whether our # console supports true color or only 4-bit. if is_windows() and 'PROMPT_TOOLKIT_COLOR_DEPTH' not in os.environ: return cls.DEPTH_4_BIT # Check the `PROMPT_TOOLKIT_COLOR_DEPTH` environment variable. if os.environ.get('PROMPT_TOOLKIT_COLOR_DEPTH') in cls._ALL: return os.environ['PROMPT_TOOLKIT_COLOR_DEPTH'] return cls.DEPTH_8_BIT
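
if __name__ == '__main__':
    # Editor's illustrative sketch (not part of prompt_toolkit): show how the
    # default depth reacts to the TERM value and the environment variable.
    print(ColorDepth.default())              # DEPTH_8_BIT on most non-Windows systems
    print(ColorDepth.default(term='linux'))  # DEPTH_4_BIT for the linux console
    os.environ['PROMPT_TOOLKIT_COLOR_DEPTH'] = ColorDepth.DEPTH_24_BIT
    print(ColorDepth.default())              # DEPTH_24_BIT, taken from the environment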
mit
2,101,513,724,883,425,300
27.245283
77
0.616566
false
3.356502
false
false
false
WorldViews/Spirals
dummyServer.py
1
2519
import json, time
import flask
from flask import Flask, render_template, send_file, \
    jsonify, send_from_directory, request
from flask_socketio import SocketIO, emit

rdb = None
try:
    import rethinkdb as rdb
    #rdb.connect('localhost', 28015).repl()
    conn = rdb.connect(db='test')
except:
    print "*** Running without DB ***"
    rdb = None

app = Flask(__name__, static_url_path='')
app.debug = True
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)

@app.route('/')
def index():
    return send_file('index.html')

@app.route('/regp/', methods=['POST','GET'])
def reg():
    print "reg path:", request.path
    print "reg args", request.args
    t = time.time()
    name = request.args.get('name')
    tagStr = request.args.get('tagStr')
    clientType = request.args.get('clientType')
    lon = float(request.args.get('longitude'))
    lat = float(request.args.get('latitude'))
    room = request.args.get('room')
    numUsers = int(request.args.get('numUsers'))
    obj = {'t': t,
           'name': name,
           'tagStr': tagStr,
           'lon': lon,
           'lat': lat,
           'room': room,
           'numUsers': numUsers,
           'clientType': clientType}
    print obj
    return "ok"

@app.route('/Viewer/<path:path>')
def send(path):
    print "send_page", path
    return send_from_directory('Viewer', path)

@app.route('/Cesium/<path:path>')
def send_page(path):
    print "send_page", path
    return send_from_directory('Cesium', path)

@app.route('/db/<path:etype>')
def query(etype):
    #print "query", etype
    t = time.time()
    if rdb == None:
        return flask.jsonify({'error': 'No DB', 't': t, 'records': []})
    recs = rdb.table(etype).run(conn)
    items = [x for x in recs]
    obj = {'type': etype, 't' : t, 'records': items}
    return flask.jsonify(obj)

@socketio.on('my event')
def test_message(message):
    emit('my response', {'data': 'got it!'})

@socketio.on('chat')
def handle_chat(msg):
    print "handle_chat:", msg
    emit('chat', msg, broadcast=True)
    addMsg(msg, 'chat')

@socketio.on('notes')
def handle_notes(msg):
    print "handle_notes:", msg
    emit('notes', msg, broadcast=True)
    addMsg(msg, 'notes')

@socketio.on('people')
def handle_people(msg):
    #print "handle_people:", msg
    emit('people', msg, broadcast=True)

def addMsg(msgStr, etype):
    # skip persistence when no DB connection is available; otherwise the
    # rethinkdb calls below would raise and kill the socket handler
    if rdb is None:
        return
    obj = json.loads(msgStr)
    rdb.table(etype).insert(obj).run(conn)

if __name__ == '__main__':
    #socketio.run(app, port=80)
    socketio.run(app, host="0.0.0.0", port=80)
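
# Example requests against this server (editor's sketch; host and port assume
# the default run configuration above, and the parameter values are made up):
#
#   curl 'http://localhost:80/regp/?name=alice&tagStr=demo&clientType=web&longitude=135.0&latitude=35.0&room=lobby&numUsers=1'
#   curl 'http://localhost:80/db/chat'   # dump stored 'chat' records as JSON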
mit
-4,448,048,485,143,224,300
25.239583
71
0.613339
false
3.08701
false
false
false
nschaetti/nsNLP
data/Text.py
1
2476
# -*- coding: utf-8 -*-
#
# File : data/Text.py
# Description : .
# Date : 16/08/2017
#
# Copyright Nils Schaetti, University of Neuchâtel <[email protected]>

# Imports
from Sample import Sample
import codecs


# Class to access a text
class Text(Sample):
    """
    Class to access a text
    """

    # Constructor
    def __init__(self, text_path, author, text_title):
        """
        Constructor
        :param text_path: Path to the text file
        :param author: Author object the text belongs to
        :param text_title: Title of the text
        """
        super(Text, self).__init__(text_path, author)
        self._text_path = text_path
        self._author = author
        self._title = text_title
    # end __init__

    ########################################
    # Public
    ########################################

    # Get title
    def get_title(self):
        """
        Get title
        :return: The text's title
        """
        return self._title
    # end get_title

    # Get text
    def get_text(self):
        """
        Get text
        :return: The text's content as a unicode string
        """
        return codecs.open(self._text_path, 'r', encoding='utf-8').read()
    # end get_text

    # Save text
    def save(self, text):
        """
        Save text
        :param text: The new content to write
        :return:
        """
        return codecs.open(self._text_path, 'w', encoding='utf-8').write(text)
    # end save

    # Get author
    def get_author(self):
        """
        Get author
        :return: The Author object
        """
        return self._author
    # end get_author

    # Get path
    def get_path(self):
        """
        Get path
        :return: Path to the text file
        """
        return self._text_path
    # end get_path

    # Get X
    def x(self):
        """
        Get X (the sample's input: the text content)
        :return:
        """
        return self.get_text()
    # end x

    # Get Y
    def y(self):
        """
        Get Y (the sample's label: the author's name)
        :return:
        """
        return self.get_author().get_name()
    # end y

    ########################################
    # Override
    ########################################

    # To string
    def __unicode__(self):
        """
        To string
        :return:
        """
        return u"Text(title: {}, path:{}, author:{})".format(self._title, self._text_path, self._author.get_name())
    # end __unicode__

    # To string
    def __str__(self):
        """
        To string
        :return:
        """
        return "Text(title: {}, path:{}, author:{})".format(self._title, self._text_path, self._author.get_name())
    # end __str__

# end Text
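
# Editor's usage sketch (hypothetical path and objects; the author object is
# assumed to expose get_name(), as y() above relies on):
#
#   text = Text("corpus/doyle/study_in_scarlet.txt", author, u"A Study in Scarlet")
#   print(text.get_title())            # u"A Study in Scarlet"
#   features, label = text.x(), text.y()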
gpl-3.0
8,279,968,937,901,342,000
19.121951
115
0.446869
false
4.02439
false
false
false
blindfuzzy/LHF
Tools/linuxprivchk.py
1
25080
#!/usr/bin/env python ############################################################################################################### ## [Title]: linuxprivchecker.py -- a Linux Privilege Escalation Check Script ## [Author]: Mike Czumak (T_v3rn1x) -- @SecuritySift ##------------------------------------------------------------------------------------------------------------- ## [Details]: ## This script is intended to be executed locally on a Linux box to enumerate basic system info and ## search for common privilege escalation vectors such as world writable files, misconfigurations, clear-text ## passwords and applicable exploits. ##------------------------------------------------------------------------------------------------------------- ## [Warning]: ## This script comes as-is with no promise of functionality or accuracy. I have no plans to maintain updates, ## I did not write it to be efficient and in some cases you may find the functions may not produce the desired ## results. For example, the function that links packages to running processes is based on keywords and will ## not always be accurate. Also, the exploit list included in this function will need to be updated over time. ## Feel free to change or improve it any way you see fit. ##------------------------------------------------------------------------------------------------------------- ## [Modification, Distribution, and Attribution]: ## You are free to modify and/or distribute this script as you wish. I only ask that you maintain original ## author attribution and not attempt to sell it or incorporate it into any commercial offering (as if it's ## worth anything anyway :) ############################################################################################################### # conditional import for older versions of python not compatible with subprocess try: import subprocess as sub compatmode = 0 # newer version of python, no need for compatibility mode except ImportError: import os # older version of python, need to use os instead compatmode = 1 # title / formatting bigline = "=================================================================================================" smlline = "-------------------------------------------------------------------------------------------------" print bigline print "LINUX PRIVILEGE ESCALATION CHECKER" print bigline print # loop through dictionary, execute the commands, store the results, return updated dict def execCmd(cmdDict): for item in cmdDict: cmd = cmdDict[item]["cmd"] if compatmode == 0: # newer version of python, use preferred subprocess out, error = sub.Popen([cmd], stdout=sub.PIPE, stderr=sub.PIPE, shell=True).communicate() results = out.split('\n') else: # older version of python, use os.popen echo_stdout = os.popen(cmd, 'r') results = echo_stdout.read().split('\n') cmdDict[item]["results"]=results return cmdDict # print results for each previously executed command, no return value def printResults(cmdDict): for item in cmdDict: msg = cmdDict[item]["msg"] results = cmdDict[item]["results"] print "[+] " + msg for result in results: if result.strip() != "": print " " + result.strip() print return def writeResults(msg, results): f = open("privcheckout.txt", "a"); f.write("[+] " + str(len(results)-1) + " " + msg) for result in results: if result.strip() != "": f.write(" " + result.strip()) f.close() return # Basic system info print "[*] GETTING BASIC SYSTEM INFO...\n" results=[] sysInfo = {"OS":{"cmd":"cat /etc/issue","msg":"Operating System","results":results}, 
"KERNEL":{"cmd":"cat /proc/version","msg":"Kernel","results":results}, "HOSTNAME":{"cmd":"hostname", "msg":"Hostname", "results":results} } sysInfo = execCmd(sysInfo) printResults(sysInfo) # Networking Info print "[*] GETTING NETWORKING INFO...\n" netInfo = {"NETINFO":{"cmd":"/sbin/ifconfig -a", "msg":"Interfaces", "results":results}, "ROUTE":{"cmd":"route", "msg":"Route", "results":results}, "NETSTAT":{"cmd":"netstat -antup | grep -v 'TIME_WAIT'", "msg":"Netstat", "results":results} } netInfo = execCmd(netInfo) printResults(netInfo) # File System Info print "[*] GETTING FILESYSTEM INFO...\n" driveInfo = {"MOUNT":{"cmd":"mount","msg":"Mount results", "results":results}, "FSTAB":{"cmd":"cat /etc/fstab 2>/dev/null", "msg":"fstab entries", "results":results} } driveInfo = execCmd(driveInfo) printResults(driveInfo) # Scheduled Cron Jobs cronInfo = {"CRON":{"cmd":"ls -la /etc/cron* 2>/dev/null", "msg":"Scheduled cron jobs", "results":results}, "CRONW": {"cmd":"ls -aRl /etc/cron* 2>/dev/null | awk '$1 ~ /w.$/' 2>/dev/null", "msg":"Writable cron dirs", "results":results} } cronInfo = execCmd(cronInfo) printResults(cronInfo) # User Info print "\n[*] ENUMERATING USER AND ENVIRONMENTAL INFO...\n" userInfo = {"WHOAMI":{"cmd":"whoami", "msg":"Current User", "results":results}, "ID":{"cmd":"id","msg":"Current User ID", "results":results}, "ALLUSERS":{"cmd":"cat /etc/passwd", "msg":"All users", "results":results}, "SUPUSERS":{"cmd":"grep -v -E '^#' /etc/passwd | awk -F: '$3 == 0{print $1}'", "msg":"Super Users Found:", "results":results}, "HISTORY":{"cmd":"ls -la ~/.*_history; ls -la /root/.*_history 2>/dev/null", "msg":"Root and current user history (depends on privs)", "results":results}, "ENV":{"cmd":"env 2>/dev/null | grep -v 'LS_COLORS'", "msg":"Environment", "results":results}, "SUDOERS":{"cmd":"cat /etc/sudoers 2>/dev/null | grep -v '#' 2>/dev/null", "msg":"Sudoers (privileged)", "results":results}, "LOGGEDIN":{"cmd":"w 2>/dev/null", "msg":"Logged in User Activity", "results":results} } userInfo = execCmd(userInfo) printResults(userInfo) if "root" in userInfo["ID"]["results"][0]: print "[!] 
ARE YOU SURE YOU'RE NOT ROOT ALREADY?\n" # File/Directory Privs print "[*] ENUMERATING FILE AND DIRECTORY PERMISSIONS/CONTENTS...\n" fdPerms = {"WWDIRSROOT":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep root", "msg":"World Writeable Directories for User/Group 'Root'", "results":results}, "WWDIRS":{"cmd":"find / \( -wholename '/home/homedir*' -prune \) -o \( -type d -perm -0002 \) -exec ls -ld '{}' ';' 2>/dev/null | grep -v root", "msg":"World Writeable Directories for Users other than Root", "results":results}, "WWFILES":{"cmd":"find / \( -wholename '/home/homedir/*' -prune -o -wholename '/proc/*' -prune \) -o \( -type f -perm -0002 \) -exec ls -l '{}' ';' 2>/dev/null", "msg":"World Writable Files", "results":results}, "SUID":{"cmd":"find / \( -perm -2000 -o -perm -4000 \) -exec ls -ld {} \; 2>/dev/null", "msg":"SUID/SGID Files and Directories", "results":results}, "ROOTHOME":{"cmd":"ls -ahlR /root 2>/dev/null", "msg":"Checking if root's home folder is accessible", "results":results} } fdPerms = execCmd(fdPerms) printResults(fdPerms) pwdFiles = {"LOGPWDS":{"cmd":"find /var/log -name '*.log' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Logs containing keyword 'password'", "results":results}, "CONFPWDS":{"cmd":"find /etc -name '*.c*' 2>/dev/null | xargs -l10 egrep 'pwd|password' 2>/dev/null", "msg":"Config files containing keyword 'password'", "results":results}, "SHADOW":{"cmd":"cat /etc/shadow 2>/dev/null", "msg":"Shadow File (Privileged)", "results":results} } pwdFiles = execCmd(pwdFiles) printResults(pwdFiles) # Processes and Applications print "[*] ENUMERATING PROCESSES AND APPLICATIONS...\n" if "debian" in sysInfo["KERNEL"]["results"][0] or "ubuntu" in sysInfo["KERNEL"]["results"][0]: getPkgs = "dpkg -l | awk '{$1=$4=\"\"; print $0}'" # debian else: getPkgs = "rpm -qa | sort -u" # RH/other getAppProc = {"PROCS":{"cmd":"ps aux | awk '{print $1,$2,$9,$10,$11}'", "msg":"Current processes", "results":results}, "PKGS":{"cmd":getPkgs, "msg":"Installed Packages", "results":results} } getAppProc = execCmd(getAppProc) printResults(getAppProc) # comment to reduce output otherApps = { "SUDO":{"cmd":"sudo -V | grep version 2>/dev/null", "msg":"Sudo Version (Check out http://www.exploit-db.com/search/?action=search&filter_page=1&filter_description=sudo)", "results":results}, "APACHE":{"cmd":"apache2 -v; apache2ctl -M; httpd -v; apachectl -l 2>/dev/null", "msg":"Apache Version and Modules", "results":results}, "APACHECONF":{"cmd":"cat /etc/apache2/apache2.conf 2>/dev/null", "msg":"Apache Config File", "results":results} } otherApps = execCmd(otherApps) printResults(otherApps) print "[*] IDENTIFYING PROCESSES AND PACKAGES RUNNING AS ROOT OR OTHER SUPERUSER...\n" # find the package information for the processes currently running # under root or another super user procs = getAppProc["PROCS"]["results"] pkgs = getAppProc["PKGS"]["results"] supusers = userInfo["SUPUSERS"]["results"] procdict = {} # dictionary to hold the processes running as super users for proc in procs: # loop through each process relatedpkgs = [] # list to hold the packages related to a process try: for user in supusers: # loop through the known super users if (user != "") and (user in proc): # if the process is being run by a super user procname = proc.split(" ")[4] # grab the process name if "/" in procname: splitname = procname.split("/") procname = splitname[len(splitname)-1] for pkg in pkgs: # loop through the packages if not 
len(procname) < 3: # name too short to get reliable package results if procname in pkg: if procname in procdict: relatedpkgs = procdict[proc] # if already in the dict, grab its pkg list if pkg not in relatedpkgs: relatedpkgs.append(pkg) # add pkg to the list procdict[proc]=relatedpkgs # add any found related packages to the process dictionary entry except: pass for key in procdict: print " " + key # print the process name try: if not procdict[key][0] == "": # only print the rest if related packages were found print " Possible Related Packages: " for entry in procdict[key]: print " " + entry # print each related package except: pass # EXPLOIT ENUMERATION # First discover the avaialable tools print print "[*] ENUMERATING INSTALLED LANGUAGES/TOOLS FOR SPLOIT BUILDING...\n" devTools = {"TOOLS":{"cmd":"which awk perl python ruby gcc cc vi vim nmap find netcat nc wget tftp ftp 2>/dev/null", "msg":"Installed Tools", "results":results}} devTools = execCmd(devTools) printResults(devTools) print "[+] Related Shell Escape Sequences...\n" escapeCmd = {"vi":[":!bash", ":set shell=/bin/bash:shell"], "awk":["awk 'BEGIN {system(\"/bin/bash\")}'"], "perl":["perl -e 'exec \"/bin/bash\";'"], "find":["find / -exec /usr/bin/awk 'BEGIN {system(\"/bin/bash\")}' \\;"], "nmap":["--interactive"]} for cmd in escapeCmd: for result in devTools["TOOLS"]["results"]: if cmd in result: for item in escapeCmd[cmd]: print " " + cmd + "-->\t" + item print print "[*] FINDING RELEVENT PRIVILEGE ESCALATION EXPLOITS...\n" # Now check for relevant exploits (note: this list should be updated over time; source: Exploit-DB) # sploit format = sploit name : {minversion, maxversion, exploitdb#, language, {keywords for applicability}} -- current keywords are 'kernel', 'proc', 'pkg' (unused), and 'os' sploits= { "2.2.x-2.4.x ptrace kmod local exploit":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"3", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "< 2.4.20 Module Loader Local Root Exploit":{"minver":"0", "maxver":"2.4.20", "exploitdb":"12", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4.22 "'do_brk()'" local Root Exploit (PoC)":{"minver":"2.4.22", "maxver":"2.4.22", "exploitdb":"129", "lang":"asm", "keywords":{"loc":["kernel"], "val":"kernel"}}, "<= 2.4.22 (do_brk) Local Root Exploit (working)":{"minver":"0", "maxver":"2.4.22", "exploitdb":"131", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4.x mremap() bound checking Root Exploit":{"minver":"2.4", "maxver":"2.4.99", "exploitdb":"145", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "<= 2.4.29-rc2 uselib() Privilege Elevation":{"minver":"0", "maxver":"2.4.29", "exploitdb":"744", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4 uselib() Privilege Elevation Exploit":{"minver":"2.4", "maxver":"2.4", "exploitdb":"778", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4.x / 2.6.x uselib() Local Privilege Escalation Exploit":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"895", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4/2.6 bluez Local Root Privilege Escalation Exploit (update)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"926", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluez"}}, "<= 2.6.11 (CPL 0) Local Root Exploit (k-rad3.c)":{"minver":"0", "maxver":"2.6.11", "exploitdb":"1397", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "MySQL 4.x/5.0 User-Defined Function Local Privilege Escalation Exploit":{"minver":"0", "maxver":"99", "exploitdb":"1518", 
"lang":"c", "keywords":{"loc":["proc","pkg"], "val":"mysql"}}, "2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2004", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (2)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2005", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (3)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2006", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.6.13 <= 2.6.17.4 sys_prctl() Local Root Exploit (4)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2011", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}}, "<= 2.6.17.4 (proc) Local Root Exploit":{"minver":"0", "maxver":"2.6.17.4", "exploitdb":"2013", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.6.13 <= 2.6.17.4 prctl() Local Root Exploit (logrotate)":{"minver":"2.6.13", "maxver":"2.6.17.4", "exploitdb":"2031", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "Ubuntu/Debian Apache 1.3.33/1.3.34 (CGI TTY) Local Root Exploit":{"minver":"4.10", "maxver":"7.04", "exploitdb":"3384", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}}, "Linux/Kernel 2.4/2.6 x86-64 System Call Emulation Exploit":{"minver":"2.4", "maxver":"2.6", "exploitdb":"4460", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "< 2.6.11.5 BLUETOOTH Stack Local Root Exploit":{"minver":"0", "maxver":"2.6.11.5", "exploitdb":"4756", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"bluetooth"}}, "2.6.17 - 2.6.24.1 vmsplice Local Root Exploit":{"minver":"2.6.17", "maxver":"2.6.24.1", "exploitdb":"5092", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.6.23 - 2.6.24 vmsplice Local Root Exploit":{"minver":"2.6.23", "maxver":"2.6.24", "exploitdb":"5093", "lang":"c", "keywords":{"loc":["os"], "val":"debian"}}, "Debian OpenSSL Predictable PRNG Bruteforce SSH Exploit":{"minver":"0", "maxver":"99", "exploitdb":"5720", "lang":"python", "keywords":{"loc":["os"], "val":"debian"}}, "Linux Kernel < 2.6.22 ftruncate()/open() Local Exploit":{"minver":"0", "maxver":"2.6.22", "exploitdb":"6851", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "< 2.6.29 exit_notify() Local Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.29", "exploitdb":"8369", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.6 UDEV Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8478", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}}, "2.6 UDEV < 141 Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8572", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"udev"}}, "2.6.x ptrace_attach Local Privilege Escalation Exploit":{"minver":"2.6", "maxver":"2.6.99", "exploitdb":"8673", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.6.29 ptrace_attach() Local Root Race Condition Exploit":{"minver":"2.6.29", "maxver":"2.6.29", "exploitdb":"8678", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "Linux Kernel <=2.6.28.3 set_selection() UTF-8 Off By One Local Exploit":{"minver":"0", "maxver":"2.6.28.3", "exploitdb":"9083", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "Test Kernel Local Root Exploit 0day":{"minver":"2.6.18", "maxver":"2.6.30", "exploitdb":"9191", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "PulseAudio (setuid) Priv. 
Escalation Exploit (ubu/9.04)(slack/12.2.0)":{"minver":"2.6.9", "maxver":"2.6.30", "exploitdb":"9208", "lang":"c", "keywords":{"loc":["pkg"], "val":"pulse"}}, "2.x sock_sendpage() Local Ring0 Root Exploit":{"minver":"2", "maxver":"2.99", "exploitdb":"9435", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.x sock_sendpage() Local Root Exploit 2":{"minver":"2", "maxver":"2.99", "exploitdb":"9436", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4/2.6 sock_sendpage() ring0 Root Exploit (simple ver)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9479", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.6 < 2.6.19 (32bit) ip_append_data() ring0 Root Exploit":{"minver":"2.6", "maxver":"2.6.19", "exploitdb":"9542", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4/2.6 sock_sendpage() Local Root Exploit (ppc)":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9545", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "< 2.6.19 udp_sendmsg Local Root Exploit (x86/x64)":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9574", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "< 2.6.19 udp_sendmsg Local Root Exploit":{"minver":"0", "maxver":"2.6.19", "exploitdb":"9575", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4/2.6 sock_sendpage() Local Root Exploit [2]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4/2.6 sock_sendpage() Local Root Exploit [3]":{"minver":"2.4", "maxver":"2.6.99", "exploitdb":"9641", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4.1-2.4.37 and 2.6.1-2.6.32-rc5 Pipe.c Privelege Escalation":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"9844", "lang":"python", "keywords":{"loc":["kernel"], "val":"kernel"}}, "'pipe.c' Local Privilege Escalation Vulnerability":{"minver":"2.4.1", "maxver":"2.6.32", "exploitdb":"10018", "lang":"sh", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.6.18-20 2009 Local Root Exploit":{"minver":"2.6.18", "maxver":"2.6.20", "exploitdb":"10613", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "Apache Spamassassin Milter Plugin Remote Root Command Execution":{"minver":"0", "maxver":"99", "exploitdb":"11662", "lang":"sh", "keywords":{"loc":["proc"], "val":"spamass-milter"}}, "<= 2.6.34-rc3 ReiserFS xattr Privilege Escalation":{"minver":"0", "maxver":"2.6.34", "exploitdb":"12130", "lang":"python", "keywords":{"loc":["mnt"], "val":"reiser"}}, "Ubuntu PAM MOTD local root":{"minver":"7", "maxver":"10.04", "exploitdb":"14339", "lang":"sh", "keywords":{"loc":["os"], "val":"ubuntu"}}, "< 2.6.36-rc1 CAN BCM Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36", "exploitdb":"14814", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "Kernel ia32syscall Emulation Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"15023", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "Linux RDS Protocol Local Privilege Escalation":{"minver":"0", "maxver":"2.6.36", "exploitdb":"15285", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "<= 2.6.37 Local Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15704", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "< 2.6.37-rc2 ACPI custom_method Privilege Escalation":{"minver":"0", "maxver":"2.6.37", "exploitdb":"15774", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "CAP_SYS_ADMIN to root Exploit":{"minver":"0", "maxver":"99", "exploitdb":"15916", 
"lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "CAP_SYS_ADMIN to Root Exploit 2 (32 and 64-bit)":{"minver":"0", "maxver":"99", "exploitdb":"15944", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "< 2.6.36.2 Econet Privilege Escalation Exploit":{"minver":"0", "maxver":"2.6.36.2", "exploitdb":"17787", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "Sendpage Local Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"19933", "lang":"ruby", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.4.18/19 Privileged File Descriptor Resource Exhaustion Vulnerability":{"minver":"2.4.18", "maxver":"2.4.19", "exploitdb":"21598", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.2.x/2.4.x Privileged Process Hijacking Vulnerability (1)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22362", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "2.2.x/2.4.x Privileged Process Hijacking Vulnerability (2)":{"minver":"2.2", "maxver":"2.4.99", "exploitdb":"22363", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, "Samba 2.2.8 Share Local Privilege Elevation Vulnerability":{"minver":"2.2.8", "maxver":"2.2.8", "exploitdb":"23674", "lang":"c", "keywords":{"loc":["proc","pkg"], "val":"samba"}}, "open-time Capability file_ns_capable() Privilege Escalation":{"minver":"0", "maxver":"99", "exploitdb":"25450", "lang":"c", "keywords":{"loc":["kernel"], "val":"kernel"}}, } # variable declaration os = sysInfo["OS"]["results"][0] version = sysInfo["KERNEL"]["results"][0].split(" ")[2].split("-")[0] langs = devTools["TOOLS"]["results"] procs = getAppProc["PROCS"]["results"] kernel = str(sysInfo["KERNEL"]["results"][0]) mount = driveInfo["MOUNT"]["results"] #pkgs = getAppProc["PKGS"]["results"] # currently not using packages for sploit appicability but my in future # lists to hold ranked, applicable sploits # note: this is a best-effort, basic ranking designed to help in prioritizing priv escalation exploit checks # all applicable exploits should be checked and this function could probably use some improvement avgprob = [] highprob = [] for sploit in sploits: lang = 0 # use to rank applicability of sploits keyword = sploits[sploit]["keywords"]["val"] sploitout = sploit + " || " + "http://www.exploit-db.com/exploits/" + sploits[sploit]["exploitdb"] + " || " + "Language=" + sploits[sploit]["lang"] # first check for kernell applicability if (version >= sploits[sploit]["minver"]) and (version <= sploits[sploit]["maxver"]): # next check language applicability if (sploits[sploit]["lang"] == "c") and (("gcc" in str(langs)) or ("cc" in str(langs))): lang = 1 # language found, increase applicability score elif sploits[sploit]["lang"] == "sh": lang = 1 # language found, increase applicability score elif (sploits[sploit]["lang"] in str(langs)): lang = 1 # language found, increase applicability score if lang == 0: sploitout = sploitout + "**" # added mark if language not detected on system # next check keyword matches to determine if some sploits have a higher probability of success for loc in sploits[sploit]["keywords"]["loc"]: if loc == "proc": for proc in procs: if keyword in proc: highprob.append(sploitout) # if sploit is associated with a running process consider it a higher probability/applicability break break elif loc == "os": if (keyword in os) or (keyword in kernel): highprob.append(sploitout) # if sploit is specifically applicable to this OS consider it a higher probability/applicability break elif loc == "mnt": if keyword in mount: 
highprob.append(sploitout) # if sploit is specifically applicable to a mounted file system consider it a higher probability/applicability break else: avgprob.append(sploitout) # otherwise, consider average probability/applicability based only on kernel version print " Note: Exploits relying on a compile/scripting language not detected on this system are marked with a '**' but should still be tested!" print print " The following exploits are ranked higher in probability of success because this script detected a related running process, OS, or mounted file system" for exploit in highprob: print " - " + exploit print print " The following exploits are applicable to this kernel version and should be investigated as well" for exploit in avgprob: print " - " + exploit print print "Finished" print bigline
gpl-3.0
6,533,771,653,508,868,000
66.601078
248
0.614872
false
3.089431
false
false
false
oudalab/phyllo
phyllo/extractors/regula_ad_monachoDB.py
1
3765
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup

# several names in the <pagehead> but not sure what to put as an author name


def main():
    # The collection URL below.
    collURL = 'http://www.thelatinlibrary.com/regula.html'
    collOpen = urllib.request.urlopen(collURL)
    collSOUP = BeautifulSoup(collOpen, 'html5lib')
    author = "unknown"
    colltitle = collSOUP.title.string.strip()
    date = "no date found"
    textsURL = [collURL]

    with sqlite3.connect('texts.db') as db:
        c = db.cursor()
        c.execute(
            'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
            ' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
            ' link TEXT, documentType TEXT)')
        c.execute("DELETE FROM texts WHERE title = 'REGULA AD MONACHOS I'")
        c.execute("DELETE FROM texts WHERE title = 'SS. PATRUM REGULA AD MONACHOS II.'")
        c.execute("DELETE FROM texts WHERE title = 'SS. PATRUM REGULA AD MONACHOS III.'")
        c.execute("DELETE FROM texts WHERE title = 'REGULA ORIENTALIS\nEX PATRUM ORIENTALIUM REGULIS COLLECTA'")

        for url in textsURL:
            chapter = "Preface"
            verse = 0
            title = "REGULA AD MONACHOS I"

            openurl = urllib.request.urlopen(url)
            textsoup = BeautifulSoup(openurl, 'html5lib')
            getp = textsoup.find_all('p')
            for p in getp:
                # make sure it's not a paragraph without the main text
                try:
                    if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                 'internal_navigation']:  # these are not part of the main text
                        continue
                except:
                    pass
                verses = []
                text = p.get_text()
                text = text.strip()
                if p.find('b') is not None:
                    if text.startswith("SS.") or text.startswith("REGULA"):  # this is the title of a new work
                        title = text
                        # keep chapter a string so the concatenation below
                        # cannot raise a TypeError (was the int -1)
                        chapter = "-1"
                        continue
                    else:
                        if text.startswith("CAPUT"):
                            chapter = text
                            print(chapter)
                            verse = 0
                            continue
                        else:
                            chapter = chapter + ": " + text
                            continue
                if title == "REGULA AD MONACHOS I":
                    verses.append(text)
                elif text.startswith("PRAEFATIO"):
                    chapter = text
                    verse = 0
                    continue
                elif re.match('[IVXL]+\.', text):
                    chapter = text.split(" ")[0].strip()
                    print(chapter)
                    verse = 0
                    text = text.replace(chapter, '')
                    verses.append(text)
                else:
                    verses.append(text)

                for v in verses:
                    if v.startswith('Christian'):
                        continue
                    if v is None or v == '' or v.isspace():
                        continue
                    # verse number assignment.
                    verse += 1
                    c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
                              (None, colltitle, title, 'Latin', author, date, chapter, verse, v, url, 'prose'))


if __name__ == '__main__':
    main()
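
# Editor's usage note: after a run, the scraped passages can be inspected
# straight from the generated SQLite file using the schema created above, e.g.
#
#   sqlite3 texts.db "SELECT chapter, verse, passage FROM texts LIMIT 5;"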
apache-2.0
730,117,628,980,353,400
36.277228
112
0.468526
false
4.585871
false
false
false
iamjake648/jasper-dictionary
Define.py
1
1593
#Written by Jake Schultz
#TODO Add more lang support, limit number of results returned

import re
from urllib2 import Request, urlopen, URLError
import json

WORDS = ["DEFINE","WHAT DOES %S MEAN","DEFINITION", "WHAT IS [A|AN]? %S"]
PRIORITY = 1

def handle(text, mic, profile, recursive=False):
    text = re.sub(r"(?i)(define|(what is the\s)?definition of|what does|mean|what is (a|an)?)\b","", text ).strip()
    if len(text) != 0:
        #Yandex Dictionary API Key
        dict_key = profile['keys']['YANDEX_DICT']
        #fetch and speak the definition
        get_def(text,mic,dict_key)
    elif not recursive:
        mic.say("What word would you like to define?")
        handle(mic.activeListen(), mic, profile, True)

def get_def(text,mic,key):
    #make a call to the API
    request = Request('https://dictionary.yandex.net/api/v1/dicservice.json/lookup?key='+key+'&lang=en-en&text='+text)
    try:
        response = urlopen(request)
        data = json.load(response)
        if len(data["def"]) == 0:
            mic.say("I could not find a definition for " + str(text))
        else:
            #get the word type (noun, verb, etc.)
            word_type = data["def"][0]["pos"]
            mic.say("The word is a " + word_type)
            defs = data["def"][0]["tr"]
            #loop through the definitions
            for entry in defs:
                mic.say(entry["text"])
    except URLError, e:
        mic.say("Unable to reach dictionary API.")

def isValid(text):
    # \b word boundaries replace the original \D, which matched any non-digit
    # character rather than the literal word "define"
    return bool(re.search(r'\bdefine\b|what does\s(.*?)\smean|\bdefinition\b|what is\s\w+\b', text, re.IGNORECASE))
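
# Editor's sketch of the module contract (mic and profile are hypothetical
# stand-ins for the objects Jasper passes in): Jasper calls isValid() to route
# a transcription here, then handle() with the mic and user profile.
#
#   isValid("What does ephemeral mean")  # -> True
#   handle("define ephemeral", mic, {'keys': {'YANDEX_DICT': '<api-key>'}})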
gpl-2.0
1,097,689,116,695,614,700
34.4
118
0.603264
false
3.346639
false
false
false
felipenaselva/repo.felipe
plugin.video.uwc/k18.py
1
2513
''' Ultimate Whitecream Copyright (C) 2015 mortael This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import urllib, urllib2, re, cookielib, os.path, sys, socket import xbmc, xbmcplugin, xbmcgui, xbmcaddon import utils #230: k18.Main() #231: k18.List(url) #232: k18.Playvid(url, name, download) #233: k18.Cat(url) #234: k18.Search(url, keyword) progress = utils.progress def Main(): utils.addDir('[COLOR hotpink]Categories[/COLOR]','http://k18.co/',233,'','') utils.addDir('[COLOR hotpink]Search[/COLOR]','http://k18.co/?s=',234,'','') List('http://k18.co/page/1/') xbmcplugin.endOfDirectory(utils.addon_handle) def List(url): listhtml = utils.getHtml(url, '') match = re.compile(r'class="content-list-thumb">\s+<a href="([^"]+)" title="([^"]+)">.*?src="([^"]+)"', re.DOTALL | re.IGNORECASE).findall(listhtml) for videopage, name, img in match: name = utils.cleantext(name) utils.addDownLink(name, videopage, 232, img, '') try: nextp=re.compile('next page-numbers" href="([^"]+)">&raquo;', re.DOTALL | re.IGNORECASE).findall(listhtml)[0] utils.addDir('Next Page', nextp, 231,'') except: pass xbmcplugin.endOfDirectory(utils.addon_handle) def Search(url, keyword=None): searchUrl = url if not keyword: utils.searchDir(url, 234) else: title = keyword.replace(' ','+') searchUrl = searchUrl + title print "Searching URL: " + searchUrl List(searchUrl) def Cat(url): cathtml = utils.getHtml(url, '') match = re.compile('0" value="([^"]+)">([^<]+)<', re.DOTALL | re.IGNORECASE).findall(cathtml) for catpage, name in match: catpage = 'http://k18.co/?cat=' + catpage utils.addDir(name, catpage, 231, '') xbmcplugin.endOfDirectory(utils.addon_handle) def Playvid(url, name, download=None): utils.PLAYVIDEO(url, name, download)
gpl-2.0
-4,485,078,390,973,765,600
32.959459
152
0.655392
false
3.485437
false
false
false
AutorestCI/azure-sdk-for-python
azure-mgmt-network/azure/mgmt/network/v2017_09_01/models/application_gateway_url_path_map.py
1
3397
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from .sub_resource import SubResource class ApplicationGatewayUrlPathMap(SubResource): """UrlPathMaps give a url path to the backend mapping information for PathBasedRouting. :param id: Resource ID. :type id: str :param default_backend_address_pool: Default backend address pool resource of URL path map. :type default_backend_address_pool: ~azure.mgmt.network.v2017_09_01.models.SubResource :param default_backend_http_settings: Default backend http settings resource of URL path map. :type default_backend_http_settings: ~azure.mgmt.network.v2017_09_01.models.SubResource :param default_redirect_configuration: Default redirect configuration resource of URL path map. :type default_redirect_configuration: ~azure.mgmt.network.v2017_09_01.models.SubResource :param path_rules: Path rule of URL path map resource. :type path_rules: list[~azure.mgmt.network.v2017_09_01.models.ApplicationGatewayPathRule] :param provisioning_state: Provisioning state of the backend http settings resource. Possible values are: 'Updating', 'Deleting', and 'Failed'. :type provisioning_state: str :param name: Name of the resource that is unique within a resource group. This name can be used to access the resource. :type name: str :param etag: A unique read-only string that changes whenever the resource is updated. :type etag: str :param type: Type of the resource. :type type: str """ _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'default_backend_address_pool': {'key': 'properties.defaultBackendAddressPool', 'type': 'SubResource'}, 'default_backend_http_settings': {'key': 'properties.defaultBackendHttpSettings', 'type': 'SubResource'}, 'default_redirect_configuration': {'key': 'properties.defaultRedirectConfiguration', 'type': 'SubResource'}, 'path_rules': {'key': 'properties.pathRules', 'type': '[ApplicationGatewayPathRule]'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'etag': {'key': 'etag', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__(self, id=None, default_backend_address_pool=None, default_backend_http_settings=None, default_redirect_configuration=None, path_rules=None, provisioning_state=None, name=None, etag=None, type=None): super(ApplicationGatewayUrlPathMap, self).__init__(id=id) self.default_backend_address_pool = default_backend_address_pool self.default_backend_http_settings = default_backend_http_settings self.default_redirect_configuration = default_redirect_configuration self.path_rules = path_rules self.provisioning_state = provisioning_state self.name = name self.etag = etag self.type = type
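
# Editor's illustrative sketch (not generated code): constructing the model
# with keyword arguments; the name and resource ID below are hypothetical.
#
#   url_path_map = ApplicationGatewayUrlPathMap(
#       name='pathmap1',
#       default_backend_address_pool=SubResource(id='<pool-resource-id>'),
#       path_rules=[],
#   )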
mit
-8,994,817,424,965,197,000
47.528571
215
0.664999
false
4.137637
true
false
false
chengdh/openerp-ktv
openerp/addons/ktv_sale/room_operate.py
1
3418
# -*- coding: utf-8 -*-
from osv import osv,fields
from room import room

class room_operate(osv.osv):
    '''
    Room operation class.
    All of the following count as room operations:
        1 reserve            2 normal open (check-in)    3 buy timed session
        4 buy-out session    5 extend time               6 refund time
        7 change room        8 merge rooms
    A room is linked to room_operate through cur_room_operate_id, which marks
    the operation the room is currently under.
    room_operate has a one2many relation to each of the operations above, so a
    single room_operate exposes every operation performed while the room was
    open; at checkout we simply iterate over all of them and compute the bill.
    '''
    _name = "ktv.room_operate"
    # this object is referenced elsewhere, so use bill_no as the record name
    _rec_name = "bill_no"
    _description = "Room operation class; many2one relation to room"
    _columns = {
        "operate_date" : fields.datetime('operate_datetime',required = True),
        "room_id" : fields.many2one('ktv.room','room_id',required = True),
        "bill_no" : fields.char("bill_no",size = 64,required = True,help = "bill number"),
        "room_scheduled_ids" : fields.one2many("ktv.room_scheduled","room_operate_id",help="list of reservations"),
        "room_opens_ids" : fields.one2many("ktv.room_opens","room_operate_id",help="list of room-open records"),
    }
    _defaults = {
        'operate_date' : fields.datetime.now,
        'bill_no': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'ktv.room_operate'),
    }

    def process_operate(self,cr,uid,operate_values):
        """
        Single entry point for all room operations; dispatches to the business
        class named in the payload.
        Designed this way to isolate change: if server-side logic changes, the
        client call site needs no modification, and adding a new business
        entity only requires adding the entity itself. The JS side should wrap
        the same calling interface to isolate change as well.
        :params room_id integer  room id
        :operate_values          business data posted by the front end
        :operate[osv_name]       name of the business object to invoke, e.g. ktv.room_checkout
        Example call -- opening a room returns three values:
            1 the business entity object the operation created
            2 the state the room should move to
            3 a cron record used to schedule timed actions on the room:
            (operate_obj,room_state,cron) = self.pool.get(operate_values['osv_name']).process_operate(cr,uid,operate_values)
        Updates the room state, registers the cron record, and returns the result.
        """
        room_id = operate_values['room_id']
        (operate_obj,room_state,cron) = self.pool.get(operate_values['osv_name']).process_operate(cr,uid,operate_values)
        # update the room state
        self.pool.get('ktv.room').write(cr,uid,room_id,{'state' : room_state})
        # TODO: register the cron record
        if cron:
            self._create_operate_cron(cr,uid,cron)
        room_fields = self.pool.get('ktv.room').fields_get(cr,uid).keys()
        room = self.pool.get('ktv.room').read(cr,uid,room_id,room_fields)
        # return both objects: room and room_operate
        return {'room' : room,'room_operate' : operate_obj}

    def _create_operate_cron(self,cr,uid,cron_vals):
        """
        Create a cron job; needed when a timed room-close action must run later.
        :params dict cron_vals  attributes of the scheduled job
        """
        return self.pool.get('ir.cron').create(cr,uid,cron_vals)
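
# Editor's sketch of the dispatch payload (illustrative values; only room_id
# and osv_name are read by process_operate itself, any other keys belong to
# the specific business object being invoked):
#
#   operate_values = {
#       'osv_name': 'ktv.room_checkout',
#       'room_id': 1,
#   }
#   result = room_operate_obj.process_operate(cr, uid, operate_values)
#   # result -> {'room': {...}, 'room_operate': <business entity>}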
agpl-3.0
-4,107,985,104,135,844,400
35.166667
120
0.621352
false
1.917526
false
false
false
dhimmel/hetio
hetnetpy/permute.py
1
9110
import collections
import random
import logging

from hetnetpy.hetnet import Graph


def permute_graph(graph, multiplier=10, seed=0, metaedge_to_excluded=dict(), log=False):
    """
    Derive a permuted hetnet from an input hetnet. This method applies the
    XSwap algorithm separately for each metaedge. Hence, node degree is
    preserved for each type of edge. However, edges are randomized / shuffled.

    Users are recommended to interrogate the reported statistics to ensure
    that edges appear to be sufficiently randomized. Primarily, the number of
    edges of a given metaedge that remain unchanged from the original hetnet
    should have reached an asymptote. If the number of unchanged edges has not
    yet stabilized, further randomization is possible with this approach.

    Parameters
    ----------
    graph : hetnetpy.hetnet.Graph
        Input hetnet to create a permuted derivative from
    multiplier : int or float
        This is multiplied by the number of edges for each metaedge to
        determine the number of swaps to attempt.
    seed : int
        Seed to initialize Python random number generator. When creating many
        permuted hetnets, it's recommended to increment this number, such that
        each round of permutation shuffles edges in a different order.
    metaedge_to_excluded : dict (metaedge -> set)
        Edges to exclude. This argument has not been extensively used in
        practice.
    log : bool
        Whether to log diagnostic INFO via python's logging module.

    Returns
    -------
    permuted_graph : hetnetpy.hetnet.Graph
        A permuted hetnet derived from the input graph.
    stats : list of dicts
        A list where each item is a dictionary of permutation statistics at a
        checkpoint for a specific metaedge. These statistics allow tracking
        the progress of the permutation as the number of attempted swaps
        increases.
    """
    if log:
        logging.info("Creating permuted graph template")
    permuted_graph = Graph(graph.metagraph)
    for (metanode_identifier, node_identifier), node in graph.node_dict.items():
        permuted_graph.add_node(
            metanode_identifier, node_identifier, name=node.name, data=node.data
        )

    if log:
        logging.info("Retrieving graph edges")
    metaedge_to_edges = graph.get_metaedge_to_edges(exclude_inverts=True)

    if log:
        logging.info("Adding permuted edges")

    all_stats = list()
    for metaedge, edges in metaedge_to_edges.items():
        if log:
            logging.info(metaedge)
        excluded_pair_set = metaedge_to_excluded.get(metaedge, set())
        pair_list = [(edge.source.get_id(), edge.target.get_id()) for edge in edges]
        directed = metaedge.direction != "both"
        permuted_pair_list, stats = permute_pair_list(
            pair_list,
            directed=directed,
            multiplier=multiplier,
            excluded_pair_set=excluded_pair_set,
            seed=seed,
            log=log,
        )
        for stat in stats:
            stat["metaedge"] = metaedge
            stat["abbrev"] = metaedge.abbrev
        all_stats.extend(stats)

        for pair in permuted_pair_list:
            permuted_graph.add_edge(pair[0], pair[1], metaedge.kind, metaedge.direction)

    return permuted_graph, all_stats


def permute_pair_list(
    pair_list,
    directed=False,
    multiplier=10,
    excluded_pair_set=set(),
    seed=0,
    log=False,
    inplace=False,
):
    """
    Permute edges (of a single type) in a graph according to the XSwap
    function described in https://doi.org/f3mn58. This method selects two
    edges and attempts to swap their endpoints. If the swap would result in a
    valid edge, the swap proceeds. Otherwise, the swap is skipped. The end
    result is that node degree is preserved, but edges are shuffled, thereby
    losing their original meaning.

    Parameters
    ----------
    pair_list : list of tuples
        List of edges to permute. Each edge is represented as a
        (source, target) tuple. source and target represent nodes and can be
        any Python objects that define __eq__. In other words, this function
        does not assume any specific format for nodes. If the edges are from a
        bipartite or directed graph, then all tuples must have the same
        alignment. For example, if the edges represent the bipartite
        Compound-binds-Gene relationship, all tuples should be of the form
        (compound, gene) and not intermixed with (gene, compound) tuples. The
        only instance where order of the source and target is not important is
        for an undirected edge type where the source and target nodes are of
        the same type, such as Gene-interacts-Gene.
    directed : bool
        Whether the edge should be considered directed. If False, a swap that
        creates an a-b edge will be invalid if a b-a edge already exists.
    multiplier : int or float
        This is multiplied by the number of edges in pair_list to determine
        the number of swaps to attempt.
    excluded_pair_set : set of tuples
        Set of possible edges to forbid. If a swap would create an edge in
        this set, it would be considered invalid and hence skipped.
    seed : int
        Seed to initialize Python random number generator.
    log : bool
        Whether to log diagnostic INFO via python's logging module.
    inplace : bool
        Whether to modify the edge list in place.

    Returns
    -------
    pair_list : list of tuples
        The permuted edges, derived from the input pair_list.
    stats : list of dicts
        A list where each item is a dictionary of permutation statistics at a
        checkpoint. Statistics are collected at 10 checkpoints, spaced evenly
        by the number of attempts.
    """
    random.seed(seed)
    if not inplace:
        pair_list = pair_list.copy()
    pair_set = set(pair_list)
    assert len(pair_set) == len(pair_list)

    edge_number = len(pair_list)
    n_perm = int(edge_number * multiplier)

    count_same_edge = 0
    count_self_loop = 0
    count_duplicate = 0
    count_undir_dup = 0
    count_excluded = 0

    if log:
        logging.info(
            "{} edges, {} permutations (seed = {}, directed = {}, {} excluded_edges)".format(
                edge_number, n_perm, seed, directed, len(excluded_pair_set)
            )
        )

    orig_pair_set = pair_set.copy()
    step = max(1, n_perm // 10)
    print_at = list(range(step, n_perm, step)) + [n_perm - 1]
    stats = list()

    for i in range(n_perm):

        # Sample two random edges
        i_0 = random.randrange(edge_number)
        i_1 = random.randrange(edge_number)

        # Same edge selected twice
        if i_0 == i_1:
            count_same_edge += 1
            continue
        pair_0 = pair_list.pop(i_0)
        pair_1 = pair_list.pop(i_1 - 1 if i_0 < i_1 else i_1)

        new_pair_0 = pair_0[0], pair_1[1]
        new_pair_1 = pair_1[0], pair_0[1]

        valid = False
        for pair in new_pair_0, new_pair_1:
            if pair[0] == pair[1]:
                count_self_loop += 1
                break  # edge is a self-loop
            if pair in pair_set:
                count_duplicate += 1
                break  # edge is a duplicate
            if not directed and (pair[1], pair[0]) in pair_set:
                count_undir_dup += 1
                break  # edge is a duplicate
            if pair in excluded_pair_set:
                count_excluded += 1
                break  # edge is excluded
        else:
            # edge passed all validity conditions
            valid = True

        # If new edges are invalid
        if not valid:
            for pair in pair_0, pair_1:
                pair_list.append(pair)

        # If new edges are valid
        else:
            for pair in pair_0, pair_1:
                pair_set.remove(pair)
            for pair in new_pair_0, new_pair_1:
                pair_set.add(pair)
                pair_list.append(pair)

        if i in print_at:
            stat = collections.OrderedDict()
            stat["cumulative_attempts"] = i
            index = print_at.index(i)
            stat["attempts"] = (
                print_at[index] + 1
                if index == 0
                else print_at[index] - print_at[index - 1]
            )
            stat["complete"] = (i + 1) / n_perm
            stat["unchanged"] = len(orig_pair_set & pair_set) / len(pair_set)
            stat["same_edge"] = count_same_edge / stat["attempts"]
            stat["self_loop"] = count_self_loop / stat["attempts"]
            stat["duplicate"] = count_duplicate / stat["attempts"]
            stat["undirected_duplicate"] = count_undir_dup / stat["attempts"]
            stat["excluded"] = count_excluded / stat["attempts"]
            stats.append(stat)

            count_same_edge = 0
            count_self_loop = 0
            count_duplicate = 0
            count_undir_dup = 0
            count_excluded = 0

    assert len(pair_set) == edge_number

    return pair_list, stats
cc0-1.0
7,102,797,442,596,513,000
36.03252
93
0.615038
false
4.083371
false
false
false
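A minimal usage sketch for permute_pair_list above; the toy edge list, multiplier, and seed are illustrative, not from the source record:

    from hetnetpy.permute import permute_pair_list

    # Four hypothetical undirected edges between string-labeled nodes.
    edges = [("a", "x"), ("a", "y"), ("b", "x"), ("c", "z")]
    permuted, stats = permute_pair_list(edges, directed=False, multiplier=10, seed=0)
    # XSwap preserves the edge count and every node's degree.
    assert len(permuted) == len(edges)
    # Each stats entry reports checkpoint progress, e.g. stats[-1]["unchanged"].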
coetzeevs/chiron
mysite/polls/views.py
1
1976
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from .models import Question, Choice
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from django.core.mail import EmailMessage
from django.conf import settings


class IndexView(generic.ListView):
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """
        Return the last five published questions (not including those set to
        be published in the future).
        """
        return Question.objects.filter(
            pub_date__lte=timezone.now()
        ).order_by('-pub_date')[:5]


class DetailView(generic.DetailView):
    model = Question
    template_name = 'polls/detail.html'

    def get_queryset(self):
        """
        Excludes any questions that aren't published yet.
        """
        return Question.objects.filter(pub_date__lte=timezone.now())


class ResultsView(generic.DetailView):
    model = Question
    template_name = 'polls/results.html'


def email(request):
    email = EmailMessage('hello', 'Hello Johan, Minder OLX en meer ChiChi',
                         settings.EMAIL_HOST_USER, to=['[email protected]'])
    email.send()
    return HttpResponse("Hello, world. You're at the polls index.")


def vote(request, question_id):
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
mit
-8,582,128,274,714,350,000
30.380952
130
0.745951
false
3.509769
false
false
false
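The vote() view above reverses 'polls:results', which implies a namespaced URLconf; a hypothetical polls/urls.py consistent with these views (the exact route strings are assumptions, not part of the record) would be:

    from django.urls import path
    from . import views

    app_name = 'polls'
    urlpatterns = [
        path('', views.IndexView.as_view(), name='index'),
        path('<int:pk>/', views.DetailView.as_view(), name='detail'),
        path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
        path('<int:question_id>/vote/', views.vote, name='vote'),
    ]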
LACNIC/simon
simon-web/simon_app/templatetags/simon_extras.py
1
2401
from django import template
from datetime import datetime
from simon_app.functions import GMTUY
import operator

"""
Module that holds the Simon custom template filters
"""
register = template.Library()


@register.filter(name="substract")
def substract(value, arg):
    """
    Substract
    """
    return value - arg


@register.filter(name="divide")
def divide(value, arg):
    """
    Float division
    """
    return float(value) / float(arg)


@register.filter(name="percentage")
def percentage(value, arg):
    """
    Percentage
    """
    return 100.0 * divide(value, arg)


@register.filter(name="unit_shortener")
def unit_shortener(value):
    """
    Unit converter
    """
    try:
        int(value)
        float(value)
    except:
        return "N/A"

    K = 1000
    M = K * K
    G = K * M
    T = K * G
    if value > T:
        return "%.1f %s" % (1.0 * value / T, 'T')
    if value > G:
        return "%.1f %s" % (1.0 * value / G, 'G')
    if value > M:
        return "%.1f %s" % (1.0 * value / M, 'M')
    if value > K:
        return "%.1f %s" % (1.0 * value / K, 'K')
    return value


@register.filter(name="time_since")
def time_since(value):
    """
    :param now:
    :return:
    """
    td = datetime.now(GMTUY()) - value
    print td
    if td.days > 1:
        return "mas de un dia"
    elif td.seconds > 3600:
        mins = "%.0f minutos" % ((td.seconds % 3600) / 60)
        horas = "%.0f %s" % (td.seconds / 3600,
                             "horas" if td.seconds / 3600 > 1 else "hora")
        return "%s %s" % (horas, mins)
    elif td.seconds > 60:
        return "%.0f minutos" % (td.seconds / 60)
    else:
        return "%.0f segundos" % td.seconds


@register.filter(name="max")
def max_(value, arg):
    """
    :param value:
    :param arg:
    :return:
    """
    if arg == 'v6_rate':
        return str(max([v.v6_rate for v in value]))
    return "%s %s" % (value, arg)


@register.filter(name="get_by_attribute")
def get_by_attribute(objects, raw_args):
    print raw_args
    key, value = raw_args.split(' ')
    print key, value
    func = operator.attrgetter(key)
    for o in objects:
        if func(o) == value:
            return o

    class Object():
        pass

    a = Object()
    setattr(a, key, 0)
    return a


@register.filter(name="get_attribute")
def get_attribute(object, attr):
    func = operator.attrgetter(attr)
    return func(object)
gpl-2.0
4,992,362,165,801,028,000
18.208
93
0.548105
false
3.222819
false
false
false
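Because the filters above are plain functions, they can be sanity-checked directly without rendering a template; the expected values below follow from the code as written:

    assert substract(10, 3) == 7
    assert percentage(25, 200) == 12.5
    assert unit_shortener(1500000) == "1.5 M"   # 1.5e6 falls in the mega range
    assert unit_shortener("oops") == "N/A"      # non-numeric input is caught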
gpailler/AtlassianBot
plugins/stash.py
1
1971
# coding: utf-8
import requests

from utils import rest


class Stash(object):
    def __init__(self, server):
        self.__server = server

    def get_stash_branches(self, repos, project, filter):
        results = []
        for repo in repos:
            path = '/rest/api/1.0/projects/{project}/repos/{repo}/branches'\
                .format(project=project, repo=repo)
            data = {
                'filterText': filter,
                'details': True,
                'limit': 100
            }
            request = rest.get(self.__server, path, data)

            for result in request.json()['values']:
                results.append((
                    repo,
                    result['id'],
                    result['displayId'],
                    result['latestChangeset']))

        return results

    def branch_merged(self, project, basebranches, repo, branch):
        for to in basebranches:
            path = ('/rest/api/1.0/projects/{project}/repos/{repo}/'
                    'compare/changes/').format(project=project, repo=repo)
            data = {
                'from': branch,
                'to': to,
                'limit': 1
            }
            request = rest.get(self.__server, path, data)
            if request.status_code != requests.codes.ok:
                raise Exception(request.text)
            else:
                if request.json()['size'] == 0:
                    return True

        return False

    def remove_git_branches(self, project, repo, branchkey, changeset):
        path = ('/rest/branch-utils/1.0/projects/{project}/repos/{repo}/'
                'branches').format(project=project, repo=repo)
        data = {
            'name': branchkey,
            'endPoint': changeset,
            'dryRun': False
        }
        request = rest.delete(self.__server, path, data)
        if request.status_code != requests.codes.no_content:
            raise Exception(request.text)
mit
-3,836,757,531,418,483,700
30.790323
76
0.499239
false
4.489749
false
false
false
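A hedged driver for the Stash helper above; the server URL, project key, repo and branch names are placeholders, and `rest` is the utils module the class already imports:

    stash = Stash('https://stash.example.com')
    for repo, branch_id, display_id, changeset in stash.get_stash_branches(
            ['my-repo'], 'PROJ', 'feature/'):
        # Delete feature branches that are already merged into master.
        if stash.branch_merged('PROJ', ['refs/heads/master'], repo, branch_id):
            stash.remove_git_branches('PROJ', repo, branch_id, changeset)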
samdoshi/teletype
utils/docs.py
1
6073
#!/usr/bin/env python3

import sys
from pathlib import Path

import jinja2
import pypandoc
import pytoml as toml

from common import list_ops, list_mods, validate_toml, get_tt_version

if (sys.version_info.major, sys.version_info.minor) < (3, 6):
    raise Exception("need Python 3.6 or later")

THIS_FILE = Path(__file__).resolve()
ROOT_DIR = THIS_FILE.parent.parent
TEMPLATE_DIR = ROOT_DIR / "utils" / "templates"
DOCS_DIR = ROOT_DIR / "docs"
OP_DOCS_DIR = DOCS_DIR / "ops"
FONTS_DIR = ROOT_DIR / "utils" / "fonts"

TT_VERSION = get_tt_version()

VERSION_STR = " ".join(["Teletype",
                        TT_VERSION["tag"],
                        TT_VERSION["hash"],
                        "Documentation"])

env = jinja2.Environment(
    autoescape=False,
    loader=jinja2.FileSystemLoader(str(TEMPLATE_DIR)),
    trim_blocks=True,
    lstrip_blocks=True,
    cache_size=0,
    auto_reload=True
)

# determines the order in which sections are displayed
OPS_SECTIONS = [
    "variables",
    "hardware",
    "patterns",
    "controlflow",
    "maths",
    "metronome",
    "delay",
    "stack",
    "queue",
    "turtle",
    "grid",
    "ansible",
    "whitewhale",
    "meadowphysics",
    "earthsea",
    "orca",
    "justfriends",
    "telex_i",
    "telex_o",
    "er301",
    "fader",
    "wslash",
    "matrixarchate"
]


def deep_merge_dict(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            deep_merge_dict(value, node)
        else:
            destination[key] = value
    return destination


def common_md():
    print(f"Pandoc version: {pypandoc.get_pandoc_version()}")
    print(f"Using docs directory: {DOCS_DIR}")
    print(f"Using ops docs directory: {OP_DOCS_DIR}")
    print()

    op_table_template = env.get_template("op_table.jinja2.md")
    op_extended_template = env.get_template("op_extended.jinja2.md")

    output = ""

    output += Path(DOCS_DIR / "intro.md") \
        .read_text().replace("VERSION", TT_VERSION["tag"][1:]) + "\n\n"
    output += Path(DOCS_DIR / "whats_new.md").read_text() + "\n\n"
    output += Path(DOCS_DIR / "quickstart.md").read_text() + "\n\n"
    output += Path(DOCS_DIR / "keys.md").read_text() + "\n\n"
    output += Path(DOCS_DIR / "ops.md").read_text() + "\n\n"

    all_ops = set(list_ops()) | set(list_mods())
    all_ops_dict = {}
    ops_with_docs = set()

    for section in OPS_SECTIONS:
        md_file = Path(OP_DOCS_DIR, section + ".md")
        toml_file = Path(OP_DOCS_DIR, section + ".toml")

        output += "\\newpage\n"

        if md_file.exists() and md_file.is_file():
            print(f"Reading {md_file}")
            output += md_file.read_text() + "\n\n"

        output += "\n"

        if toml_file.exists() and toml_file.is_file():
            print(f"Reading {toml_file}")
            extended = []
            # n.b. Python 3.6 dicts maintain insertion order
            ops = toml.loads(toml_file.read_text())
            validate_toml(ops)
            deep_merge_dict(ops, all_ops_dict)
            for key in ops:
                if key not in all_ops:
                    print(f"  - WARNING: unknown {key}")
                ops_with_docs.add(key)
                if "aliases" in ops[key]:
                    ops_with_docs |= set(ops[key]["aliases"])
                if "description" in ops[key]:
                    render = op_extended_template.render(name=key, **ops[key])
                    extended.append((key, render))
            output += op_table_template.render(ops=ops.values())
            output += "\n"
            output += "\n".join([e[1] for e in extended]) + "\n\n"

    output += Path(DOCS_DIR / "advanced.md").read_text() + "\n\n"

    output += "\\appendix\n\n"
    output += "# Alphabetical list of OPs and MODs\n\n"
    sorted_ops = [kv[1] for kv in sorted(all_ops_dict.items())]
    output += op_table_template.render(ops=sorted_ops)

    output += "\n\n# Missing documentation\n\n"
    missing_ops = all_ops - ops_with_docs
    output += ", ".join([f"`{o}`" for o in sorted(missing_ops)]) + "\n\n"

    output += Path(ROOT_DIR / "CHANGELOG.md").read_text() + "\n\n"

    return output


def main():
    if len(sys.argv) <= 1:
        sys.exit("Please supply a filename")

    input_format = "markdown"
    output = common_md()
    print()

    for arg in sys.argv[1:]:
        p = Path(arg).resolve()
        print(f"Generating: {p}")
        ext = p.suffix
        if ext == ".md":
            p.write_text(output)
        elif ext == ".html":
            output = "# " + VERSION_STR + "\n\n" + output
            pypandoc.convert_text(
                output,
                format=input_format,
                to="html5",
                outputfile=str(p),
                extra_args=["--standalone",
                            "--self-contained",
                            "--toc",
                            "--toc-depth=2",
                            "--css=" + str(TEMPLATE_DIR / "docs.css"),
                            "--template=" + str(TEMPLATE_DIR /
                                                "template.html5")])
        elif ext == ".pdf" or ext == ".tex":
            latex_preamble = env.get_template("latex_preamble.jinja2.md")
            latex = latex_preamble \
                .render(title=VERSION_STR, fonts_dir=FONTS_DIR) + "\n\n"
            latex += output
            pandoc_version = int(pypandoc.get_pandoc_version()[0])
            engine = ("--pdf-engine=xelatex"
                      if pandoc_version >= 2
                      else "--latex-engine=xelatex")
            pypandoc.convert_text(
                latex,
                format=input_format,
                to=ext[1:],
                outputfile=str(p),
                extra_args=["--standalone",
                            "--column=80",
                            "--toc",
                            "--toc-depth=2",
                            engine,
                            "--variable=papersize:A4"])


if __name__ == "__main__":
    main()
gpl-2.0
-4,714,646,216,518,015,000
29.671717
78
0.513091
false
3.526713
false
false
false
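The script above dispatches on the output file's extension, so hypothetical invocations from the repository root would be:

    python3 utils/docs.py teletype.pdf
    python3 utils/docs.py teletype.html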
mylokin/redisext
tests/test_expire.py
1
1260
from __future__ import absolute_import

import redisext.counter
import redisext.key
import redisext.serializer

from . import fixture


class ExpireCounter(redisext.counter.Counter, redisext.key.Expire):
    EXPIRE = 60
    CONNECTION = fixture.Connection
    SERIALIZER = redisext.serializer.Numeric


class ExpireCounterTestCase(fixture.TestCase):
    def setUp(self):
        self.counter = ExpireCounter('key')
        self.counter.incr()
        self.counter.expire()

    def test_expire(self):
        self.assertTrue(60 >= self.counter.ttl() > 0)

    def test_persist(self):
        self.counter.persist()
        self.assertEqual(self.counter.ttl(), -1)


class UnspecifiedExpireCounter(redisext.counter.Counter, redisext.key.Expire):
    CONNECTION = fixture.Connection
    SERIALIZER = redisext.serializer.Numeric


class UnspecifiedExpireCounterTestCase(fixture.TestCase):
    def setUp(self):
        self.counter = UnspecifiedExpireCounter('key')

    def test_expire_unspecified(self):
        self.counter.incr()
        with self.assertRaises(ValueError):
            self.counter.expire()

    def test_expire_specified(self):
        self.counter.incr()
        self.counter.expire(60)
        self.assertTrue(60 >= self.counter.ttl() > 0)
mit
-488,243,151,904,911,100
25.808511
78
0.694444
false
3.631124
true
false
false
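The tests above document the intended API; a hedged non-test sketch of the same pattern (the key name is illustrative) would be:

    counter = ExpireCounter('pageviews')
    counter.incr()
    counter.expire()            # applies the class-level EXPIRE = 60
    assert 0 < counter.ttl() <= 60
    counter.persist()           # drops the TTL; ttl() then returns -1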
Instanssi/Instanssi.org
Instanssi/screenshow/migrations/0003_auto_20210511_0020.py
1
1515
# Generated by Django 3.2.2 on 2021-05-10 21:20

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('screenshow', '0002_auto_20180711_2110'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ircmessage',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='message',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='npsong',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='playlistvideo',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='screenconfig',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='sponsor',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
mit
-6,303,775,865,883,706,000
34.232558
111
0.588779
false
4.139344
false
false
false
tabalinas/jsgrid-django
clients/views.py
1
1522
from django.http import HttpResponse
from django.core import serializers
from django.shortcuts import render

from simple_rest import Resource

from .models import Client


def index(request):
    return render(request, 'index.html')


class Clients(Resource):

    def get(self, request):
        clients = Client.objects.all() \
            .filter(name__contains=request.GET.get('name')) \
            .filter(address__contains=request.GET.get('address'))
        return HttpResponse(self.to_json(clients),
                            content_type='application/json',
                            status=200)

    def post(self, request):
        Client.objects.create(
            name=request.POST.get("name"),
            age=request.POST.get("age"),
            address=request.POST.get("address"),
            married=True if request.POST.get("married") == 'true' else False
        )
        return HttpResponse(status=201)

    def put(self, request, client_id):
        client = Client.objects.get(pk=client_id)
        client.name = request.PUT.get("name")
        client.age = request.PUT.get("age")
        client.address = request.PUT.get("address")
        client.married = True if request.PUT.get("married") == 'true' else False
        client.save()
        return HttpResponse(status=200)

    def delete(self, request, client_id):
        client = Client.objects.get(pk=client_id)
        client.delete()
        return HttpResponse(status=200)

    def to_json(self, objects):
        return serializers.serialize('json', objects)
mit
-5,578,314,184,561,130,000
32.822222
99
0.631406
false
3.994751
false
false
false
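A hedged exercise of the Clients resource above using the requests library; the host and mount point are assumptions, since the project's URLconf is not part of this record. Note that get() reads both 'name' and 'address' from the query string, so both should be supplied:

    import requests

    base = 'http://localhost:8000/clients/'          # hypothetical route
    requests.post(base, data={'name': 'Alice', 'age': 30,
                              'address': 'Main St 1', 'married': 'true'})
    resp = requests.get(base, params={'name': 'Ali', 'address': ''})
    print(resp.json())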
qbuat/rootpy
rootpy/tree/tree.py
1
33528
# Copyright 2012 the rootpy developers # distributed under the terms of the GNU General Public License from __future__ import absolute_import import sys import re import fnmatch import uuid import ROOT from .. import log; log = log[__name__] from .. import asrootpy, QROOT from ..extern.ordereddict import OrderedDict from ..context import set_directory, thread_specific_tmprootdir, do_nothing from ..base import NamedObject from ..decorators import snake_case_methods, method_file_check, method_file_cd from ..plotting.base import Plottable from ..plotting import Hist, Canvas from ..memory.keepalive import keepalive from .cut import Cut from .treebuffer import TreeBuffer from .treetypes import Scalar, Array, BaseChar from .model import TreeModel __all__ = [ 'Tree', 'Ntuple', ] class UserData(object): pass class BaseTree(NamedObject): DRAW_PATTERN = re.compile( '^(?P<branches>.+?)' '(?P<redirect>\>\>[\+]?' '(?P<name>[^\(]+)' '(?P<binning>.+)?)?$') def _post_init(self): """ The standard rootpy _post_init method that is used to initialize both new Trees and Trees retrieved from a File. """ if not hasattr(self, '_buffer'): # only set _buffer if model was not specified in the __init__ self._buffer = TreeBuffer() self.read_branches_on_demand = False self._branch_cache = {} self._current_entry = 0 self._always_read = [] self.userdata = UserData() self._inited = True def always_read(self, branches): """ Always read these branches, even when in caching mode. Maybe you have caching enabled and there are branches you want to be updated for each entry even though you never access them directly. This is useful if you are iterating over an input tree and writing to an output tree sharing the same TreeBuffer and you want a direct copy of certain branches. If you have caching enabled but these branches are not specified here and never accessed then they will never be read from disk, so the values of branches in memory will remain unchanged. 
Parameters ---------- branches : list, tuple these branches will always be read from disk for every GetEntry """ if type(branches) not in (list, tuple): raise TypeError("branches must be a list or tuple") self._always_read = branches @classmethod def branch_type(cls, branch): """ Return the string representation for the type of a branch """ typename = branch.GetClassName() if not typename: leaf = branch.GetListOfLeaves()[0] typename = leaf.GetTypeName() # check if leaf has multiple elements length = leaf.GetLen() if length > 1: typename = '{0}[{1:d}]'.format(typename, length) return typename @classmethod def branch_is_supported(cls, branch): """ Currently the branch must only have one leaf but the leaf may have one or multiple elements """ return branch.GetNleaves() == 1 def create_buffer(self, ignore_unsupported=False): """ Create this tree's TreeBuffer """ bufferdict = OrderedDict() for branch in self.iterbranches(): # only include activated branches if not self.GetBranchStatus(branch.GetName()): continue if not BaseTree.branch_is_supported(branch): log.warning( "ignore unsupported branch `{0}`".format(branch.GetName())) continue bufferdict[branch.GetName()] = Tree.branch_type(branch) self.set_buffer(TreeBuffer( bufferdict, ignore_unsupported=ignore_unsupported)) def create_branches(self, branches): """ Create branches from a TreeBuffer or dict mapping names to type names Parameters ---------- branches : TreeBuffer or dict """ if not isinstance(branches, TreeBuffer): branches = TreeBuffer(branches) self.set_buffer(branches, create_branches=True) def update_buffer(self, treebuffer, transfer_objects=False): """ Merge items from a TreeBuffer into this Tree's TreeBuffer Parameters ---------- buffer : rootpy.tree.buffer.TreeBuffer The TreeBuffer to merge into this Tree's buffer transfer_objects : bool, optional (default=False) If True then all objects and collections on the input buffer will be transferred to this Tree's buffer. """ self._buffer.update(treebuffer) if transfer_objects: self._buffer.set_objects(treebuffer) def set_buffer(self, treebuffer, branches=None, ignore_branches=None, create_branches=False, visible=True, ignore_missing=False, ignore_duplicates=False, transfer_objects=False): """ Set the Tree buffer Parameters ---------- treebuffer : rootpy.tree.buffer.TreeBuffer a TreeBuffer branches : list, optional (default=None) only include these branches from the TreeBuffer ignore_branches : list, optional (default=None) ignore these branches from the TreeBuffer create_branches : bool, optional (default=False) If True then the branches in the TreeBuffer should be created. Use this option if initializing the Tree. A ValueError is raised if an attempt is made to create a branch with the same name as one that already exists in the Tree. If False the addresses of existing branches will be set to point at the addresses in this buffer. visible : bool, optional (default=True) If True then the branches will be added to the buffer and will be accessible as attributes of the Tree. ignore_missing : bool, optional (default=False) If True then any branches in this buffer that do not exist in the Tree will be ignored, otherwise a ValueError will be raised. This option is only valid when ``create_branches`` is False. ignore_duplicates : bool, optional (default=False) If False then raise a ValueError if the tree already has a branch with the same name as an entry in the buffer. If True then skip branches that already exist. This option is only valid when ``create_branches`` is True. 
transfer_objects : bool, optional (default=False) If True, all tree objects and collections will be transferred from the buffer into this Tree's buffer. """ # determine branches to keep while preserving branch order if branches is None: branches = treebuffer.keys() if ignore_branches is not None: branches = [b for b in branches if b not in ignore_branches] if create_branches: for name in branches: value = treebuffer[name] if self.has_branch(name): if ignore_duplicates: log.warning( "Skipping entry in buffer with the same name " "as an existing branch: `{0}`".format(name)) continue raise ValueError( "Attempting to create two branches " "with the same name: `{0}`".format(name)) if isinstance(value, Scalar): self.Branch(name, value, '{0}/{1}'.format( name, value.type)) elif isinstance(value, Array): self.Branch(name, value, '{0}[{2:d}]/{1}'.format( name, value.type, len(value))) else: self.Branch(name, value) else: for name in branches: value = treebuffer[name] if self.has_branch(name): self.SetBranchAddress(name, value) elif not ignore_missing: raise ValueError( "Attempting to set address for " "branch `{0}` which does not exist".format(name)) else: log.warning( "Skipping entry in buffer for which no " "corresponding branch in the " "tree exists: `{0}`".format(name)) if visible: newbuffer = TreeBuffer() for branch in branches: if branch in treebuffer: newbuffer[branch] = treebuffer[branch] newbuffer.set_objects(treebuffer) self.update_buffer(newbuffer, transfer_objects=transfer_objects) def activate(self, branches, exclusive=False): """ Activate branches Parameters ---------- branches : str or list branch or list of branches to activate exclusive : bool, optional (default=False) if True deactivate the remaining branches """ if exclusive: self.SetBranchStatus('*', 0) if isinstance(branches, basestring): branches = [branches] for branch in branches: if '*' in branch: matched_branches = self.glob(branch) for b in matched_branches: self.SetBranchStatus(b, 1) elif self.has_branch(branch): self.SetBranchStatus(branch, 1) def deactivate(self, branches, exclusive=False): """ Deactivate branches Parameters ---------- branches : str or list branch or list of branches to deactivate exclusive : bool, optional (default=False) if True activate the remaining branches """ if exclusive: self.SetBranchStatus('*', 1) if isinstance(branches, basestring): branches = [branches] for branch in branches: if '*' in branch: matched_branches = self.glob(branch) for b in matched_branches: self.SetBranchStatus(b, 0) elif self.has_branch(branch): self.SetBranchStatus(branch, 0) @property def branches(self): """ List of the branches """ return [branch for branch in self.GetListOfBranches()] def iterbranches(self): """ Iterator over the branches """ for branch in self.GetListOfBranches(): yield branch @property def branchnames(self): """ List of branch names """ return [branch.GetName() for branch in self.GetListOfBranches()] def iterbranchnames(self): """ Iterator over the branch names """ for branch in self.iterbranches(): yield branch.GetName() def glob(self, patterns, exclude=None): """ Return a list of branch names that match ``pattern``. Exclude all matched branch names which also match a pattern in ``exclude``. ``exclude`` may be a string or list of strings. Parameters ---------- patterns: str or list branches are matched against this pattern or list of patterns where globbing is performed with '*'. 
exclude : str or list, optional (default=None) branches matching this pattern or list of patterns are excluded even if they match a pattern in ``patterns``. Returns ------- matches : list List of matching branch names """ if isinstance(patterns, basestring): patterns = [patterns] if isinstance(exclude, basestring): exclude = [exclude] matches = [] for pattern in patterns: matches += fnmatch.filter(self.iterbranchnames(), pattern) if exclude is not None: for exclude_pattern in exclude: matches = [match for match in matches if not fnmatch.fnmatch(match, exclude_pattern)] return matches def __getitem__(self, item): """ Get an entry in the tree or a branch Parameters ---------- item : str or int if item is a str then return the value of the branch with that name if item is an int then call GetEntry """ if isinstance(item, basestring): return self._buffer[item] self.GetEntry(item) return self def GetEntry(self, entry): """ Get an entry. Tree collections are reset (see ``rootpy.tree.treeobject``) Parameters ---------- entry : int entry index Returns ------- ROOT.TTree.GetEntry : int The number of bytes read """ if not (0 <= entry < self.GetEntries()): raise IndexError("entry index out of range: {0:d}".format(entry)) self._buffer.reset_collections() return super(BaseTree, self).GetEntry(entry) def __iter__(self): """ Iterator over the entries in the Tree. """ if not self._buffer: self.create_buffer() if self.read_branches_on_demand: self._buffer.set_tree(self) # drop all branches from the cache self.DropBranchFromCache('*') for attr in self._always_read: try: branch = self._branch_cache[attr] except KeyError: # one-time hit branch = self.GetBranch(attr) if not branch: raise AttributeError( "branch `{0}` specified in " "`always_read` does not exist".format(attr)) self._branch_cache[attr] = branch # add branches that we should always read to cache self.AddBranchToCache(branch) for i in xrange(self.GetEntries()): # Only increment current entry. # getattr on a branch will then GetEntry on only that branch # see ``TreeBuffer.get_with_read_if_cached``. self._current_entry = i self.LoadTree(i) for attr in self._always_read: # Always read branched in ``self._always_read`` since # these branches may never be getattr'd but the TreeBuffer # should always be updated to reflect their current values. # This is useful if you are iterating over an input tree # and writing to an output tree that shares the same # TreeBuffer but you don't getattr on all branches of the # input tree in the logic that determines which entries # to keep. self._branch_cache[attr].GetEntry(i) self._buffer._entry.set(i) yield self._buffer self._buffer.next_entry() self._buffer.reset_collections() else: for i in xrange(self.GetEntries()): # Read all activated branches (can be slow!). 
super(BaseTree, self).GetEntry(i) self._buffer._entry.set(i) yield self._buffer self._buffer.reset_collections() def __setattr__(self, attr, value): if '_inited' not in self.__dict__ or attr in self.__dict__: return super(BaseTree, self).__setattr__(attr, value) try: return self._buffer.__setattr__(attr, value) except AttributeError: raise AttributeError( "`{0}` instance has no attribute `{1}`".format( self.__class__.__name__, attr)) def __getattr__(self, attr): if '_inited' not in self.__dict__: raise AttributeError( "`{0}` instance has no attribute `{1}`".format( self.__class__.__name__, attr)) try: return getattr(self._buffer, attr) except AttributeError: raise AttributeError( "`{0}` instance has no attribute `{1}`".format( self.__class__.__name__, attr)) def __setitem__(self, item, value): self._buffer[item] = value def __len__(self): """ Same as GetEntries """ return self.GetEntries() def __contains__(self, branch): """ Same as has_branch """ return self.has_branch(branch) def has_branch(self, branch): """ Determine if this Tree contains a branch with the name ``branch`` Parameters ---------- branch : str branch name Returns ------- has_branch : bool True if this Tree contains a branch with the name ``branch`` or False otherwise. """ return not not self.GetBranch(branch) def csv(self, sep=',', branches=None, include_labels=True, limit=None, stream=None): """ Print csv representation of tree only including branches of basic types (no objects, vectors, etc..) Parameters ---------- sep : str, optional (default=',') The delimiter used to separate columns branches : list, optional (default=None) Only include these branches in the CSV output. If None, then all basic types will be included. include_labels : bool, optional (default=True) Include a first row of branch names labelling each column. limit : int, optional (default=None) Only include up to a maximum of ``limit`` rows in the CSV. stream : file, (default=None) Stream to write the CSV output on. By default the CSV will be written to ``sys.stdout``. """ if stream is None: stream = sys.stdout if not self._buffer: self.create_buffer(ignore_unsupported=True) if branches is None: branchdict = OrderedDict([ (name, self._buffer[name]) for name in self.iterbranchnames() if isinstance(self._buffer[name], (Scalar, Array))]) else: branchdict = OrderedDict() for name in branches: if not isinstance(self._buffer[name], (Scalar, Array)): raise TypeError( "selected branch `{0}` " "is not a scalar or array type".format(name)) branchdict[name] = self._buffer[name] if not branchdict: raise RuntimeError( "no branches selected or no " "branches of scalar or array types exist") if include_labels: # expand array types to f[0],f[1],f[2],... print >> stream, sep.join( name if isinstance(value, (Scalar, BaseChar)) else sep.join('{0}[{1:d}]'.format(name, idx) for idx in xrange(len(value))) for name, value in branchdict.items()) # even though 'entry' is not used, enumerate or simply iterating over # self is required to update the buffer with the new branch values at # each tree entry. 
for i, entry in enumerate(self): print >> stream, sep.join( str(v.value) if isinstance(v, (Scalar, BaseChar)) else sep.join(map(str, v)) for v in branchdict.values()) if limit is not None and i + 1 == limit: break def Scale(self, value): """ Scale the weight of the Tree by ``value`` Parameters ---------- value : int, float Scale the Tree weight by this value """ self.SetWeight(self.GetWeight() * value) def GetEntries(self, cut=None, weighted_cut=None, weighted=False): """ Get the number of (weighted) entries in the Tree Parameters ---------- cut : str or rootpy.tree.cut.Cut, optional (default=None) Only entries passing this cut will be included in the count weighted_cut : str or rootpy.tree.cut.Cut, optional (default=None) Apply a weighted selection and determine the weighted number of entries. weighted : bool, optional (default=False) Multiply the number of (weighted) entries by the Tree weight. """ if weighted_cut: hist = Hist(1, -1, 2) branch = self.GetListOfBranches()[0].GetName() weight = self.GetWeight() self.SetWeight(1) self.Draw('{0}=={1}>>{2}'.format(branch, branch, hist.GetName()), weighted_cut * cut) self.SetWeight(weight) entries = hist.Integral() elif cut: entries = super(BaseTree, self).GetEntries(str(cut)) else: entries = super(BaseTree, self).GetEntries() if weighted: entries *= self.GetWeight() return entries def GetMaximum(self, expression, cut=None): """ TODO: we need a better way of determining the maximum value of an expression. """ if cut: self.Draw(expression, cut, 'goff') else: self.Draw(expression, '', 'goff') vals = self.GetV1() n = self.GetSelectedRows() vals = [vals[i] for i in xrange(min(n, 10000))] return max(vals) def GetMinimum(self, expression, cut=None): """ TODO: we need a better way of determining the minimum value of an expression. """ if cut: self.Draw(expression, cut, "goff") else: self.Draw(expression, "", "goff") vals = self.GetV1() n = self.GetSelectedRows() vals = [vals[i] for i in xrange(min(n, 10000))] return min(vals) def CopyTree(self, selection, *args, **kwargs): """ Copy the tree while supporting a rootpy.tree.cut.Cut selection in addition to a simple string. """ return super(BaseTree, self).CopyTree(str(selection), *args, **kwargs) def reset_branch_values(self): """ Reset all values in the buffer to their default values """ self._buffer.reset() @method_file_cd def Write(self, *args, **kwargs): super(BaseTree, self).Write(*args, **kwargs) def Draw(self, expression, selection="", options="", hist=None, create_hist=False, **kwargs): """ Draw a TTree with a selection as usual, but return the created histogram. Parameters ---------- expression : str The expression to draw. Multidimensional expressions are separated by ":". rootpy reverses the expressions along each dimension so the order matches the order of the elements identifying a location in the resulting histogram. By default ROOT takes the expression "Y:X" to mean Y versus X but we argue that this is counterintuitive and that the order should be "X:Y" so that the expression along the first dimension identifies the location along the first axis, etc. selection : str or rootpy.tree.Cut, optional (default="") The cut expression. Only entries satisfying this selection are included in the filled histogram. options : str, optional (default="") Draw options passed to ROOT.TTree.Draw hist : ROOT.TH1, optional (default=None) The histogram to be filled. If not specified, rootpy will attempt to find what ROOT created and return that. 
create_hist : bool (default=False) If True and `hist`` is not specified and a histogram name is not specified in the draw expression, then override ROOT's default behaviour and fill a new histogram. ROOT will otherwise add points to a TGraph or TPolyMarker3D if not drawing in more than two dimensions. kwargs : dict, optional Remaining keword arguments are used to set the style attributes of the histogram. Returns ------- If ``hist`` is specified, None is returned. If ``hist`` is left unspecified, an attempt is made to retrieve the generated histogram which is then returned. """ # Check that we have a valid draw expression and pick out components exprmatch = re.match(BaseTree.DRAW_PATTERN, expression) if not exprmatch: raise ValueError( "not a valid draw expression: `{0}`".format(expression)) # Reverse variable order to match order in hist constructor exprdict = exprmatch.groupdict() fields = exprdict['branches'].split(':') num_dimensions = len(fields) expression = ':'.join(fields[:3][::-1] + fields[3:]) if exprdict['redirect'] is not None: expression += exprdict['redirect'] if not isinstance(selection, Cut): # Let Cut handle any extra processing (i.e. ternary operators) selection = Cut(selection) graphics = 'goff' not in options if hist is not None: if not isinstance(hist, ROOT.TH1): raise TypeError("Cannot draw into a `{0}`".format(type(hist))) # Check that the dimensionality of the expression and object match if num_dimensions != hist.GetDimension(): raise TypeError( "The dimensionality of the expression `{0}` ({1:d}) " "does not match the dimensionality of a `{2}`".format( expression, num_dimensions, hist.__class__.__name__)) # Handle graphics ourselves if graphics: if options: options += ' ' options += 'goff' if exprdict['name'] is None: # Draw into histogram supplied by user expression = '{0}>>+{1}'.format(expression, hist.GetName()) else: if exprdict['name'] != hist.GetName(): # If the user specified a name to draw into then check that # this is consistent with the specified object. raise ValueError( "The name specified in the draw " "expression `{0}` does not match the " "name of the specified object `{1}`".format( exprdict['name'], hist.GetName())) # Check that binning is not specified if exprdict['binning'] is not None: raise ValueError( "When specifying the object to draw into, do not " "specify a binning in the draw expression") else: if create_hist and exprdict['name'] is None: if num_dimensions > 4: raise ValueError( "Cannot create a histogram for expressions with " "more than 4 dimensions") newname = uuid.uuid4().hex expression += '>>{0}'.format(newname) exprdict['name'] = newname pad = ROOT.gPad.func() own_pad = False if graphics and not pad: # Create a new canvas if one doesn't exist yet own_pad = True pad = Canvas() # Note: TTree.Draw() pollutes gDirectory, make a temporary one with thread_specific_tmprootdir(): if hist is not None: # If a custom histogram is specified (i.e, it's not being # created root side), then temporarily put it into the # temporary thread-specific directory. 
context = set_directory(hist) else: context = do_nothing() with context: super(BaseTree, self).Draw(expression, selection, options) if hist is None: # Retrieve histogram made by TTree.Draw if num_dimensions == 1 or exprdict['name'] is not None: # a TH1 hist = asrootpy(self.GetHistogram(), warn=False) elif num_dimensions == 2: # a TGraph hist = asrootpy(pad.GetPrimitive('Graph'), warn=False) else: # ROOT: For a three and four dimensional Draw the TPolyMarker3D # is unnamed, and cannot be retrieved. Why, ROOT? log.warning( "Cannot retrieve the TPolyMarker3D for " "3D and 4D expressions") if graphics and own_pad: # Since we cannot access the TPolyMarker3D we use self to # keep the canvas alive keepalive(self, pad) if hist: # is not None if isinstance(hist, Plottable): hist.decorate(**kwargs) # ROOT, don't try to delete this object! (See issue #277) hist.SetBit(ROOT.kCanDelete, False) if graphics: if own_pad: # The usual bug is that the histogram is garbage # collected and we want the canvas to keep the # histogram alive, but here the canvas has been # created locally and we are returning the histogram, # so we want the histogram to keep the canvas alive. keepalive(hist, pad) # Redraw the histogram since we may have specified style # attributes in **kwargs hist.Draw() if graphics: pad.Modified() pad.Update() return hist def to_array(self, *args, **kwargs): """ Convert this tree into a NumPy structured array """ from root_numpy import tree2array return tree2array(self, *args, **kwargs) @snake_case_methods class Tree(BaseTree, QROOT.TTree): """ Inherits from TTree so all regular TTree methods are available but certain methods (i.e. Draw) have been overridden to improve usage in Python. Parameters ---------- name : str, optional (default=None) The Tree name (a UUID if None) title : str, optional (default=None) The Tree title (empty string if None) model : TreeModel, optional (default=None) If specified then this TreeModel will be used to create the branches """ _ROOT = QROOT.TTree @method_file_check def __init__(self, name=None, title=None, model=None): super(Tree, self).__init__(name=name, title=title) self._buffer = TreeBuffer() if model is not None: if not issubclass(model, TreeModel): raise TypeError("the model must subclass TreeModel") self.set_buffer(model(), create_branches=True) self._post_init() def Fill(self, reset=False): """ Fill the Tree with the current values in the buffer Parameters ---------- reset : bool, optional (default=False) Reset the values in the buffer to their default values after filling. """ super(Tree, self).Fill() # reset all branches if reset: self._buffer.reset() @snake_case_methods class Ntuple(BaseTree, QROOT.TNtuple): """ Inherits from TNtuple so all regular TNtuple/TTree methods are available but certain methods (i.e. Draw) have been overridden to improve usage in Python. Parameters ---------- varlist : list of str A list of the field names name : str, optional (default=None) The Ntuple name (a UUID if None) title : str, optional (default=None) The Ntuple title (empty string if None) bufsize : int, optional (default=32000) Basket buffer size """ _ROOT = QROOT.TNtuple @method_file_check def __init__(self, varlist, name=None, title=None, bufsize=32000): super(Ntuple, self).__init__(':'.join(varlist), bufsize, name=name, title=title) self._post_init()
gpl-3.0
-4,759,378,811,842,880,000
36.006623
79
0.547691
false
4.87184
false
false
false
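A short usage sketch for the Tree class above, based on its documented API; the file and branch names are illustrative, and root_open is assumed to come from rootpy.io as in the library's own examples:

    from rootpy.io import root_open
    from rootpy.tree import Tree

    with root_open('events.root', 'recreate'):
        tree = Tree('events')
        tree.create_branches({'x': 'F', 'n': 'I'})
        for i in range(100):
            tree.x = i * 0.5
            tree.n = i
            tree.fill()        # snake_case alias provided by @snake_case_methods
        tree.write()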
Jason-Zhao-Jie/MagicTower
Assets/Firebase/Editor/generate_xml_from_google_services_json.py
1
13865
#!/usr/bin/python

# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Stand-alone implementation of the Gradle Firebase plugin.

Converts the services json file to xml:
https://googleplex-android.googlesource.com/platform/tools/base/+/studio-master-dev/build-system/google-services/src/main/groovy/com/google/gms/googleservices
"""

__author__ = 'Wouter van Oortmerssen'

import argparse
import json
import os
import sys
from xml.etree import ElementTree

# Input filename if it isn't set.
DEFAULT_INPUT_FILENAME = 'app/google-services.json'

# Output filename if it isn't set.
DEFAULT_OUTPUT_FILENAME = 'res/values/googleservices.xml'

# Input filename for .plist files, if it isn't set.
DEFAULT_PLIST_INPUT_FILENAME = 'GoogleService-Info.plist'

# Output filename for .json files, if it isn't set.
DEFAULT_JSON_OUTPUT_FILENAME = 'google-services-desktop.json'

# Indicates a web client in the oauth_client list.
OAUTH_CLIENT_TYPE_WEB = 3


def read_xml_value(xml_node):
  """Utility method for reading values from the plist XML.

  Args:
    xml_node: An ElementTree node, that contains a value.

  Returns:
    The value of the node, or None, if it could not be read.
  """
  if xml_node.tag == 'string':
    return xml_node.text
  elif xml_node.tag == 'integer':
    return int(xml_node.text)
  elif xml_node.tag == 'real':
    return float(xml_node.text)
  elif xml_node.tag == 'false':
    return 0
  elif xml_node.tag == 'true':
    return 1
  else:
    # other types of input are ignored. (data, dates, arrays, etc.)
    return None


def construct_plist_dictionary(xml_root):
  """Constructs a dictionary of values based on the contents of a plist file.

  Args:
    xml_root: An ElementTree node, that represents the root of the xml file
      that is to be parsed. (Which should be a dictionary containing key-value
      pairs of the properties that need to be extracted.)

  Returns:
    A dictionary, containing key-value pairs for all (supported) entries in
    the node.
  """
  xml_dict = xml_root.find('dict')

  if xml_dict is None:
    return None

  plist_dict = {}
  i = 0
  while i < len(xml_dict):
    if xml_dict[i].tag == 'key':
      key = xml_dict[i].text
      i += 1
      if i < len(xml_dict):
        value = read_xml_value(xml_dict[i])
        if value is not None:
          plist_dict[key] = value
    i += 1

  return plist_dict


def construct_google_services_json(xml_dict):
  """Constructs a google services json file from a dictionary.

  Args:
    xml_dict: A dictionary of all the key/value pairs that are needed for the
      output json file.

  Returns:
    A string representing the output json file.
  """
  try:
    json_struct = {
        'project_info': {
            'project_number': xml_dict['GCM_SENDER_ID'],
            'firebase_url': xml_dict['DATABASE_URL'],
            'project_id': xml_dict['PROJECT_ID'],
            'storage_bucket': xml_dict['STORAGE_BUCKET']
        },
        'client': [{
            'client_info': {
                'mobilesdk_app_id': xml_dict['GOOGLE_APP_ID'],
                'android_client_info': {
                    'package_name': xml_dict['BUNDLE_ID']
                }
            },
            'oauth_client': [{
                'client_id': xml_dict['CLIENT_ID'],
            }],
            'api_key': [{
                'current_key': xml_dict['API_KEY']
            }],
            'services': {
                'analytics_service': {
                    'status': xml_dict['IS_ANALYTICS_ENABLED']
                },
                'appinvite_service': {
                    'status': xml_dict['IS_APPINVITE_ENABLED']
                }
            }
        },],
        'configuration_version': '1'
    }
    return json.dumps(json_struct, indent=2)
  except KeyError as e:
    sys.stderr.write('Could not find key in plist file: [%s]\n' % (e.args[0]))
    return None


def convert_plist_to_json(plist_string, input_filename):
  """Converts an input plist string into a .json file and saves it.

  Args:
    plist_string: The contents of the loaded plist file.
    input_filename: The file name that the plist data was read from.

  Returns:
    the converted string, or None if there were errors.
  """
  try:
    root = ElementTree.fromstring(plist_string)
  except ElementTree.ParseError:
    sys.stderr.write('Error parsing file %s.\n'
                     'It does not appear to be valid XML.\n' %
                     (input_filename))
    return None

  plist_dict = construct_plist_dictionary(root)
  if plist_dict is None:
    sys.stderr.write('In file %s, could not locate a top-level \'dict\' '
                     'element.\n'
                     'File format should be plist XML, with a top-level '
                     'dictionary containing project settings as key-value '
                     'pairs.\n' % (input_filename))
    return None

  json_string = construct_google_services_json(plist_dict)
  return json_string


def gen_string(parent, name, text):
  """Generate one <string /> element and put into the list of keeps.

  Args:
    parent: The object that will hold the string.
    name: The name to store the string under.
    text: The text of the string.
  """
  if text:
    prev = parent.get('tools:keep', '')
    if prev:
      prev += ','
    parent.set('tools:keep', prev + '@string/' + name)
    child = ElementTree.SubElement(parent, 'string', {
        'name': name,
        'translatable': 'false'
    })
    child.text = text


def indent(elem, level=0):
  """Recurse through XML tree and add indentation.

  Args:
    elem: The element to recurse over
    level: The current indentation level.
  """
  i = '\n' + level*'  '
  if elem is not None:
    if not elem.text or not elem.text.strip():
      elem.text = i + '  '
    if not elem.tail or not elem.tail.strip():
      elem.tail = i
    for elem in elem:
      indent(elem, level+1)
    if not elem.tail or not elem.tail.strip():
      elem.tail = i
  else:
    if level and (not elem.tail or not elem.tail.strip()):
      elem.tail = i


def main():
  parser = argparse.ArgumentParser(
      description=((
          'Converts a Firebase %s into %s similar to the Gradle plugin, or '
          'converts a Firebase %s into a %s suitable for use on desktop apps.'
          % (DEFAULT_INPUT_FILENAME, DEFAULT_OUTPUT_FILENAME,
             DEFAULT_PLIST_INPUT_FILENAME, DEFAULT_JSON_OUTPUT_FILENAME))))
  parser.add_argument('-i', help='Override input file name',
                      metavar='FILE', required=False)
  parser.add_argument('-o', help='Override destination file name',
                      metavar='FILE', required=False)
  parser.add_argument('-p', help=('Package ID to select within the set of '
                                  'packages in the input file.  If this is '
                                  'not specified, the first package in the '
                                  'input file is selected.'))
  parser.add_argument('-l', help=('List all package IDs referenced by the '
                                  'input file.  If this is specified, '
                                  'the output file is not created.'),
                      action='store_true', default=False, required=False)
  parser.add_argument('-f', help=('Print project fields from the input file '
                                  'in the form \'name=value\\n\' for each '
                                  'field.  If this is specified, the output '
                                  'is not created.'),
                      action='store_true', default=False, required=False)
  parser.add_argument(
      '--plist',
      help=(
          'Specifies a plist file to convert to a JSON configuration file. '
          'If this is enabled, the script will expect a .plist file as '
          'input, which it will convert into %s file.  The output file is '
          '*not* suitable for use with Firebase on Android.' %
          (DEFAULT_JSON_OUTPUT_FILENAME)),
      action='store_true', default=False, required=False)
  args = parser.parse_args()

  if args.plist:
    input_filename = DEFAULT_PLIST_INPUT_FILENAME
    output_filename = DEFAULT_JSON_OUTPUT_FILENAME
  else:
    input_filename = DEFAULT_INPUT_FILENAME
    output_filename = DEFAULT_OUTPUT_FILENAME

  if args.i:
    input_filename = args.i
  if args.o:
    output_filename = args.o

  with open(input_filename, 'r') as ifile:
    file_string = ifile.read()

  json_string = None
  if args.plist:
    json_string = convert_plist_to_json(file_string, input_filename)
    if json_string is None:
      return 1
    jsobj = json.loads(json_string)
  else:
    jsobj = json.loads(file_string)

  root = ElementTree.Element('resources')
  root.set('xmlns:tools', 'http://schemas.android.com/tools')

  project_info = jsobj.get('project_info')
  if project_info:
    gen_string(root, 'firebase_database_url', project_info.get('firebase_url'))
    gen_string(root, 'gcm_defaultSenderId', project_info.get('project_number'))
    gen_string(root, 'google_storage_bucket',
               project_info.get('storage_bucket'))
    gen_string(root, 'project_id', project_info.get('project_id'))

  if args.f:
    if not project_info:
      sys.stderr.write('No project info found in %s.' % input_filename)
      return 1
    for field, value in project_info.iteritems():
      sys.stdout.write('%s=%s\n' % (field, value))
    return 0

  packages = set()
  client_list = jsobj.get('client')
  if client_list:
    # Search for the user specified package in the file.
    selected_package_name = ''
    selected_client = client_list[0]
    find_package_name = args.p
    for client in client_list:
      package_name = client.get('client_info', {}).get(
          'android_client_info', {}).get('package_name', '')
      if not package_name:
        package_name = client.get('oauth_client', {}).get(
            'android_info', {}).get('package_name', '')
      if package_name:
        if not selected_package_name:
          selected_package_name = package_name
          selected_client = client
        if package_name == find_package_name:
          selected_package_name = package_name
          selected_client = client
        packages.add(package_name)

    if args.p and selected_package_name != find_package_name:
      sys.stderr.write('No packages found in %s which match the package '
                       'name %s\n'
                       '\n'
                       'Found the following:\n'
                       '%s\n' % (input_filename, find_package_name,
                                 '\n'.join(packages)))
      return 1

    client_api_key = selected_client.get('api_key')
    if client_api_key:
      client_api_key0 = client_api_key[0]
      gen_string(root, 'google_api_key', client_api_key0.get('current_key'))
      gen_string(root, 'google_crash_reporting_api_key',
                 client_api_key0.get('current_key'))

    client_info = selected_client.get('client_info')
    if client_info:
      gen_string(root, 'google_app_id', client_info.get('mobilesdk_app_id'))

    oauth_client_list = selected_client.get('oauth_client')
    if oauth_client_list:
      for oauth_client in oauth_client_list:
        client_type = oauth_client.get('client_type')
        client_id = oauth_client.get('client_id')
        if client_type and client_type == OAUTH_CLIENT_TYPE_WEB and client_id:
          gen_string(root, 'default_web_client_id', client_id)
          # Only include the first matching OAuth web client ID.
          break

    services = selected_client.get('services')
    if services:
      ads_service = services.get('ads_service')
      if ads_service:
        gen_string(root, 'test_banner_ad_unit_id',
                   ads_service.get('test_banner_ad_unit_id'))
        gen_string(root, 'test_interstitial_ad_unit_id',
                   ads_service.get('test_interstitial_ad_unit_id'))
      analytics_service = services.get('analytics_service')
      if analytics_service:
        analytics_property = analytics_service.get('analytics_property')
        if analytics_property:
          gen_string(root, 'ga_trackingId',
                     analytics_property.get('tracking_id'))
      # enable this once we have an example of this service being present
      # in the json data:
      maps_service_enabled = False
      if maps_service_enabled:
        maps_service = services.get('maps_service')
        if maps_service:
          maps_api_key = maps_service.get('api_key')
          if maps_api_key:
            for k in range(0, len(maps_api_key)):
              # generates potentially multiple of these keys, which is
              # the same behavior as the java plugin.
              gen_string(root, 'google_maps_key',
                         maps_api_key[k].get('maps_api_key'))

  tree = ElementTree.ElementTree(root)

  indent(root)

  if args.l:
    for package in packages:
      if package:
        sys.stdout.write(package + '\n')
  else:
    path = os.path.dirname(output_filename)
    if path and not os.path.exists(path):
      os.makedirs(path)
    if not args.plist:
      tree.write(output_filename, 'utf-8', True)
    else:
      with open(output_filename, 'w') as ofile:
        ofile.write(json_string)

  return 0


if __name__ == '__main__':
  sys.exit(main())
bsd-3-clause
2,530,400,691,713,250,300
33.150246
158
0.60714
false
3.84072
false
false
false
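The converter above is driven entirely by command-line flags; hypothetical invocations for its two modes would be:

    # Android: google-services.json -> string resources XML
    python generate_xml_from_google_services_json.py -i app/google-services.json -o res/values/googleservices.xml

    # Desktop: GoogleService-Info.plist -> desktop JSON config
    python generate_xml_from_google_services_json.py --plist -i GoogleService-Info.plist -o google-services-desktop.json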
sealcode/gpandoc
ui/recipe_ui.py
1
3014
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'recipe.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!

from PyQt5 import QtCore, QtGui, QtWidgets


class Ui_Dialog(object):
    def setupUi(self, Dialog):
        Dialog.setObjectName("Dialog")
        Dialog.resize(355, 478)
        self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout.setObjectName("verticalLayout")
        self.vertical_layout_1 = QtWidgets.QVBoxLayout()
        self.vertical_layout_1.setObjectName("vertical_layout_1")
        self.label_1 = QtWidgets.QLabel(Dialog)
        self.label_1.setObjectName("label_1")
        self.vertical_layout_1.addWidget(self.label_1)
        self.combo_box_1 = QtWidgets.QComboBox(Dialog)
        self.combo_box_1.setObjectName("combo_box_1")
        self.vertical_layout_1.addWidget(self.combo_box_1)
        self.verticalLayout.addLayout(self.vertical_layout_1)
        self.vertical_layout_2 = QtWidgets.QVBoxLayout()
        self.vertical_layout_2.setObjectName("vertical_layout_2")
        self.scroll_1 = QtWidgets.QScrollArea(Dialog)
        self.scroll_1.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        self.scroll_1.setWidgetResizable(True)
        self.scroll_1.setObjectName("scroll_1")
        self.content_1 = QtWidgets.QWidget()
        self.content_1.setGeometry(QtCore.QRect(0, 0, 300, 378))
        self.content_1.setMaximumSize(QtCore.QSize(300, 600))
        self.content_1.setObjectName("content_1")
        self.label_2 = QtWidgets.QLabel(self.content_1)
        self.label_2.setGeometry(QtCore.QRect(8, 3, 301, 421))
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred,
                                           QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.label_2.sizePolicy().hasHeightForWidth())
        self.label_2.setSizePolicy(sizePolicy)
        self.label_2.setAlignment(QtCore.Qt.AlignCenter)
        self.label_2.setObjectName("label_2")
        self.scroll_1.setWidget(self.content_1)
        self.vertical_layout_2.addWidget(self.scroll_1)
        self.button_box_1 = QtWidgets.QDialogButtonBox(Dialog)
        self.button_box_1.setOrientation(QtCore.Qt.Horizontal)
        self.button_box_1.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.button_box_1.setObjectName("button_box_1")
        self.vertical_layout_2.addWidget(self.button_box_1)
        self.verticalLayout.addLayout(self.vertical_layout_2)

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Wybór przepisu"))
        self.label_1.setText(_translate("Dialog", "Lista przepisów:"))
        self.label_2.setText(_translate("Dialog", "TextLabel"))
lgpl-3.0
-7,895,148,468,539,092,000
47.580645
109
0.701527
false
3.677656
false
false
false
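Typical usage of a pyuic5-generated class such as Ui_Dialog above, sketched under assumptions: the import path follows this record's ui/recipe_ui.py location, and a working PyQt5 with a display is required:

import sys
from PyQt5 import QtWidgets
from ui.recipe_ui import Ui_Dialog

app = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(dialog)  # builds combo_box_1, scroll_1 and button_box_1 onto the dialog
ui.combo_box_1.addItems(['recipe-1', 'recipe-2'])  # sample entries, not real data
dialog.show()
sys.exit(app.exec_())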
SqueezeStudioAnimation/omtk
python/omtk/libs/libPymel.py
1
14241
import logging

import pymel.core as pymel
from maya import OpenMaya

#
# A PyNodeChain is a special pymel-related object that acts exactly like a standard array.
# However, it allows us to have more bells and whistles.
#


def is_valid_PyNode(val):
    return (val and hasattr(val, 'exists') and val.exists()) if val else None


def distance_between_nodes(x, y):
    """
    Return the distance between two pynodes.
    """
    ax, ay, az = x.getTranslation(space="world")
    bx, by, bz = y.getTranslation(space="world")
    return ((ax - bx) ** 2 + (ay - by) ** 2 + (az - bz) ** 2) ** 0.5


def distance_between_vectors(a, b):
    """
    http://darkvertex.com/wp/2010/06/05/python-distance-between-2-vectors/
    """
    return ((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2) ** 0.5


def is_child_of(node, potential_parent):
    while node:
        if node == potential_parent:
            return True
        node = node.getParent()
    return False


class PyNodeChain(list):
    """A container for manipulating lists of hosts"""
    @property
    def start(self):
        return next(iter(self), None)

    @property
    def end(self):
        return self[-1] if len(self) > 0 else None

    @property
    def chain(self):
        return self

    def duplicate(self):
        # Hack - Convert self into list even if self is a list to prevent duplicate self parameter in pymel.duplicate
        new_chain = pymel.duplicate(list(self), renameChildren=True, parentOnly=True)
        return PyNodeChain(new_chain)

    def setParent(self, new_parent, **kwargs):
        for node in self:
            if node != new_parent and node.getParent() != new_parent:
                node.setParent(new_parent, **kwargs)

    # todo: convert to property?
    def length(self):
        length = 0
        for i in range(len(self) - 1):
            head = self[i]
            tail = self[i + 1]
            length += distance_between_nodes(head, tail)
        return length

    # get the first pynode that has the attr
    def __getattr__(self, key):
        logging.warning("Searching unknown attribute %s in %s", key, self)
        first_node = next((node for node in self if hasattr(node, key)), None)
        if first_node is not None:
            return getattr(first_node, key)
        raise AttributeError(key)

    # set all the pynodes that have the attr
    def __setattr__(self, key, value):
        for node in self:
            try:
                setattr(node, key, value)
            except Exception as e:
                logging.error(str(e))


def duplicate_chain(chain):
    new_chain = pymel.duplicate(chain, renameChildren=True, parentOnly=True)
    return PyNodeChain(new_chain)


def get_num_parents(obj):
    num_parents = -1
    while obj is not None:
        obj = obj.getParent()
        num_parents += 1
    return num_parents


def get_chains_from_objs(objs):
    """
    Take an arbitrary collection of joints and sort them in hierarchies represented by lists.
    """
    chains = []
    objs = sorted(objs, key=get_num_parents)
    for obj in objs:
        parent = obj.getParent()
        if parent not in objs:
            chains.append([obj])
        else:
            for chain in chains:
                if parent in chain:
                    chain.append(obj)
    return [PyNodeChain(chain) for chain in chains]


def iter_parents(obj):
    while obj.getParent() is not None:
        obj = obj.getParent()
        yield obj


def get_parents(obj):
    return list(iter_parents(obj))
    '''
    parents = []
    while obj.getParent() is not None:
        parent = obj.getParent()
        parents.append(parent)
        obj = parent
    return parents
    '''


def get_common_parents(objs):
    """
    Return the first parent that all provided objects share.
    :param objs: A list of pymel.PyNode instances.
    :return: A pymel.PyNode instance.
""" parent_sets = set() for jnt in objs: parent_set = set(get_parents(jnt)) if not parent_sets: parent_sets = parent_set else: parent_sets &= parent_set result = next(iter(reversed(sorted(parent_sets, key=get_num_parents))), None) if result and result in objs: result = result.getParent() return result class Tree(object): __slots__ = ('val', 'children', 'parent') def __init__(self, val): self.val = val self.children = [] self.parent = None def append(self, tree): self.children.append(tree) tree.parent = self def __repr__(self): return '<Tree {0}>'.format(self.val) def get_tree_from_objs(objs, sort=False): """ Sort all provided objects in a tree fashion. Support missing objects between hierarchy. Note that tree root value will always be None, representing the root node. """ dagpaths = sorted([obj.fullPath() for obj in objs]) root = Tree(None) def dag_is_child_of(dag_parent, dag_child): return dag_child.startswith(dag_parent + '|') last_knot = root for dagpath in dagpaths: knot = Tree(dagpath) # Resolve the new knot parent p = last_knot while not (p.val is None or dag_is_child_of(p.val, dagpath)): p = p.parent p.append(knot) # Save the last knot, since we are iterating in alphabetical order, # we can assume that the next knot parent can be found using this knot. last_knot = knot return root # # ls() reimplementations # def ls(*args, **kwargs): return PyNodeChain(pymel.ls(*args, **kwargs)) # Wrapper for pymel.ls that return only objects without parents. def ls_root(*args, **kwargs): # TODO: Better finding of the root joint return PyNodeChain(filter(lambda x: x.getParent() is None or type(x.getParent()) != pymel.nt.Joint, iter(pymel.ls(*args, **kwargs)))) def ls_root_anms(pattern='anm*', **kwargs): return ls_root(pattern, type='transform', **kwargs) def ls_root_geos(pattern='geo*', **kwargs): return ls_root(pattern, type='transform', **kwargs) def ls_root_rigs(pattern='rig*', **kwargs): return ls_root(pattern, type='transform', **kwargs) def ls_root_jnts(pattern='jnt*', **kwargs): return ls_root(pattern, type='transform', **kwargs) # # isinstance() reimplementation # # Class check for transform PyNodes def isinstance_of_transform(obj, cls=pymel.nodetypes.Transform): return isinstance(obj, cls) # Class check for shape PyNodes def isinstance_of_shape(obj, cls=pymel.nodetypes.Shape): if isinstance(obj, pymel.nodetypes.Transform): return any((shape for shape in obj.getShapes() if isinstance(shape, cls))) elif isinstance(obj, pymel.nodetypes.Shape): return isinstance(obj, cls) def create_zero_grp(obj): zero_grp = pymel.createNode('transform') new_name = obj.name() + '_' + 'zero_grp' zero_grp.rename(new_name) # Note: Removed for performance zero_grp.setMatrix(obj.getMatrix(worldSpace=True)) parent = obj.getParent() if parent: zero_grp.setParent(parent) obj.setParent(zero_grp) return zero_grp def zero_out_objs(objs): for o in objs: create_zero_grp(o) # # pymel.datatypes extensions. # class Segment(object): """ In Maya there's no class to represent a segment. This is the pymel.datatypes.Segment I've always wanted. 
""" def __init__(self, pos_s, pos_e): self.pos_s = pos_s self.pos_e = pos_e # self.pos_s = numpy.array(pos_s.x, pos_s.y, pos_s.z) # self.pos_e = numpy.array(pos_e.x, pos_e.y, pos_e.z) def closest_point(self, p): """ http://stackoverflow.com/questions/3120357/get-closest-point-to-a-line """ a = self.pos_s b = self.pos_e a_to_p = p - a a_to_b = b - a ab_length = a_to_b.length() ap_length = a_to_p.length() a_to_p_norm = a_to_p.normal() a_to_b_norm = a_to_b.normal() atp_dot_atb = a_to_p_norm * (a_to_b_norm) # dot product dist_norm = atp_dot_atb * ap_length / ab_length return pymel.datatypes.Vector( a.x + a_to_b.x * dist_norm, a.y + a_to_b.y * dist_norm, a.z + a_to_b.z * dist_norm ) def closest_point_normalized_distance(self, p, epsilon=0.001): """ Same things as .closest_point but only return the distance relative from the length of a to b. Available for optimisation purpose. """ a = self.pos_s b = self.pos_e a_to_p = p - a a_to_b = b - a ab_length = a_to_b.length() ap_length = a_to_p.length() a_to_p_norm = a_to_p.normal() a_to_b_norm = a_to_b.normal() atp_dot_atb = a_to_p_norm * a_to_b_norm return (atp_dot_atb * ap_length / ab_length) if abs(ab_length) > epsilon else 0.0 class SegmentCollection(object): def __init__(self, segments=None): if segments is None: segments = [] self.segments = segments self.knots = [segment.pos_s for segment in self.segments] self.knots.append(self.segments[-1].pos_e) def closest_segment(self, pos): bound_min = -0.000000000001 # Damn float imprecision bound_max = 1.0000000000001 # Damn float imprecision num_segments = len(self.segments) for i, segment in enumerate(self.segments): distance_normalized = segment.closest_point_normalized_distance(pos) if bound_min <= distance_normalized <= bound_max: return segment, distance_normalized elif i == 0 and distance_normalized < bound_min: # Handle out-of-bound return segment, 0.0 elif i == (num_segments - 1) and distance_normalized > bound_max: # Handle out-of-bound return segment, 1.0 raise Exception("Can't resolve segment for {0}".format(pos)) def closest_segment_index(self, pos): closest_segment, ratio = self.closest_segment(pos) index = self.segments.index(closest_segment) return index, ratio def get_knot_weights(self, dropoff=1.0, normalize=True): num_knots = len(self.knots) knots_weights = [] for i, knot in enumerate(self.knots): if i == 0: weights = [0] * num_knots weights[0] = 1.0 elif i == (num_knots - 1): weights = [0] * num_knots weights[-1] = 1.0 else: weights = [] total_weight = 0.0 for j in range(num_knots): distance = abs(j - i) weight = max(0, 1.0 - (distance / dropoff)) total_weight += weight weights.append(weight) weights = [weight / total_weight for weight in weights] knots_weights.append(weights) return knots_weights ''' def get_weights(self, pos, dropoff=1.0, normalize=True): # Compute the 'SegmentCollection' relative ratio and return the weight for each knots. closest_segment, relative_ratio = self.closest_segment(pos) index = self.segments.index(closest_segment) absolute_ratio = relative_ratio + index weights = [] total_weights = 0.0 for segment_ratio in range(len(self.knots)): #segment_ratio += 0.5 # center of the joint #print segment_ratio, absolute_ratio distance = abs(segment_ratio - absolute_ratio) weight = max(0, 1.0-(distance/dropoff)) # Apply cubic interpolation for greater results. 
#weight = interp_cubic(weight) total_weights += weight weights.append(weight) if normalize: weights = [weight / total_weights for weight in weights] return weights ''' @classmethod def from_transforms(cls, objs): segments = [] num_objs = len(objs) for i in range(num_objs - 1): obj_s = objs[i] obj_e = objs[i + 1] mfn_transform_s = obj_s.__apimfn__() mfn_transform_e = obj_e.__apimfn__() pos_s = OpenMaya.MVector(mfn_transform_s.getTranslation(OpenMaya.MSpace.kWorld)) pos_e = OpenMaya.MVector(mfn_transform_e.getTranslation(OpenMaya.MSpace.kWorld)) segment = Segment(pos_s, pos_e) segments.append(segment) return cls(segments) @classmethod def from_positions(cls, positions): segments = [] num_positions = len(positions) for i in range(num_positions - 1): pos_s = positions[i] pos_e = positions[i + 1] segment = Segment(pos_s, pos_e) segments.append(segment) return cls(segments) def get_rotation_from_matrix(tm): """ Bypass pymel bug see https://github.com/LumaPictures/pymel/issues/355 """ return pymel.datatypes.TransformationMatrix(tm).rotate def makeIdentity_safe(obj, translate=False, rotate=False, scale=False, apply=False, **kwargs): """ Extended pymel.makeIdentity method that won't crash for idiotic reasons. """ from . import libAttr affected_attrs = [] # Ensure the shape don't have any extra transformation. if apply: if translate: libAttr.unlock_translation(obj) affected_attrs.extend([ obj.translate, obj.translateX, obj.translateY, obj.translateZ ]) if rotate: libAttr.unlock_rotation(obj) affected_attrs.extend([ obj.rotate, obj.rotateX, obj.rotateY, obj.rotateZ ]) if scale: libAttr.unlock_scale(obj) affected_attrs.extend([ obj.scale, obj.scaleX, obj.scaleY, obj.scaleZ ]) # Make identify will faile if attributes are connected... with libAttr.context_disconnected_attrs(affected_attrs, hold_inputs=True, hold_outputs=False): pymel.makeIdentity(obj, apply=apply, translate=translate, rotate=rotate, scale=scale, **kwargs)
mit
3,718,975,997,942,480,400
29.17161
117
0.59118
false
3.587154
false
false
false
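Segment.closest_point above is a standard point-on-segment projection; restated with plain tuples, the same math runs without Maya or pymel (the function name here is illustrative):

def closest_point(a, b, p):
    ab = tuple(bi - ai for ai, bi in zip(a, b))
    ap = tuple(pi - ai for ai, pi in zip(a, p))
    # Normalized distance of p's projection along a->b, equivalent to
    # closest_point_normalized_distance in the class above.
    t = sum(x * y for x, y in zip(ap, ab)) / sum(c * c for c in ab)
    return tuple(ai + abi * t for ai, abi in zip(a, ab))

print(closest_point((0, 0, 0), (10, 0, 0), (3, 4, 0)))  # (3.0, 0.0, 0.0)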
microsoft/task_oriented_dialogue_as_dataflow_synthesis
src/dataflow/leaderboard/predict.py
1
2613
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. """ Semantic Machines\N{TRADE MARK SIGN} software. Creates the prediction files from onmt_translate output for the leaderboard. """ import argparse from typing import List import jsons from more_itertools import chunked from dataflow.core.dialogue import TurnId from dataflow.core.io import save_jsonl_file from dataflow.core.turn_prediction import TurnPrediction def build_prediction_report_datum( datum_id_line: str, src_line: str, nbest_lines: List[str], ) -> TurnPrediction: datum_id = jsons.loads(datum_id_line.strip(), TurnId) return TurnPrediction( datum_id=datum_id, user_utterance=src_line.strip(), lispress=nbest_lines[0].strip(), ) def create_onmt_prediction_report( datum_id_jsonl: str, src_txt: str, ref_txt: str, nbest_txt: str, nbest: int, ): prediction_report = [ build_prediction_report_datum( datum_id_line=datum_id_line, src_line=src_line, nbest_lines=nbest_lines, ) for datum_id_line, src_line, ref_line, nbest_lines in zip( open(datum_id_jsonl), open(src_txt), open(ref_txt), chunked(open(nbest_txt), nbest), ) ] save_jsonl_file(prediction_report, "predictions.jsonl") def main( datum_id_jsonl: str, src_txt: str, ref_txt: str, nbest_txt: str, nbest: int, ) -> None: """Creates 1-best predictions and saves them to files.""" create_onmt_prediction_report( datum_id_jsonl=datum_id_jsonl, src_txt=src_txt, ref_txt=ref_txt, nbest_txt=nbest_txt, nbest=nbest, ) def add_arguments(argument_parser: argparse.ArgumentParser) -> None: argument_parser.add_argument("--datum_id_jsonl", help="datum ID file") argument_parser.add_argument("--src_txt", help="source sequence file") argument_parser.add_argument("--ref_txt", help="target sequence reference file") argument_parser.add_argument("--nbest_txt", help="onmt_translate output file") argument_parser.add_argument("--nbest", type=int, help="number of hypos per datum") if __name__ == "__main__": cmdline_parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawTextHelpFormatter ) add_arguments(cmdline_parser) args = cmdline_parser.parse_args() print("Semantic Machines\N{TRADE MARK SIGN} software.") main( datum_id_jsonl=args.datum_id_jsonl, src_txt=args.src_txt, ref_txt=args.ref_txt, nbest_txt=args.nbest_txt, nbest=args.nbest, )
mit
-9,164,045,024,337,558,000
30.865854
87
0.66284
false
3.337165
false
false
false
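The n-best grouping above relies on more_itertools.chunked to slice the flat onmt_translate output into per-datum hypothesis lists. A stdlib-only sketch of equivalent grouping, assuming hypotheses arrive in datum order as the script expects:

from itertools import islice

def chunked(iterable, n):
    # Minimal re-implementation of more_itertools.chunked for illustration.
    it = iter(iterable)
    while True:
        block = list(islice(it, n))
        if not block:
            return
        yield block

nbest_lines = ['hypo-1a', 'hypo-1b', 'hypo-2a', 'hypo-2b']
for group in chunked(nbest_lines, 2):
    print(group[0])  # the 1-best hypothesis per datum, as used above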
nashgul/weechat
audacious_script/audacious.py
1
3067
# audacious now playing for weechat # nashgul <[email protected]> # version 0.1 # white => "00", black => "01", darkblue => "02", darkgreen => "03", lightred => "04", darkred => "05", magenta => "06", orange => "07", yellow => "08", lightgreen => "09", cyan => "10", lightcyan => "11", lightblue => "12", lightmagenta => "13", gray => "14", lightgray => "15" import weechat import subprocess weechat.register("audacious_np", "nashgul", "0.01", "GPL2", "now playing for audacious (usage: /audacious)", "", "") name = 'audacious' description = 'show now playing for audacious' hook = weechat.hook_command(name, description, '', '', '', 'now_playing', '') def get_info_array(): info_list = (['audtool current-song', 'audtool current-song-length', 'audtool current-song-output-length', 'audtool current-song-bitrate-kbps', 'audtool current-song-filename']) results = [] for x in info_list: temporal = subprocess.Popen(x, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) b = temporal.stdout.read().splitlines() results.append(b[0]) return results def now_playing(datos, channel, args): colors = { 'white' : '00', 'black' : '01', 'darkblue' : '02', 'darkgreen' : '03', 'lightred' : '04', 'darkred' : '05', 'magenta' : '06', 'orange' : '07', 'yellow' : '08', 'lightgreen' : '09', 'cyan' : '10', 'lightcyan' : '11', 'lightblue' : '12', 'lightmagenta' : '13', 'gray' : '14', 'lightgray' : '15' } info_array = get_info_array() message_color = "%s" % colors['darkblue'] message = u'\x03' + message_color + 'esta reproduciendo' + u'\x0f' song_color = "%s" % colors['lightred'] song = u'\x03' + song_color + info_array[0] + u'\x0f' song_filename_color = "%s" % colors['lightred'] song_filename = u'\x03' + song_filename_color + info_array[4] + u'\x0f' brackets_color = "%s" % colors['yellow'] bracket_1 = u'\x03' + brackets_color + '[' + u'\x0f' bracket_2 = u'\x03' + brackets_color + ']' + u'\x0f' hyphen_color = "%s" % colors['yellow'] hyphen = u'\x03' + hyphen_color + ' - ' + u'\x0f' at_color = "%s" % colors['yellow'] at_sym = u'\x03' + at_color + '@' + u'\x0f' output_length_color = "%s" % colors['lightblue'] output_length = u'\x03' + output_length_color + info_array[2] + u'\x0f' length = '' if info_array[1] != '0:00': length_color = "%s" % colors['lightblue'] length = u'\x03' + length_color + hyphen + ' ' + info_array[1] + ' ' + u'\x0f' bitrate_color = "%s" % colors['lightmagenta'] bitrate = u'\x03' + bitrate_color + info_array[3] + ' kbps' + u'\x0f' string = "%s %s %s%s %s %s" %(bracket_1, output_length, length, at_sym, bitrate, bracket_2) source = '' if song_filename.lower().startswith('http'): source = song_filename output_string = "%s: %s %s %s" %(message, source, song, string) weechat.command(channel, "/me %s" % (output_string)) return weechat.WEECHAT_RC_OK
gpl-2.0
-6,693,482,823,102,077,000
45.469697
278
0.573525
false
2.879812
false
false
false
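The u'\x03' prefixes above are mIRC color codes: \x03 plus a two-digit number starts a color and \x0f resets formatting. A standalone sketch of the same composition, with the color table trimmed to two entries:

colors = {'yellow': '08', 'lightred': '04'}

def colorize(text, color):
    # \x03NN starts color NN, \x0f resets (the mIRC convention used above).
    return '\x03' + colors[color] + text + '\x0f'

line = colorize('[', 'yellow') + colorize('3:45', 'lightred') + colorize(']', 'yellow')
print(repr(line))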
SerpentAI/SerpentAI
serpent/game_frame.py
1
5346
import skimage.color import skimage.measure import skimage.transform import skimage.filters import skimage.morphology import numpy as np import io from PIL import Image class GameFrameError(BaseException): pass class GameFrame: def __init__(self, frame_data, frame_variants=None, timestamp=None, **kwargs): if isinstance(frame_data, bytes): self.frame_bytes = frame_data self.frame_array = None elif isinstance(frame_data, np.ndarray): self.frame_bytes = None self.frame_array = frame_data self.frame_variants = frame_variants or dict() self.timestamp = timestamp self.offset_x = kwargs.get("offset_x") or 0 self.offset_y = kwargs.get("offset_y") or 0 self.resize_order = kwargs.get("resize_order") or 1 @property def frame(self): return self.frame_array if self.frame_array is not None else self.frame_bytes @property def half_resolution_frame(self): """ A quarter-sized version of the frame (half-width, half-height)""" if "half" not in self.frame_variants: self.frame_variants["half"] = self._to_half_resolution() return self.frame_variants["half"] @property def quarter_resolution_frame(self): """ A sixteenth-sized version of the frame (quarter-width, quarter-height)""" if "quarter" not in self.frame_variants: self.frame_variants["quarter"] = self._to_quarter_resolution() return self.frame_variants["quarter"] @property def eighth_resolution_frame(self): """ A 1/32-sized version of the frame (eighth-width, eighth-height)""" if "eighth" not in self.frame_variants: self.frame_variants["eighth"] = self._to_eighth_resolution() return self.frame_variants["eighth"] @property def eighth_resolution_grayscale_frame(self): """ A 1/32-sized, grayscale version of the frame (eighth-width, eighth-height)""" if "eighth_grayscale" not in self.frame_variants: self.frame_variants["eighth_grayscale"] = self._to_eighth_grayscale_resolution() return self.frame_variants["eighth_grayscale"] @property def grayscale_frame(self): """ A full-size grayscale version of the frame""" if "grayscale" not in self.frame_variants: self.frame_variants["grayscale"] = self._to_grayscale() return self.frame_variants["grayscale"] @property def ssim_frame(self): """ A 100x100 grayscale frame to be used for SSIM""" if "ssim" not in self.frame_variants: self.frame_variants["ssim"] = self._to_ssim() return self.frame_variants["ssim"] @property def top_color(self): height, width, channels = self.eighth_resolution_frame.shape values, counts = np.unique(self.eighth_resolution_frame.reshape(width * height, channels), axis=0, return_counts=True) return [int(i) for i in values[np.argsort(counts)[::-1][0]]] def compare_ssim(self, previous_game_frame): return skimage.measure.compare_ssim(previous_game_frame.ssim_frame, self.ssim_frame) def difference(self, previous_game_frame): current = skimage.filters.gaussian(self.grayscale_frame, 8) previous = skimage.filters.gaussian(previous_game_frame.grayscale_frame, 8) return current - previous def to_pil(self): return Image.fromarray(self.frame) def to_png_bytes(self): pil_frame = Image.fromarray(skimage.util.img_as_ubyte(self.frame)) if len(self.frame.shape) == 3: pil_frame = pil_frame.convert("RGB") png_frame = io.BytesIO() pil_frame.save(png_frame, format="PNG", compress_level=3) png_frame.seek(0) return png_frame.read() # TODO: Refactor Fraction of Resolution Frames... 
def _to_half_resolution(self): shape = ( self.frame_array.shape[0] // 2, self.frame_array.shape[1] // 2 ) return np.array(skimage.transform.resize(self.frame_array, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8") def _to_quarter_resolution(self): shape = ( self.frame_array.shape[0] // 4, self.frame_array.shape[1] // 4 ) return np.array(skimage.transform.resize(self.frame_array, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8") def _to_eighth_resolution(self): shape = ( self.frame_array.shape[0] // 8, self.frame_array.shape[1] // 8 ) return np.array(skimage.transform.resize(self.frame_array, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8") def _to_eighth_grayscale_resolution(self): shape = ( self.frame_array.shape[0] // 8, self.frame_array.shape[1] // 8 ) return np.array(skimage.transform.resize(self.grayscale_frame, shape, mode="reflect", order=self.resize_order) * 255, dtype="uint8") def _to_grayscale(self): return np.array(skimage.color.rgb2gray(self.frame_array) * 255, dtype="uint8") def _to_ssim(self): grayscale = self.grayscale_frame return skimage.transform.resize(grayscale, (100, 100), mode="reflect", order=0)
mit
-799,488,949,246,343,400
31.204819
140
0.633558
false
3.554521
false
false
false
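The fractional-resolution properties above halve, quarter or eighth each axis through skimage.transform.resize and rescale to uint8. The numpy striding below is only a sketch of the shape arithmetic, not the interpolation the class actually performs:

import numpy as np

frame = np.arange(64 * 64 * 3, dtype=np.uint8).reshape(64, 64, 3)
half = frame[::2, ::2]    # half-width, half-height, channels untouched
eighth = frame[::8, ::8]
print(half.shape, eighth.shape)  # (32, 32, 3) (8, 8, 3)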
agraubert/agutil
agutil/parallel/src/dispatcher.py
1
2555
from .exceptions import _ParallelBackgroundException from .worker import ThreadWorker, ProcessWorker from itertools import zip_longest WORKERTYPE_THREAD = ThreadWorker WORKERTYPE_PROCESS = ProcessWorker class IterDispatcher: def __init__( self, func, *args, maximum=15, workertype=WORKERTYPE_THREAD, **kwargs ): self.func = func self.maximum = maximum self.args = [iter(arg) for arg in args] self.kwargs = {key: iter(v) for (key, v) in kwargs.items()} self.worker = workertype def run(self): yield from self.dispatch() def dispatch(self): self.worker = self.worker(self.maximum) try: output = [] for args, kwargs in self._iterargs(): # _args = args if args is not None else [] # _kwargs = kwargs if kwargs is not None else {} output.append(self.worker.work( self.func, *args, **kwargs )) for callback in output: result = callback() if isinstance(result, _ParallelBackgroundException): raise result.exc yield result finally: self.worker.close() def _iterargs(self): while True: args = [] had_arg = False for src in self.args: try: args.append(next(src)) had_arg = True except StopIteration: return # args.append(None) kwargs = {} for key, src in self.kwargs.items(): try: kwargs[key] = next(src) had_arg = True except StopIteration: return # kwargs[key] = None if not had_arg: return yield args, kwargs def __iter__(self): yield from self.dispatch() def is_alive(self): return self.worker.is_alive() class DemandDispatcher: def __init__(self, func, maximum=15, workertype=WORKERTYPE_THREAD): self.maximum = maximum self.func = func self.worker = workertype(self.maximum) def dispatch(self, *args, **kwargs): try: return self.worker.work(self.func, *args, **kwargs) except BaseException: self.worker.close() raise def close(self): self.worker.close()
mit
1,826,956,604,685,546,800
27.707865
71
0.508415
false
4.620253
false
false
false
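IterDispatcher above zips argument iterators, fans calls out to a bounded worker pool and yields results in submission order. A stdlib analogue of that pattern, as a sketch rather than the module's own worker machinery:

from concurrent.futures import ThreadPoolExecutor

def work(x, y):
    return x + y

xs, ys = range(5), range(5, 10)
with ThreadPoolExecutor(max_workers=3) as pool:
    futures = [pool.submit(work, x, y) for x, y in zip(xs, ys)]
    print([f.result() for f in futures])  # in-order results, as in IterDispatcher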
juliancantillo/royal-films
config/settings/local.py
1
1950
# -*- coding: utf-8 -*-
'''
Local settings

- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''

from .common import *  # noqa

# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG

# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='t3kohnptyzfb7v@s@4dlm2o1356rz&^oamd-y34qat^^69b+s(')

# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025

EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
                    default='django.core.mail.backends.console.EmailBackend')


# CACHING
# ------------------------------------------------------------------------------
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': ''
    }
}

# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )

INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)

DEBUG_TOOLBAR_CONFIG = {
    'DISABLE_PANELS': [
        'debug_toolbar.panels.redirects.RedirectsPanel',
    ],
    'SHOW_TEMPLATE_CONTEXT': True,
}

# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )

# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'

# Your local stuff: Below this line define 3rd party library settings
mit
-7,272,907,053,856,914,000
30.451613
99
0.488205
false
4.211663
false
false
false
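The CACHES block above configures a per-process LocMemCache. A hedged sketch of exercising it from a Django shell; it assumes DJANGO_SETTINGS_MODULE points at these settings and is not runnable standalone:

from django.core.cache import cache

cache.set('greeting', 'hello', timeout=30)  # lives only in this process
print(cache.get('greeting'))  # 'hello'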
grimoirelab/GrimoireELK
grimoire_elk/enriched/meetup.py
1
13379
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2019 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
#   Alvaro del Castillo San Felix <[email protected]>
#
import copy
import logging

from grimoirelab_toolkit.datetime import unixtime_to_datetime

from .enrich import Enrich, metadata
from ..elastic_mapping import Mapping as BaseMapping


MAX_SIZE_BULK_ENRICHED_ITEMS = 200

logger = logging.getLogger(__name__)


class Mapping(BaseMapping):

    @staticmethod
    def get_elastic_mappings(es_major):
        """Get Elasticsearch mapping.

        :param es_major: major version of Elasticsearch, as string
        :returns: dictionary with a key, 'items', with the mapping
        """

        mapping = """
        {
            "properties": {
                "description_analyzed": {
                    "type": "text",
                    "index": true
                },
                "comment": {
                    "type": "text",
                    "index": true
                },
                "venue_geolocation": {
                    "type": "geo_point"
                },
                "group_geolocation": {
                    "type": "geo_point"
                }
            }
        }
        """

        return {"items": mapping}


class MeetupEnrich(Enrich):
    mapping = Mapping

    def get_field_author(self):
        return "author"

    def get_identities(self, item):
        ''' Return the identities from an item '''
        item = item['data']

        # Creators
        if 'event_hosts' in item:
            user = self.get_sh_identity(item['event_hosts'][0])
            yield user

        # rsvps
        rsvps = item.get('rsvps', [])
        for rsvp in rsvps:
            user = self.get_sh_identity(rsvp['member'])
            yield user

        # Comments
        for comment in item['comments']:
            user = self.get_sh_identity(comment['member'])
            yield user

    def get_sh_identity(self, item, identity_field=None):
        identity = {'username': None, 'email': None, 'name': None}

        if not item:
            return identity

        user = item
        if 'data' in item and type(item) == dict:
            user = item['data'][identity_field]

        identity['username'] = str(user["id"])
        identity['email'] = None
        identity['name'] = user["name"]

        return identity

    def get_project_repository(self, eitem):
        return eitem['tag']

    @metadata
    def get_rich_item(self, item):
        # We need to detect the category of item: activities (report), events or users
        eitem = {}

        if 'time' not in item['data']:
            logger.warning("[meetup] Not processing %s: no time field", item['uuid'])
            return eitem

        for f in self.RAW_FIELDS_COPY:
            if f in item:
                eitem[f] = item[f]
            else:
                eitem[f] = None

        event = item['data']

        # data fields to copy
        copy_fields = ["id", "how_to_find_us"]
        for f in copy_fields:
            if f in event:
                eitem[f] = event[f]
            else:
                eitem[f] = None

        # Fields whose names are translated
        map_fields = {
            "link": "url",
            "rsvp_limit": "rsvps_limit"
        }
        for fn in map_fields:
            if fn in event:
                eitem[map_fields[fn]] = event[fn]
            else:
                # Default the mapped key when the source field is missing.
                eitem[map_fields[fn]] = None

        # event host fields: author of the event
        if 'event_hosts' in event:
            host = event['event_hosts'][0]
            if 'photo' in host:
                eitem['member_photo_url'] = host['photo']['photo_link']
                eitem['member_photo_id'] = host['photo']['id']
                eitem['member_photo_type'] = host['photo']['type']
            eitem['member_is_host'] = True
            eitem['member_id'] = host['id']
            eitem['member_name'] = host['name']
            eitem['member_url'] = "https://www.meetup.com/members/" + str(host['id'])
        eitem['event_url'] = event['link']

        # data fields to copy with meetup_ prefix
        copy_fields = ["description", "plain_text_description",
                       "name", "status", "utc_offset", "visibility",
                       "waitlist_count", "yes_rsvp_count", "duration",
                       "featured", "rsvpable"]
        copy_fields_time = ["time", "updated", "created"]
        for f in copy_fields:
            if f in event:
                eitem["meetup_" + f] = event[f]
            else:
                eitem["meetup_" + f] = None

        for f in copy_fields_time:
            if f in event:
                eitem["meetup_" + f] = unixtime_to_datetime(event[f] / 1000).isoformat()
            else:
                eitem["meetup_" + f] = None

        rsvps = event.get('rsvps', [])

        eitem['num_rsvps'] = len(rsvps)
        eitem['num_comments'] = len(event['comments'])

        try:
            if 'time' in event:
                eitem['time_date'] = unixtime_to_datetime(event['time'] / 1000).isoformat()
            else:
                logger.warning("time field not found in event")
                return {}
        except ValueError:
            logger.warning("Wrong datetime for %s: %s", eitem['url'], event['time'])
            # If no datetime for the enriched item, it is useless for Kibana
            return {}

        if 'venue' in event:
            venue = event['venue']
            copy_fields = ["id", "name", "city", "state", "zip", "country",
                           "localized_country_name", "repinned", "address_1"]
            for f in copy_fields:
                if f in venue:
                    eitem["venue_" + f] = venue[f]
                else:
                    eitem["venue_" + f] = None
            eitem['venue_geolocation'] = {
                "lat": event['venue']['lat'],
                "lon": event['venue']['lon'],
            }

        if 'series' in event:
            eitem['series_id'] = event['series']['id']
            eitem['series_description'] = event['series']['description']
            eitem['series_start_date'] = event['series']['start_date']

        if 'group' in event:
            group = event['group']
            copy_fields = ["id", "created", "join_mode", "name", "url_name",
                           "who"]
            for f in copy_fields:
                if f in group:
                    eitem["group_" + f] = group[f]
                else:
                    eitem["group_" + f] = None
            eitem['group_geolocation'] = {
                "lat": group['lat'],
                "lon": group['lon'],
            }
            eitem['group_topics'] = []
            eitem['group_topics_keys'] = []
            if 'topics' in group:
                group_topics = [topic['name'] for topic in group['topics']]
                group_topics_keys = [topic['urlkey'] for topic in group['topics']]
                eitem['group_topics'] = group_topics
                eitem['group_topics_keys'] = group_topics_keys

        if len(rsvps) > 0:
            eitem['group_members'] = rsvps[0]['group']['members']

        created = unixtime_to_datetime(event['created'] / 1000).isoformat()
        eitem['type'] = "meetup"
        # time_date is when the meetup will take place, the needed one in this index
        # created is when the meetup entry was created and it is not the interesting date
        eitem.update(self.get_grimoire_fields(eitem['time_date'], eitem['type']))

        if self.sortinghat:
            eitem.update(self.get_item_sh(event))

        if self.prjs_map:
            eitem.update(self.get_item_project(eitem))

        self.add_repository_labels(eitem)
        self.add_metadata_filter_raw(eitem)
        return eitem

    def get_item_sh(self, item):
        """ Add sorting hat enrichment fields """
        sh_fields = {}

        # Not shared common get_item_sh because it is pretty specific
        if 'member' in item:
            # comment and rsvp
            identity = self.get_sh_identity(item['member'])
        elif 'event_hosts' in item:
            # meetup event
            identity = self.get_sh_identity(item['event_hosts'][0])
        else:
            return sh_fields

        created = unixtime_to_datetime(item['created'] / 1000)
        sh_fields = self.get_item_sh_fields(identity, created)

        return sh_fields

    def get_rich_item_comments(self, comments, eitem):
        for comment in comments:
            ecomment = copy.deepcopy(eitem)
            created = unixtime_to_datetime(comment['created'] / 1000).isoformat()
            ecomment['url'] = comment['link']
            ecomment['id'] = ecomment['id'] + '_comment_' + str(comment['id'])
            ecomment['comment'] = comment['comment']
            ecomment['like_count'] = comment['like_count']
            ecomment['type'] = 'comment'
ecomment.update(self.get_grimoire_fields(created, ecomment['type'])) ecomment.pop('is_meetup_meetup') # event host fields: author of the event member = comment['member'] if 'photo' in member: ecomment['member_photo_url'] = member['photo']['photo_link'] ecomment['member_photo_id'] = member['photo']['id'] ecomment['member_photo_type'] = member['photo']['type'] if 'event_context' in member: ecomment['member_is_host'] = member['event_context']['host'] ecomment['member_id'] = member['id'] ecomment['member_name'] = member['name'] ecomment['member_url'] = "https://www.meetup.com/members/" + str(member['id']) if self.sortinghat: ecomment.update(self.get_item_sh(comment)) yield ecomment def get_rich_item_rsvps(self, rsvps, eitem): for rsvp in rsvps: ersvp = copy.deepcopy(eitem) ersvp['type'] = 'rsvp' created = unixtime_to_datetime(rsvp['created'] / 1000).isoformat() ersvp.update(self.get_grimoire_fields(created, ersvp['type'])) ersvp.pop('is_meetup_meetup') # event host fields: author of the event member = rsvp['member'] if 'photo' in member: ersvp['member_photo_url'] = member['photo']['photo_link'] ersvp['member_photo_id'] = member['photo']['id'] ersvp['member_photo_type'] = member['photo']['type'] ersvp['member_is_host'] = member['event_context']['host'] ersvp['member_id'] = member['id'] ersvp['member_name'] = member['name'] ersvp['member_url'] = "https://www.meetup.com/members/" + str(member['id']) ersvp['id'] = ersvp['id'] + '_rsvp_' + str(rsvp['event']['id']) + "_" + str(member['id']) ersvp['url'] = "https://www.meetup.com/members/" + str(member['id']) ersvp['rsvps_guests'] = rsvp['guests'] ersvp['rsvps_updated'] = rsvp['updated'] ersvp['rsvps_response'] = rsvp['response'] if self.sortinghat: ersvp.update(self.get_item_sh(rsvp)) yield ersvp def get_field_unique_id(self): return "id" def enrich_items(self, ocean_backend): items_to_enrich = [] num_items = 0 ins_items = 0 for item in ocean_backend.fetch(): eitem = self.get_rich_item(item) if 'uuid' not in eitem: continue items_to_enrich.append(eitem) if 'comments' in item['data'] and 'id' in eitem: comments = item['data']['comments'] rich_item_comments = self.get_rich_item_comments(comments, eitem) items_to_enrich.extend(rich_item_comments) if 'rsvps' in item['data'] and 'id' in eitem: rsvps = item['data']['rsvps'] rich_item_rsvps = self.get_rich_item_rsvps(rsvps, eitem) items_to_enrich.extend(rich_item_rsvps) if len(items_to_enrich) < MAX_SIZE_BULK_ENRICHED_ITEMS: continue num_items += len(items_to_enrich) ins_items += self.elastic.bulk_upload(items_to_enrich, self.get_field_unique_id()) items_to_enrich = [] if len(items_to_enrich) > 0: num_items += len(items_to_enrich) ins_items += self.elastic.bulk_upload(items_to_enrich, self.get_field_unique_id()) if num_items != ins_items: missing = num_items - ins_items logger.error("%s/%s missing items for Meetup", str(missing), str(num_items)) else: logger.info("%s items inserted for Meetup", str(num_items)) return num_items
gpl-3.0
-4,369,405,093,059,499,000
33.660622
101
0.52545
false
3.831329
false
false
false
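Meetup timestamps are in milliseconds, which is why every conversion above divides by 1000 before calling unixtime_to_datetime (a grimoirelab_toolkit helper). A stdlib sketch of the same conversion:

from datetime import datetime, timezone

event_time_ms = 1417990800000  # sample value, not real data
dt = datetime.fromtimestamp(event_time_ms / 1000, tz=timezone.utc)
print(dt.isoformat())  # ISO-8601 string, the same shape the enricher stores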
ratschlab/ASP
applications/msplicer/content_sensors.py
1
2271
# # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Written (W) 2006-2007 Soeren Sonnenburg # Written (W) 2007 Gunnar Raetsch # Copyright (C) 2007-2008 Fraunhofer Institute FIRST and Max-Planck-Society # import numpy class content_sensors: def __init__(self, model): self.dict_weights_intron=numpy.array(model.dict_weights_intron, dtype=numpy.float64) self.dict_weights_coding=numpy.array(model.dict_weights_coding, dtype=numpy.float64) self.dicts=numpy.concatenate((self.dict_weights_coding,self.dict_weights_intron, self.dict_weights_coding, self.dict_weights_intron, self.dict_weights_coding,self.dict_weights_intron, self.dict_weights_coding, self.dict_weights_intron), axis=0) self.dicts[0, 64:] = 0 # only order 3 info self.dicts[1, 64:] = 0 # only order 3 info self.dicts[2, 0:64] = 0 # only order 4 info self.dicts[2, 320:] = 0 self.dicts[3, 0:64] = 0 # only order 4 info self.dicts[3, 320:] = 0 self.dicts[4, 0:320] = 0 # only order 5 info self.dicts[4, 1344:] = 0 self.dicts[5, 0:320] = 0 # only order 5 info self.dicts[5, 1344:] = 0 self.dicts[6, 0:1344] = 0 # only order 6 info self.dicts[7, 0:1344] = 0 # only order 6 info self.model = model def get_dict_weights(self): return self.dicts.T def initialize_content(self, dyn): dyn.init_svm_arrays(len(self.model.word_degree), len(self.model.mod_words)) word_degree = numpy.array(self.model.word_degree, numpy.int32) dyn.init_word_degree_array(word_degree) mod_words = numpy.array(4**word_degree, numpy.int32) dyn.init_num_words_array(mod_words) cum_mod_words=numpy.zeros(len(mod_words)+1, numpy.int32) cum_mod_words[1:] = numpy.cumsum(mod_words) dyn.init_cum_num_words_array(cum_mod_words) dyn.init_mod_words_array(numpy.array(self.model.mod_words, numpy.int32)) dyn.init_sign_words_array(numpy.array(self.model.sign_words, numpy.bool)) dyn.init_string_words_array(numpy.zeros(len(self.model.sign_words), numpy.int32)) assert(dyn.check_svm_arrays())
gpl-2.0
-2,526,926,866,745,545,700
39.553571
246
0.693087
false
2.821118
false
false
false
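The hard-coded slice bounds above (64, 320, 1344) are cumulative k-mer dictionary sizes for word degrees 3 to 6, mirroring the cum_mod_words array. A short check that reproduces them:

import numpy as np

word_degree = np.array([3, 4, 5, 6], np.int32)
mod_words = 4 ** word_degree              # 64, 256, 1024, 4096 words per order
cum = np.zeros(len(mod_words) + 1, np.int32)
cum[1:] = np.cumsum(mod_words)
print(cum)  # 0, 64, 320, 1344, 5440: matches the zeroed slice bounds above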
disqus/django-old
tests/regressiontests/admin_validation/tests.py
1
9982
from django import forms from django.core.exceptions import ImproperlyConfigured from django.test import TestCase from django.contrib import admin from django.contrib.admin.validation import validate, validate_inline from models import Song, Book, Album, TwoAlbumFKAndAnE, State, City class SongForm(forms.ModelForm): pass class ValidFields(admin.ModelAdmin): form = SongForm fields = ['title'] class InvalidFields(admin.ModelAdmin): form = SongForm fields = ['spam'] class ValidationTestCase(TestCase): def test_readonly_and_editable(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ["original_release"] fieldsets = [ (None, { "fields": ["title", "original_release"], }), ] validate(SongAdmin, Song) def test_custom_modelforms_with_fields_fieldsets(self): """ # Regression test for #8027: custom ModelForms with fields/fieldsets """ validate(ValidFields, Song) self.assertRaisesMessage(ImproperlyConfigured, "'InvalidFields.fields' refers to field 'spam' that is missing from the form.", validate, InvalidFields, Song) def test_exclude_values(self): """ Tests for basic validation of 'exclude' option values (#12689) """ class ExcludedFields1(admin.ModelAdmin): exclude = ('foo') self.assertRaisesMessage(ImproperlyConfigured, "'ExcludedFields1.exclude' must be a list or tuple.", validate, ExcludedFields1, Book) def test_exclude_duplicate_values(self): class ExcludedFields2(admin.ModelAdmin): exclude = ('name', 'name') self.assertRaisesMessage(ImproperlyConfigured, "There are duplicate field(s) in ExcludedFields2.exclude", validate, ExcludedFields2, Book) def test_exclude_in_inline(self): class ExcludedFieldsInline(admin.TabularInline): model = Song exclude = ('foo') class ExcludedFieldsAlbumAdmin(admin.ModelAdmin): model = Album inlines = [ExcludedFieldsInline] self.assertRaisesMessage(ImproperlyConfigured, "'ExcludedFieldsInline.exclude' must be a list or tuple.", validate, ExcludedFieldsAlbumAdmin, Album) def test_exclude_inline_model_admin(self): """ # Regression test for #9932 - exclude in InlineModelAdmin # should not contain the ForeignKey field used in ModelAdmin.model """ class SongInline(admin.StackedInline): model = Song exclude = ['album'] class AlbumAdmin(admin.ModelAdmin): model = Album inlines = [SongInline] self.assertRaisesMessage(ImproperlyConfigured, "SongInline cannot exclude the field 'album' - this is the foreign key to the parent model admin_validation.Album.", validate, AlbumAdmin, Album) def test_app_label_in_admin_validation(self): """ Regression test for #15669 - Include app label in admin validation messages """ class RawIdNonexistingAdmin(admin.ModelAdmin): raw_id_fields = ('nonexisting',) self.assertRaisesMessage(ImproperlyConfigured, "'RawIdNonexistingAdmin.raw_id_fields' refers to field 'nonexisting' that is missing from model 'admin_validation.Album'.", validate, RawIdNonexistingAdmin, Album) def test_fk_exclusion(self): """ Regression test for #11709 - when testing for fk excluding (when exclude is given) make sure fk_name is honored or things blow up when there is more than one fk to the parent model. 
""" class TwoAlbumFKAndAnEInline(admin.TabularInline): model = TwoAlbumFKAndAnE exclude = ("e",) fk_name = "album1" validate_inline(TwoAlbumFKAndAnEInline, None, Album) def test_inline_self_validation(self): class TwoAlbumFKAndAnEInline(admin.TabularInline): model = TwoAlbumFKAndAnE self.assertRaisesMessage(Exception, "<class 'regressiontests.admin_validation.models.TwoAlbumFKAndAnE'> has more than 1 ForeignKey to <class 'regressiontests.admin_validation.models.Album'>", validate_inline, TwoAlbumFKAndAnEInline, None, Album) def test_inline_with_specified(self): class TwoAlbumFKAndAnEInline(admin.TabularInline): model = TwoAlbumFKAndAnE fk_name = "album1" validate_inline(TwoAlbumFKAndAnEInline, None, Album) def test_readonly(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("title",) validate(SongAdmin, Song) def test_readonly_on_method(self): def my_function(obj): pass class SongAdmin(admin.ModelAdmin): readonly_fields = (my_function,) validate(SongAdmin, Song) def test_readonly_on_modeladmin(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("readonly_method_on_modeladmin",) def readonly_method_on_modeladmin(self, obj): pass validate(SongAdmin, Song) def test_readonly_method_on_model(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("readonly_method_on_model",) validate(SongAdmin, Song) def test_nonexistant_field(self): class SongAdmin(admin.ModelAdmin): readonly_fields = ("title", "nonexistant") self.assertRaisesMessage(ImproperlyConfigured, "SongAdmin.readonly_fields[1], 'nonexistant' is not a callable or an attribute of 'SongAdmin' or found in the model 'Song'.", validate, SongAdmin, Song) def test_nonexistant_field_on_inline(self): class CityInline(admin.TabularInline): model = City readonly_fields=['i_dont_exist'] # Missing attribute self.assertRaisesMessage(ImproperlyConfigured, "CityInline.readonly_fields[0], 'i_dont_exist' is not a callable or an attribute of 'CityInline' or found in the model 'City'.", validate_inline, CityInline, None, State) def test_extra(self): class SongAdmin(admin.ModelAdmin): def awesome_song(self, instance): if instance.title == "Born to Run": return "Best Ever!" return "Status unknown." validate(SongAdmin, Song) def test_readonly_lambda(self): class SongAdmin(admin.ModelAdmin): readonly_fields = (lambda obj: "test",) validate(SongAdmin, Song) def test_graceful_m2m_fail(self): """ Regression test for #12203/#12237 - Fail more gracefully when a M2M field that specifies the 'through' option is included in the 'fields' or the 'fieldsets' ModelAdmin options. 
""" class BookAdmin(admin.ModelAdmin): fields = ['authors'] self.assertRaisesMessage(ImproperlyConfigured, "'BookAdmin.fields' can't include the ManyToManyField field 'authors' because 'authors' manually specifies a 'through' model.", validate, BookAdmin, Book) def test_cannot_include_through(self): class FieldsetBookAdmin(admin.ModelAdmin): fieldsets = ( ('Header 1', {'fields': ('name',)}), ('Header 2', {'fields': ('authors',)}), ) self.assertRaisesMessage(ImproperlyConfigured, "'FieldsetBookAdmin.fieldsets[1][1]['fields']' can't include the ManyToManyField field 'authors' because 'authors' manually specifies a 'through' model.", validate, FieldsetBookAdmin, Book) def test_nested_fields(self): class NestedFieldsAdmin(admin.ModelAdmin): fields = ('price', ('name', 'subtitle')) validate(NestedFieldsAdmin, Book) def test_nested_fieldsets(self): class NestedFieldsetAdmin(admin.ModelAdmin): fieldsets = ( ('Main', {'fields': ('price', ('name', 'subtitle'))}), ) validate(NestedFieldsetAdmin, Book) def test_explicit_through_override(self): """ Regression test for #12209 -- If the explicitly provided through model is specified as a string, the admin should still be able use Model.m2m_field.through """ class AuthorsInline(admin.TabularInline): model = Book.authors.through class BookAdmin(admin.ModelAdmin): inlines = [AuthorsInline] # If the through model is still a string (and hasn't been resolved to a model) # the validation will fail. validate(BookAdmin, Book) def test_non_model_fields(self): """ Regression for ensuring ModelAdmin.fields can contain non-model fields that broke with r11737 """ class SongForm(forms.ModelForm): extra_data = forms.CharField() class Meta: model = Song class FieldsOnFormOnlyAdmin(admin.ModelAdmin): form = SongForm fields = ['title', 'extra_data'] validate(FieldsOnFormOnlyAdmin, Song) def test_non_model_first_field(self): """ Regression for ensuring ModelAdmin.field can handle first elem being a non-model field (test fix for UnboundLocalError introduced with r16225). """ class SongForm(forms.ModelForm): extra_data = forms.CharField() class Meta: model = Song class FieldsOnFormOnlyAdmin(admin.ModelAdmin): form = SongForm fields = ['extra_data', 'title'] validate(FieldsOnFormOnlyAdmin, Song)
bsd-3-clause
1,530,670,665,720,559,000
34.523132
167
0.619415
false
4.482263
true
false
false
aestheticblasphemy/aestheticBlasphemy
pl_messages/migrations/0002_auto_20200828_2129.py
1
1972
# Generated by Django 3.1 on 2020-08-28 15:59 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('pl_messages', '0001_initial'), ] operations = [ migrations.AlterField( model_name='messages', name='parent', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='pl_messages.messages'), ), migrations.AlterField( model_name='participantnotifications', name='participant', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='notified_participant', to=settings.AUTH_USER_MODEL, verbose_name='Notification Participant'), ), migrations.AlterField( model_name='participantthreads', name='participant', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='thread_participant', to=settings.AUTH_USER_MODEL, verbose_name='Thread Participant'), ), migrations.AlterField( model_name='participantthreads', name='threads', field=models.ManyToManyField(related_name='participant_threads', to='pl_messages.Thread', verbose_name='Participant Threads'), ), migrations.AlterField( model_name='thread', name='last_message', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='last_message_in_thread', to='pl_messages.messages', verbose_name='Last Message'), ), migrations.AlterField( model_name='thread', name='messages', field=models.ManyToManyField(related_name='thread_messages', to='pl_messages.Messages', verbose_name='Thread Messages'), ), ]
gpl-3.0
7,672,388,963,139,680,000
41.869565
188
0.647566
false
4.391982
false
false
false
dmwyatt/disney.api
pages/timepicker.py
1
3504
import datetime import logging import os import time import re import webbrowser from dateutil import parser from selenium import webdriver from selenium.common.exceptions import WebDriverException, NoSuchElementException from selenium.webdriver.support.select import Select from helpers import roundTime, difference_in_minutes, format_dt from pages.helpers import wait_for logger = logging.getLogger(__name__) class TimeNotBookableError(Exception): pass class BasicTimePicker: select_selector = 'select#diningAvailabilityForm-searchTime' def __init__(self, browser: webdriver.PhantomJS): self.browser = browser @property def select_element(self): return self.browser.find_element_by_css_selector(self.select_selector) @property def select(self): return Select(self.select_element) @property def option_elements(self): return self.select_element.find_elements_by_tag_name('option') @property def selectable_values(self): return [x.get_attribute('value') for x in self.option_elements] @property def selectable_texts(self): return [x.text for x in self.option_elements] def select_exact_time(self, desired_dt: datetime.datetime): the_time = desired_dt.strftime('%H:%M') if not the_time in self.selectable_values: raise TimeNotBookableError("Cannot select '{}' from {}".format(the_time, self.selectable_values)) self.select.select_by_value(the_time) def select_time_with_leeway(self, desired_dt: datetime.datetime, leeway: int): closest = None closest_delta = None for sv in self.selectable_values: if not re.match('\d\d:\d\d', sv): continue sv_dt = time_to_datetime(sv, desired_dt) if not closest: closest = sv_dt closest_delta = difference_in_minutes(desired_dt, closest) curr_sv_delta = difference_in_minutes(sv_dt, desired_dt) if curr_sv_delta < closest_delta: closest = sv_dt closest_delta = curr_sv_delta if closest_delta <= leeway: self.select_exact_time(closest) else: raise TimeNotBookableError("There is no selectable time that's " "less than {} minutes from {} " "in {}".format(leeway, format_dt(desired_dt), self.selectable_values)) def select_closest_time(self, desired_dt: datetime.datetime): closest = None closest_delta = None for sv in self.selectable_values: if not re.match('\d\d:\d\d', sv): continue sv_dt = time_to_datetime(sv, desired_dt) if not closest: closest = sv_dt closest_delta = difference_in_minutes(desired_dt, closest) curr_sv_delta = difference_in_minutes(sv_dt, desired_dt) if curr_sv_delta < closest_delta: closest = sv_dt closest_delta = curr_sv_delta self.select_exact_time(closest) def select_meal(self, meal): try: self.select.select_by_visible_text(meal) except NoSuchElementException: raise TimeNotBookableError("Cannot select '{}' from {}".format(meal, self.selectable_texts)) def select_breakfast(self): self.select_meal('Breakfast') def select_lunch(self): self.select_meal('Lunch') def select_dinner(self): self.select_meal('Dinner') def time_to_datetime(the_time: str, reference_dt: datetime.datetime) -> datetime.datetime: """ Takes a string representing a time and a datetime.datetime that represents the day that time is on, and returns a datetime.datetime on that day with the new time. """ dt = parser.parse(the_time) return dt.replace(year=reference_dt.year, month=reference_dt.month, day=reference_dt.day)
mit
4,530,609,695,152,849,400
27.958678
100
0.720034
false
3.250464
false
false
false
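select_closest_time above reduces to a min() over candidate datetimes keyed by minute distance. The same logic with the selenium machinery stripped away; the helper name is illustrative:

from datetime import datetime

def minutes_apart(a, b):
    return abs((a - b).total_seconds()) / 60.0

desired = datetime(2016, 5, 1, 18, 10)
options = [datetime(2016, 5, 1, h, m) for h, m in [(17, 30), (18, 0), (18, 30)]]
closest = min(options, key=lambda dt: minutes_apart(dt, desired))
print(closest.time())  # 18:00:00, within a 15-minute leeway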
jakesyl/fail2ban
fail2ban/protocol.py
1
9025
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*- # vi: set ft=python sts=4 ts=4 sw=4 noet : # This file is part of Fail2Ban. # # Fail2Ban is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Fail2Ban is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Fail2Ban; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # Author: Cyril Jaquier # __author__ = "Cyril Jaquier" __copyright__ = "Copyright (c) 2004 Cyril Jaquier" __license__ = "GPL" import textwrap ## # Describes the protocol used to communicate with the server. protocol = [ ['', "BASIC", ""], ["start", "starts the server and the jails"], ["reload", "reloads the configuration"], ["reload <JAIL>", "reloads the jail <JAIL>"], ["stop", "stops all jails and terminate the server"], ["status", "gets the current status of the server"], ["ping", "tests if the server is alive"], ["help", "return this output"], ["version", "return the server version"], ['', "LOGGING", ""], ["set loglevel <LEVEL>", "sets logging level to <LEVEL>. Levels: CRITICAL, ERROR, WARNING, NOTICE, INFO, DEBUG"], ["get loglevel", "gets the logging level"], ["set logtarget <TARGET>", "sets logging target to <TARGET>. Can be STDOUT, STDERR, SYSLOG or a file"], ["get logtarget", "gets logging target"], ["flushlogs", "flushes the logtarget if a file and reopens it. For log rotation."], ['', "DATABASE", ""], ["set dbfile <FILE>", "set the location of fail2ban persistent datastore. Set to \"None\" to disable"], ["get dbfile", "get the location of fail2ban persistent datastore"], ["set dbpurgeage <SECONDS>", "sets the max age in <SECONDS> that history of bans will be kept"], ["get dbpurgeage", "gets the max age in seconds that history of bans will be kept"], ['', "JAIL CONTROL", ""], ["add <JAIL> <BACKEND>", "creates <JAIL> using <BACKEND>"], ["start <JAIL>", "starts the jail <JAIL>"], ["stop <JAIL>", "stops the jail <JAIL>. 
The jail is removed"], ["status <JAIL> [FLAVOR]", "gets the current status of <JAIL>, with optional flavor or extended info"], ['', "JAIL CONFIGURATION", ""], ["set <JAIL> idle on|off", "sets the idle state of <JAIL>"], ["set <JAIL> addignoreip <IP>", "adds <IP> to the ignore list of <JAIL>"], ["set <JAIL> delignoreip <IP>", "removes <IP> from the ignore list of <JAIL>"], ["set <JAIL> addlogpath <FILE> ['tail']", "adds <FILE> to the monitoring list of <JAIL>, optionally starting at the 'tail' of the file (default 'head')."], ["set <JAIL> dellogpath <FILE>", "removes <FILE> from the monitoring list of <JAIL>"], ["set <JAIL> logencoding <ENCODING>", "sets the <ENCODING> of the log files for <JAIL>"], ["set <JAIL> addjournalmatch <MATCH>", "adds <MATCH> to the journal filter of <JAIL>"], ["set <JAIL> deljournalmatch <MATCH>", "removes <MATCH> from the journal filter of <JAIL>"], ["set <JAIL> addfailregex <REGEX>", "adds the regular expression <REGEX> which must match failures for <JAIL>"], ["set <JAIL> delfailregex <INDEX>", "removes the regular expression at <INDEX> for failregex"], ["set <JAIL> ignorecommand <VALUE>", "sets ignorecommand of <JAIL>"], ["set <JAIL> addignoreregex <REGEX>", "adds the regular expression <REGEX> which should match pattern to exclude for <JAIL>"], ["set <JAIL> delignoreregex <INDEX>", "removes the regular expression at <INDEX> for ignoreregex"], ["set <JAIL> findtime <TIME>", "sets the number of seconds <TIME> for which the filter will look back for <JAIL>"], ["set <JAIL> bantime <TIME>", "sets the number of seconds <TIME> a host will be banned for <JAIL>"], ["set <JAIL> datepattern <PATTERN>", "sets the <PATTERN> used to match date/times for <JAIL>"], ["set <JAIL> usedns <VALUE>", "sets the usedns mode for <JAIL>"], ["set <JAIL> banip <IP>", "manually Ban <IP> for <JAIL>"], ["set <JAIL> unbanip <IP>", "manually Unban <IP> in <JAIL>"], ["set <JAIL> maxretry <RETRY>", "sets the number of failures <RETRY> before banning the host for <JAIL>"], ["set <JAIL> maxlines <LINES>", "sets the number of <LINES> to buffer for regex search for <JAIL>"], ["set <JAIL> addaction <ACT>[ <PYTHONFILE> <JSONKWARGS>]", "adds a new action named <NAME> for <JAIL>. 
Optionally for a Python based action, a <PYTHONFILE> and <JSONKWARGS> can be specified, else will be a Command Action"],
	["set <JAIL> delaction <ACT>", "removes the action <ACT> from <JAIL>"],
	["", "COMMAND ACTION CONFIGURATION", ""],
	["set <JAIL> action <ACT> actionstart <CMD>", "sets the start command <CMD> of the action <ACT> for <JAIL>"],
	["set <JAIL> action <ACT> actionstop <CMD>", "sets the stop command <CMD> of the action <ACT> for <JAIL>"],
	["set <JAIL> action <ACT> actioncheck <CMD>", "sets the check command <CMD> of the action <ACT> for <JAIL>"],
	["set <JAIL> action <ACT> actionban <CMD>", "sets the ban command <CMD> of the action <ACT> for <JAIL>"],
	["set <JAIL> action <ACT> actionunban <CMD>", "sets the unban command <CMD> of the action <ACT> for <JAIL>"],
	["set <JAIL> action <ACT> timeout <TIMEOUT>", "sets <TIMEOUT> as the command timeout in seconds for the action <ACT> for <JAIL>"],
	["", "GENERAL ACTION CONFIGURATION", ""],
	["set <JAIL> action <ACT> <PROPERTY> <VALUE>", "sets the <VALUE> of <PROPERTY> for the action <ACT> for <JAIL>"],
	["set <JAIL> action <ACT> <METHOD>[ <JSONKWARGS>]", "calls the <METHOD> with <JSONKWARGS> for the action <ACT> for <JAIL>"],
	['', "JAIL INFORMATION", ""],
	["get <JAIL> logpath", "gets the list of the monitored files for <JAIL>"],
	["get <JAIL> logencoding", "gets the encoding of the log files for <JAIL>"],
	["get <JAIL> journalmatch", "gets the journal filter match for <JAIL>"],
	["get <JAIL> ignoreip", "gets the list of ignored IP addresses for <JAIL>"],
	["get <JAIL> ignorecommand", "gets ignorecommand of <JAIL>"],
	["get <JAIL> failregex", "gets the list of regular expressions which matches the failures for <JAIL>"],
	["get <JAIL> ignoreregex", "gets the list of regular expressions which matches patterns to ignore for <JAIL>"],
	["get <JAIL> findtime", "gets the time for which the filter will look back for failures for <JAIL>"],
	["get <JAIL> bantime", "gets the time a host is banned for <JAIL>"],
	["get <JAIL> datepattern", "gets the pattern used to match date/times for <JAIL>"],
	["get <JAIL> usedns", "gets the usedns setting for <JAIL>"],
	["get <JAIL> maxretry", "gets the number of failures allowed for <JAIL>"],
	["get <JAIL> maxlines", "gets the number of lines to buffer for <JAIL>"],
	["get <JAIL> actions", "gets a list of actions for <JAIL>"],
	["", "COMMAND ACTION INFORMATION",""],
	["get <JAIL> action <ACT> actionstart", "gets the start command for the action <ACT> for <JAIL>"],
	["get <JAIL> action <ACT> actionstop", "gets the stop command for the action <ACT> for <JAIL>"],
	["get <JAIL> action <ACT> actioncheck", "gets the check command for the action <ACT> for <JAIL>"],
	["get <JAIL> action <ACT> actionban", "gets the ban command for the action <ACT> for <JAIL>"],
	["get <JAIL> action <ACT> actionunban", "gets the unban command for the action <ACT> for <JAIL>"],
	["get <JAIL> action <ACT> timeout", "gets the command timeout in seconds for the action <ACT> for <JAIL>"],
	["", "GENERAL ACTION INFORMATION", ""],
	["get <JAIL> actionproperties <ACT>", "gets a list of properties for the action <ACT> for <JAIL>"],
	["get <JAIL> actionmethods <ACT>", "gets a list of methods for the action <ACT> for <JAIL>"],
	["get <JAIL> action <ACT> <PROPERTY>", "gets the value of <PROPERTY> for the action <ACT> for <JAIL>"],
]

##
# Prints the protocol in a "man" format. This is used for the
# "-h" output of fail2ban-client.
def printFormatted():
    INDENT = 4
    MARGIN = 41
    WIDTH = 34
    firstHeading = False
    for m in protocol:
        if m[0] == '' and firstHeading:
            print
        firstHeading = True
        first = True
        if len(m[0]) >= MARGIN:
            m[1] = ' ' * WIDTH + m[1]
        for n in textwrap.wrap(m[1], WIDTH, drop_whitespace=False):
            if first:
                line = ' ' * INDENT + m[0] + ' ' * (MARGIN - len(m[0])) + n.strip()
                first = False
            else:
                line = ' ' * (INDENT + MARGIN) + n.strip()
            print line

##
# Prints the protocol in a "mediawiki" format.

def printWiki():
    firstHeading = False
    for m in protocol:
        if m[0] == '':
            if firstHeading:
                print "|}"
            __printWikiHeader(m[1], m[2])
            firstHeading = True
        else:
            print "|-"
            print "| <span style=\"white-space:nowrap;\"><tt>" + m[0] + "</tt></span> || || " + m[1]
    print "|}"

def __printWikiHeader(section, desc):
    print
    print "=== " + section + " ==="
    print
    print desc
    print
    print "{|"
    print "| '''Command''' || || '''Description'''"
gpl-2.0
-5,112,870,207,491,845,000
53.041916
224
0.668033
false
3.116367
false
false
false
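The two-column "man" layout produced by printFormatted() above is easy to exercise in isolation. A standalone Python 3 sketch of the same wrapping logic (the function name and sample row are invented here, not part of fail2ban):

import textwrap

INDENT, MARGIN, WIDTH = 4, 41, 34

def format_row(cmd, desc):
    # Wrap the description and align it MARGIN columns after the indent,
    # mirroring the loop in printFormatted() above.
    lines = []
    first = True
    for chunk in textwrap.wrap(desc, WIDTH, drop_whitespace=False):
        if first:
            lines.append(' ' * INDENT + cmd + ' ' * (MARGIN - len(cmd)) + chunk.strip())
            first = False
        else:
            lines.append(' ' * (INDENT + MARGIN) + chunk.strip())
    return '\n'.join(lines)

print(format_row("get <JAIL> bantime", "gets the time a host is banned for <JAIL>"))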
aravindalwan/unyque
unyque/rdimension.py
1
3726
'''Representation of a random variable used in stochastic collocation'''

__copyright__ = 'Copyright (C) 2011 Aravind Alwan'
__license__ = '''
This file is part of UnyQuE.

UnyQuE is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

UnyQuE is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

For a copy of the GNU General Public License, please see
<http://www.gnu.org/licenses/>.
'''


class RandomDimension(object):
    '''Random dimension object that encapsulates the operations along one of
    the dimensions in random space, which corresponds to one of the random
    variables
    '''

    kmax = 0
    nodes = None

    def __init__(self, bound):
        self._bound = bound

    @classmethod
    def set_maximum_interp_level(cls, value):
        cls.kmax = value
        cls._init_nodes()

    @classmethod
    def _init_nodes(cls):
        '''Initialize nodes in a hierarchical fashion as a list of sublists,
        where each sublist contains the nodes added at the corresponding level
        '''
        cls.nodes = []
        if cls.kmax > 0:
            cls.nodes.append([0.5])
        if cls.kmax > 1:
            cls.nodes.append([0.0, 1.0])
        if cls.kmax > 2:
            for k in xrange(3, cls.kmax+1):
                cls.nodes.append([
                    (1.0 + 2.0*j)/(2**(k-1)) for j in xrange(2**(k-2))])

    def get_node(self, level, idx, normalized = False):
        '''Return the scaled coordinates of a node at the given level and index
        '''
        if normalized:
            return self.nodes[level-1][idx]
        else:
            lo = self._bound[0]
            hi = self._bound[1]
            return lo + (hi-lo)*self.nodes[level-1][idx]

    @classmethod
    def _interpolate(cls, pt1, x2):
        '''Evaluate basis function centered at pt1, at x2. pt1 has to be a
        tuple of the form (level, index) that specifies the interpolation
        level and the index of the node at that level. x2 is any float value
        between 0 and 1, specifying the location where the basis function is
        to be evaluated.
        '''
        level1, idx1 = pt1
        x1 = cls.nodes[level1-1][idx1]
        if level1 == 1:
            return 1.0
        else:
            m = 2**(level1-1) + 1 # Number of nodes at this level
            return (abs(x1-x2) < 1./(m-1)) * (1. - (m-1)*abs(x1-x2))

    def interpolate(self, pt1, x):
        '''Evaluate basis function centered at pt1, at the location x. This
        method scales x to be in [0,1] and calls _interpolate to get the
        actual interpolated value
        '''
        lo = self._bound[0]
        hi = self._bound[1]
        if lo <= x <= hi:
            return self._interpolate(pt1, float(x-lo)/float(hi-lo))
        else:
            return 0.

    def get_basis_function(self, pt):
        '''Return bounds of the piece-wise linear basis function centered at
        pt.
        '''
        lo = self._bound[0]
        hi = self._bound[1]
        level, idx = pt
        if level == 1:
            return (lo, hi, pt)
        elif level == 2:
            lo = (lo + hi)/2 if idx == 1 else lo
            hi = (lo + hi)/2 if idx == 0 else hi
            return (lo, hi, pt)
        else:
            m = 2**(level-1) + 1 # Number of nodes at this level
            x = lo + (hi-lo)*self.nodes[level-1][idx]
            return (x-(hi-lo)/(m-1), x+(hi-lo)/(m-1), pt)
gpl-3.0
-7,993,344,271,732,080,000
31.973451
80
0.580247
false
3.763636
false
false
false
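A short usage sketch for RandomDimension above (assuming the unyque package is importable from this repo layout; the module itself is Python 2, note the xrange calls, and the bound values here are made up):

from unyque.rdimension import RandomDimension

RandomDimension.set_maximum_interp_level(4)   # builds the hierarchical node lists
rdim = RandomDimension((2.0, 6.0))            # random variable supported on [2, 6]

# Node 0 at level 3 sits at normalized coordinate 1/4, scaled to 3.0
print(rdim.get_node(3, 0))                    # 3.0
print(rdim.get_node(3, 0, normalized=True))   # 0.25

# Hat function centered at (level 3, index 0), evaluated at x = 3.5
print(rdim.interpolate((3, 0), 3.5))          # 0.5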
endlessm/chromium-browser
native_client/pnacl/driver/pnacl-readelf.py
2
3751
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import print_function

from driver_env import env
from driver_log import Log
import driver_tools
import filetype

EXTRA_ENV = {
  'INPUTS': '',
  'FLAGS': '',
}

PATTERNS = [
  ( '(-.*)', "env.append('FLAGS', $0)"),
  ( '(.*)', "env.append('INPUTS', pathtools.normalize($0))"),
]

def main(argv):
  env.update(EXTRA_ENV)
  driver_tools.ParseArgs(argv, PATTERNS)
  inputs = env.get('INPUTS')

  if len(inputs) == 0:
    Log.Fatal("No input files given")

  for infile in inputs:
    driver_tools.CheckPathLength(infile)
    env.push()
    env.set('input', infile)
    if filetype.IsLLVMBitcode(infile):
      # Hack to support newlib build.
      # Newlib determines whether the toolchain supports .init_array, etc., by
      # compiling a small test and looking for a specific section tidbit using
      # "readelf -S". Since pnacl compiles to bitcode, readelf isn't available.
      # (there is a line: "if ${READELF} -S conftest | grep -e INIT_ARRAY"
      # in newlib's configure file).
      # TODO(sehr): we may want to implement a whole readelf on bitcode.
      flags = env.get('FLAGS')
      if len(flags) == 1 and flags[0] == '-S':
        print('INIT_ARRAY')
        return 0
      Log.Fatal('Cannot handle pnacl-readelf %s' % str(argv))
      return 1
    driver_tools.Run('"${READELF}" ${FLAGS} ${input}')
    env.pop()

  # only reached in case of no errors
  return 0

def get_help(unused_argv):
  return """
Usage: %s <option(s)> elf-file(s)
 Display information about the contents of ELF format files
 Options are:
  -a --all               Equivalent to: -h -l -S -s -r -d -V -A -I
  -h --file-header       Display the ELF file header
  -l --program-headers   Display the program headers
     --segments          An alias for --program-headers
  -S --section-headers   Display the sections' header
     --sections          An alias for --section-headers
  -g --section-groups    Display the section groups
  -t --section-details   Display the section details
  -e --headers           Equivalent to: -h -l -S
  -s --syms              Display the symbol table
     --symbols           An alias for --syms
  -n --notes             Display the core notes (if present)
  -r --relocs            Display the relocations (if present)
  -u --unwind            Display the unwind info (if present)
  -d --dynamic           Display the dynamic section (if present)
  -V --version-info      Display the version sections (if present)
  -A --arch-specific     Display architecture specific information (if any).
  -c --archive-index     Display the symbol/file index in an archive
  -D --use-dynamic       Use the dynamic section info when displaying symbols
  -x --hex-dump=<number|name>
                         Dump the contents of section <number|name> as bytes
  -p --string-dump=<number|name>
                         Dump the contents of section <number|name> as strings
  -R --relocated-dump=<number|name>
                         Dump the contents of section <number|name> as relocated bytes
  -w[lLiaprmfFsoR] or
  --debug-dump[=rawline,=decodedline,=info,=abbrev,=pubnames,=aranges,=macro,=frames,=str,=loc,=Ranges]
                         Display the contents of DWARF2 debug sections
  -I --histogram         Display histogram of bucket list lengths
  -W --wide              Allow output width to exceed 80 characters
  @<file>                Read options from <file>
  -H --help              Display this information
  -v --version           Display the version number of readelf
""" % env.getone('SCRIPT_NAME')
bsd-3-clause
4,530,193,218,533,686,300
39.333333
103
0.628099
false
3.710188
false
false
false
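The PATTERNS table above is a tiny regex-dispatch argument parser: the first regex that matches an argument decides where it goes. A self-contained sketch of the same idea (a simplification with invented names, not the actual driver_tools.ParseArgs implementation):

import re

FLAGS, INPUTS = [], []

PATTERNS = [
    (r'(-.*)', lambda m: FLAGS.append(m.group(1))),
    (r'(.*)',  lambda m: INPUTS.append(m.group(1))),
]

def parse_args(argv):
    # First pattern whose regex fully matches the argument wins.
    for arg in argv:
        for regex, action in PATTERNS:
            m = re.match(regex + r'$', arg)
            if m:
                action(m)
                break

parse_args(['-S', 'conftest.o'])
print(FLAGS, INPUTS)   # ['-S'] ['conftest.o']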
kreatorkodi/repository.torrentbr
script.module.urlresolver/lib/urlresolver/plugins/lib/recaptcha_v2.py
1
7299
# -*- coding: utf-8 -*- """ urlresolver XBMC Addon Copyright (C) 2016 tknorris Derived from Shani's LPro Code (https://github.com/Shani-08/ShaniXBMCWork2/blob/master/plugin.video.live.streamspro/unCaptcha.py) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. reusable captcha methods """ import re import os import xbmcgui from urlresolver import common class cInputWindow(xbmcgui.WindowDialog): def __init__(self, *args, **kwargs): bg_image = os.path.join(common.addon_path, 'resources', 'images', 'DialogBack2.png') check_image = os.path.join(common.addon_path, 'resources', 'images', 'checked.png') button_fo = os.path.join(common.kodi.get_path(), 'resources', 'skins', 'Default', 'media', 'button-fo.png') button_nofo = os.path.join(common.kodi.get_path(), 'resources', 'skins', 'Default', 'media', 'button-nofo.png') self.cancelled = False self.chk = [0] * 9 self.chkbutton = [0] * 9 self.chkstate = [False] * 9 imgX, imgY, imgw, imgh = 436, 210, 408, 300 ph, pw = imgh / 3, imgw / 3 x_gap = 70 y_gap = 70 button_gap = 40 button_h = 40 button_y = imgY + imgh + button_gap middle = imgX + (imgw / 2) win_x = imgX - x_gap win_y = imgY - y_gap win_h = imgh + 2 * y_gap + button_h + button_gap win_w = imgw + 2 * x_gap ctrlBackgound = xbmcgui.ControlImage(win_x, win_y, win_w, win_h, bg_image) self.addControl(ctrlBackgound) self.msg = '[COLOR red]%s[/COLOR]' % (kwargs.get('msg')) self.strActionInfo = xbmcgui.ControlLabel(imgX, imgY - 30, imgw, 20, self.msg, 'font13') self.addControl(self.strActionInfo) img = xbmcgui.ControlImage(imgX, imgY, imgw, imgh, kwargs.get('captcha')) self.addControl(img) self.iteration = kwargs.get('iteration') self.strActionInfo = xbmcgui.ControlLabel(imgX, imgY + imgh, imgw, 20, common.i18n('captcha_round') % (str(self.iteration)), 'font40') self.addControl(self.strActionInfo) self.cancelbutton = xbmcgui.ControlButton(middle - 110, button_y, 100, button_h, common.i18n('cancel'), focusTexture=button_fo, noFocusTexture=button_nofo, alignment=2) self.okbutton = xbmcgui.ControlButton(middle + 10, button_y, 100, button_h, common.i18n('ok'), focusTexture=button_fo, noFocusTexture=button_nofo, alignment=2) self.addControl(self.okbutton) self.addControl(self.cancelbutton) for i in xrange(9): row = i / 3 col = i % 3 x_pos = imgX + (pw * col) y_pos = imgY + (ph * row) self.chk[i] = xbmcgui.ControlImage(x_pos, y_pos, pw, ph, check_image) self.addControl(self.chk[i]) self.chk[i].setVisible(False) self.chkbutton[i] = xbmcgui.ControlButton(x_pos, y_pos, pw, ph, str(i + 1), font='font1', focusTexture=button_fo, noFocusTexture=button_nofo) self.addControl(self.chkbutton[i]) for i in xrange(9): row_start = (i / 3) * 3 right = row_start + (i + 1) % 3 left = row_start + (i - 1) % 3 up = (i - 3) % 9 down = (i + 3) % 9 self.chkbutton[i].controlRight(self.chkbutton[right]) self.chkbutton[i].controlLeft(self.chkbutton[left]) if i <= 2: self.chkbutton[i].controlUp(self.okbutton) else: self.chkbutton[i].controlUp(self.chkbutton[up]) if i >= 6: 
self.chkbutton[i].controlDown(self.okbutton) else: self.chkbutton[i].controlDown(self.chkbutton[down]) self.okbutton.controlLeft(self.cancelbutton) self.okbutton.controlRight(self.cancelbutton) self.cancelbutton.controlLeft(self.okbutton) self.cancelbutton.controlRight(self.okbutton) self.okbutton.controlDown(self.chkbutton[2]) self.okbutton.controlUp(self.chkbutton[8]) self.cancelbutton.controlDown(self.chkbutton[0]) self.cancelbutton.controlUp(self.chkbutton[6]) self.setFocus(self.okbutton) def get(self): self.doModal() self.close() if not self.cancelled: return [i for i in xrange(9) if self.chkstate[i]] def onControl(self, control): if control == self.okbutton and any(self.chkstate): self.close() elif control == self.cancelbutton: self.cancelled = True self.close() else: label = control.getLabel() if label.isnumeric(): index = int(label) - 1 self.chkstate[index] = not self.chkstate[index] self.chk[index].setVisible(self.chkstate[index]) def onAction(self, action): if action == 10: self.cancelled = True self.close() class UnCaptchaReCaptcha: net = common.Net() def processCaptcha(self, key, lang): headers = {'Referer': 'https://www.google.com/recaptcha/api2/demo', 'Accept-Language': lang} html = self.net.http_GET('http://www.google.com/recaptcha/api/fallback?k=%s' % (key), headers=headers).content token = '' iteration = 0 while True: payload = re.findall('"(/recaptcha/api2/payload[^"]+)', html) iteration += 1 message = re.findall('<label[^>]+class="fbc-imageselect-message-text"[^>]*>(.*?)</label>', html) if not message: message = re.findall('<div[^>]+class="fbc-imageselect-message-error">(.*?)</div>', html) if not message: token = re.findall('"this\.select\(\)">(.*?)</textarea>', html)[0] if token: common.log_utils.log_debug('Captcha Success: %s' % (token)) else: common.log_utils.log_debug('Captcha Failed: %s') break else: message = message[0] payload = payload[0] cval = re.findall('name="c"\s+value="([^"]+)', html)[0] captcha_imgurl = 'https://www.google.com%s' % (payload.replace('&amp;', '&')) message = re.sub('</?strong>', '', message) oSolver = cInputWindow(captcha=captcha_imgurl, msg=message, iteration=iteration) captcha_response = oSolver.get() if not captcha_response: break data = {'c': cval, 'response': captcha_response} html = self.net.http_POST("http://www.google.com/recaptcha/api/fallback?k=%s" % (key), form_data=data, headers=headers).content return token
gpl-2.0
-6,874,958,783,085,708,000
42.96988
176
0.590218
false
3.574437
false
false
false
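The focus-wiring loop in cInputWindow above computes each checkbox's grid neighbours with modular arithmetic so that arrow-key navigation wraps within the 3x3 captcha grid. The same index math, extracted as a standalone sketch:

def grid_neighbours(i, cols=3, rows=3):
    # For cell i in a rows x cols grid (row-major order), wrap left/right
    # within the row and up/down within the column, as the dialog does.
    row_start = (i // cols) * cols
    right = row_start + (i + 1) % cols
    left = row_start + (i - 1) % cols
    up = (i - cols) % (rows * cols)
    down = (i + cols) % (rows * cols)
    return left, right, up, down

print(grid_neighbours(4))  # (3, 5, 1, 7) -- the centre cell of the 3x3 grid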
mandiant/ioc_writer
ioc_writer/scripts/iocdump.py
1
2104
# iocdump.py
#
# Copyright 2016 FireEye
# Licensed under the Apache 2.0 license.  Developed for Mandiant by William
# Gibb.
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License.  You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.  See the License for the specific language governing
# permissions and limitations under the License.
#
# Dumps a textual representation of IOCs or directories of IOCs
#
# Stdlib
from __future__ import print_function
import argparse
import logging
import os
import sys
# Third Party code
# Custom Code
from ..managers import IOCManager

log = logging.getLogger(__name__)


def main(options):
    if not options.verbose:
        logging.disable(logging.DEBUG)
    iocm = IOCManager()
    for i in options.input:
        iocm.insert(i)
    for ioc_obj in iocm.iocs.values():
        if options.hide_params:
            ioc_obj.display_params = False
        print(ioc_obj)


def makeargpaser():
    parser = argparse.ArgumentParser(description="Display a textual representation of an IOC or directory of IOCs")
    parser.add_argument('input', type=str, nargs='+', help='Input files or folders')
    parser.add_argument('-n', '--no-params', dest='hide_params', default=False, action='store_true',
                        help='Do not display parameters attached to an IOC.')
    parser.add_argument('-v', '--verbose', dest='verbose', default=False, action='store_true',
                        help='Enable verbose output')
    return parser


def _main():
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s [%(filename)s:%(funcName)s]')
    p = makeargpaser()
    opts = p.parse_args()
    main(opts)


if __name__ == '__main__':
    _main()
apache-2.0
-8,542,234,613,581,370,000
32.951613
120
0.678232
false
3.710758
false
false
false
tayebzaidi/snova_analysis
Miscellaneous/typ1a_features.py
1
2252
import matplotlib.pyplot as plt
import scipy.interpolate as scinterp
import numpy as np
import peakfinding
import peak_original
import smoothing
import plotter
import random
import readin
import sys
import os

if __name__ == '__main__':
    Mbdata = []
    delM15data = []
    path = "/Users/zaidi/Documents/REU/restframe/"
    filenames = os.listdir(path)
    random.shuffle(filenames)
    for filename in filenames:
        current_file = os.path.join(path, filename)
        data = readin.readin_SNrest(filename)
        indB = np.where((data.band == 'B'))
        Bdata = data[indB]
        Bdata = np.sort(Bdata)
        if len(Bdata.phase) > 3:
            spl = scinterp.UnivariateSpline(Bdata.phase, Bdata.mag)
            spl.set_smoothing_factor(2./len(Bdata.phase))
            phase_new = np.arange(Bdata.phase[0], Bdata.phase[-1], 1)
            mag_new = spl(phase_new)
            maxp, minp = peak_original.peakdet(mag_new, 0.5, phase_new)
            if len(minp) > 0 and minp[0][0] < 5 and minp[0][0] > -5:
                Mb = minp[0][1]
                delM15 = minp[0][1] - spl(minp[0][0]+15)
                Mbdata.append(Mb)
                delM15data.append(delM15)
                if delM15 > 0 or delM15 < -5:
                    print minp
                    print filename
                    print spl(minp[0][0] + 15)
                    fig = plt.figure(1)
                    ax = fig.add_subplot(1,1,1)
                    ax.plot(phase_new, mag_new)
                    ax.plot(Bdata.phase, Bdata.mag)
                    if len(minp) > 0:
                        ax.scatter(minp[:,0],minp[:,1])
                    plt.show(fig)
            '''
            maxp, minp = peakfinding.peakdetect(mag_new, phase_new, 200, 1.5)
            if len(minp) > 0:
                print minp
                print filename
                fig = plt.figure(1)
                ax = fig.add_subplot(1,1,1)
                #ax.scatter(minp[:,0], minp[:,1],'bo')
                #ax.plot(Bdata.phase, Bdata.mag)
                #plt.show(fig)
            '''
    #interp = smoothing.Interpolate1D(data.phase
    print Mbdata
    print delM15data
    fig = plt.figure(2)
    ax = fig.add_subplot(1,1,1)
    ax.scatter(Mbdata, delM15data)
    plt.show(fig)
gpl-3.0
-7,822,784,365,329,970,000
33.121212
73
0.521314
false
3.292398
false
false
false
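The delta-m15 logic in the script above (spline-smooth the B-band light curve, find the minimum magnitude near phase 0, compare with 15 days later) can be exercised on synthetic data. A hedged sketch, using a parabola in place of real photometry:

import numpy as np
import scipy.interpolate as scinterp

# Synthetic B-band magnitudes: minimum (peak brightness) near phase 1.
phase = np.linspace(-10, 30, 41)
mag = 0.01 * (phase - 1.0) ** 2 + 15.0

spl = scinterp.UnivariateSpline(phase, mag)
spl.set_smoothing_factor(2.0 / len(phase))

grid = np.arange(phase[0], phase[-1], 1.0)
smoothed = spl(grid)

i_min = smoothed.argmin()
t_peak, m_peak = grid[i_min], smoothed[i_min]
# Same sign convention as the script: peak magnitude minus the value 15 days
# later, so a normally declining light curve gives a negative number.
delM15 = m_peak - float(spl(t_peak + 15.0))
print(t_peak, m_peak, delM15)   # roughly 1.0, 15.0, -2.25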
DREAM-ODA-OS/tools
metadata/dimap2eop.py
1
3474
#!/usr/bin/env python
#------------------------------------------------------------------------------
#
#   Extract O&M-EOP metadata document.
#
# Project: EO Metadata Handling
# Authors: Martin Paces <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------

import traceback
import sys
import os.path
from lxml import etree as et
from profiles.interfaces import ProfileDimap
from profiles.spot6_ortho import ProfileSpot6Ortho
from profiles.spot_view import ProfileSpotView
from profiles.spot_scene_1a import ProfileSpotScene1a
from profiles.pleiades1_ortho import ProfilePleiades1Ortho

XML_OPTS = {'pretty_print': True, 'xml_declaration': True, 'encoding': 'utf-8'}

PROFILES = (
    ProfileSpotScene1a,
    ProfileSpotView,
    ProfileSpot6Ortho,
    ProfilePleiades1Ortho,
)

def main(fname):
    xml = et.parse(fname, et.XMLParser(remove_blank_text=True))
    profile = get_profile(xml)
    print et.tostring(profile.extract_eop_metadata(xml, file_name=fname), **XML_OPTS)

def get_profile(xml):
    for item in PROFILES:
        if item.check_profile(xml):
            return item
    prf = ProfileDimap.get_dimap_profile(xml)
    if prf is None:
        raise ValueError("Not a DIMAP XML document!")
    profile, version = prf
    raise ValueError("Unsupported DIMAP version %s profile '%s'!"%(version, profile))

#------------------------------------------------------------------------------

if __name__ == "__main__":
    EXENAME = os.path.basename(sys.argv[0])
    DEBUG = False

    try:
        XML = sys.argv[1]
        for arg in sys.argv[2:]:
            if arg == "DEBUG":
                DEBUG = True # dump debugging output
    except IndexError:
        print >>sys.stderr, "ERROR: %s: Not enough input arguments!"%EXENAME
        print >>sys.stderr
        print >>sys.stderr, "Extract EOP XML metadata from DIMAP XML metadata."
        print >>sys.stderr
        print >>sys.stderr, "USAGE: %s <input-xml> [DEBUG]"%EXENAME
        sys.exit(1)

    if DEBUG:
        print >>sys.stderr, "input-xml: ", XML

    try:
        main(XML)
    except Exception as exc:
        print >>sys.stderr, "ERROR: %s: %s "%(EXENAME, exc)
        if DEBUG:
            print >>sys.stderr, traceback.format_exc()
        sys.exit(1)
mit
-2,620,814,347,079,095,300
36.354839
85
0.632988
false
4.00692
false
false
false
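get_profile() above is a simple chain-of-responsibility over profile classes: each candidate gets to inspect the document and claim it. A minimal standalone sketch of that dispatch pattern (toy classes, not the real DIMAP profiles):

class ProfileA:
    @staticmethod
    def check_profile(doc):
        return doc.get('kind') == 'A'

class ProfileB:
    @staticmethod
    def check_profile(doc):
        return doc.get('kind') == 'B'

PROFILES = (ProfileA, ProfileB)

def get_profile(doc):
    # Return the first profile class that recognises the document.
    for item in PROFILES:
        if item.check_profile(doc):
            return item
    raise ValueError("Unsupported document!")

print(get_profile({'kind': 'B'}))   # <class '__main__.ProfileB'>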
billbrod/spatial-frequency-preferences
sfp/image_computable.py
1
6815
#!/usr/bin/python
"""code to help run the image-computable version of the model

we're using this primarily to check the effect of vignetting, but this does make our project
image-computable (though it's a linear model and so will fail in some trivial cases)

"""
import itertools
import argparse
import numpy as np
import pandas as pd
import pyrtools as pt
from scipy import interpolate


def upsample(signal, target_shape):
    """upsample a signal to target_shape

    this uses scipy's interpolate.interp2d (and so will end up with a smoothed signal)
    """
    x = np.linspace(-(signal.shape[0]-1)/2, (signal.shape[0]-1)/2, num=signal.shape[0])
    y = np.linspace(-(signal.shape[1]-1)/2, (signal.shape[1]-1)/2, num=signal.shape[1])
    f = interpolate.interp2d(x, y, signal)
    x = np.linspace(-(signal.shape[0]-1)/2, (signal.shape[0]-1)/2, num=target_shape[0])
    y = np.linspace(-(signal.shape[1]-1)/2, (signal.shape[1]-1)/2, num=target_shape[1])
    return f(x,y)


def calc_energy_and_filters(stim, stim_df, n_orientations=6, save_path_template=None):
    """this creates the energy and filter arrays

    We assume the stimuli have natural groups, here indexed by the "class_idx" column in stim_df,
    and all stimuli within these groups should be considered the same stimuli, that is, we sum the
    energy across all of them. for the spatial frequency project, these are the different phases
    of the gratings (because of how we structure our experiment, we estimate a response amplitude
    to all phases together).

    Note that this will take a while to run (~10 or 20 minutes). Since it only needs to run once
    per experiment, didn't bother to make it efficient at all. The outputs will also be very
    large, totalling about 11GB

    Parameters
    ----------
    stim : np.ndarray
        The stimuli to produce energy for. Should have shape (n, *img_size), where n is the number
        of total stimuli.
    stim_df : pd.DataFrame
        The DataFrame describing the stimuli. Must contain the column "class_idx", which indexes
        the different stimulus classes (see above)
    n_orientations : int
        the number of orientations in the steerable pyramid. 6 is the number used to model fMRI
        voxels in Roth, Z. N., Heeger, D., & Merriam, E. (2018). Stimulus vignetting and
        orientation selectivity in human visual cortex. bioRxiv.
    save_path_template : str or None
        the template string for the save path we'll use for energy and filters. should end in
        .npy and contain one %s, which we'll replace with "energy" and "filters".

    Returns
    -------
    energy : np.ndarray
        energy has shape (stim_df.class_idx.nunique(), max_ht, n_orientations, *img_size) and
        contains the energy (square and absolute value the complex valued output of
        SteerablePyramidFreq; equivalently, square and sum the output of the quadrature pair of
        filters that make up the pyramid) for each image, at each scale and orientation. the
        energy has all been upsampled to the size of the initial image.
    filters : np.ndarray
        filters has shape (max_ht, n_orientations, *img_size) and is the fourier transform of the
        filters at each scale and orientation, zero-padded so they all have the same size. we
        only have one set of filters (instead of one per stimulus class) because the same pyramid
        was used for each of them; we ensure this by getting the filters for each stimulus class
        and checking that they're individually equal to the average across classes.

    """
    img_size = stim.shape[1:]
    # this computation comes from the SteerablePyramidFreq code
    max_ht = int(np.floor(np.log2(min(img_size))) - 2)
    energy = np.zeros((stim_df.class_idx.nunique(), max_ht, n_orientations, *img_size),
                      dtype=np.float32)
    filters = np.zeros_like(energy)
    for i, g in stim_df.groupby('class_idx'):
        idx = g.index
        filled_filters = False
        for j in idx:
            pyr = pt.pyramids.SteerablePyramidFreq(stim[j], order=n_orientations-1, is_complex=True)
            for k, l in itertools.product(range(max_ht), range(n_orientations)):
                energy[int(i), k, l, :, :] += upsample(np.abs(pyr.pyr_coeffs[(k, l)])**2, img_size)
                # we only want to run this once per stimulus class
                if not filled_filters:
                    if k > 0:
                        lomask = pyr._lomasks[k-1]
                    else:
                        lomask = pyr._lo0mask
                    filt = pyr._anglemasks[k][l] * pyr._himasks[k] * lomask
                    pad_num = []
                    for m in range(2):
                        pad_num.append([(img_size[m] - filt.shape[m])//2, (img_size[m] - filt.shape[m])//2])
                        if filt.shape[m] + 2*pad_num[m][0] != img_size[m]:
                            pad_num[m][0] += img_size[m] - (filt.shape[m] + 2*pad_num[m][0])
                    filters[int(i), k, l, :, :] = np.pad(filt, pad_num, 'constant', constant_values=0)
            filled_filters = True
    filter_mean = np.mean(filters, 0)
    for i in range(filters.shape[0]):
        if not(np.allclose(filter_mean, filters[i,:,:,:,:])):
            raise Exception("Something has gone terribly wrong, the filters for stim class %d are different than the rest!" % i)
    filters = filter_mean
    if save_path_template is not None:
        np.save(save_path_template % "energy", energy)
        np.save(save_path_template % "filters", filters)
    return energy, filters


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=("Calculate and save the energy for each stimulus class, as well as the Fourier"
                     " transform of the filters of the steerable pyramid we use to get this. For "
                     "use with image-computable version of this model"),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("stimuli", help=("Path to the stimulus .npy file."))
    parser.add_argument("stimuli_description_df",
                        help=("Path to the stimulus description dataframe .csv file."))
    parser.add_argument("save_path_template",
                        help=("Path template (with .npy extension) where we'll save the results. "
                              "Should contain one %%s."))
    parser.add_argument('--n_orientations', '-n', default=6, type=int,
                        help=("The number of orientations in the steerable pyramid used here."))
    args = vars(parser.parse_args())
    stim = np.load(args.pop('stimuli'))
    stim_df = pd.read_csv(args.pop('stimuli_description_df'))
    calc_energy_and_filters(stim, stim_df, **args)
mit
7,969,133,745,800,178,000
51.423077
128
0.634776
false
3.784009
false
false
false
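A quick check of the upsample() helper above on a small array (assuming the sfp package is on the import path; note that scipy.interpolate.interp2d, which it relies on, is deprecated in recent SciPy releases, so this sketch assumes an older SciPy):

import numpy as np
from sfp.image_computable import upsample

small = np.arange(16.0).reshape(4, 4)
big = upsample(small, (8, 8))
print(big.shape)                  # (8, 8)
print(small.mean(), big.mean())   # the means should stay roughly equal; interp2d smooths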
ProjectQ-Framework/ProjectQ
projectq/meta/_loop.py
1
9774
# -*- coding: utf-8 -*-
#   Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
"""
Tools to implement loops.

Example:
    .. code-block:: python

        with Loop(eng, 4):
            H | qb
            Rz(M_PI/3.) | qb
"""
from copy import deepcopy

from projectq.cengines import BasicEngine
from projectq.ops import Allocate, Deallocate

from ._util import insert_engine, drop_engine_after


class QubitManagementError(Exception):
    """Exception raised when the lifetime of a qubit is problematic within a loop"""


class LoopTag:
    """
    Loop meta tag
    """

    def __init__(self, num):
        self.num = num
        self.id = LoopTag.loop_tag_id
        LoopTag.loop_tag_id += 1

    def __eq__(self, other):
        return isinstance(other, LoopTag) and self.id == other.id and self.num == other.num

    def __ne__(self, other):
        return not self.__eq__(other)

    loop_tag_id = 0


class LoopEngine(BasicEngine):
    """
    Stores all commands and, when done, executes them num times if no loop tag handler engine is available.

    If there is one, it adds a loop_tag to the commands and sends them on.
    """

    def __init__(self, num):
        """
        Initialize a LoopEngine.

        Args:
            num (int): Number of loop iterations.
        """
        BasicEngine.__init__(self)
        self._tag = LoopTag(num)
        self._cmd_list = []
        self._allocated_qubit_ids = set()
        self._deallocated_qubit_ids = set()
        # key: qubit id of a local qubit, i.e. a qubit which has been allocated
        #      and deallocated within the loop body.
        # value: list containing a reference to each weakref qubit with this
        #        qubit id either within control_qubits or qubits.
        self._refs_to_local_qb = dict()
        self._next_engines_support_loop_tag = False

    def run(self):
        """
        Apply the loop statements to all stored commands.

        Unrolls the loop if LoopTag is not supported by any of the following engines, i.e., if

        .. code-block:: python

            is_meta_tag_supported(next_engine, LoopTag) == False
        """
        error_message = (
            "\n Error. Qubits have been allocated in with "
            "Loop(eng, num) context,\n which have not "
            "explicitly been deallocated in the Loop context.\n"
            "Correct usage:\nwith Loop(eng, 5):\n"
            "    qubit = eng.allocate_qubit()\n"
            "    ...\n"
            "    del qubit[0]\n"
        )
        if not self._next_engines_support_loop_tag:  # pylint: disable=too-many-nested-blocks
            # Unroll the loop
            # Check that local qubits have been deallocated:
            if self._deallocated_qubit_ids != self._allocated_qubit_ids:
                raise QubitManagementError(error_message)

            if len(self._allocated_qubit_ids) == 0:
                # No local qubits, just send the circuit num times
                for i in range(self._tag.num):
                    self.send(deepcopy(self._cmd_list))
            else:
                # Ancilla qubits have been allocated in loop body
                # For each iteration, allocate and deallocate a new qubit and
                # replace the qubit id in all commands using it.
                for i in range(self._tag.num):
                    if i == 0:  # Don't change local qubit ids
                        self.send(deepcopy(self._cmd_list))
                    else:
                        # Change local qubit ids before sending them
                        for refs_loc_qubit in self._refs_to_local_qb.values():
                            new_qb_id = self.main_engine.get_new_qubit_id()
                            for qubit_ref in refs_loc_qubit:
                                qubit_ref.id = new_qb_id
                        self.send(deepcopy(self._cmd_list))
        else:
            # Next engines support loop tag so no unrolling needed only
            # check that all qubits have been deallocated which have been
            # allocated in the loop body
            if self._deallocated_qubit_ids != self._allocated_qubit_ids:
                raise QubitManagementError(error_message)

    def receive(self, command_list):  # pylint: disable=too-many-branches
        """
        Receive (and potentially temporarily store) all commands.

        Add LoopTag to all receiving commands and send to the next engine if a further engine is a
        LoopTag-handling engine. Otherwise store all commands (to later unroll them). Check that
        within the loop body, all allocated qubits have also been deallocated. If loop needs to be
        unrolled and ancilla qubits have been allocated within the loop body, then store a
        reference all these qubit ids (to change them when unrolling the loop)

        Args:
            command_list (list<Command>): List of commands to store and later unroll or, if there
                is a LoopTag-handling engine, add the LoopTag.
        """
        # pylint: disable=too-many-nested-blocks
        if self._next_engines_support_loop_tag or self.next_engine.is_meta_tag_supported(LoopTag):
            # Loop tag is supported, send everything with a LoopTag
            # Don't check is_meta_tag_supported anymore
            self._next_engines_support_loop_tag = True
            if self._tag.num == 0:
                return
            for cmd in command_list:
                if cmd.gate == Allocate:
                    self._allocated_qubit_ids.add(cmd.qubits[0][0].id)
                elif cmd.gate == Deallocate:
                    self._deallocated_qubit_ids.add(cmd.qubits[0][0].id)
                cmd.tags.append(self._tag)
                self.send([cmd])
        else:
            # LoopTag is not supported, save the full loop body
            self._cmd_list += command_list
            # Check for all local qubits allocated and deallocated in loop body
            for cmd in command_list:
                if cmd.gate == Allocate:
                    self._allocated_qubit_ids.add(cmd.qubits[0][0].id)
                    # Save reference to this local qubit
                    self._refs_to_local_qb[cmd.qubits[0][0].id] = [cmd.qubits[0][0]]
                elif cmd.gate == Deallocate:
                    self._deallocated_qubit_ids.add(cmd.qubits[0][0].id)
                    # Save reference to this local qubit
                    self._refs_to_local_qb[cmd.qubits[0][0].id].append(cmd.qubits[0][0])
                else:
                    # Add a reference to each place a local qubit id is
                    # used as within either control_qubit or qubits
                    for control_qubit in cmd.control_qubits:
                        if control_qubit.id in self._allocated_qubit_ids:
                            self._refs_to_local_qb[control_qubit.id].append(control_qubit)
                    for qureg in cmd.qubits:
                        for qubit in qureg:
                            if qubit.id in self._allocated_qubit_ids:
                                self._refs_to_local_qb[qubit.id].append(qubit)


class Loop:
    """
    Loop n times over an entire code block.

    Example:
        .. code-block:: python

            with Loop(eng, 4):
                # [quantum gates to be executed 4 times]

    Warning:
        If the code in the loop contains allocation of qubits, those qubits have to be deleted
        prior to exiting the 'with Loop()' context.

        This code is **NOT VALID**:

        .. code-block:: python

            with Loop(eng, 4):
                qb = eng.allocate_qubit()
                H | qb  # qb is still available!!!

        The **correct way** of handling qubit (de-)allocation is as follows:

        .. code-block:: python

            with Loop(eng, 4):
                qb = eng.allocate_qubit()
                ...
                del qb  # sends deallocate gate
    """

    def __init__(self, engine, num):
        """
        Enter a looped section.

        Args:
            engine: Engine handling the commands (usually MainEngine)
            num (int): Number of loop iterations

        Example:
            .. code-block:: python

                with Loop(eng, 4):
                    H | qb
                    Rz(M_PI/3.) | qb

        Raises:
            TypeError: If number of iterations (num) is not an integer
            ValueError: If number of iterations (num) is not >= 0
        """
        self.engine = engine
        if not isinstance(num, int):
            raise TypeError("Number of loop iterations must be an int.")
        if num < 0:
            raise ValueError("Number of loop iterations must be >=0.")
        self.num = num
        self._loop_eng = None

    def __enter__(self):
        if self.num != 1:
            self._loop_eng = LoopEngine(self.num)
            insert_engine(self.engine, self._loop_eng)

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if self.num != 1:
            # remove loop handler from engine list (i.e. skip it)
            self._loop_eng.run()
            self._loop_eng = None
            drop_engine_after(self.engine)
apache-2.0
-7,039,027,857,358,893,000
37.031128
115
0.562717
false
4.138019
false
false
false
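A runnable usage sketch for Loop above, following the docstring's own example (standard ProjectQ API; the rotation angle is arbitrary):

import math
from projectq import MainEngine
from projectq.meta import Loop
from projectq.ops import H, Rz, Measure

eng = MainEngine()
qb = eng.allocate_qubit()

with Loop(eng, 4):           # body is unrolled (or tagged) four times
    H | qb
    Rz(math.pi / 3.) | qb

Measure | qb
eng.flush()
print(int(qb))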
shellphish/puppeteer
examples/ructf_2014_pwn200/doit.py
1
2635
import puppeteer as p

import logging
try:
    import standard_logging # pylint: disable=W0611
except ImportError:
    pass
#logging.getLogger("puppeteer.connection").setLevel(logging.DEBUG)
#logging.getLogger("puppeteer.manipulator").setLevel(logging.DEBUG)
#logging.getLogger("puppeteer.vuln_decorators").setLevel(logging.DEBUG)
#logging.getLogger("puppeteer.formatter").setLevel(logging.DEBUG)

class Aggravator(p.Manipulator):
    def __init__(self, host, port):
        p.Manipulator.__init__(self, p.x86)

        # some initial info from IDA
        # TODO: maybe use IDALink to get this automatically?
        self.permanent_info['main_start'] = 0x0804A9B3
        self.permanent_info['main_end'] = 0x0804A9D1
        self.permanent_info['main_stackframe_size'] = 0x24

        self.c = self.set_connection(p.Connection(host=host, port=port).connect())
        self.c.read_until("> ")

    @p.printf(byte_offset=244, max_length=31, forbidden={'\x00', '\x0a'})
    def stats_printf(self, fmt):
        self.c.send("stats " + fmt + "\n")
        self.c.read_until("kill top:\n")
        try:
            result = self.c.read_until("\n"*5, timeout=3)[:-5]
            self.c.read_until("> ", timeout=3)
        except EOFError:
            print "Program didn't finish the print"
            return ""
        #print "GOT:",repr(result)
        return result

def main():
    # Create the Aggravator!
    a = Aggravator(sys.argv[1], int(sys.argv[2]))

    # And now, we can do stuff!

    # We can read the stack!
    #print "STACKZ",a.dump_stack(1000).encode('hex')

    print "Testing memory read."
    assert a.do_memory_read(0x0804A9C3, 16) == '\x00\x8B\x44\x24\x1C\x89\x04\x24\xE8\x20\xFE\xFF\xFF\xC9\xC3\x66'

    ## We can figure out where __libc_start_main is!
    lcsm = a.main_return_address(start_offset=390)
    print "main() will return to (presumably, this is in libc):",hex(lcsm)

    # interactive memory explorer!
    a.memory_explorer(lcsm)

    # now dump it!
    libc = a.dump_elf(lcsm) #- 0x1000 # the minus is because on my test machine, the address has a \x00 in it
    print "dumped %d pages from libc" % len(libc)
    #a.dump_libc("aggregator_libc", start_offset=390)

    # We can overwrite memory with ease!
    a.do_memory_write(0x0804C344, "OK")
    assert a.do_memory_read(0x0804C344, 2) == "OK"

    a.c.send("quit\n")

    #libc_page_start = lcsm & 0xfffff000
    #libc_page_content = a.do_memory_read(libc_page_start, 0x1000)
    #open("dumped", "w").write(libc_page_content)
    #print "read out %d bytes from libc!" % len(libc_page_content)

if __name__ == '__main__':
    import sys
    main()
gpl-3.0
-910,140,454,615,389,800
33.671053
113
0.639848
false
2.987528
false
false
false
jpaasen/cos
framework/Window.py
1
5925
#from TypeExtensions import Ndarray

from gfuncs import processArgs
from mynumpy import pi, dot, cos, sin, exp #ones, complex, sin, linspace, exp, pi, dot, angle
import mynumpy as np

#from pylab import plot, subplot, xlabel, ylabel, grid, show, figure, ion, ioff

class Window(np.Ndarray):

    def __new__(self, type='rect', **kwargs):
        from gfuncs import error

        # Dispatch to the concrete window classes defined below.
        if type == 'rect':
            return Rect(**kwargs)
        elif type == 'kaiser':
            return Kaiser(**kwargs)
        else:
            error(self, 'The window type %s is not recognised'%type)

#        Configurable.__init__(self)
#        Operable.__init__(self)


class Rect(np.Ndarray):

    def __new__(self, M=10, phi=0, normalised=True):

        # Create the window
        if phi == 0:
            win = np.ones( (M,), dtype=None ) / M
        else:
            wc = np.ones( M, dtype=complex )   # Window coefficients
            m = np.arange(0,M)                 # Create M indices from 0 to 1
            a = exp(-1j*2*pi*m*phi)            # Steering vector
            ws = dot(wc, a)                    # Normalisation factor
            win = a * wc / ws                  # Steered and normalised window

        w = np.Ndarray.__new__(self, win)
#                              axes=('M',),
#                              desc = 'Rectangular (phi=%d)'%phi)
#                          desc='Rectangular (phi=%d)'%phi,
#                          shape_desc=('M','1'))
        return w


class Trig(np.Ndarray):

    def __new__(self, M=10, a=0.54, phi=0, normalised=True):

        # Create the window
        if phi == 0:
            wc = a + (1-a)*np.cos(2*pi*np.linspace(-0.5,0.5,M))
            win = wc / sum(wc)                 # Normalised window
        else:
            n = np.linspace(-0.5,0.5,M)
            wc = a + (1-a)*np.cos(2*pi*n)      # Window coefficients
            m = np.arange(0,M)                 # Create M indices from 0 to 1
            aa = exp(-1j*2*pi*m*phi)           # Steering vector
            ws = dot(wc, aa)                   # Normalisation factor
            win = aa * wc / ws                 # Steered and normalised window

        w = np.Ndarray.__new__(self, win)
#                              axes=('M',),
#                              desc = 'Rectangular (phi=%d)'%phi)
#                          desc='Rectangular (phi=%d)'%phi,
#                          shape_desc=('M','1'))
        return w


class Kaiser(np.Ndarray):
    '''kaiser( M=10, beta=1, phi=0, normalised=True )

    The Kaiser window is a taper formed by using a Bessel function.

    Parameters
    ----------
    M (int)              : Number of points in the output window.
    beta (float)         : Shape parameter for window.
    phi (float)          : Steering angle.
    normalised (boolean) : Use normalised window coefficients?
    '''

    def __new__(self, M=10, beta=1, phi=0, normalised=True, inverted=False):

        if not inverted:
            if phi == 0:
                wc = np.kaiser(M, beta)        # Window coefficients
                win = wc / sum(wc)             # Normalised window
            else:
                wc = np.kaiser(M, beta)        # Window coefficients
                m = np.arange(0,M)             # Create M indices from 0 to 1
                a = exp(-1j*2*pi*m*phi)        # Steering vector
                ws = dot(wc, a)                # Normalisation factor
                win = a * wc / ws              # Steered and normalised window
        else:
            if phi == 0:
                wc = 1 / np.kaiser(M, beta)    # Window coefficients
                win = wc / sum(wc)             # Normalised window
            else:
                wc = 1 / np.kaiser(M, beta)    # Window coefficients
                m = np.arange(0,M)             # Create M indices from 0 to 1
                a = exp(-1j*2*pi*m*phi)        # Steering vector
                ws = dot(wc,a)                 # Normalisation factor
                win = a * wc / ws              # Steered and normalised window

        w = np.Ndarray.__new__(self, win)
#                              axes=('M',),
#                              desc = 'Kaiser (beta=%d, phi=%d)'%(beta,phi))
#                          shape_desc=('M','1'))
        return w

#    def plot(self, **kwargs):
#
#        # Set some default options
#        opts = {'magnitude':True, 'angle':False, 'grid':True, 'degrees':True}
#
#        # Add the user-specified options
#        for key,val in kwargs.iteritems():
#            if opts.has_key(key):
#                opts[key] = val
#            else:
#                opts[key] = val
#                print 'WW: Window.plot() - Supplied parameter '+key+' is unknown.'
#
#        ion()
#        if opts['magnitude'] and opts['angle']:
#            figure()
#            subplot(2,1,1)
#            plot( abs(self.w) )
#            xlabel( 'Channel #' )
#            ylabel( 'Magnitude' )
#            grid( opts['grid'] )
#
#            subplot(2,1,2)
#            plot( angle(self.w, deg=opts['degrees']) )
#            xlabel( 'Channel #' )
#            if opts['degrees']:
#                ylabel( 'Angle [degrees]' )
#            else:
#                ylabel( 'Angle [radians]' )
#            grid( opts['grid'] )
##            show()
#
#        elif opts['magnitude']:
#            figure()
#            plot( abs(self.w) )
#            xlabel( 'Channel #' )
#            ylabel( 'Magnitude' )
#            grid( opts['grid'] )
##            show()
#
#        else:
#            figure()
#            plot( angle(self.w, deg=opts['degrees']) )
#            xlabel( 'Channel #' )
#            if opts['degrees']:
#                ylabel( 'Angle [degrees]' )
#            else:
#                ylabel( 'Angle [radians]' )
#            grid( opts['grid'] )
##            show()
#        ioff()
mit
5,193,458,981,794,878,000
36.5
93
0.4427
false
3.58006
false
false
false
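The steering-and-normalisation step shared by Rect, Trig and Kaiser above reduces to a few numpy lines: the weights w = a*wc / (wc . a) sum to exactly one by construction, which is the unit-gain property the normalisation buys. A standalone sketch (plain numpy instead of the project's mynumpy wrapper; parameter values are arbitrary):

import numpy as np

M, phi, beta = 10, 0.1, 1.0
wc = np.kaiser(M, beta)                  # real taper coefficients
m = np.arange(M)
a = np.exp(-1j * 2 * np.pi * m * phi)    # steering vector
w = a * wc / np.dot(wc, a)               # steered, normalised weights, as in Kaiser above

print(np.round(w.sum(), 12))             # (1+0j): weights sum to one by construction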
masterdje/wibfi
conf.py
1
22684
# -*- coding: utf-8 -*- from __future__ import unicode_literals import time # Configuration, please edit # Data about this site BLOG_AUTHOR = "Dje" BLOG_TITLE = "Write it before forget it!" # This is the main URL for your site. It will be used # in a prominent link SITE_URL = "http://wibfi.virtua-peanuts.net/" # This is the URL where nikola's output will be deployed. # If not set, defaults to SITE_URL # BASE_URL = "http://getnikola.com/" BLOG_EMAIL = "[email protected]" BLOG_DESCRIPTION = "Write it before forget it !" # Nikola is multilingual! # # Currently supported languages are: # en English # bg Bulgarian # ca Catalan # zh_cn Chinese (Simplified) # hr Croatian # nl Dutch # fr French # el Greek [NOT gr!] # de German # it Italian # jp Japanese # fa Persian # pl Polish # pt_br Portuguese (Brasil) # ru Russian # es Spanish # tr_tr Turkish (Turkey) # # If you want to use Nikola with a non-supported language you have to provide # a module containing the necessary translations # (p.e. look at the modules at: ./nikola/data/themes/default/messages/fr.py). # If a specific post is not translated to a language, then the version # in the default language will be shown instead. #from nikola import filters #FILTERS = { # ".css": [filters.yui_compressor], #".js": [filters.yui_compressor], #} # What is the default language? DEFAULT_LANG = "fr" LOCALES = {'fr': 'fr_FR.utf8', 'en': 'en_US.utf8'} # What other languages do you have? # The format is {"translationcode" : "path/to/translation" } # the path will be used as a prefix for the generated pages location TRANSLATIONS = { DEFAULT_LANG: "", # Example for another language: "en": "./en", } TRANSLATIONS_PATTERN = "{path}.{ext}.{lang}" # Links for the sidebar / navigation bar. # You should provide a key-value pair for each used language. NAVIGATION_LINKS = { DEFAULT_LANG: ( ('/stories/cheatsheets.html', "Cheat-Sheets"), ('/stories/what-s-next.html', "What's next"), ('/archive.html', 'Archives'), ('/categories/index.html', 'Tags'), ('/rss.xml', 'RSS'), ), "en": ( ('/en/stories/cheatsheets.html', "Cheat-Sheets"), ('/en/stories/what-s-next.html', "What's next"), ('/en/archive.html', 'Archives'), ('/en/categories/index.html', 'Tags'), ('/en/rss.xml', 'RSS'), ), } # Below this point, everything is optional # POSTS and PAGES contains (wildcard, destination, template) tuples. # # The wildcard is used to generate a list of reSt source files # (whatever/thing.txt). # # That fragment could have an associated metadata file (whatever/thing.meta), # and opcionally translated files (example for spanish, with code "es"): # whatever/thing.txt.es and whatever/thing.meta.es # # From those files, a set of HTML fragment files will be generated: # cache/whatever/thing.html (and maybe cache/whatever/thing.html.es) # # These files are combinated with the template to produce rendered # pages, which will be placed at # output / TRANSLATIONS[lang] / destination / pagename.html # # where "pagename" is the "slug" specified in the metadata file. # # The difference between POSTS and PAGES is that POSTS are added # to feeds and are considered part of a blog, while PAGES are # just independent HTML pages. # POSTS = ( ("posts/*.txt", "posts", "post.tmpl"), ("posts/*.rst", "posts", "post.tmpl"), ) PAGES = ( ("stories/*.txt", "stories", "story.tmpl"), ("stories/*.rst", "stories", "story.tmpl"), ) # One or more folders containing files to be copied as-is into the output. # The format is a dictionary of "source" "relative destination". 
# Default is: FILES_FOLDERS = { 'test': '', 'test': 'posts/','test': 'stories/'} # Which means copy 'files' into 'output' # A mapping of languages to file-extensions that represent that language. # Feel free to add or delete extensions to any list, but don't add any new # compilers unless you write the interface for it yourself. # # 'rest' is reStructuredText # 'markdown' is MarkDown # 'html' assumes the file is html and just copies it COMPILERS = { "rest": ('.rst', '.txt'), "markdown": ('.md', '.mdown', '.markdown'), "textile": ('.textile',), "txt2tags": ('.t2t',), "bbcode": ('.bb',), "wiki": ('.wiki',), "ipynb": ('.ipynb',), "html": ('.html', '.htm'), } # Create by default posts in one file format? # Set to False for two-file posts, with separate metadata. ONE_FILE_POSTS = True # If this is set to True, then posts that are not translated to a language # LANG will not be visible at all in the pages in that language. # If set to False, the DEFAULT_LANG version will be displayed for # untranslated posts. # HIDE_UNTRANSLATED_POSTS = False # Paths for different autogenerated bits. These are combined with the # translation paths. # Final locations are: # output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags) # output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag) # output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag) TAG_PATH = "categories" # If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain # the posts themselves. If set to False, it will be just a list of links. # TAG_PAGES_ARE_INDEXES = True # Final location is output / TRANSLATION[lang] / INDEX_PATH / index-*.html # INDEX_PATH = "" # Create per-month archives instead of per-year CREATE_MONTHLY_ARCHIVE = True # Final locations for the archives are: # output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME # output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html # output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html # ARCHIVE_PATH = "" # ARCHIVE_FILENAME = "archive.html" # Final locations are: # output / TRANSLATION[lang] / RSS_PATH / rss.xml # RSS_PATH = "" # Number of posts in RSS feeds FEED_LENGTH = 10 # Slug the Tag URL easier for users to type, special characters are # often removed or replaced as well. # SLUG_TAG_PATH = True # A list of redirection tuples, [("foo/from.html", "/bar/to.html")]. # # A HTML file will be created in output/foo/from.html that redirects # to the "/bar/to.html" URL. notice that the "from" side MUST be a # relative URL. # # If you don't need any of these, just set to [] # REDIRECTIONS = [] # Commands to execute to deploy. Can be anything, for example, # you may use rsync: # And then do a backup, or ping pingomatic. # To do manual deployment, set it to [] DEPLOY_COMMANDS = ['lftp -e "mirror --delete-first -R output/ .;exit" [email protected]/wibfi', 'echo Save ...', 'cd .. ; tar cvjf _save-wibfi_.tgz wibfi/ ; lftp -e "put _save-wibfi_.tgz;exit" [email protected]/wibfi/backup; cd wibfi'] # Where the output site should be located # If you don't use an absolute path, it will be considered as relative # to the location of conf.py # OUTPUT_FOLDER = 'output' # where the "cache" of partial generated content should be located # default: 'cache' # CACHE_FOLDER = 'cache' # Filters to apply to the output. # A directory where the keys are either: a file extensions, or # a tuple of file extensions. # # And the value is a list of commands to be applied in order. 
# # Each command must be either: # # A string containing a '%s' which will # be replaced with a filename. The command *must* produce output # in place. # # Or: # # A python callable, which will be called with the filename as # argument. # # By default, there are no filters. # FILTERS = { # ".jpg": ["jpegoptim --strip-all -m75 -v %s"], # } # Create a gzipped copy of each generated file. Cheap server-side optimization. GZIP_FILES = True # File extensions that will be compressed GZIP_EXTENSIONS = ('.txt','.rst', '.htm', '.html', '.css', '.js', '.json') # ############################################################################# # Image Gallery Options # ############################################################################# # Galleries are folders in galleries/ # Final location of galleries will be output / GALLERY_PATH / gallery_name # GALLERY_PATH = "galleries" # THUMBNAIL_SIZE = 180 # MAX_IMAGE_SIZE = 1280 # USE_FILENAME_AS_TITLE = True # ############################################################################# # HTML fragments and diverse things that are used by the templates # ############################################################################# # Data about post-per-page indexes # INDEXES_TITLE = "" # If this is empty, the default is BLOG_TITLE # INDEXES_PAGES = "" # If this is empty, the default is 'old posts page %d' # translated # Name of the theme to use. THEME = "w2" # Color scheme to be used for code blocks. If your theme provides # "assets/css/code.css" this is ignored. # Can be any of autumn borland bw colorful default emacs friendly fruity manni # monokai murphy native pastie perldoc rrt tango trac vim vs CODE_COLOR_SCHEME = 'borland' # If you use 'site-reveal' theme you can select several subthemes # THEME_REVEAL_CONFIG_SUBTHEME = 'sky' # You can also use: beige/serif/simple/night/default # Again, if you use 'site-reveal' theme you can select several transitions # between the slides # THEME_REVEAL_CONFIG_TRANSITION = 'cube' # You can also use: page/concave/linear/none/default # date format used to display post dates. # (str used by datetime.datetime.strftime) # DATE_FORMAT = '%Y-%m-%d %H:%M' # FAVICONS contains (name, file, size) tuples. # Used for create favicon link like this: # <link rel="name" href="file" sizes="size"/> # For creating favicons, take a look at: # http://www.netmagazine.com/features/create-perfect-favicon FAVICONS = { ("icon", "/favicon.ico", "16x16"), ("icon", "/favicon.png", "64x64"), } # Show only teasers in the index pages? Defaults to False. INDEX_TEASERS = True # A HTML fragment with the Read more... link. # The following tags exist and are replaced for you: # {link} A link to the full post page. # {read_more} The string “Read more” in the current language. # {{ A literal { (U+007B LEFT CURLY BRACKET) # }} A literal } (U+007D RIGHT CURLY BRACKET) READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>' # A HTML fragment describing the license, for the sidebar. 
LICENSE = '<a rel="license" href="http://creativecommons.org/licenses/by-sa/3.0/fr/"><img title="TL;DR" alt="Licence Creative Commons" style="border-width:0" src="http://i.creativecommons.org/l/by-sa/3.0/fr/88x31.png" /></a>' # I recommend using the Creative Commons' wizard: # http://creativecommons.org/choose/ # LICENSE = """ # <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/"> # <img alt="Creative Commons License BY-NC-SA" # style="border-width:0; margin-bottom:12px;" # src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>""" # A small copyright notice for the page footer (in HTML). # Default is '' XITI = """<a href="http://www.xiti.com/xiti.asp?s=538203" title="WebAnalytics" target="_top"> <script type="text/javascript"> <!-- Xt_param = 's=538203&p=index'; try {Xt_r = top.document.referrer;} catch(e) {Xt_r = document.referrer; } Xt_h = new Date(); Xt_i = '<img width="80" height="15" border="0" alt="" '; Xt_i += 'src="http://logv4.xiti.com/g.xiti?'+Xt_param; Xt_i += '&hl='+Xt_h.getHours()+'x'+Xt_h.getMinutes()+'x'+Xt_h.getSeconds(); if(parseFloat(navigator.appVersion)>=4) {Xt_s=screen;Xt_i+='&r='+Xt_s.width+'x'+Xt_s.height+'x'+Xt_s.pixelDepth+'x'+Xt_s.colorDepth;} document.write(Xt_i+'&ref='+Xt_r.replace(/[<>"]/g, '').replace(/&/g, '$')+'" title="Internet Audience">'); //--> </script> <noscript> <img width="80" height="15" src="http://logv4.xiti.com/g.xiti?s=538203&p=index" alt="WebAnalytics" /> </noscript></a>""" CONTENT_FOOTER = '2013 - {date} <a href="mailto:{email}">{author}</a> mais c\'est <a href="http://getnikola.com">Nikola</a> qui propulse. {license} - {xiti}' CONTENT_FOOTER = CONTENT_FOOTER.format(email=BLOG_EMAIL, author=BLOG_AUTHOR, date=time.gmtime().tm_year, license=LICENSE, xiti=XITI) # To use comments, you can choose between different third party comment # systems, one of "disqus", "livefyre", "intensedebate", "moot", # "googleplus" or "facebook" COMMENT_SYSTEM = "disqus" # And you also need to add your COMMENT_SYSTEM_ID which # depends on what comment system you use. The default is # "nikolademo" which is a test account for Disqus. More information # is in the manual. COMMENT_SYSTEM_ID = "wibfi" # Create index.html for story folders? # STORY_INDEX = False # Enable comments on story pages? # COMMENTS_IN_STORIES = False # Enable comments on picture gallery pages? # COMMENTS_IN_GALLERIES = False # What file should be used for directory indexes? # Defaults to index.html # Common other alternatives: default.html for IIS, index.php # INDEX_FILE = "index.html" # If a link ends in /index.html, drop the index.html part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the INDEX_FILE setting, so if that is, say, default.html, # it will instead /foo/default.html => /foo) # (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4) # Default = False # STRIP_INDEXES = False # Should the sitemap list directories which only include other directories # and no files. # Default to True # If this is False # e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap # if /2012 includes any files (including index.html)... add it to the sitemap # SITEMAP_INCLUDE_FILELESS_DIRS = True # Instead of putting files in <slug>.html, put them in # <slug>/index.html. Also enables STRIP_INDEXES # This can be disabled on a per-page/post basis by adding # .. pretty_url: False # to the metadata # PRETTY_URLS = False # If True, publish future dated posts right away instead of scheduling them. # Defaults to False. 
# FUTURE_IS_NOW = False # If True, future dated posts are allowed in deployed output # Only the individual posts are published/deployed; not in indexes/sitemap # Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value. # DEPLOY_FUTURE = False # If False, draft posts will not be deployed # DEPLOY_DRAFTS = True # Allows scheduling of posts using the rule specified here (new_post -s) # Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html #SCHEDULE_RULE = 'RRULE:FREQ=DAILY;BYHOUR=12;BYMINUTE=0;BYSECOND=0' # If True, use the scheduling rule to all posts by default SCHEDULE_ALL = False # If True, schedules post to today if possible, even if scheduled hour is over # SCHEDULE_FORCE_TODAY = False # Do you want a add a Mathjax config file? # MATHJAX_CONFIG = "" # If you are using the compile-ipynb plugin, just add this one: #MATHJAX_CONFIG = """ #<script type="text/x-mathjax-config"> #MathJax.Hub.Config({ # tex2jax: { # inlineMath: [ ['$','$'], ["\\\(","\\\)"] ], # displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ] # }, # displayAlign: 'left', // Change this to 'center' to center equations. # "HTML-CSS": { # styles: {'.MathJax_Display': {"margin": 0}} # } #}); #</script> #""" # What MarkDown extensions to enable? # You will also get gist, nikola and podcast because those are # done in the code, hope you don't mind ;-) # MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite'] # Social buttons. This is sample code for AddThis (which was the default for a # long time). Insert anything you want here, or even make it empty. SOCIAL_BUTTONS_CODE = "" # <!-- Social buttons --> # <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style"> # <a class="addthis_button_more">Share</a> # <ul><li><a class="addthis_button_facebook"></a> # <li><a class="addthis_button_google_plusone_share"></a> # <li><a class="addthis_button_linkedin"></a> # <li><a class="addthis_button_twitter"></a> # </ul> # </div> # <script type="text/javascript" src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script> # <!-- End of social buttons --> #""" # Hide link to source for the posts? HIDE_SOURCELINK = True # Copy the source files for your pages? # Setting it to False implies HIDE_SOURCELINK = True COPY_SOURCES = False # Modify the number of Post per Index Page # Defaults to 10 INDEX_DISPLAY_POST_COUNT = 10 # RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None, # the base.tmpl will use the feed Nikola generates. However, you may want to # change it for a feedburner feed or something else. # RSS_LINK = None # Show only teasers in the RSS feed? Default to True RSS_TEASERS = True # A search form to search this site, for the sidebar. You can use a google # custom search (http://www.google.com/cse/) # Or a duckduckgo search: https://duckduckgo.com/search_box.html # Default is no search form. 
# SEARCH_FORM = "" # # This search form works for any site and looks good in the "site" theme where # it appears on the navigation bar: # SEARCH_FORM = """ <!-- Custom search --> <form method="get" id="search" action="http://duckduckgo.com/" class="navbar-form pull-left"> <input type="hidden" name="sites" value="%s"/> <input type="hidden" name="k8" value="#444444"/> <input type="hidden" name="k9" value="#D51920"/> <input type="hidden" name="kt" value="h"/> <input type="text" name="q" maxlength="255" placeholder="DuckDuckGo&hellip;" class="span2 form-control input-sm" style="width:65%%; padding:0; height:2em;"/> <input type="submit" value="DuckDuckGo Search" style="visibility: hidden; width: 5%%" /> </form> <!-- End of custom search --> """ % SITE_URL # # If you prefer a google search form, here's an example that should just work: #SEARCH_FORM = """ #<!-- Custom search with google--> #<form id="search" action="http://google.com/search" method="get" class="navbar-form pull-left"> #<input type="hidden" name="q" value="site:%s" /> #<input type="text" name="q" maxlength="255" results="0" placeholder="Search"/> #</form> #<!-- End of custom search --> #""" % SITE_URL # Also, there is a local search plugin you can use, based on Tipue, but it requires setting several # options: # SEARCH_FORM = """ # <span class="navbar-form pull-left"> # <input type="text" id="tipue_search_input"> # </span>""" # # BODY_END = """ # <script type="text/javascript" src="/assets/js/tipuesearch_set.js"></script> # <script type="text/javascript" src="/assets/js/tipuesearch.js"></script> # <script type="text/javascript"> # $(document).ready(function() { # $('#tipue_search_input').tipuesearch({ # 'mode': 'json', # 'contentLocation': '/assets/js/tipuesearch_content.json', # 'showUrl': false # }); # }); # </script> # """ EXTRA_HEAD_DATA = """ <script type="text/javascript"> var _gaq = _gaq || []; _gaq.push(['_setAccount', 'UA-44317802-1']); _gaq.push(['_setDomainName', 'virtua-peanuts.net']); _gaq.push(['_trackPageview']); (function() { var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true; ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js'; var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s); })(); </script>""" # <link rel="stylesheet" type="text/css" href="/assets/css/tipuesearch.css"> # <div id="tipue_search_content" style="margin-left: auto; margin-right: auto; padding: 20px;"></div> # ENABLED_EXTRAS = ['local_search'] # # Use content distribution networks for jquery and twitter-bootstrap css and js # If this is True, jquery is served from the Google CDN and twitter-bootstrap # is served from the NetDNA CDN # Set this to False if you want to host your site without requiring access to # external resources. # USE_CDN = False # Extra things you want in the pages HEAD tag. This will be added right # before </HEAD> # EXTRA_HEAD_DATA = "" # Google analytics or whatever else you use. Added to the bottom of <body> # in the default template (base.tmpl). # BODY_END = "" # The possibility to extract metadata from the filename by using a # regular expression. # To make it work you need to name parts of your regular expression. 
# The following names will be used to extract metadata: # - title # - slug # - date # - tags # - link # - description # # An example re is the following: # '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md' # FILE_METADATA_REGEXP = None # Additional metadata that is added to a post when creating a new_post ADDITIONAL_METADATA = {} # Nikola supports Twitter Card summaries / Open Graph. # Twitter cards make it possible for you to attach media to Tweets # that link to your content. # # IMPORTANT: # Please note, that you need to opt-in for using Twitter Cards! # To do this please visit # https://dev.twitter.com/form/participate-twitter-cards # # Uncomment and modify to following lines to match your accounts. # Specifying the id for either 'site' or 'creator' will be preferred # over the cleartext username. Specifying an ID is not necessary. # Displaying images is currently not supported. # TWITTER_CARD = { # # 'use_twitter_cards': True, # enable Twitter Cards / Open Graph # # 'site': '@website', # twitter nick for the website # # 'site:id': 123456, # Same as site, but the website's Twitter user ID # # instead. # # 'creator': '@username', # Username for the content creator / author. # # 'creator:id': 654321, # Same as creator, but the Twitter user's ID. # } # If you want to use formatted post time in W3C-DTF Format # (ex. 2012-03-30T23:00:00+02:00), # set timzone if you want a localized posted date. # TIMEZONE = 'Europe/Paris' # If webassets is installed, bundle JS and CSS to make site loading faster # USE_BUNDLES = True # Plugins you don't want to use. Be careful :-) # DISABLED_PLUGINS = ["render_galleries"] # Experimental plugins - use at your own risk. # They probably need some manual adjustments - please see their respective # readme. ENABLED_EXTRAS = [ # 'planetoid', # 'ipynb', # 'local_search', # 'render_mustache', ] # List of regular expressions, links matching them will always be considered # valid by "nikola check -l" # LINK_CHECK_WHITELIST = [] # If set to True, enable optional hyphenation in your posts (requires pyphen) # HYPHENATE = False # Put in global_context things you want available on all your templates. # It can be anything, data, functions, modules, etc. GLOBAL_CONTEXT = {}
gpl-3.0
-2,642,146,339,466,596,400
34.269051
225
0.670915
false
3.153226
false
false
false
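A quick, runnable check of the FILE_METADATA_REGEXP example quoted in the conf.py record above; a rough sketch, and the file name here is made up for illustration.

import re

# The example pattern from the config comments above.
pattern = r'(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md'
match = re.match(pattern, '2013-08-01-my-post-My Post.md')
# Greedy matching leaves the last hyphen-separated piece for the title:
# {'date': '2013-08-01', 'slug': 'my-post', 'title': 'My Post'}
print(match.groupdict())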
xuanthuong/golfgame
models/work_history.py
1
2217
# -*- coding: utf-8 -*-
# Description: work_history table
# By Thuong.Tran
# Date: 29 Aug 2017

from sqlalchemy import create_engine, Table, Column, MetaData, Integer, Text, DateTime, Float
from sqlalchemy import select, and_

import datetime as dt


class work_history():
    def __init__(self, db_url):
        _engine = create_engine(db_url)
        _connection = _engine.connect()
        _metadata = MetaData()
        _work_history = Table("work_history", _metadata,
                              Column("WRK_HIS_ID", Integer, primary_key=True),
                              Column("USR_ID", Integer),
                              Column("PROC_NM", Text),
                              Column("ST_DT", DateTime),
                              Column("END_DT", DateTime),
                              Column("LD_TM", Float),
                              Column("CRE_DT", DateTime))
        _metadata.create_all(_engine)
        self.connection = _connection
        self.work_history = _work_history

    def insert_to(self, data):
        # TODO: validate that required fields are present before inserting
        ins_query = self.work_history.insert().values(data)
        self.connection.execute(ins_query)

    def get_all(self):
        s = select([self.work_history]).order_by('PROC_NM')
        result = self.connection.execute(s)
        return result

    def get_by_period(self, start_date, end_date):
        s = select([self.work_history]).where(and_(self.work_history.c.ST_DT >= start_date,
                                                   self.work_history.c.END_DT <= end_date))
        result = self.connection.execute(s)
        return result

    def get_finalized_process_of_one_day(self, today, worker):
        lower = dt.datetime(today.year, today.month, today.day, 0, 0, 0)
        upper = dt.datetime(today.year, today.month, today.day, 23, 59, 59)
        print(lower)
        print(upper)
        s = select([self.work_history]).where(and_(self.work_history.c.END_DT > lower,
                                                   self.work_history.c.END_DT < upper,
                                                   self.work_history.c.USR_ID == worker))
        result = self.connection.execute(s)
        return result
mit
6,242,151,486,691,357,000
35.95
93
0.564727
false
3.744932
false
false
false
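A minimal usage sketch for the work_history class in the record above. The in-memory SQLite URL and all row values are assumptions for illustration; only the column names come from the table definition.

import datetime as dt

from models.work_history import work_history  # module path taken from the record above

wh = work_history('sqlite:///:memory:')
wh.insert_to({
    'USR_ID': 1,
    'PROC_NM': 'swing-analysis',  # made-up process name
    'ST_DT': dt.datetime(2017, 8, 29, 9, 0),
    'END_DT': dt.datetime(2017, 8, 29, 9, 30),
    'LD_TM': 0.5,
    'CRE_DT': dt.datetime.utcnow(),
})
for row in wh.get_all():
    print(row)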
qtproject/qt-creator
scripts/generateClangFormatChecksUI.py
3
9025
#!/usr/bin/env python ############################################################################ # # Copyright (C) 2019 The Qt Company Ltd. # Contact: https://www.qt.io/licensing/ # # This file is part of Qt Creator. # # Commercial License Usage # Licensees holding valid commercial Qt licenses may use this file in # accordance with the commercial license agreement provided with the # Software or, alternatively, in accordance with the terms contained in # a written agreement between you and The Qt Company. For licensing terms # and conditions see https://www.qt.io/terms-conditions. For further # information use the contact form at https://www.qt.io/contact-us. # # GNU General Public License Usage # Alternatively, this file may be used under the terms of the GNU # General Public License version 3 as published by the Free Software # Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT # included in the packaging of this file. Please review the following # information to ensure the GNU General Public License requirements will # be met: https://www.gnu.org/licenses/gpl-3.0.html. # ############################################################################ import argparse import json import os import docutils.nodes import docutils.parsers.rst import docutils.utils def full_ui_content(checks): return '''<?xml version="1.0" encoding="UTF-8"?> <ui version="4.0"> <class>ClangFormat::ClangFormatChecksWidget</class> <widget class="QWidget" name="ClangFormat::ClangFormatChecksWidget"> <property name="maximumSize"> <size> <width>480</width> <height>16777215</height> </size> </property> <layout class="QGridLayout" name="checksLayout"> ''' + checks + ''' </layout> </widget> <resources/> <connections/> </ui> ''' def parse_arguments(): parser = argparse.ArgumentParser(description='Clazy checks header file generator') parser.add_argument('--clang-format-options-rst', help='path to ClangFormatStyleOptions.rst', default=None, dest='options_rst') return parser.parse_args() def parse_rst(text): parser = docutils.parsers.rst.Parser() components = (docutils.parsers.rst.Parser,) settings = docutils.frontend.OptionParser(components=components).get_default_values() document = docutils.utils.new_document('<rst-doc>', settings=settings) parser.parse(text, document) return document def createItem(key, value, index): label = ''' <item row="''' + str(index) + '''" column="0"> <widget class="QLabel" name="label''' + key + '''"> <property name="text"> <string notr="true">''' + key + '''</string> </property> </widget> </item> ''' value_item = '' if value[0] == 'bool': value_item = ''' <item row="''' + str(index) + '''" column="1"> <widget class="QComboBox" name="''' + key + '''"> <property name="focusPolicy"> <enum>Qt::StrongFocus</enum> </property> <item> <property name="text"> <string notr="true">Default</string> </property> </item> <item> <property name="text"> <string notr="true">true</string> </property> </item> <item> <property name="text"> <string notr="true">false</string> </property> </item> </widget> </item> ''' elif value[0].startswith('std::string') or value[0] == 'unsigned' or value[0] == 'int': value_item = ''' <item row="''' + str(index) + '''" column="1"> <layout class="QHBoxLayout"> <item> <widget class="QLineEdit" name="''' + key + '''"> </widget> </item> <item> <widget class="QPushButton" name="set''' + key + '''"> <property name="maximumSize"> <size> <width>40</width> <height>16777215</height> </size> </property> <property name="text"> <string notr="true">Set</string> </property> 
</widget> </item> </layout> </item> ''' elif value[0].startswith('std::vector'): value_item = ''' <item row="''' + str(index) + '''" column="1"> <layout class="QHBoxLayout"> <item> <widget class="QPlainTextEdit" name="''' + key + '''"> <property name="sizePolicy"> <sizepolicy hsizetype="Expanding" vsizetype="Fixed"/> </property> <property name="maximumSize"> <size> <width>16777215</width> <height>50</height> </size> </property> </widget> </item> <item> <widget class="QPushButton" name="set''' + key + '''"> <property name="maximumSize"> <size> <width>40</width> <height>16777215</height> </size> </property> <property name="text"> <string notr="true">Set</string> </property> </widget> </item> </layout> </item> ''' else: if ' ' in value[1]: value_item = '' for i, val in enumerate(value): if i == 0: continue index += 1 space_index = val.find(' ') val = val[space_index + 1:] value_item += ''' <item row="''' + str(index) + '''" column="0"> <widget class="QLabel" name="label''' + val + '''"> <property name="text"> <string notr="true"> ''' + val + '''</string> </property> </widget> </item> ''' value_item += ''' <item row="''' + str(index) + '''" column="1"> <widget class="QComboBox" name="''' + val + '''"> <property name="focusPolicy"> <enum>Qt::StrongFocus</enum> </property> <item> <property name="text"> <string notr="true">Default</string> </property> </item> <item> <property name="text"> <string notr="true">true</string> </property> </item> <item> <property name="text"> <string notr="true">false</string> </property> </item> </widget> </item> ''' else: value_item = ''' <item row="''' + str(index) + '''" column="1"> <widget class="QComboBox" name="''' + key + '''"> <property name="focusPolicy"> <enum>Qt::StrongFocus</enum> </property> ''' if key == 'Language': value_item += ''' <property name="enabled"> <bool>false</bool> </property> ''' if index > 0: value_item += ''' <item> <property name="text"> <string notr="true">Default</string> </property> </item> ''' for i, val in enumerate(value): if i == 0: continue underline_index = val.find('_') val = val[underline_index + 1:] value_item += ''' <item> <property name="text"> <string notr="true">''' + val + '''</string> </property> </item> ''' value_item += ''' </widget> </item> ''' return label + value_item, index class MyVisitor(docutils.nodes.NodeVisitor): in_bullet_list = False in_bullet_list_paragraph = False tree = {} last_key = '' def visit_term(self, node): node_values = node.traverse(condition=docutils.nodes.Text) name = node_values[0].astext() self.last_key = name self.tree[name] = [node_values[2].astext()] def visit_bullet_list(self, node): self.in_bullet_list = True def depart_bullet_list(self, node): self.in_bullet_list = False def visit_paragraph(self, node): if self.in_bullet_list: self.in_bullet_list_paragraph = True def depart_paragraph(self, node): self.in_bullet_list_paragraph = False def visit_literal(self, node): if self.in_bullet_list_paragraph: value = node.traverse(condition=docutils.nodes.Text)[0].astext() self.tree[self.last_key].append(value) self.in_bullet_list_paragraph = False def unknown_visit(self, node): """Called for all other node types.""" #print(node) pass def unknown_departure(self, node): pass def main(): arguments = parse_arguments() content = file(arguments.options_rst).read() document = parse_rst(content) visitor = MyVisitor(document) document.walkabout(visitor) keys = visitor.tree.keys() basedOnStyleKey = 'BasedOnStyle' keys.remove(basedOnStyleKey) keys.sort() text = '' line, index = createItem(basedOnStyleKey, 
visitor.tree[basedOnStyleKey], 0) text += line index = 1 for key in keys: line, index = createItem(key, visitor.tree[key], index) text += line index += 1 current_path = os.path.dirname(os.path.abspath(__file__)) ui_path = os.path.abspath(os.path.join(current_path, '..', 'src', 'plugins', 'clangformat', 'clangformatchecks.ui')) with open(ui_path, 'w') as f: f.write(full_ui_content(text)) if __name__ == "__main__": main()
gpl-3.0
-6,718,310,938,669,209,000
30.013746
97
0.560665
false
3.718583
false
false
false
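The generator in the record above targets Python 2 (it relies on the file() builtin). A hypothetical way to run it from a Qt Creator checkout; the rst path is a placeholder for a local copy of Clang's ClangFormatStyleOptions.rst, and only the --clang-format-options-rst flag comes from the script itself.

import subprocess

subprocess.check_call([
    'python2', 'scripts/generateClangFormatChecksUI.py',
    '--clang-format-options-rst', 'ClangFormatStyleOptions.rst',
])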
Ircam-Web/mezzanine-organization
organization/agenda/migrations/0033_dynamicmultimediaevent.py
1
1459
# -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2018-11-30 10:33 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import mezzanine.core.fields class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0002_remove_content_type_name'), ('mezzanine_agenda', '0028_auto_20180926_1235'), ('organization-agenda', '0032_auto_20181108_1636'), ] operations = [ migrations.CreateModel( name='DynamicMultimediaEvent', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('_order', mezzanine.core.fields.OrderField(null=True, verbose_name='Order')), ('object_id', models.PositiveIntegerField(editable=False, null=True, verbose_name='related object')), ('content_type', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType', verbose_name='content type')), ('event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='dynamic_multimedia', to='mezzanine_agenda.Event', verbose_name='event')), ], options={ 'verbose_name': 'Multimedia', 'ordering': ('_order',), }, ), ]
agpl-3.0
7,624,932,403,393,662,000
43.212121
199
0.626456
false
3.90107
false
false
false
JamieCressey/apt-s3
apts3/__init__.py
1
6562
# Copyright 2016 Jamie Cressey
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os.path
import boto3
import botocore.exceptions
import logging
import sys
import json
import pwd
import os
import apt.resources

from datetime import datetime, timedelta
from time import sleep

__author__ = 'Jamie Cressey'
__version__ = '0.9.0'


class AptS3(object):

    def __init__(self, args):
        self.log = self._logger()
        self.args = args
        self.debs = args.files.split()

        if args.action == 'upload':
            self.upload_debs()
        elif args.action == 'delete':
            self.delete_debs()
        else:
            self.log.error('Unknown command: {}'.format(args.action))

    def _logger(self):
        log = logging.getLogger('apt-s3')
        log.setLevel(logging.INFO)
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(
            logging.Formatter(
                '%(asctime)s %(levelname)-8s %(name)s: %(message)s',
                '%Y-%m-%d %H:%M:%S'))
        log.addHandler(handler)
        return log

    def _s3_conn(self):
        boto3.setup_default_session(
            profile_name=self.args.profile,
            region_name=self.args.region)
        self.s3 = boto3.client('s3')

    def _check_debs_exist(self, deb):
        if not os.path.isfile(deb):
            self.log.error('File {0} doesn\'t exist'.format(deb))
            exit(1)

    def _check_lock(self, arch):
        if self.args.lock:
            lockfile = 'dists/{0}/{1}/binary-{2}/apts3_lockfile'.format(
                self.args.codename, self.args.component, arch)

            ts_now = datetime.utcnow()
            ts_stop = ts_now + timedelta(seconds=self.args.lock_timeout)

            while ts_now < ts_stop:
                try:
                    lock = self.s3.get_object(
                        Bucket=self.args.bucket,
                        Key=lockfile)
                    lock_body = json.loads(lock['Body'].read())
                    self.log.info(
                        "Repository is locked by another user: {0}@{1}".format(
                            lock_body['user'], lock_body['host']))

                    ts_now = datetime.utcnow()
                    ts_lock = lock['LastModified'].replace(tzinfo=None)
                    ts_diff = ts_now - ts_lock

                    if ts_diff.seconds > self.args.lock_timeout:
                        self.log.error(
                            'Repository lock is too old: {}. Please investigate.'.format(ts_diff))
                        exit(1)

                    sleep(10)

                except botocore.exceptions.ClientError as e:
                    if e.response['Error']['Code'] == 'NoSuchKey':
                        break
                    else:
                        raise

            self.log.info("Attempting to obtain a lock")

            lock_body = json.dumps({
                "user": pwd.getpwuid(os.getuid()).pw_name,
                "host": os.uname()[1]
            })

            self.s3.put_object(
                Body=lock_body,
                Bucket=self.args.bucket,
                Key=lockfile)

            self.log.info("Locked repository for updates")

    def _delete_lock(self, arch):
        if self.args.lock:
            self.log.info('Removing lockfile')
            lockfile = 'dists/{0}/{1}/binary-{2}/apts3_lockfile'.format(
                self.args.codename, self.args.component, arch)

            self.s3.delete_object(
                Bucket=self.args.bucket,
                Key=lockfile)

    def _parse_manifest(self, arch):
        self.manifests[arch] = apt.resources.Manifest(
            bucket=self.args.bucket,
            codename=self.args.codename,
            component=self.args.component,
            architecture=arch,
            visibility=self.args.visibility,
            s3=self.s3)

    def _parse_package(self, deb):
        self.log.info("Examining package file {}".format(deb))
        pkg = apt.resources.Package(deb)

        if self.args.arch:
            arch = self.args.arch
        elif pkg.architecture:
            arch = pkg.architecture
        else:
            self.log.error(
                "No architecture given and unable to determine one for {0}. Please specify one with --arch [i386|amd64].".format(deb))
            exit(1)

        if arch == 'all' and len(self.manifests) == 0:
            self.log.error(
                'Package {0} had architecture "all" however no existing package lists exist. This can often happen if the first package you are adding to a new repository is an "all" architecture file. Please use --arch [i386|amd64] or another platform type to upload the file.'.format(deb))
            exit(1)

        if arch not in self.manifests:
            self._parse_manifest(arch)

        self.manifests[arch].add(pkg)

        if arch == 'all':
            self.packages_arch_all.append(pkg)

    def _update_manifests(self):
        for arch, manifest in self.manifests.iteritems():
            if arch == 'all':
                continue
            for pkg in self.packages_arch_all:
                manifest.add(pkg)

    def _upload_manifests(self):
        self.log.info('Uploading packages and new manifests to S3')
        for arch, manifest in self.manifests.iteritems():
            self._check_lock(arch)

            manifest.write_to_s3()
            self.release.update_manifest(manifest)

            self.log.info('Update complete.')
            self._delete_lock(arch)

    def upload_debs(self):
        if not self.debs:
            self.log.error('You must specify at least one file to upload')
            exit(1)

        map(self._check_debs_exist, self.debs)

        self._s3_conn()

        self.log.info("Retrieving existing manifests")
        self.release = apt.resources.Release(self.args)

        self.manifests = {}
        map(self._parse_manifest, self.release['architectures'])

        self.packages_arch_all = []
        map(self._parse_package, self.debs)
apache-2.0
-2,980,966,164,605,318,700
32.141414
287
0.556995
false
4.015912
false
false
false
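The repository lock used by _check_lock and _delete_lock above is a small JSON document stored at a fixed S3 key. A sketch of inspecting it with boto3; the bucket name, codename, component and architecture are placeholders, while the key layout and the user/host fields come from the code above.

import json

import boto3

s3 = boto3.client('s3')
# Key layout from _check_lock: dists/<codename>/<component>/binary-<arch>/apts3_lockfile
lock = s3.get_object(Bucket='my-apt-repo',
                     Key='dists/trusty/main/binary-amd64/apts3_lockfile')
owner = json.loads(lock['Body'].read())
print(owner['user'], owner['host'], lock['LastModified'])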
noam09/deluge-telegramer
telegramer/include/telegram/passport/credentials.py
1
17262
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
try:
    import ujson as json
except ImportError:
    import json

from base64 import b64decode

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import OAEP, MGF1
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.hashes import SHA512, SHA256, Hash, SHA1
from future.utils import bord

from telegram import TelegramObject, TelegramError


class TelegramDecryptionError(TelegramError):
    """
    Something went wrong with decryption.
    """

    def __init__(self, message):
        super(TelegramDecryptionError, self).__init__("TelegramDecryptionError: "
                                                      "{}".format(message))


def decrypt(secret, hash, data):
    """
    Decrypt per telegram docs at https://core.telegram.org/passport.

    Args:
        secret (:obj:`str` or :obj:`bytes`): The encryption secret, either as bytes or as a
            base64 encoded string.
        hash (:obj:`str` or :obj:`bytes`): The hash, either as bytes or as a
            base64 encoded string.
        data (:obj:`str` or :obj:`bytes`): The data to decrypt, either as bytes or as a
            base64 encoded string.

    Raises:
        :class:`TelegramDecryptionError`: Given hash does not match hash of decrypted data.

    Returns:
        :obj:`bytes`: The decrypted data as bytes.
    """
    # Make a SHA512 hash of secret + hash
    digest = Hash(SHA512(), backend=default_backend())
    digest.update(secret + hash)
    secret_hash_hash = digest.finalize()
    # First 32 bytes are our key, the next 16 bytes are the initialisation vector
    key, iv = secret_hash_hash[:32], secret_hash_hash[32:32 + 16]
    # Init an AES-CBC cipher and decrypt the data
    cipher = Cipher(AES(key), CBC(iv), backend=default_backend())
    decryptor = cipher.decryptor()
    data = decryptor.update(data) + decryptor.finalize()
    # Calculate SHA256 hash of the decrypted data
    digest = Hash(SHA256(), backend=default_backend())
    digest.update(data)
    data_hash = digest.finalize()
    # If the newly calculated hash did not match the one telegram gave us
    if data_hash != hash:
        # Raise an error that is caught inside telegram.PassportData and transformed into a warning
        raise TelegramDecryptionError("Hashes are not equal! {} != {}".format(data_hash, hash))
    # Return data without padding
    return data[bord(data[0]):]


def decrypt_json(secret, hash, data):
    """Decrypts data using secret and hash and then decodes utf-8 string and loads json"""
    return json.loads(decrypt(secret, hash, data).decode('utf-8'))


class EncryptedCredentials(TelegramObject):
    """Contains data required for decrypting and authenticating EncryptedPassportElement.
See the Telegram Passport Documentation for a complete description of the data decryption and authentication processes. Attributes: data (:class:`telegram.Credentials` or :obj:`str`): Decrypted data with unique user's nonce, data hashes and secrets used for EncryptedPassportElement decryption and authentication or base64 encrypted data. hash (:obj:`str`): Base64-encoded data hash for data authentication. secret (:obj:`str`): Decrypted or encrypted secret used for decryption. Args: data (:class:`telegram.Credentials` or :obj:`str`): Decrypted data with unique user's nonce, data hashes and secrets used for EncryptedPassportElement decryption and authentication or base64 encrypted data. hash (:obj:`str`): Base64-encoded data hash for data authentication. secret (:obj:`str`): Decrypted or encrypted secret used for decryption. **kwargs (:obj:`dict`): Arbitrary keyword arguments. Note: This object is decrypted only when originating from :obj:`telegram.PassportData.decrypted_credentials`. """ def __init__(self, data, hash, secret, bot=None, **kwargs): # Required self.data = data self.hash = hash self.secret = secret self._id_attrs = (self.data, self.hash, self.secret) self.bot = bot self._decrypted_secret = None self._decrypted_data = None @classmethod def de_json(cls, data, bot): if not data: return None data = super(EncryptedCredentials, cls).de_json(data, bot) return cls(bot=bot, **data) @property def decrypted_secret(self): """ :obj:`str`: Lazily decrypt and return secret. Raises: telegram.TelegramDecryptionError: Decryption failed. Usually due to bad private/public key but can also suggest malformed/tampered data. """ if self._decrypted_secret is None: # Try decrypting according to step 1 at # https://core.telegram.org/passport#decrypting-data # We make sure to base64 decode the secret first. # Telegram says to use OAEP padding so we do that. The Mask Generation Function # is the default for OAEP, the algorithm is the default for PHP which is what # Telegram's backend servers run. try: self._decrypted_secret = self.bot.private_key.decrypt(b64decode(self.secret), OAEP( mgf=MGF1(algorithm=SHA1()), algorithm=SHA1(), label=None )) except ValueError as e: # If decryption fails raise exception raise TelegramDecryptionError(e) return self._decrypted_secret @property def decrypted_data(self): """ :class:`telegram.Credentials`: Lazily decrypt and return credentials data. This object also contains the user specified nonce as `decrypted_data.nonce`. Raises: telegram.TelegramDecryptionError: Decryption failed. Usually due to bad private/public key but can also suggest malformed/tampered data. """ if self._decrypted_data is None: self._decrypted_data = Credentials.de_json(decrypt_json(self.decrypted_secret, b64decode(self.hash), b64decode(self.data)), self.bot) return self._decrypted_data class Credentials(TelegramObject): """ Attributes: secure_data (:class:`telegram.SecureData`): Credentials for encrypted data nonce (:obj:`str`): Bot-specified nonce """ def __init__(self, secure_data, nonce, bot=None, **kwargs): # Required self.secure_data = secure_data self.nonce = nonce self.bot = bot @classmethod def de_json(cls, data, bot): if not data: return None data['secure_data'] = SecureData.de_json(data.get('secure_data'), bot=bot) return cls(bot=bot, **data) class SecureData(TelegramObject): """ This object represents the credentials that were used to decrypt the encrypted data. All fields are optional and depend on fields that were requested. 
Attributes: personal_details (:class:`telegram.SecureValue`, optional): Credentials for encrypted personal details. passport (:class:`telegram.SecureValue`, optional): Credentials for encrypted passport. internal_passport (:class:`telegram.SecureValue`, optional): Credentials for encrypted internal passport. driver_license (:class:`telegram.SecureValue`, optional): Credentials for encrypted driver license. identity_card (:class:`telegram.SecureValue`, optional): Credentials for encrypted ID card address (:class:`telegram.SecureValue`, optional): Credentials for encrypted residential address. utility_bill (:class:`telegram.SecureValue`, optional): Credentials for encrypted utility bill. bank_statement (:class:`telegram.SecureValue`, optional): Credentials for encrypted bank statement. rental_agreement (:class:`telegram.SecureValue`, optional): Credentials for encrypted rental agreement. passport_registration (:class:`telegram.SecureValue`, optional): Credentials for encrypted registration from internal passport. temporary_registration (:class:`telegram.SecureValue`, optional): Credentials for encrypted temporary registration. """ def __init__(self, personal_details=None, passport=None, internal_passport=None, driver_license=None, identity_card=None, address=None, utility_bill=None, bank_statement=None, rental_agreement=None, passport_registration=None, temporary_registration=None, bot=None, **kwargs): # Optionals self.temporary_registration = temporary_registration self.passport_registration = passport_registration self.rental_agreement = rental_agreement self.bank_statement = bank_statement self.utility_bill = utility_bill self.address = address self.identity_card = identity_card self.driver_license = driver_license self.internal_passport = internal_passport self.passport = passport self.personal_details = personal_details self.bot = bot @classmethod def de_json(cls, data, bot): if not data: return None data['temporary_registration'] = SecureValue.de_json(data.get('temporary_registration'), bot=bot) data['passport_registration'] = SecureValue.de_json(data.get('passport_registration'), bot=bot) data['rental_agreement'] = SecureValue.de_json(data.get('rental_agreement'), bot=bot) data['bank_statement'] = SecureValue.de_json(data.get('bank_statement'), bot=bot) data['utility_bill'] = SecureValue.de_json(data.get('utility_bill'), bot=bot) data['address'] = SecureValue.de_json(data.get('address'), bot=bot) data['identity_card'] = SecureValue.de_json(data.get('identity_card'), bot=bot) data['driver_license'] = SecureValue.de_json(data.get('driver_license'), bot=bot) data['internal_passport'] = SecureValue.de_json(data.get('internal_passport'), bot=bot) data['passport'] = SecureValue.de_json(data.get('passport'), bot=bot) data['personal_details'] = SecureValue.de_json(data.get('personal_details'), bot=bot) return cls(bot=bot, **data) class SecureValue(TelegramObject): """ This object represents the credentials that were used to decrypt the encrypted value. All fields are optional and depend on the type of field. Attributes: data (:class:`telegram.DataCredentials`, optional): Credentials for encrypted Telegram Passport data. Available for "personal_details", "passport", "driver_license", "identity_card", "identity_passport" and "address" types. front_side (:class:`telegram.FileCredentials`, optional): Credentials for encrypted document's front side. Available for "passport", "driver_license", "identity_card" and "internal_passport". 
reverse_side (:class:`telegram.FileCredentials`, optional): Credentials for encrypted document's reverse side. Available for "driver_license" and "identity_card". selfie (:class:`telegram.FileCredentials`, optional): Credentials for encrypted selfie of the user with a document. Can be available for "passport", "driver_license", "identity_card" and "internal_passport". translation (List[:class:`telegram.FileCredentials`], optional): Credentials for an encrypted translation of the document. Available for "passport", "driver_license", "identity_card", "internal_passport", "utility_bill", "bank_statement", "rental_agreement", "passport_registration" and "temporary_registration". files (List[:class:`telegram.FileCredentials`], optional): Credentials for encrypted files. Available for "utility_bill", "bank_statement", "rental_agreement", "passport_registration" and "temporary_registration" types. """ def __init__(self, data=None, front_side=None, reverse_side=None, selfie=None, files=None, translation=None, bot=None, **kwargs): self.data = data self.front_side = front_side self.reverse_side = reverse_side self.selfie = selfie self.files = files self.translation = translation self.bot = bot @classmethod def de_json(cls, data, bot): if not data: return None data['data'] = DataCredentials.de_json(data.get('data'), bot=bot) data['front_side'] = FileCredentials.de_json(data.get('front_side'), bot=bot) data['reverse_side'] = FileCredentials.de_json(data.get('reverse_side'), bot=bot) data['selfie'] = FileCredentials.de_json(data.get('selfie'), bot=bot) data['files'] = FileCredentials.de_list(data.get('files'), bot=bot) data['translation'] = FileCredentials.de_list(data.get('translation'), bot=bot) return cls(bot=bot, **data) def to_dict(self): data = super(SecureValue, self).to_dict() data['files'] = [p.to_dict() for p in self.files] data['translation'] = [p.to_dict() for p in self.translation] return data class _CredentialsBase(TelegramObject): """Base class for DataCredentials and FileCredentials.""" def __init__(self, hash, secret, bot=None, **kwargs): self.hash = hash self.secret = secret # Aliases just be be sure self.file_hash = self.hash self.data_hash = self.hash self.bot = bot @classmethod def de_json(cls, data, bot): if not data: return None return cls(bot=bot, **data) @classmethod def de_list(cls, data, bot): if not data: return [] credentials = list() for c in data: credentials.append(cls.de_json(c, bot=bot)) return credentials class DataCredentials(_CredentialsBase): """ These credentials can be used to decrypt encrypted data from the data field in EncryptedPassportData. Args: data_hash (:obj:`str`): Checksum of encrypted data secret (:obj:`str`): Secret of encrypted data Attributes: hash (:obj:`str`): Checksum of encrypted data secret (:obj:`str`): Secret of encrypted data """ def __init__(self, data_hash, secret, **kwargs): super(DataCredentials, self).__init__(data_hash, secret, **kwargs) def to_dict(self): data = super(DataCredentials, self).to_dict() del data['file_hash'] del data['hash'] return data class FileCredentials(_CredentialsBase): """ These credentials can be used to decrypt encrypted files from the front_side, reverse_side, selfie and files fields in EncryptedPassportData. 
Args: file_hash (:obj:`str`): Checksum of encrypted file secret (:obj:`str`): Secret of encrypted file Attributes: hash (:obj:`str`): Checksum of encrypted file secret (:obj:`str`): Secret of encrypted file """ def __init__(self, file_hash, secret, **kwargs): super(FileCredentials, self).__init__(file_hash, secret, **kwargs) def to_dict(self): data = super(FileCredentials, self).to_dict() del data['data_hash'] del data['hash'] return data
gpl-3.0
-7,892,456,958,463,140,000
38.321185
99
0.630344
false
4.321983
false
false
false
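A round-trip sketch (Python 3) for the decrypt() helper in the record above: it builds a payload the way Telegram Passport does, following the derivation visible in the code (random front padding whose first byte stores the padding length, SHA256 data hash, key/iv taken from SHA512(secret + hash)), encrypts it, and checks that decrypt() recovers the plaintext. All key material and the payload are made up; only the import path comes from the record.

import os

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CBC
from cryptography.hazmat.primitives.hashes import Hash, SHA256, SHA512

from telegram.passport.credentials import decrypt

secret = os.urandom(32)          # made-up secret
payload = b'{"nonce": "demo"}'   # made-up plaintext

# Telegram prepends 32-255 random padding bytes; the first byte stores the
# padding length. Pick the smallest length keeping the total a multiple of 16.
pad_len = 32 + (16 - (32 + len(payload)) % 16) % 16
padded = bytes([pad_len]) + os.urandom(pad_len - 1) + payload

digest = Hash(SHA256(), backend=default_backend())
digest.update(padded)
data_hash = digest.finalize()  # the hash Telegram sends alongside the data

digest = Hash(SHA512(), backend=default_backend())
digest.update(secret + data_hash)
secret_hash = digest.finalize()
key, iv = secret_hash[:32], secret_hash[32:48]

encryptor = Cipher(AES(key), CBC(iv), backend=default_backend()).encryptor()
encrypted = encryptor.update(padded) + encryptor.finalize()

assert decrypt(secret, data_hash, encrypted) == payload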
ngsutils/ngsutils
ngsutils/gtf/add_xref.py
1
3246
#!/usr/bin/env python
## category General
## desc Appends name annotation from UCSC Xref file
'''Adds gene name annotations to a GTF file (xref)

This adds gene name annotations based upon the KnownGene annotations from the
UCSC Genome Browser. Gene names will be taken from the kgXref table. This
table must be downloaded separately from the UCSC Genome Browser.

This assumes that the file will be in tab-delimited format and that there is
one line for each transcript. You may specify which column represents the
gene name. In the standard "kgXref.txt" file, this is column #5.

This will add the following attributes:
    gene_name
'''

import sys
import os
from ngsutils.support import gzip_reader


def gtf_add_xref(gtf, xref, column=4, out=sys.stdout, quiet=False):
    gene_names = {}

    if not quiet:
        sys.stderr.write('Reading xref...\n')

    for line in gzip_reader(xref):
        if line[0] == '#':
            continue
        cols = line.rstrip().split('\t')
        gene_names[cols[0]] = cols[column]

    if not quiet:
        sys.stderr.write('Reading/writing GTF...\n')

    for line in gzip_reader(gtf):
        try:
            comment = None
            idx = line.find('#')
            if idx > -1:
                if idx == 0:
                    sys.stdout.write(line)
                    continue
                comment = line[idx:]
                line = line[:idx]  # keep only the fields before the comment
            chrom, source, feature, start, end, score, strand, frame, attrs = line.rstrip().split('\t')
            transcript_id = None
            for key, val in [x.split(' ') for x in [x.strip() for x in attrs.split(';')] if x]:
                if val[0] == '"' and val[-1] == '"':
                    val = val[1:-1]
                if key == 'transcript_id':
                    transcript_id = val

            if attrs[-1] != ';':
                attrs = '%s;' % attrs

            if transcript_id in gene_names:
                attrs = '%s gene_name "%s";' % (attrs, gene_names[transcript_id])

            out.write('\t'.join([chrom, source, feature, start, end, score, strand, frame, attrs]))
            if comment:
                out.write('\t%s' % comment)
            out.write('\n')
        except:
            import traceback
            sys.stderr.write('Error parsing line:\n%s\n' % line)
            traceback.print_exc()
            sys.exit(1)


def usage(msg=None):
    if msg:
        print msg
    print __doc__
    print '''\
Usage: gtfutils add_xref {-col num} filename.gtf kgXref.txt

Options:
  -col num    The gene name is stored in column {num} (1-based)
              (default:5)
'''
    sys.exit(1)

if __name__ == '__main__':
    gtf = None
    xref = None
    column = 4

    last = None
    for arg in sys.argv[1:]:
        if last == '-col':
            column = int(arg) - 1
            last = None
        elif not gtf and (os.path.exists(arg) or arg == '-'):
            gtf = arg
        elif not xref and (os.path.exists(arg) or arg == '-'):
            xref = arg
        elif arg in ['-col']:
            last = arg

    if not gtf or not xref:
        usage()
    if gtf == '-' and xref == '-':
        usage('Both GTF and Xref files can not be from stdin')

    gtf_add_xref(gtf, xref, column)
bsd-3-clause
-469,859,513,389,635,600
29.336449
103
0.541282
false
3.743945
false
false
false
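The __main__ block above converts the 1-based -col argument into the 0-based index used by gtf_add_xref. A hypothetical direct call under Python 2 (which this module requires), with placeholder file names, equivalent to `gtfutils add_xref -col 5 knownGene.gtf kgXref.txt`:

import sys

from ngsutils.gtf.add_xref import gtf_add_xref  # import path taken from the record above

# column=4 is the 0-based equivalent of the CLI default (-col 5, the
# gene-name column of the standard kgXref.txt).
gtf_add_xref('knownGene.gtf', 'kgXref.txt', column=4, out=sys.stdout)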
Seegnify/Elasticcrawler
lib/curlheaders.py
1
1702
""" API to extract bits and pieces from CURL (command line utility) headers file. The headers can be obtained by calling: curl -D 'headers' 'url'. Currenlty supported formats are for protocols: HTTP, HTTPS. """ class Curlheaders: # response codes and headers container reponses = list() def __init__(self, headers = None): if headers is not None: self.load(headers) def load(self, headers): # read headers with open(headers) as f: lines = [line.strip() for line in f] # create response list resps = list() line_iter = iter(lines) # consume response code line = next(line_iter, None) resp = dict() resp['code'] = line.split()[1] resp['head'] = dict() # iterate over headers for line in line_iter: if len(line) is 0: # append last response resps.append(resp) # consume response code line = next(line_iter, None) if line is None: break resp = dict() resp['code'] = line.split()[1] resp['head'] = dict() else: # consume response header head = line.find(': ') name = line[0:head].lower() val = line[head+2:len(line)] resp['head'][name] = val # update loaded reponses self.responses = resps def response_count(self): return len(self.responses) def http_code(self, response_index): return self.responses[response_index]['code'] def http_header(self, response_index, header_name): header_name = header_name.lower() try: return self.responses[response_index]['head'][header_name] except KeyError: return None
bsd-3-clause
1,003,504,171,861,701,200
25.59375
77
0.59342
false
3.894737
false
false
false
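A short usage sketch for the Curlheaders class above; 'headers.txt' is a placeholder for a file captured with something like `curl -D headers.txt <url>`, and the import assumes lib/ is on sys.path.

from curlheaders import Curlheaders

h = Curlheaders('headers.txt')
for i in range(h.response_count()):
    # Header names are matched case-insensitively; missing headers return None.
    print(h.http_code(i), h.http_header(i, 'Content-Type'))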
raviii/ravii
items/fields.py
1
1881
from django.db.models.fields.files import ImageField, ImageFieldFile from PIL import Image import os def _add_thumb(s): """ Modifies a string (filename, URL) containing an image filename, to insert '.thumb' """ parts = s.split(".") parts.insert(-1, "thumb") if parts[-1].lower() not in ['jpeg', 'jpg']: parts[-1] = 'jpg' return ".".join(parts) class ThumbnailImageFieldFile(ImageFieldFile): def _get_thumb_path(self): return _add_thumb(self.path) thumb_path = property(_get_thumb_path) def _get_thumb_url(self): return _add_thumb(self.url) thumb_url = property(_get_thumb_url) def save(self, name, content, save=True): super(ThumbnailImageFieldFile, self).save(name, content, save) img = Image.open(self.path) img.thumbnail( (self.field.thumb_width, self.field.thumb_height), Image.ANTIALIAS ) img.save(self.thumb_path, 'JPEG') def delete(self, save=True): if os.path.exists(self.thumb_path): os.remove(self.thumb_path) super(ThumbnailImageFieldFile, self).delete(save) class ThumbnailImageField(ImageField): """ Behaves like a regular ImageField, but stores an extra (JPEG) thumbnail image, providing FIELD.thumb_url and FIELD.thumb_path. Accepts two additional, optional arguments: thumb_width and thumb_height, both defaulting to 128 (pixels). Resizing will preserve aspect ratio while staying inside the requested dimensions; see PIL's Image.thumbnail() method documentation for details. """ attr_class = ThumbnailImageFieldFile def __init__(self, thumb_width=128, thumb_height=128, *args, **kwargs): self.thumb_width = thumb_width self.thumb_height = thumb_height super(ThumbnailImageField, self).__init__(*args, **kwargs)
bsd-3-clause
6,572,445,099,955,995,000
33.2
78
0.654971
false
3.769539
false
false
false
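A minimal sketch of wiring ThumbnailImageField into a model; the model name, upload_to path and thumbnail dimensions are assumptions, while the import path comes from the record above. Once an image is saved, templates can use photo.image.thumb_url next to the usual photo.image.url.

from django.db import models

from items.fields import ThumbnailImageField

class Photo(models.Model):
    # A <name>.thumb.jpg file is written alongside the uploaded original.
    image = ThumbnailImageField(upload_to='photos', thumb_width=200,
                                thumb_height=200)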
trec-kba/streamcorpus-pipeline
streamcorpus_pipeline/tests/test_language.py
1
1657
'''tests for language detection transform

.. This software is released under an MIT/X11 open source license.
   Copyright 2012-2015 Diffeo, Inc.
'''
from __future__ import absolute_import
import os

import pytest

import streamcorpus_pipeline
from streamcorpus_pipeline._clean_html import clean_html
from streamcorpus_pipeline._language import language
from streamcorpus import make_stream_item, ContentItem


@pytest.mark.skipif(True, reason='no longer used')
def test_language(test_data_dir):
    path = os.path.join(test_data_dir, 'test/raw-unicode-issues.html')
    si = make_stream_item(None, 'test')
    si.body = ContentItem(raw=open(path).read())
    context = {}
    lang = language(config={})
    lang(si, context)
    assert si.body.language.name == 'Japanese'
    assert si.body.language.code == 'ja'


@pytest.mark.skipif(True, reason='no longer used')
@pytest.mark.parametrize('with_clean_html', [(True,), (False,)])
def test_language_unreliable_on_raw(test_data_dir, with_clean_html):
    path = os.path.join(test_data_dir, 'test/unreliable-language-detect-on-raw.html')
    si = make_stream_item(None, 'http://bbs.sjtu.edu.cn/bbsanc?path=%2Fgroups%2FGROUP_0%2Fmessage%2FD4EFC2634%2FD7AC8E3A8%2FG.1092960050.A')
    raw = open(path).read()
    #raw = raw.decode('GB2312', 'ignore').encode('utf8')
    si.body = ContentItem(raw=raw)
    si.body.encoding = 'GB2312'
    si.body.media_type = 'text/html'
    context = {}
    if with_clean_html:
        ch = clean_html(config={})
        ch(si, context)
    lang = language(config={})
    lang(si, context)
    assert si.body.language.name == 'Chinese'
    assert si.body.language.code == 'zh'
mit
-3,371,838,387,927,223,000
35.021739
140
0.694629
false
3.132325
true
false
false
MechanisM/musicdb
musicdb/common/management/commands/initial_import_fixups.py
1
3570
from django.core.management.base import NoArgsCommand from musicdb.classical.models import * class Command(NoArgsCommand): def handle_noargs(self, **options): work_pairs = ( ('felix-mendelssohn', ('string-quartet-in-e-flat', 'string-quartet-in-e-flat-1')), ('ludvig-van-beethoven', ('piano-trio-in-e-flat-triosatz', 'piano-trio-in-e-flat-triosatz-1')), ('fryderyk-chopin', ('ballade-no-4-op-52-in-f-minor', 'ballade-no-4-op-52-in-f-minor-1')), ) for a, (b, c) in work_pairs: try: Work.objects.get(composer__slug=a, slug=b).merge_from( Work.objects.get(composer__slug=a, slug=c) ) except Work.DoesNotExist: print "W: Skipping", a, b, c ensemble_pairs = ( ('chamber-orchestra-of-europe', 'chamber-orchestra-of-europe-1'), ('orquestra-sinfonica-haydn-de-bolzano-e-trento', 'orquestra-sinfonica-haydn-de-bolzano-e-trento-1'), ('i-solisti-veneti', 'i-solisti-veneti-1'), ('london-symphony-orchestra', 'london-symphony-orchestra-principals'), ('vienna-philharmonic-orchestra', 'wiener-philharmoniker'), ) for a, b in ensemble_pairs: try: Ensemble.objects.get(slug=a).merge_from(Ensemble.objects.get(slug=b)) except Ensemble.DoesNotExist: print "W: Skipping", a, b relationships = { 'arrangement': ( ('orchesographie', 'capriol-suite-for-string-orchestra'), ), 'revision': ( ('brandenburg-concerto-no-5-early-version-bwv-1050a-in-d', 'brandenburg-concerto-no-5-bwv-1050-in-d'), ('brandenburg-concerto-no-1-early-version-bwv-1046a-in-f', 'brandenburg-concerto-no-1-bwv-1046-in-f'), ), 'variations': ( ('twelve-variations-on-ah-vous-dirai-je-maman-k-265-in-c', 'romantic-piece-op-18'), ), 'transcription': ( ('brandenburg-concerto-no-4-bwv-1049-in-g', 'concerto-for-harpsichord-and-two-recorders-transcription-of-brandenburg-concerto-no-4-bwv-1057'), ('violin-concerto-bwv-1041-in-a-minor', 'harpsichord-concerto-bwv-1058r-in-g-minor'), ('violin-concerto-bwv-1042-in-e', 'harpsichord-concerto-bwv-1054-in-d'), ('concerto-for-oboe-and-violin-bwv-1060r-in-g-minor', 'concerto-for-two-harpsichords-bwv-1060-in-c-minor'), ('double-violin-concerto-bwv-1043-in-d-minor', 'concerto-for-two-harpsichords-bwv-1062-in-c-minor'), ('concerto-for-three-violins-bwv-1064r-in-d', 'concerto-for-three-harpsichords-bwv-1064-in-c'), ('concerto-for-four-violins-op-3-no-10-rv-580-in-b-minor', 'concerto-for-three-harpsichords-bwv-1064-in-c'), ('concerto-for-oboe-damore-bwv-1055r-in-a', 'harpsichord-concerto-bwv-1055-in-a'), ) } for nature, data in relationships.items(): for x, y in data: WorkRelationship.objects.create( source=Work.objects.get(slug=x), derived=Work.objects.get(slug=y), nature=nature, ) to_delete = () for klass, pks in to_delete: for pk in pks: try: klass.objects.get(pk=pk).delete() except klass.DoesNotExist: print "W: Skipping deletion of", klass, pk
agpl-3.0
4,716,751,957,606,330,000
47.243243
158
0.564986
false
2.862871
false
false
false
michaelBenin/sqlalchemy
lib/sqlalchemy/sql/naming.py
1
5728
# sqlalchemy/naming.py # Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Establish constraint and index naming conventions. """ from .schema import Constraint, ForeignKeyConstraint, PrimaryKeyConstraint, \ UniqueConstraint, CheckConstraint, Index, Table, Column from .. import event, events from .. import exc from .elements import _truncated_label import re class conv(_truncated_label): """Mark a string indicating that a name has already been converted by a naming convention. This is a string subclass that indicates a name that should not be subject to any further naming conventions. E.g. when we create a :class:`.Constraint` using a naming convention as follows:: m = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}) t = Table('t', m, Column('x', Integer), CheckConstraint('x > 5', name='x5')) The name of the above constraint will be rendered as ``"ck_t_x5"``. That is, the existing name ``x5`` is used in the naming convention as the ``constraint_name`` token. In some situations, such as in migration scripts, we may be rendering the above :class:`.CheckConstraint` with a name that's already been converted. In order to make sure the name isn't double-modified, the new name is applied using the :func:`.schema.conv` marker. We can use this explicitly as follows:: m = MetaData(naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}) t = Table('t', m, Column('x', Integer), CheckConstraint('x > 5', name=conv('ck_t_x5'))) Where above, the :func:`.schema.conv` marker indicates that the constraint name here is final, and the name will render as ``"ck_t_x5"`` and not ``"ck_t_ck_t_x5"`` .. versionadded:: 0.9.4 .. seealso:: :ref:`constraint_naming_conventions` """ class ConventionDict(object): def __init__(self, const, table, convention): self.const = const self._is_fk = isinstance(const, ForeignKeyConstraint) self.table = table self.convention = convention self._const_name = const.name def _key_table_name(self): return self.table.name def _column_X(self, idx): if self._is_fk: fk = self.const.elements[idx] return fk.parent else: return list(self.const.columns)[idx] def _key_constraint_name(self): if not self._const_name: raise exc.InvalidRequestError( "Naming convention including " "%(constraint_name)s token requires that " "constraint is explicitly named." 
) if not isinstance(self._const_name, conv): self.const.name = None return self._const_name def _key_column_X_name(self, idx): return self._column_X(idx).name def _key_column_X_label(self, idx): return self._column_X(idx)._label def _key_referred_table_name(self): fk = self.const.elements[0] refs = fk.target_fullname.split(".") if len(refs) == 3: refschema, reftable, refcol = refs else: reftable, refcol = refs return reftable def _key_referred_column_X_name(self, idx): fk = self.const.elements[idx] refs = fk.target_fullname.split(".") if len(refs) == 3: refschema, reftable, refcol = refs else: reftable, refcol = refs return refcol def __getitem__(self, key): if key in self.convention: return self.convention[key](self.const, self.table) elif hasattr(self, '_key_%s' % key): return getattr(self, '_key_%s' % key)() else: col_template = re.match(r".*_?column_(\d+)_.+", key) if col_template: idx = col_template.group(1) attr = "_key_" + key.replace(idx, "X") idx = int(idx) if hasattr(self, attr): return getattr(self, attr)(idx) raise KeyError(key) _prefix_dict = { Index: "ix", PrimaryKeyConstraint: "pk", CheckConstraint: "ck", UniqueConstraint: "uq", ForeignKeyConstraint: "fk" } def _get_convention(dict_, key): for super_ in key.__mro__: if super_ in _prefix_dict and _prefix_dict[super_] in dict_: return dict_[_prefix_dict[super_]] elif super_ in dict_: return dict_[super_] else: return None @event.listens_for(Constraint, "after_parent_attach") @event.listens_for(Index, "after_parent_attach") def _constraint_name(const, table): if isinstance(table, Column): # for column-attached constraint, set another event # to link the column attached to the table as this constraint # associated with the table. event.listen(table, "after_parent_attach", lambda col, table: _constraint_name(const, table) ) elif isinstance(table, Table): metadata = table.metadata convention = _get_convention(metadata.naming_convention, type(const)) if convention is not None: if const.name is None or "constraint_name" in convention: newname = conv( convention % ConventionDict(const, table, metadata.naming_convention) ) if const.name is None: const.name = newname
mit
-7,769,484,997,395,845,000
33.506024
97
0.597416
false
4.022472
false
false
false
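A runnable sketch of the convention lookup implemented in the record above, combining the ck example from the conv docstring with a column_0_name token (which the module resolves through _key_column_X_name); the table and column names are made up.

from sqlalchemy import (CheckConstraint, Column, Integer, MetaData, Table,
                        UniqueConstraint)

m = MetaData(naming_convention={
    "uq": "uq_%(table_name)s_%(column_0_name)s",
    "ck": "ck_%(table_name)s_%(constraint_name)s",
})
t = Table('t', m,
          Column('x', Integer),
          UniqueConstraint('x'),
          CheckConstraint('x > 5', name='x5'))
print(sorted(c.name for c in t.constraints if c.name))
# ['ck_t_x5', 'uq_t_x']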
anthonyfok/frescobaldi
frescobaldi_app/logtool/__init__.py
1
3820
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/ # # Copyright (c) 2008 - 2014 by Wilbert Berendsen # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # See http://www.gnu.org/licenses/ for more information. """ The log dockwindow. """ from PyQt5.QtCore import QSettings, Qt from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import QAction import actioncollection import actioncollectionmanager import app import panel class LogTool(panel.Panel): """A dockwidget showing the log of running Jobs.""" def __init__(self, mainwindow): super(LogTool, self).__init__(mainwindow) self.hide() self.toggleViewAction().setShortcut(QKeySequence("Meta+Alt+L")) ac = self.actionCollection = Actions() ac.log_next_error.triggered.connect(self.slotNextError) ac.log_previous_error.triggered.connect(self.slotPreviousError) actioncollectionmanager.manager(mainwindow).addActionCollection(ac) mainwindow.addDockWidget(Qt.BottomDockWidgetArea, self) app.jobStarted.connect(self.slotJobStarted) app.jobFinished.connect(self.slotJobFinished) def translateUI(self): self.setWindowTitle(_("LilyPond Log")) self.toggleViewAction().setText(_("LilyPond &Log")) def createWidget(self): from . import logwidget return logwidget.LogWidget(self) def slotJobStarted(self, doc, job): """Called whenever job starts, decides whether to follow it and show the log.""" import jobattributes jattrs = jobattributes.get(job) if doc == self.mainwindow().currentDocument() or self.mainwindow() == jattrs.mainwindow: self.widget().switchDocument(doc) if not jattrs.hidden and QSettings().value("log/show_on_start", True, bool): self.show() def slotJobFinished(self, document, job, success): import jobattributes if (not success and not job.is_aborted() and not jobattributes.get(job).hidden and document == self.mainwindow().currentDocument()): self.show() def slotNextError(self): """Jumps to the position pointed to by the next error message.""" self.activate() self.widget().gotoError(1) def slotPreviousError(self): """Jumps to the position pointed to by the next error message.""" self.activate() self.widget().gotoError(-1) class Actions(actioncollection.ActionCollection): name = "logtool" def createActions(self, parent=None): self.log_next_error = QAction(parent) self.log_previous_error = QAction(parent) self.log_next_error.setShortcut(QKeySequence("Ctrl+E")) self.log_previous_error.setShortcut(QKeySequence("Ctrl+Shift+E")) def translateUI(self): self.log_next_error.setText(_("Next Error Message")) self.log_previous_error.setText(_("Previous Error Message")) # log errors by initializing Errors instance @app.jobStarted.connect def _log_errors(document): from . import errors errors.errors(document)
gpl-2.0
-6,433,061,246,334,416,000
36.087379
96
0.685079
false
4.042328
false
false
false
joshgeller/PyPardot
pypardot/objects/opportunities.py
1
3806
class Opportunities(object): """ A class to query and use Pardot opportunities. Opportunity field reference: http://developer.pardot.com/kb/api-version-3/object-field-references/#opportunity """ def __init__(self, client): self.client = client def query(self, **kwargs): """ Returns the opportunities matching the specified criteria parameters. Supported search criteria: http://developer.pardot.com/kb/api-version-3/opportunities/#supported-search-criteria """ response = self._get(path='/do/query', params=kwargs) # Ensure result['opportunity'] is a list, no matter what. result = response.get('result') if result['total_results'] == 0: result['opportunity'] = [] elif result['total_results'] == 1: result['opportunity'] = [result['opportunity']] return result def create_by_email(self, prospect_email=None, name=None, value=None, probability=None, **kwargs): """ Creates a new opportunity using the specified data. <prospect_email> must correspond to an existing prospect. """ kwargs.update({'name': name, 'value': value, 'probability': probability}) response = self._post( path='/do/create/prospect_email/{prospect_email}'.format(prospect_email=prospect_email), params=kwargs) return response def create_by_id(self, prospect_id=None, name=None, value=None, probability=None, **kwargs): """ Creates a new opportunity using the specified data. <prospect_id> must correspond to an existing prospect. """ kwargs.update({'name': name, 'value': value, 'probability': probability}) response = self._post( path='/do/create/prospect_id/{prospect_id}'.format(prospect_id=prospect_id), params=kwargs) return response def read(self, id=None): """ Returns the data for the opportunity specified by <id>, including campaign assignment and associated visitor activities. <id> is the Pardot ID for the target opportunity. """ response = self._post(path='/do/read/id/{id}'.format(id=id)) return response def update(self, id=None): """ Updates the provided data for the opportunity specified by <id>. <id> is the Pardot ID for the target opportunity. Fields that are not updated by the request remain unchanged. Returns an updated version of the opportunity. """ response = self._post(path='/do/update/id/{id}'.format(id=id)) return response def delete(self, id=None): """ Deletes the opportunity specified by <id>. <id> is the Pardot ID for the target opportunity. Returns no response on success. """ response = self._post(path='/do/delete/id/{id}'.format(id=id)) return response def undelete(self, id=None): """ Un-deletes the opportunity specified by <id>. <id> is the Pardot ID for the target opportunity. Returns no response on success. """ response = self._post(path='/do/undelete/id/{id}'.format(id=id)) return response def _get(self, object_name='opportunity', path=None, params=None): """GET requests for the Opportunity object.""" if params is None: params = {} response = self.client.get(object_name=object_name, path=path, params=params) return response def _post(self, object_name='opportunity', path=None, params=None): """POST requests for the Opportunity object.""" if params is None: params = {} response = self.client.post(object_name=object_name, path=path, params=params) return response
mit
-5,657,057,192,978,973,000
40.824176
120
0.625854
false
4.219512
false
false
false
dmsimard/ansible
lib/ansible/plugins/lookup/first_found.py
1
7109
# (c) 2013, seth vidal <[email protected]> red hat, inc
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

DOCUMENTATION = """
    name: first_found
    author: Seth Vidal (!UNKNOWN) <[email protected]>
    version_added: historical
    short_description: return first file found from list
    description:
      - This lookup checks a list of files and paths and returns the full path to the first combination found.
      - As with all lookups, when fed relative paths it will try to use the current task's location first and go up the chain
        to the containing role/play/include/etc's location.
      - The list of files has precedence over the paths searched.
        For example, if a task in a role has a 'file1' in the play's relative path, that file will be used and 'file2' in the role's relative path will not.
      - Either a list of files C(_terms) or a key `files` with a list of files is required for this plugin to operate.
    notes:
      - This lookup can be used in 'dual mode', either passing a list of file names or a dictionary that has C(files) and C(paths).
    options:
      _terms:
        description: list of file names
      files:
        description: list of file names
        type: list
        default: []
      paths:
        description: list of paths in which to look for the files
        type: list
        default: []
      skip:
        type: boolean
        default: False
        description: Return an empty list if no file is found, instead of an error.
"""

EXAMPLES = """
- name: show first existing file or ignore if none do
  debug: msg={{lookup('first_found', findme, errors='ignore')}}
  vars:
    findme:
      - "/path/to/foo.txt"
      - "bar.txt"  # will be looked for in the files/ dir relative to the role and/or play
      - "/path/to/biz.txt"

- name: |
        include tasks only if files exist.  Note the use of query() to return
        a blank list for the loop if no files are found.
import_tasks: '{{ item }}' vars: params: files: - path/tasks.yaml - path/other_tasks.yaml loop: "{{ query('first_found', params, errors='ignore') }}" - name: | copy first existing file found to /some/file, looking in relative directories from where the task is defined and including any play objects that contain it copy: src={{lookup('first_found', findme)}} dest=/some/file vars: findme: - foo - "{{inventory_hostname}}" - bar - name: same copy but specific paths copy: src={{lookup('first_found', params)}} dest=/some/file vars: params: files: - foo - "{{inventory_hostname}}" - bar paths: - /tmp/production - /tmp/staging - name: INTERFACES | Create Ansible header for /etc/network/interfaces template: src: "{{ lookup('first_found', findme)}}" dest: "/etc/foo.conf" vars: findme: - "{{ ansible_virtualization_type }}_foo.conf" - "default_foo.conf" - name: read vars from first file found, use 'vars/' relative subdir include_vars: "{{lookup('first_found', params)}}" vars: params: files: - '{{ansible_distribution}}.yml' - '{{ansible_os_family}}.yml' - default.yml paths: - 'vars' """ RETURN = """ _raw: description: - path to file found type: list elements: path """ import os from jinja2.exceptions import UndefinedError from ansible.errors import AnsibleLookupError, AnsibleUndefinedVariable from ansible.module_utils.common._collections_compat import Mapping, Sequence from ansible.module_utils.six import string_types from ansible.plugins.lookup import LookupBase def _split_on(terms, spliters=','): # TODO: fix as it does not allow spaces in names termlist = [] if isinstance(terms, string_types): for spliter in spliters: terms = terms.replace(spliter, ' ') termlist = terms.split(' ') else: # added since options will already listify for t in terms: termlist.extend(_split_on(t, spliters)) return termlist class LookupModule(LookupBase): def _process_terms(self, terms, variables, kwargs): total_search = [] skip = False # can use a dict instead of list item to pass inline config for term in terms: if isinstance(term, Mapping): self.set_options(var_options=variables, direct=term) elif isinstance(term, string_types): self.set_options(var_options=variables, direct=kwargs) elif isinstance(term, Sequence): partial, skip = self._process_terms(term, variables, kwargs) total_search.extend(partial) continue else: raise AnsibleLookupError("Invalid term supplied, can handle string, mapping or list of strings but got: %s for %s" % (type(term), term)) files = self.get_option('files') paths = self.get_option('paths') # NOTE: this is used as 'global' but can be set many times?!?!? skip = self.get_option('skip') # magic extra spliting to create lists filelist = _split_on(files, ',;') pathlist = _split_on(paths, ',:;') # create search structure if pathlist: for path in pathlist: for fn in filelist: f = os.path.join(path, fn) total_search.append(f) elif filelist: # NOTE: this seems wrong, should be 'extend' as any option/entry can clobber all total_search = filelist else: total_search.append(term) return total_search, skip def run(self, terms, variables, **kwargs): total_search, skip = self._process_terms(terms, variables, kwargs) # NOTE: during refactor noticed that the 'using a dict' as term # is designed to only work with 'one' otherwise inconsistencies will appear. # see other notes below. 
# actually search subdir = getattr(self, '_subdir', 'files') path = None for fn in total_search: try: fn = self._templar.template(fn) except (AnsibleUndefinedVariable, UndefinedError): continue # get subdir if set by task executor, default to files otherwise path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True) # exit if we find one! if path is not None: return [path] # if we get here, no file was found if skip: # NOTE: global skip wont matter, only last 'skip' value in dict term return [] raise AnsibleLookupError("No file was found when using first_found. Use errors='ignore' to allow this task to be skipped if no files are found")
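
# ---------------------------------------------------------------------------
# A small self-check sketch of _split_on(), the helper above that flattens
# the comma/semicolon separated `files`/`paths` options. Runnable only where
# the Ansible imports at the top of this file are available.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # strings are split on any of the listed separator characters
    assert _split_on('a.yml,b.yml;c.yml', ',;') == ['a.yml', 'b.yml', 'c.yml']
    # lists are flattened element by element
    assert _split_on(['x,y', 'z'], ',') == ['x', 'y', 'z']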
gpl-3.0
-7,768,599,136,003,885,000
33.014354
152
0.608524
false
4.116387
false
false
false
chrys87/orca-beep
src/orca/speechdispatcherfactory.py
1
20607
# Copyright 2006, 2007, 2008, 2009 Brailcom, o.p.s. # # Author: Tomas Cerha <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., Franklin Street, Fifth Floor, # Boston MA 02110-1301 USA. # # [[[TODO: richb - Pylint is giving us a bunch of warnings along these # lines throughout this file: # # W0142:202:SpeechServer._send_command: Used * or ** magic # # So for now, we just disable these warnings in this module.]]] # # pylint: disable-msg=W0142 """Provides an Orca speech server for Speech Dispatcher backend.""" __id__ = "$Id$" __version__ = "$Revision$" __date__ = "$Date$" __author__ = "Tomas Cerha <[email protected]>" __copyright__ = "Copyright (c) 2006-2008 Brailcom, o.p.s." __license__ = "LGPL" from gi.repository import GLib import re import time from . import chnames from . import debug from . import guilabels from . import messages from . import speechserver from . import settings from . import orca_state from . import punctuation_settings from .acss import ACSS try: import speechd except: _speechd_available = False else: _speechd_available = True try: getattr(speechd, "CallbackType") except AttributeError: _speechd_version_ok = False else: _speechd_version_ok = True PUNCTUATION = re.compile('[^\w\s]', re.UNICODE) ELLIPSIS = re.compile('(\342\200\246|(?<!\.)\.{3,4}(?=(\s|\Z)))') class SpeechServer(speechserver.SpeechServer): # See the parent class for documentation. _active_servers = {} DEFAULT_SERVER_ID = 'default' _SERVER_NAMES = {DEFAULT_SERVER_ID: guilabels.DEFAULT_SYNTHESIZER} def getFactoryName(): return guilabels.SPEECH_DISPATCHER getFactoryName = staticmethod(getFactoryName) def getSpeechServers(): servers = [] default = SpeechServer._getSpeechServer(SpeechServer.DEFAULT_SERVER_ID) if default is not None: servers.append(default) for module in default.list_output_modules(): servers.append(SpeechServer._getSpeechServer(module)) return servers getSpeechServers = staticmethod(getSpeechServers) def _getSpeechServer(cls, serverId): """Return an active server for given id. Attempt to create the server if it doesn't exist yet. Returns None when it is not possible to create the server. """ if serverId not in cls._active_servers: cls(serverId) # Don't return the instance, unless it is succesfully added # to `_active_Servers'. 
return cls._active_servers.get(serverId) _getSpeechServer = classmethod(_getSpeechServer) def getSpeechServer(info=None): if info is not None: thisId = info[1] else: thisId = SpeechServer.DEFAULT_SERVER_ID return SpeechServer._getSpeechServer(thisId) getSpeechServer = staticmethod(getSpeechServer) def shutdownActiveServers(): for server in list(SpeechServer._active_servers.values()): server.shutdown() shutdownActiveServers = staticmethod(shutdownActiveServers) # *** Instance methods *** def __init__(self, serverId): super(SpeechServer, self).__init__() self._id = serverId self._client = None self._current_voice_properties = {} self._acss_manipulators = ( (ACSS.RATE, self._set_rate), (ACSS.AVERAGE_PITCH, self._set_pitch), (ACSS.GAIN, self._set_volume), (ACSS.FAMILY, self._set_family), ) if not _speechd_available: msg = 'ERROR: Speech Dispatcher is not available' debug.println(debug.LEVEL_WARNING, msg, True) return if not _speechd_version_ok: msg = 'ERROR: Speech Dispatcher version 0.6.2 or later is required.' debug.println(debug.LEVEL_WARNING, msg, True) return # The following constants must be initialized in runtime since they # depend on the speechd module being available. self._PUNCTUATION_MODE_MAP = { settings.PUNCTUATION_STYLE_ALL: speechd.PunctuationMode.ALL, settings.PUNCTUATION_STYLE_MOST: speechd.PunctuationMode.SOME, settings.PUNCTUATION_STYLE_SOME: speechd.PunctuationMode.SOME, settings.PUNCTUATION_STYLE_NONE: speechd.PunctuationMode.NONE, } self._CALLBACK_TYPE_MAP = { speechd.CallbackType.BEGIN: speechserver.SayAllContext.PROGRESS, speechd.CallbackType.CANCEL: speechserver.SayAllContext.INTERRUPTED, speechd.CallbackType.END: speechserver.SayAllContext.COMPLETED, #speechd.CallbackType.INDEX_MARK:speechserver.SayAllContext.PROGRESS, } self._default_voice_name = guilabels.SPEECH_DEFAULT_VOICE % serverId try: self._init() except: debug.printException(debug.LEVEL_WARNING) msg = 'ERROR: Speech Dispatcher service failed to connect' debug.println(debug.LEVEL_WARNING, msg, True) else: SpeechServer._active_servers[serverId] = self self._lastKeyEchoTime = None def _init(self): self._client = client = speechd.SSIPClient('Orca', component=self._id) client.set_priority(speechd.Priority.MESSAGE) if self._id != self.DEFAULT_SERVER_ID: client.set_output_module(self._id) self._current_voice_properties = {} mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle] client.set_punctuation(mode) def updateCapitalizationStyle(self): """Updates the capitalization style used by the speech server.""" if settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_ICON: style = 'icon' elif settings.capitalizationStyle == settings.CAPITALIZATION_STYLE_SPELL: style = 'spell' else: style = 'none' self._client.set_cap_let_recogn(style) def updatePunctuationLevel(self): """ Punctuation level changed, inform this speechServer. """ mode = self._PUNCTUATION_MODE_MAP[settings.verbalizePunctuationStyle] self._client.set_punctuation(mode) def _send_command(self, command, *args, **kwargs): if hasattr(speechd, 'SSIPCommunicationError'): try: return command(*args, **kwargs) except speechd.SSIPCommunicationError: msg = "SPEECH DISPATCHER: Connection lost. Trying to reconnect." debug.println(debug.LEVEL_INFO, msg, True) self.reset() return command(*args, **kwargs) except: pass else: # It is not possible tho catch the error with older SD versions. 
return command(*args, **kwargs) def _set_rate(self, acss_rate): rate = int(2 * max(0, min(99, acss_rate)) - 98) self._send_command(self._client.set_rate, rate) def _set_pitch(self, acss_pitch): pitch = int(20 * max(0, min(9, acss_pitch)) - 90) self._send_command(self._client.set_pitch, pitch) def _set_volume(self, acss_volume): volume = int(15 * max(0, min(9, acss_volume)) - 35) self._send_command(self._client.set_volume, volume) def _set_family(self, acss_family): familyLocale = acss_family.get(speechserver.VoiceFamily.LOCALE) if not familyLocale: import locale familyLocale, encoding = locale.getdefaultlocale() if familyLocale: lang = familyLocale.split('_')[0] if lang and len(lang) == 2: self._send_command(self._client.set_language, str(lang)) try: # This command is not available with older SD versions. set_synthesis_voice = self._client.set_synthesis_voice except AttributeError: pass else: name = acss_family.get(speechserver.VoiceFamily.NAME) if name != self._default_voice_name: self._send_command(set_synthesis_voice, name) def _debug_sd_values(self, prefix=""): if debug.debugLevel > debug.LEVEL_INFO: return try: sd_rate = self._send_command(self._client.get_rate) sd_pitch = self._send_command(self._client.get_pitch) except: sd_rate = "(exception occurred)" sd_pitch = "(exception occurred)" current = self._current_voice_properties msg = "SPEECH DISPATCHER: %sOrca rate %s, pitch %s; " \ "SD rate %s, pitch %s" % \ (prefix, self._current_voice_properties.get(ACSS.RATE), self._current_voice_properties.get(ACSS.AVERAGE_PITCH), sd_rate, sd_pitch) debug.println(debug.LEVEL_INFO, msg, True) def _apply_acss(self, acss): if acss is None: acss = settings.voices[settings.DEFAULT_VOICE] current = self._current_voice_properties for acss_property, method in self._acss_manipulators: value = acss.get(acss_property) if value is not None: if current.get(acss_property) != value: method(value) current[acss_property] = value elif acss_property == ACSS.AVERAGE_PITCH: method(5.0) current[acss_property] = 5.0 elif acss_property == ACSS.FAMILY \ and acss == settings.voices[settings.DEFAULT_VOICE]: # We need to explicitly reset (at least) the family. # See bgo#626072. # method({}) current[acss_property] = {} def __addVerbalizedPunctuation(self, oldText): """Depending upon the users verbalized punctuation setting, adjust punctuation symbols in the given text to their pronounced equivalents. The pronounced text will either replace the punctuation symbol or be inserted before it. In the latter case, this is to retain spoken prosity. Arguments: - oldText: text to be parsed for punctuation. Returns a text string with the punctuation symbols adjusted accordingly. """ spokenEllipsis = messages.SPOKEN_ELLIPSIS + " " newText = re.sub(ELLIPSIS, spokenEllipsis, oldText) symbols = set(re.findall(PUNCTUATION, newText)) for symbol in symbols: try: level, action = punctuation_settings.getPunctuationInfo(symbol) except: continue if level != punctuation_settings.LEVEL_NONE: # Speech Dispatcher should handle it. 
# continue charName = " %s " % chnames.getCharacterName(symbol) if action == punctuation_settings.PUNCTUATION_INSERT: charName += symbol newText = re.sub(symbol, charName, newText) if orca_state.activeScript: newText = orca_state.activeScript.utilities.adjustForDigits(newText) return newText def _speak(self, text, acss, **kwargs): if isinstance(text, ACSS): text = '' text = self.__addVerbalizedPunctuation(text) if orca_state.activeScript: text = orca_state.activeScript.\ utilities.adjustForPronunciation(text) # Replace no break space characters with plain spaces since some # synthesizers cannot handle them. See bug #591734. # text = text.replace('\u00a0', ' ') # Replace newline followed by full stop, since # this seems to crash sd, see bgo#618334. # text = text.replace('\n.', '\n') self._apply_acss(acss) self._debug_sd_values("Speaking '%s' " % text) self._send_command(self._client.speak, text, **kwargs) def _say_all(self, iterator, orca_callback): """Process another sayAll chunk. Called by the gidle thread. """ try: context, acss = next(iterator) except StopIteration: pass else: def callback(callbackType, index_mark=None): # This callback is called in Speech Dispatcher listener thread. # No subsequent Speech Dispatcher interaction is allowed here, # so we pass the calls to the gidle thread. t = self._CALLBACK_TYPE_MAP[callbackType] if t == speechserver.SayAllContext.PROGRESS: if index_mark: context.currentOffset = int(index_mark) else: context.currentOffset = context.startOffset elif t == speechserver.SayAllContext.COMPLETED: context.currentOffset = context.endOffset GLib.idle_add(orca_callback, context, t) if t == speechserver.SayAllContext.COMPLETED: GLib.idle_add(self._say_all, iterator, orca_callback) self._speak(context.utterance, acss, callback=callback, event_types=list(self._CALLBACK_TYPE_MAP.keys())) return False # to indicate, that we don't want to be called again. def _cancel(self): self._send_command(self._client.cancel) def _change_default_speech_rate(self, step, decrease=False): acss = settings.voices[settings.DEFAULT_VOICE] delta = step * (decrease and -1 or +1) try: rate = acss[ACSS.RATE] except KeyError: rate = 50 acss[ACSS.RATE] = max(0, min(99, rate + delta)) msg = 'SPEECH DISPATCHER: Rate set to %d' % rate debug.println(debug.LEVEL_INFO, msg, True) self.speak(decrease and messages.SPEECH_SLOWER \ or messages.SPEECH_FASTER, acss=acss) def _change_default_speech_pitch(self, step, decrease=False): acss = settings.voices[settings.DEFAULT_VOICE] delta = step * (decrease and -1 or +1) try: pitch = acss[ACSS.AVERAGE_PITCH] except KeyError: pitch = 5 acss[ACSS.AVERAGE_PITCH] = max(0, min(9, pitch + delta)) msg = 'SPEECH DISPATCHER: Pitch set to %d' % pitch debug.println(debug.LEVEL_INFO, msg, True) self.speak(decrease and messages.SPEECH_LOWER \ or messages.SPEECH_HIGHER, acss=acss) def _change_default_speech_volume(self, step, decrease=False): acss = settings.voices[settings.DEFAULT_VOICE] delta = step * (decrease and -1 or +1) try: volume = acss[ACSS.GAIN] except KeyError: volume = 5 acss[ACSS.GAIN] = max(0, min(9, volume + delta)) msg = 'SPEECH DISPATCHER: Volume set to %d' % volume debug.println(debug.LEVEL_INFO, msg, True) self.speak(decrease and messages.SPEECH_SOFTER \ or messages.SPEECH_LOUDER, acss=acss) def getInfo(self): return [self._SERVER_NAMES.get(self._id, self._id), self._id] def getVoiceFamilies(self): # Always offer the configured default voice with a language # set according to the current locale. 
from locale import getlocale, LC_MESSAGES locale = getlocale(LC_MESSAGES)[0] if locale is None or locale == 'C': lang = None dialect = None else: lang, dialect = locale.split('_') voices = ((self._default_voice_name, lang, None),) try: # This command is not available with older SD versions. list_synthesis_voices = self._client.list_synthesis_voices except AttributeError: pass else: try: voices += self._send_command(list_synthesis_voices) except: pass families = [speechserver.VoiceFamily({ \ speechserver.VoiceFamily.NAME: name, #speechserver.VoiceFamily.GENDER: speechserver.VoiceFamily.MALE, speechserver.VoiceFamily.DIALECT: dialect, speechserver.VoiceFamily.LOCALE: lang}) for name, lang, dialect in voices] return families def speak(self, text=None, acss=None, interrupt=True): #if interrupt: # self._cancel() # "We will not interrupt a key echo in progress." (Said the comment in # speech.py where these next two lines used to live. But the code here # suggests we haven't been doing anything with the lastKeyEchoTime in # years. TODO - JD: Dig into this and if it's truly useless, kill it.) if self._lastKeyEchoTime: interrupt = interrupt and (time.time() - self._lastKeyEchoTime) > 0.5 if text: self._speak(text, acss) def speakUtterances(self, utteranceList, acss=None, interrupt=True): #if interrupt: # self._cancel() for utterance in utteranceList: if utterance: self._speak(utterance, acss) def sayAll(self, utteranceIterator, progressCallback): GLib.idle_add(self._say_all, utteranceIterator, progressCallback) def speakCharacter(self, character, acss=None): self._apply_acss(acss) if character == '\n': self._send_command(self._client.sound_icon, 'end-of-line') return name = chnames.getCharacterName(character) if not name: self._send_command(self._client.char, character) return if orca_state.activeScript: name = orca_state.activeScript.\ utilities.adjustForPronunciation(name) self.speak(name, acss) def speakKeyEvent(self, event): if event.isPrintableKey() and event.event_string.isupper(): acss = settings.voices[settings.UPPERCASE_VOICE] else: acss = ACSS(settings.voices[settings.DEFAULT_VOICE]) event_string = event.getKeyName() if orca_state.activeScript: event_string = orca_state.activeScript.\ utilities.adjustForPronunciation(event_string) lockingStateString = event.getLockingStateString() event_string = "%s %s" % (event_string, lockingStateString) self.speak(event_string, acss=acss) self._lastKeyEchoTime = time.time() def increaseSpeechRate(self, step=5): self._change_default_speech_rate(step) def decreaseSpeechRate(self, step=5): self._change_default_speech_rate(step, decrease=True) def increaseSpeechPitch(self, step=0.5): self._change_default_speech_pitch(step) def decreaseSpeechPitch(self, step=0.5): self._change_default_speech_pitch(step, decrease=True) def increaseSpeechVolume(self, step=0.5): self._change_default_speech_volume(step) def decreaseSpeechVolume(self, step=0.5): self._change_default_speech_volume(step, decrease=True) def stop(self): self._cancel() def shutdown(self): self._client.close() del SpeechServer._active_servers[self._id] def reset(self, text=None, acss=None): self._client.close() self._init() def list_output_modules(self): """Return names of available output modules as a tuple of strings. This method is not a part of Orca speech API, but is used internally by the Speech Dispatcher backend. The returned tuple can be empty if the information can not be obtained (e.g. with an older Speech Dispatcher version). 
""" try: return self._send_command(self._client.list_output_modules) except AttributeError: return () except speechd.SSIPCommandError: return ()
lgpl-2.1
5,530,888,183,352,487,000
36.950276
81
0.609841
false
3.897674
false
false
false
mpetyx/pychatbot
SemanticWebApproach/RoboWriter/allegrordf-1.0.1/franz/miniclient/request.py
1
6234
import StringIO, pycurl, urllib, cjson, locale from threading import Lock class Pool: def __init__(self, create): self.create = create self.lock = Lock() self.pool = [] def get(self): self.lock.acquire() try: if len(self.pool): return self.pool.pop() else: return self.create() finally: self.lock.release() def put(self, value): self.lock.acquire() try: self.pool.append(value) finally: self.lock.release() curlPool = Pool(pycurl.Curl) class RequestError(Exception): def __init__(self, status, message): print status, message self.status = status self.message = message def __str__(self): return "Server returned %s: %s" % (self.status, self.message) def urlenc(**args): buf = [] def enc(name, val): buf.append(urllib.quote(name) + "=" + urllib.quote(val)) def encval(name, val): if val is None: pass elif isinstance(val, bool): enc(name, (val and "true") or "false") elif isinstance(val, int): enc(name, "%d" % val) elif isinstance(val, float): enc(name, "%g" % val) elif isinstance(val, list) or isinstance(val, tuple): for elt in val: encval(name, elt) elif isinstance(val, basestring): enc(name, val.encode("utf-8")) else: enc(name, unicode(val).encode("utf-8")) for name, val in args.iteritems(): encval(name, val) return "&".join(buf) def makeRequest(obj, method, url, body=None, accept="*/*", contentType=None, callback=None, errCallback=None): curl = curlPool.get() if obj: if obj.user and obj.password: curl.setopt(pycurl.USERPWD, "%s:%s" % (obj.user, obj.password)) curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) if url.startswith("/"): url = obj.url + url postbody = method == "POST" or method == "PUT" curl.setopt(pycurl.POSTFIELDS, "") if body: if postbody: curl.setopt(pycurl.POSTFIELDS, body) else: url = url + "?" + body curl.setopt(pycurl.POST, (postbody and 1) or 0) curl.setopt(pycurl.CUSTOMREQUEST, method) curl.setopt(pycurl.URL, url) # The "Expect:" is there to suppress "Expect: 100-continue" # behaviour that is the default in libcurl when posting large # bodies. 
headers = ["Connection: keep-alive", "Accept: " + accept, "Expect:"] if contentType and postbody: headers.append("Content-Type: " + contentType) if callback: headers.append("Connection: close") curl.setopt(pycurl.HTTPHEADER, headers) curl.setopt(pycurl.ENCODING, "") # which means 'any encoding that curl supports' if callback: status = [None] error = [] def headerfunc(string): if status[0] is None: status[0] = locale.atoi(string.split(" ")[1]) return len(string) def writefunc(string): if status[0] == 200: callback(string) else: error.append(string.decode("utf-8")) curl.setopt(pycurl.WRITEFUNCTION, writefunc) curl.setopt(pycurl.HEADERFUNCTION, headerfunc) curl.perform() if status[0] != 200: errCallback(curl.getinfo(pycurl.RESPONSE_CODE), "".join(error)) else: buf = StringIO.StringIO() curl.setopt(pycurl.WRITEFUNCTION, buf.write) curl.perform() response = buf.getvalue().decode("utf-8") buf.close() result = (curl.getinfo(pycurl.RESPONSE_CODE), response) curlPool.put(curl) return result def jsonRequest(obj, method, url, body=None, contentType="application/x-www-form-urlencoded", rowreader=None, accept="application/json"): if rowreader is None: status, body = makeRequest(obj, method, url, body, accept, contentType) if (status == 200): if accept in ('application/json', 'text/integer', "application/x-quints+json"): body = cjson.decode(body) return body else: raise RequestError(status, body) else: def raiseErr(status, message): raise RequestError(status, message) makeRequest(obj, method, url, body, accept, contentType, callback=rowreader.process, errCallback=raiseErr) def nullRequest(obj, method, url, body=None, contentType="application/x-www-form-urlencoded"): status, body = makeRequest(obj, method, url, body, "application/json", contentType) if (status < 200 or status > 204): raise RequestError(status, body) class RowReader: def __init__(self, callback): self.hasNames = None self.names = None self.skipNextBracket = False self.callback = callback self.backlog = None def process(self, string): if self.hasNames is None: self.hasNames = string[0] == "{" if not self.hasNames: self.skipNextBracket = True ln = len(string) if self.backlog: string = self.backlog + string pos = [0] def useArray(arr): if self.hasNames: if self.names: self.callback(arr, self.names) else: self.names = arr self.skipNextBracket = True else: self.callback(arr, None) def takeArrayAt(start): scanned = start + 1 while True: end = string.find("]", scanned) if end == -1: return False try: useArray(cjson.decode(string[start : end + 1].decode("utf-8"))) pos[0] = end + 1 return True except cjson.DecodeError: scanned = end + 1 while True: start = string.find("[", pos[0]) if self.skipNextBracket: self.skipNextBracket = False pos[0] = start + 1 elif start == -1 or not takeArrayAt(start): break if pos[0] == 0: self.backlog = string return ln else: self.backlog = None return pos[0]
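
# ---------------------------------------------------------------------------
# A minimal usage sketch of urlenc(): it percent-encodes keyword arguments,
# lower-cases booleans and repeats list-valued parameters. Assumes the
# module-level imports (pycurl, cjson) are installed; parameter order
# follows dict iteration order and may vary.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    print urlenc(infer=False)             # infer=false
    print urlenc(query='select ?s')       # query=select%20%3Fs
    print urlenc(context=['<a>', '<b>'])  # context=%3Ca%3E&context=%3Cb%3E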
apache-2.0
4,239,466,818,039,334,400
34.622857
137
0.569458
false
3.906015
false
false
false
simonkrogmann/planets
gui/object_3D.py
1
4105
# -*- coding: cp1252 -*-
import vector
import time

class Planet3D:
    """A 3D object for the graphics module that is linked to a planet."""
    def __init__(self, Parent, Planet):
        self.Parent = Parent
        self.Planet = Planet
        self.Planet.Register(self)
        self.Positions = [Planet["position"].Tuple()]
        self.Trace = []
        self.Color = Planet["color"]
        self.TraceState = -1
        self.Drawing = self.Parent.Canvas.create_oval(-5, -5, -6, -6,
                                                      fill = Planet["color"],
                                                      outline = "")
        self.Redraw()

    def ResetTrace(self):
        """Deletes the trace of the planet drawn so far."""
        for Line in self.Trace:
            self.Parent.Canvas.delete(Line.Drawing)
            self.Parent.Drawings.remove(Line)
        self.Trace = []
        self.TraceState = -1
        self.Positions = [self.Positions[-1]]

    def Redraw(self):
        """Redraws the planet."""
        C = self.Parent.DisplayPosition(self.Positions[-1])
        if C:
            Diameter = self.Parent.DisplayDiameter(self.Positions[-1],
                                                   self.Planet["diameter"])
            Coordinates = (C[0] - Diameter, C[1] - Diameter,
                           C[0] + Diameter, C[1] + Diameter)
            self.Parent.Canvas.coords(self.Drawing, Coordinates)
        else:
            self.Parent.Canvas.coords(self.Drawing, -5, -5, -6, -6)

    def Update(self, Tag, Value):
        """Updates the drawing of the planet according to the given data.
        Possible data are the planet attributes."""
        if Tag == "position":
            if type(Value) == tuple:
                Tuple = Value
            else:
                Tuple = Value.Tuple()
            if self.Planet["trace"] and self.Planet.Parent.Trace:
                # combines 5 lines at a time for the trace
                self.TraceState = (self.TraceState + 1) % 5
                if not self.TraceState:
                    self.Trace.append(Line3D(self.Parent, self.Positions[-1],
                                             Tuple, self.Color))
                    self.Parent.Drawings.append(self.Trace[-1])
                    self.Positions.append(Tuple)
                else:
                    self.Positions[-1] = Tuple
                    self.Trace[-1].End = Tuple
                    self.Trace[-1].Redraw()
            else:
                self.Positions = [Tuple]
            self.Redraw()
        elif Tag == "diameter":
            self.Redraw()
        elif Tag == "color":
            self.SetColor(Value)
        elif Tag == "trace" and not Value:
            self.ResetTrace()

    def SetColor(self, Color):
        """Changes the planet color."""
        self.Color = Color
        self.Parent.Canvas.itemconfig(self.Drawing, fill = Color)

    def Delete(self):
        """Removes the planet from the drawing."""
        for Line in self.Trace:
            self.Parent.Canvas.delete(Line.Drawing)
            self.Parent.Drawings.remove(Line)
        self.Parent.Canvas.delete(self.Drawing)
        self.Planet.Deregister(self)

    def MidPoint(self):
        """Returns the center point of the planet."""
        return self.Positions[-1]

class Line3D:
    """A 3D line for the graphics module."""
    def __init__(self, Parent, Begin, End, Color):
        self.Parent = Parent
        self.Begin = Begin
        self.End = End
        self.OnScreen = False
        self.Drawing = self.Parent.Canvas.create_line(-5, -5, -5, -5,
                                                      fill = Color)
        self.Redraw()

    def Redraw(self):
        """Redraws the line."""
        Coordinates = self.Parent.LineDisplayCoordinates(self.Begin, self.End)
        if Coordinates != (-5, -5, -5, -5):
            self.Parent.Canvas.coords(self.Drawing, Coordinates)
            self.OnScreen = True
        elif self.OnScreen:
            self.OnScreen = False
            self.Parent.Canvas.coords(self.Drawing, Coordinates)

    def MidPoint(self):
        """Returns the center point of the line."""
        return ((self.Begin[0] + self.End[0]) / 2,
                (self.Begin[1] + self.End[1]) / 2,
                (self.Begin[2] + self.End[2]) / 2)
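
# ---------------------------------------------------------------------------
# Interface assumed by the classes above (derived from the calls they make):
# Parent must provide Canvas (a Tkinter-style canvas), a Drawings list,
# DisplayPosition(), DisplayDiameter() and LineDisplayCoordinates(); Planet
# must support dict-style access to "position", "color", "diameter" and
# "trace" plus Register()/Deregister().
# ---------------------------------------------------------------------------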
mit
-6,019,683,214,109,623,000
37.009259
97
0.557613
false
3.526632
false
false
false
Aloomaio/googleads-python-lib
examples/ad_manager/v201805/product_package_item_service/get_product_package_items_for_product_package.py
1
2302
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This example gets all product package items belonging to a product package.
"""

# Import appropriate modules from the client library.
from googleads import ad_manager

PRODUCT_PACKAGE_ID = 'INSERT_PRODUCT_PACKAGE_ID_HERE'


def main(client, product_package_id):
  # Initialize appropriate service.
  product_package_item_service = client.GetService(
      'ProductPackageItemService', version='v201805')

  # Create a statement to select product package items.
  statement = (ad_manager.StatementBuilder(version='v201805')
               .Where('productPackageId = :productPackageId')
               .WithBindVariable('productPackageId', product_package_id))

  # Retrieve a small amount of product package items at a time, paging
  # through until all product package items have been retrieved.
  while True:
    response = product_package_item_service.getProductPackageItemsByStatement(
        statement.ToStatement())
    if 'results' in response and len(response['results']):
      for product_package_item in response['results']:
        # Print out some information for each product package item.
        print('Product package item with ID "%d", product ID "%d", and product '
              'package ID "%d" was found.\n'
              % (product_package_item['id'],
                 product_package_item['productId'],
                 product_package_item['productPackageId']))
      statement.offset += statement.limit
    else:
      break

  print('\nNumber of results found: %s' % response['totalResultSetSize'])


if __name__ == '__main__':
  # Initialize client object.
  ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
  main(ad_manager_client, PRODUCT_PACKAGE_ID)
apache-2.0
1,369,016,464,488,928,300
40.107143
80
0.715899
false
4.193078
false
false
false
baidu/palo
build-support/run_clang_format.py
2
5703
#!/usr/bin/env python # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # # Modified from Apache Arrow project. from __future__ import print_function import lintutils from subprocess import PIPE import argparse import difflib import multiprocessing as mp import sys from functools import partial # examine the output of clang-format and if changes are # present assemble a (unified)patch of the difference def _check_one_file(filename, formatted): with open(filename, "rb") as reader: original = reader.read() if formatted != original: # Run the equivalent of diff -u diff = list(difflib.unified_diff( original.decode('utf8').splitlines(True), formatted.decode('utf8').splitlines(True), fromfile=filename, tofile="{} (after clang format)".format( filename))) else: diff = None return filename, diff def _check_dir(arguments, source_dir, exclude_globs): formatted_filenames = [] for path in lintutils.get_sources(source_dir, exclude_globs): formatted_filenames.append(str(path)) if arguments.fix: if not arguments.quiet: print("\n".join(map(lambda x: "Formatting {}".format(x), formatted_filenames))) # Break clang-format invocations into chunks: each invocation formats # 16 files. Wait for all processes to complete results = lintutils.run_parallel([ [arguments.clang_format_binary, "-style=file", "-i"] + some for some in lintutils.chunk(formatted_filenames, 16) ]) for returncode, stdout, stderr in results: # if any clang-format reported a parse error, bubble it if returncode != 0: sys.exit(returncode) else: # run an instance of clang-format for each source file in parallel, # then wait for all processes to complete results = lintutils.run_parallel([ [arguments.clang_format_binary, "-style=file", filename] for filename in formatted_filenames ], stdout=PIPE, stderr=PIPE) checker_args = [] for filename, res in zip(formatted_filenames, results): # if any clang-format reported a parse error, bubble it returncode, stdout, stderr = res if returncode != 0: print(stderr) sys.exit(returncode) checker_args.append((filename, stdout)) error = False pool = mp.Pool() try: # check the output from each invocation of clang-format in parallel for filename, diff in pool.starmap(_check_one_file, checker_args): if not arguments.quiet: print("Checking {}".format(filename)) if diff: print("{} had clang-format style issues".format(filename)) # Print out the diff to stderr error = True # pad with a newline print(file=sys.stderr) sys.stderr.writelines(diff) except Exception: error = True raise finally: pool.terminate() pool.join() sys.exit(1 if error else 0) if __name__ == "__main__": parser = argparse.ArgumentParser( description="Runs clang-format on all of the source " "files. 
If --fix is specified enforce format by " "modifying in place, otherwise compare the output " "with the existing file and output any necessary " "changes as a patch in unified diff format") parser.add_argument("--clang_format_binary", required=True, help="Path to the clang-format binary") parser.add_argument("--exclude_globs", help="Filename containing globs for files " "that should be excluded from the checks") parser.add_argument("--source_dirs", required=True, help="Comma-separated root directories of the source code") parser.add_argument("--fix", default=False, action="store_true", help="If specified, will re-format the source " "code instead of comparing the re-formatted " "output, defaults to %(default)s") parser.add_argument("--quiet", default=False, action="store_true", help="If specified, only print errors") arguments = parser.parse_args() exclude_globs = [] if arguments.exclude_globs: with open(arguments.exclude_globs) as f: exclude_globs.extend(line.strip() for line in f) for source_dir in arguments.source_dirs.split(','): if len(source_dir) > 0: _check_dir(arguments, source_dir, exclude_globs)
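
# ---------------------------------------------------------------------------
# Example invocation (a sketch: the directory and exclusion-file paths are
# placeholders; the flag names are the ones defined by the argparse setup
# above):
#
#   python run_clang_format.py \
#       --clang_format_binary="$(which clang-format)" \
#       --source_dirs=be/src,be/test \
#       --exclude_globs=clang_format_exclusions.txt \
#       --fix
# ---------------------------------------------------------------------------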
apache-2.0
-5,497,260,695,327,298,000
38.604167
83
0.60512
false
4.678425
false
false
false
tisnik/fabric8-analytics-common
dashboard/src/jacoco_to_codecov.py
1
4579
"""Module to convert JaCoCo coverage report into the report compatible with Pycov utility.""" import csv def format_coverage_line(text, statements, missed, coverage, missed_lines=False): """Format one line with code coverage report of one class or for a summary.""" format_string = "{:80} {:3d} {:3d} {:3d}%" if missed_lines: format_string += " N/A" return format_string.format(text, statements, missed, coverage) def compute_coverage(statements, covered): """Compute code coverage based on number of all statemts and number of covered statements.""" return 100.0 * covered / statements class JavaClassCoverageReport: """Class representing code coverage report for one Java class.""" def __init__(self, record): """Initialize the object by using record read from the CSV file.""" self.group = record[0] self.package = record[1] self.class_name = record[2] self.missed = int(record[7]) self.covered = int(record[8]) self.statements = self.covered + self.missed self.coverage = compute_coverage(self.statements, self.covered) def __str__(self): """Return readable text representation compatible with Pycov utility output.""" pc = "{package}/{class_name}".format(package=self.package, class_name=self.class_name) return format_coverage_line(pc, self.statements, self.missed, int(self.coverage)) class ProjectCoverageReport: """Class to perform conversion from JaCoCo output to report compatible with Pycov utility.""" def __init__(self, csv_input_file_name): """Initialize the object, store the name of input (CSV) file.""" self.csv_input_file_name = csv_input_file_name @staticmethod def read_csv(csv_input_file_name, skip_first_line=False): """Read the given CSV file, parse it, and return as list of records.""" output = [] with open(csv_input_file_name, 'r') as fin: csv_content = csv.reader(fin, delimiter=',') if skip_first_line: next(csv_content, None) for row in csv_content: output.append(row) return output @staticmethod def write_horizontal_rule(fout): """Write horizontal rule into the output file.""" fout.write("-" * 108) fout.write("\n") @staticmethod def write_coverage_report_header(fout): """Write header compatible with Pycov to the output file.""" fout.write("{:80} {:5} {:4} {:5} {}\n".format( "Name", "Stmts", "Miss", "Cover", "Missing")) ProjectCoverageReport.write_horizontal_rule(fout) @staticmethod def write_coverage_report_summary(fout, statements, missed, coverage): """Write summary compatible with Pycov to the output file.""" ProjectCoverageReport.write_horizontal_rule(fout) fout.write(format_coverage_line("TOTAL", statements, missed, int(coverage))) fout.write("\n") def read_java_classes(self): """Read and parse into about Java classes from JaCoCo results.""" data = ProjectCoverageReport.read_csv(self.csv_input_file_name, True) return [JavaClassCoverageReport(record) for record in data] def convert_code_coverage_report(self, output_file_name): """Convert code coverage report that would be compatible with PyCov output.""" java_classes = self.read_java_classes() statements, missed, coverage = ProjectCoverageReport.compute_total(java_classes) with open(output_file_name, "w") as fout: ProjectCoverageReport.write_coverage_report_header(fout) for java_class in java_classes: fout.write(str(java_class) + "\n") ProjectCoverageReport.write_coverage_report_summary(fout, statements, missed, coverage) @staticmethod def compute_total(records): """Compute total/summary from all Java class coverage reports.""" statements = 0 covered = 0 missed = 0 for record in records: statements += record.statements covered += 
record.covered missed += record.missed coverage = compute_coverage(statements, covered) return statements, missed, coverage def main(): """Just a test ATM.""" p = ProjectCoverageReport("fabric8-analytics-jenkins-plugin.coverage.csv") p.convert_code_coverage_report("fabric8-analytics-jenkins-plugin.coverage.txt") if __name__ == "__main__": # execute only if run as a script main()
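
# ---------------------------------------------------------------------------
# Reference for the CSV columns consumed above (hedged): a standard JaCoCo
# CSV export typically has the columns GROUP, PACKAGE, CLASS,
# INSTRUCTION_MISSED, INSTRUCTION_COVERED, BRANCH_MISSED, BRANCH_COVERED,
# LINE_MISSED, LINE_COVERED, ... so record[7]/record[8] read by
# JavaClassCoverageReport are the per-class missed/covered line counts.
# ---------------------------------------------------------------------------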
apache-2.0
-754,818,450,584,529,800
39.166667
99
0.647085
false
4.084746
false
false
false
huiyiqun/check_mk
cmk_base/core.py
1
9266
#!/usr/bin/python # -*- encoding: utf-8; py-indent-offset: 4 -*- # +------------------------------------------------------------------+ # | ____ _ _ __ __ _ __ | # | / ___| |__ ___ ___| | __ | \/ | |/ / | # | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | # | | |___| | | | __/ (__| < | | | | . \ | # | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | # | | # | Copyright Mathias Kettner 2014 [email protected] | # +------------------------------------------------------------------+ # # This file is part of Check_MK. # The official homepage is at http://mathias-kettner.de/check_mk. # # check_mk is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation in version 2. check_mk is distributed # in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- # out even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. See the GNU General Public License for more de- # tails. You should have received a copy of the GNU General Public # License along with GNU Make; see the file COPYING. If not, write # to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, # Boston, MA 02110-1301 USA. """All core related things like direct communication with the running core""" import fcntl import os import socket import subprocess import sys import cmk.paths import cmk.debug import cmk.tty as tty import livestatus from cmk.exceptions import MKGeneralException import cmk_base.console as console import cmk_base.config as config import cmk_base.core_config as core_config import cmk_base.core_nagios as core_nagios from cmk_base.exceptions import MKTimeout from cmk_base import config_cache try: import cmk_base.cee.core_cmc as core_cmc except ImportError: core_cmc = None _restart_lock_fd = None #. # .--Control-------------------------------------------------------------. # | ____ _ _ | # | / ___|___ _ __ | |_ _ __ ___ | | | # | | | / _ \| '_ \| __| '__/ _ \| | | # | | |__| (_) | | | | |_| | | (_) | | | # | \____\___/|_| |_|\__|_| \___/|_| | # | | # +----------------------------------------------------------------------+ # | Invoke actions affecting the core like reload/restart | # '----------------------------------------------------------------------' def do_reload(): do_restart(True) # TODO: Cleanup duplicate code with automation_restart() def do_restart(only_reload = False): try: backup_path = None if try_get_activation_lock(): # TODO: Replace by MKBailOut()/MKTerminate()? console.error("Other restart currently in progress. Aborting.\n") sys.exit(1) # Save current configuration if os.path.exists(cmk.paths.nagios_objects_file): backup_path = cmk.paths.nagios_objects_file + ".save" console.verbose("Renaming %s to %s\n", cmk.paths.nagios_objects_file, backup_path, stream=sys.stderr) os.rename(cmk.paths.nagios_objects_file, backup_path) else: backup_path = None try: core_config.do_create_config(with_agents=True) except Exception, e: # TODO: Replace by MKBailOut()/MKTerminate()? console.error("Error creating configuration: %s\n" % e) if backup_path: os.rename(backup_path, cmk.paths.nagios_objects_file) if cmk.debug.enabled(): raise sys.exit(1) if config.monitoring_core == "cmc" or core_nagios.do_check_nagiosconfig(): if backup_path: os.remove(backup_path) core_config.precompile() do_core_action(only_reload and "reload" or "restart") else: # TODO: Replace by MKBailOut()/MKTerminate()? console.error("Configuration for monitoring core is invalid. 
Rolling back.\n") broken_config_path = "%s/check_mk_objects.cfg.broken" % cmk.paths.tmp_dir file(broken_config_path, "w").write(file(cmk.paths.nagios_objects_file).read()) console.error("The broken file has been copied to \"%s\" for analysis.\n" % broken_config_path) if backup_path: os.rename(backup_path, cmk.paths.nagios_objects_file) else: os.remove(cmk.paths.nagios_objects_file) sys.exit(1) except Exception, e: try: if backup_path and os.path.exists(backup_path): os.remove(backup_path) except: pass if cmk.debug.enabled(): raise # TODO: Replace by MKBailOut()/MKTerminate()? console.error("An error occurred: %s\n" % e) sys.exit(1) def try_get_activation_lock(): global _restart_lock_fd # In some bizarr cases (as cmk -RR) we need to avoid duplicate locking! if config.restart_locking and _restart_lock_fd == None: lock_file = cmk.paths.default_config_dir + "/main.mk" _restart_lock_fd = os.open(lock_file, os.O_RDONLY) # Make sure that open file is not inherited to monitoring core! fcntl.fcntl(_restart_lock_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC) try: console.verbose("Waiting for exclusive lock on %s.\n" % lock_file, stream=sys.stderr) fcntl.flock(_restart_lock_fd, fcntl.LOCK_EX | ( config.restart_locking == "abort" and fcntl.LOCK_NB or 0)) except: return True return False # Action can be restart, reload, start or stop def do_core_action(action, quiet=False): if not quiet: console.output("%sing monitoring core..." % action.title()) if config.monitoring_core == "nagios": os.putenv("CORE_NOVERIFY", "yes") command = [ "%s/etc/init.d/core" % cmk.paths.omd_root, action ] else: command = [ "omd", action, "cmc" ] p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True) result = p.wait() if result != 0: output = p.stdout.read() if not quiet: console.output("ERROR: %s\n" % output) raise MKGeneralException("Cannot %s the monitoring core: %s" % (action, output)) else: if not quiet: console.output(tty.ok + "\n") #. # .--Timeperiods---------------------------------------------------------. # | _____ _ _ _ | # | |_ _(_)_ __ ___ ___ _ __ ___ _ __(_) ___ __| |___ | # | | | | | '_ ` _ \ / _ \ '_ \ / _ \ '__| |/ _ \ / _` / __| | # | | | | | | | | | | __/ |_) | __/ | | | (_) | (_| \__ \ | # | |_| |_|_| |_| |_|\___| .__/ \___|_| |_|\___/ \__,_|___/ | # | |_| | # +----------------------------------------------------------------------+ # | Fetching timeperiods from the core | # '----------------------------------------------------------------------' # Check if a timeperiod is currently active. We have no other way than # doing a Livestatus query. This is not really nice, but if you have a better # idea, please tell me... def check_timeperiod(timeperiod): # Let exceptions happen, they will be handled upstream. try: update_timeperiods_cache() except MKTimeout: raise except: if cmk.debug.enabled(): raise # If the query is not successful better skip this check then fail return True # Note: This also returns True when the timeperiod is unknown # The following function timeperiod_active handles this differently return config_cache.get_dict("timeperiods_cache").get(timeperiod, True) == True # Returns # True : active # False: inactive # None : unknown timeperiod # # Raises an exception if e.g. a timeout or connection error appears. # This way errors can be handled upstream. 
def timeperiod_active(timeperiod): update_timeperiods_cache() return config_cache.get_dict("timeperiods_cache").get(timeperiod) def update_timeperiods_cache(): # { "last_update": 1498820128, "timeperiods": [{"24x7": True}] } # The value is store within the config cache since we need a fresh start on reload tp_cache = config_cache.get_dict("timeperiods_cache") if not tp_cache: response = livestatus.LocalConnection().query("GET timeperiods\nColumns: name in") for tp_name, tp_active in response: tp_cache[tp_name] = bool(tp_active) def cleanup_timeperiod_caches(): config_cache.get_dict("timeperiods_cache").clear()
gpl-2.0
3,326,192,585,655,252,000
38.598291
113
0.506583
false
3.780498
true
false
false
pfalcon/picotui
picotui/editorext.py
1
5118
#
# Extended VT100 terminal text editor, etc. widgets
# Copyright (c) 2015 Paul Sokolovsky
# Distributed under MIT License
#
import sys
import os

from .editor import *


# Edit single line, quit on Enter/Esc
class LineEditor(Editor):

    def handle_cursor_keys(self, key):
        if super().handle_cursor_keys(key):
            self.just_started = False
            return True
        return False

    def handle_key(self, key):
        if key in (KEY_ENTER, KEY_ESC):
            return key
        if self.just_started:
            # Overwrite initial string with new content
            self.set_lines([""])
            self.col = 0
            self.just_started = False

        return super().handle_key(key)

    def edit(self, line):
        self.set_lines([line])
        self.col = len(line)
        self.adjust_cursor_eol()
        self.just_started = True
        key = self.loop()
        if key == KEY_ENTER:
            return self.content[0]
        return None


class Viewer(Editor):

    def handle_key(self, key):
        if key in (KEY_ENTER, KEY_ESC):
            return key
        if super().handle_cursor_keys(key):
            return True


# Viewer with colored lines (whole line same color)
class LineColorViewer(Viewer):

    def show_line(self, l, i):
        if self.is_dict_color:
            c = self.lines_c.get(i, self.def_c)
        else:
            try:
                c = self.lines_c[i]
            except IndexError:
                c = self.def_c
        self.attr_color(c)
        super().show_line(l, i)
        self.attr_reset()

    def set_line_colors(self, default_color, color_list={}):
        self.def_c = default_color
        self.lines_c = color_list
        self.is_dict_color = isinstance(color_list, dict)


# Viewer with color support (each line may consist of spans
# of different colors)
class CharColorViewer(Viewer):

    def show_line(self, l, i):
        # TODO: handle self.margin, self.width
        length = 0
        for span in l:
            if isinstance(span, tuple):
                span, c = span
            else:
                c = self.def_c
            self.attr_color(c)
            self.wr(span)
            length += len(span)
        self.attr_color(self.def_c)
        self.clear_num_pos(self.width - length)
        self.attr_reset()

    def set_def_color(self, default_color):
        self.def_c = default_color


class EditorExt(Editor):

    screen_width = 80

    def __init__(self, left=0, top=0, width=80, height=24):
        super().__init__(left, top, width, height)
        # +1 assumes there's a border around editor pane
        self.status_y = top + height + 1

    def get_cur_line(self):
        return self.content[self.cur_line]

    def line_visible(self, no):
        return self.top_line <= no < self.top_line + self.height

    # If line "no" is already on screen, just reposition cursor to it and
    # return False. Otherwise, show needed line either at the center of
    # screen or at the top, and return True.
def goto_line(self, no, col=None, center=True): self.cur_line = no if self.line_visible(no): self.row = no - self.top_line if col is not None: self.col = col if self.adjust_cursor_eol(): self.redraw() self.set_cursor() return False if center: c = self.height // 2 if no > c: self.top_line = no - c self.row = c else: self.top_line = 0 self.row = no else: self.top_line = no self.row = 0 if col is not None: self.col = col self.adjust_cursor_eol() self.redraw() return True def show_status(self, msg): self.cursor(False) self.goto(0, self.status_y) self.wr(msg) self.clear_to_eol() self.set_cursor() self.cursor(True) def show_cursor_status(self): self.cursor(False) self.goto(0, 31) self.wr("% 3d:% 3d" % (self.cur_line, self.col + self.margin)) self.set_cursor() self.cursor(True) def dialog_edit_line(self, left=None, top=8, width=40, height=3, line="", title=""): if left is None: left = (self.screen_width - width) / 2 self.dialog_box(left, top, width, height, title) e = LineEditor(left + 1, top + 1, width - 2, height - 2) return e.edit(line) if __name__ == "__main__": with open(sys.argv[1]) as f: content = f.read().splitlines() #content = f.readlines() #os.write(1, b"\x1b[18t") #key = os.read(0, 32) #print(repr(key)) #key = os.read(0, 32) #print(repr(key)) #1/0 e = EditorExt(left=1, top=1, width=60, height=25) e.init_tty() e.enable_mouse() s = e.dialog_edit_line(10, 5, 40, 3, title="Enter name:", line="test") e.cls() e.deinit_tty() print() print(s) 1/0 # e.cls() e.draw_box(0, 0, 62, 27) e.set_lines(content) e.loop() e.deinit_tty()
mit
-3,940,507,001,391,494,700
24.979695
88
0.540445
false
3.484003
false
false
false
anthropo-lab/XP
EPHEMER/insider_trading_project/insider_trading/consumers.py
1
7993
from channels import Group as channelsGroup from channels.sessions import channel_session import random from .models import Group as OtreeGroup, Subsession as OtreeSubsession, Constants import json import channels import logging from otree import constants_internal import django.test from otree.common_internal import (get_admin_secret_code) from threading import Event import time client = django.test.Client() ADMIN_SECRET_CODE = get_admin_secret_code() ############################################# ############################################# # Connected to websocket.connect def ws_admin_connect(message): print("*********CONNECT************") channelsGroup("adminreport").add(message.reply_channel) # Connected to websocket.receive def ws_admin_message(message): print("*********RECEIVE************") # Decrypt the url: No info in the url in this app # Decrypt the received message jsonmessage = json.loads(message.content['text']) subsession_pk = jsonmessage['subsession_pk'] mysubsession = OtreeSubsession.objects.get(pk=subsession_pk) if 'order' in jsonmessage: order = jsonmessage['order'] if order == "push_all_players_on_page": page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mysubsession.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb)): # This player is one of those who needs to be advanced try: if p.participant._current_form_page_url: resp = client.post( p.participant._current_form_page_url, data={ constants_internal.timeout_happened: True, constants_internal.admin_secret_code: ADMIN_SECRET_CODE }, follow=True ) else: resp = client.get(p.participant._start_url(), follow=True) except: logging.exception("Failed to advance participant.") raise assert resp.status_code < 400 p.participant.vars['participant_was_pushed'] = 'True' p.participant.save() channels.Group( 'auto-advance-{}'.format(p.participant.code) ).send( {'text': json.dumps( {'auto_advanced': True})} ) elif order == "push_active_players_on_page": group_pk = jsonmessage['group_pk'] mygroup = OtreeGroup.objects.get(pk=group_pk) page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mygroup.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb) & (p.participant.vars['active_flag'] != 'Inactive')): # This player is one of those who needs to be advanced try: if p.participant._current_form_page_url: resp = client.post( p.participant._current_form_page_url, data={ constants_internal.timeout_happened: True, constants_internal.admin_secret_code: ADMIN_SECRET_CODE }, follow=True ) else: resp = client.get(p.participant._start_url(), follow=True) except: logging.exception("Failed to advance participant.") raise assert resp.status_code < 400 p.participant.vars['participant_was_pushed'] = 'True' p.participant.save() channels.Group( 'auto-advance-{}'.format(p.participant.code) ).send( {'text': json.dumps( {'auto_advanced': True})} ) elif order == "push_inactive_players_on_page": group_pk = jsonmessage['group_pk'] mygroup = OtreeGroup.objects.get(pk=group_pk) page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mygroup.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb) & (p.participant.vars['active_flag'] == 'Inactive')): # This player is one of those who needs to be advanced try: if p.participant._current_form_page_url: resp = client.post( 
p.participant._current_form_page_url, data={ constants_internal.timeout_happened: True, constants_internal.admin_secret_code: ADMIN_SECRET_CODE }, follow=True ) else: resp = client.get(p.participant._start_url(), follow=True) except: logging.exception("Failed to advance participant.") raise assert resp.status_code < 400 p.participant.vars['participant_was_pushed'] = 'True' p.participant.save() channels.Group( 'auto-advance-{}'.format(p.participant.code) ).send( {'text': json.dumps( {'auto_advanced': True})} ) elif order == "deactivate_all_group_on_page": group_pk = jsonmessage['group_pk'] mygroup = OtreeGroup.objects.get(pk=group_pk) page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mygroup.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb)): p.participant.vars['active_flag'] = 'Inactive' p.participant.save() elif order == "reactivate_all_group_on_page": group_pk = jsonmessage['group_pk'] mygroup = OtreeGroup.objects.get(pk=group_pk) page_name = jsonmessage['page_name'] round_nb = jsonmessage['round_nb'] for p in mygroup.get_players(): if ((str(p.participant._current_page_name) == page_name) & (p.participant._round_number == round_nb)): p.participant.vars['active_flag'] = 'Playing_No_Change_Game' p.participant.save() elif order == "make_grouping_phase1": mysubsession.make_grouping_phase1() elif order == "make_grouping_phase2": mysubsession.make_grouping_phase2() ############################################# # Give feedback channelsGroup("adminreport").send({'text': json.dumps( {"order": "refresh"})} ) # Connected to websocket.disconnect def ws_admin_disconnect(message): print("*********DISCONNECT************") channelsGroup("adminreport").discard(message.reply_channel)
gpl-3.0
-7,776,490,304,224,844,000
44.936782
91
0.477793
false
4.829607
false
false
false
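The receive handler above funnels every admin order through one long if/elif chain keyed on the message's "order" field. A minimal, framework-free sketch of the same dispatch idea using a handler table; the handler bodies and message fields below are placeholders for illustration, not part of the oTree or Channels API:

import json

def push_all(msg):
    print("pushing all players on page", msg["page_name"])

def push_active(msg):
    print("pushing active players on page", msg["page_name"])

# one entry per admin order; adding an order is one line, not another elif
HANDLERS = {
    "push_all_players_on_page": push_all,
    "push_active_players_on_page": push_active,
}

def ws_admin_message(text):
    msg = json.loads(text)
    handler = HANDLERS.get(msg.get("order"))
    if handler is None:
        raise ValueError("unknown order: %r" % msg.get("order"))
    handler(msg)

ws_admin_message('{"order": "push_all_players_on_page", "page_name": "Results"}')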
icarito/sugar
src/jarabe/journal/listmodel.py
1
10564
# Copyright (C) 2009, Tomeu Vizoso # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging import time import json from gi.repository import GObject from gi.repository import Gtk from gettext import gettext as _ from sugar3.graphics.xocolor import XoColor from sugar3.graphics import style from sugar3 import util from jarabe.journal import model from jarabe.journal import misc DS_DBUS_SERVICE = 'org.laptop.sugar.DataStore' DS_DBUS_INTERFACE = 'org.laptop.sugar.DataStore' DS_DBUS_PATH = '/org/laptop/sugar/DataStore' class ListModel(GObject.GObject, Gtk.TreeModel, Gtk.TreeDragSource): __gtype_name__ = 'JournalListModel' __gsignals__ = { 'ready': (GObject.SignalFlags.RUN_FIRST, None, ([])), 'progress': (GObject.SignalFlags.RUN_FIRST, None, ([])), } COLUMN_UID = 0 COLUMN_FAVORITE = 1 COLUMN_ICON = 2 COLUMN_ICON_COLOR = 3 COLUMN_TITLE = 4 COLUMN_TIMESTAMP = 5 COLUMN_CREATION_TIME = 6 COLUMN_FILESIZE = 7 COLUMN_PROGRESS = 8 COLUMN_BUDDY_1 = 9 COLUMN_BUDDY_2 = 10 COLUMN_BUDDY_3 = 11 COLUMN_SELECT = 12 _COLUMN_TYPES = { COLUMN_UID: str, COLUMN_FAVORITE: bool, COLUMN_ICON: str, COLUMN_ICON_COLOR: object, COLUMN_TITLE: str, COLUMN_TIMESTAMP: str, COLUMN_CREATION_TIME: str, COLUMN_FILESIZE: str, COLUMN_PROGRESS: int, COLUMN_BUDDY_1: object, COLUMN_BUDDY_3: object, COLUMN_BUDDY_2: object, COLUMN_SELECT: bool, } _PAGE_SIZE = 10 def __init__(self, query): GObject.GObject.__init__(self) self._last_requested_index = None self._temp_drag_file_uid = None self._cached_row = None self._query = query self._all_ids = [] t = time.time() self._result_set = model.find(query, ListModel._PAGE_SIZE) logging.debug('init resultset: %r', time.time() - t) self._temp_drag_file_path = None self._selected = [] # HACK: The view will tell us that it is resizing so the model can # avoid hitting D-Bus and disk. 
self.view_is_resizing = False # Store the changes originated in the treeview so we do not need # to regenerate the model and stuff up the scroll position self._updated_entries = {} self._result_set.ready.connect(self.__result_set_ready_cb) self._result_set.progress.connect(self.__result_set_progress_cb) def get_all_ids(self): return self._all_ids def __result_set_ready_cb(self, **kwargs): t = time.time() self._all_ids = self._result_set.find_ids(self._query) logging.debug('get all ids: %r', time.time() - t) self.emit('ready') def __result_set_progress_cb(self, **kwargs): self.emit('progress') def setup(self, updated_callback=None): self._result_set.setup() self._updated_callback = updated_callback def stop(self): self._result_set.stop() def get_metadata(self, path): return model.get(self[path][ListModel.COLUMN_UID]) def do_get_n_columns(self): return len(ListModel._COLUMN_TYPES) def do_get_column_type(self, index): return ListModel._COLUMN_TYPES[index] def do_iter_n_children(self, iterator): if iterator is None: return self._result_set.length else: return 0 def set_value(self, iterator, column, value): index = iterator.user_data self._result_set.seek(index) metadata = self._result_set.read() if column == ListModel.COLUMN_FAVORITE: metadata['keep'] = value if column == ListModel.COLUMN_TITLE: metadata['title'] = value self._updated_entries[metadata['uid']] = metadata if self._updated_callback is not None: model.updated.disconnect(self._updated_callback) model.write(metadata, update_mtime=False, ready_callback=self.__reconnect_updates_cb) def __reconnect_updates_cb(self, metadata, filepath, uid): logging.error('__reconnect_updates_cb') if self._updated_callback is not None: model.updated.connect(self._updated_callback) def do_get_value(self, iterator, column): if self.view_is_resizing: return None index = iterator.user_data if index == self._last_requested_index: return self._cached_row[column] if index >= self._result_set.length: return None self._result_set.seek(index) metadata = self._result_set.read() metadata.update(self._updated_entries.get(metadata['uid'], {})) self._last_requested_index = index self._cached_row = [] self._cached_row.append(metadata['uid']) self._cached_row.append(metadata.get('keep', '0') == '1') self._cached_row.append(misc.get_icon_name(metadata)) if misc.is_activity_bundle(metadata): xo_color = XoColor('%s,%s' % (style.COLOR_BUTTON_GREY.get_svg(), style.COLOR_TRANSPARENT.get_svg())) else: xo_color = misc.get_icon_color(metadata) self._cached_row.append(xo_color) title = GObject.markup_escape_text(metadata.get('title', _('Untitled'))) self._cached_row.append('<b>%s</b>' % (title, )) try: timestamp = float(metadata.get('timestamp', 0)) except (TypeError, ValueError): timestamp_content = _('Unknown') else: timestamp_content = util.timestamp_to_elapsed_string(timestamp) self._cached_row.append(timestamp_content) try: creation_time = float(metadata.get('creation_time')) except (TypeError, ValueError): self._cached_row.append(_('Unknown')) else: self._cached_row.append( util.timestamp_to_elapsed_string(float(creation_time))) try: size = int(metadata.get('filesize')) except (TypeError, ValueError): size = None self._cached_row.append(util.format_size(size)) try: progress = int(float(metadata.get('progress', 100))) except (TypeError, ValueError): progress = 100 self._cached_row.append(progress) buddies = [] if metadata.get('buddies'): try: buddies = json.loads(metadata['buddies']).values() except json.decoder.JSONDecodeError, exception: logging.warning('Cannot decode 
buddies for %r: %s', metadata['uid'], exception) if not isinstance(buddies, list): logging.warning('Content of buddies for %r is not a list: %r', metadata['uid'], buddies) buddies = [] for n_ in xrange(0, 3): if buddies: try: nick, color = buddies.pop(0) except (AttributeError, ValueError), exception: logging.warning('Malformed buddies for %r: %s', metadata['uid'], exception) else: self._cached_row.append([nick, XoColor(color)]) continue self._cached_row.append(None) return self._cached_row[column] def do_iter_nth_child(self, parent_iter, n): return (False, None) def do_get_path(self, iterator): treepath = Gtk.TreePath((iterator.user_data,)) return treepath def do_get_iter(self, path): idx = path.get_indices()[0] iterator = Gtk.TreeIter() iterator.user_data = idx return (True, iterator) def do_iter_next(self, iterator): idx = iterator.user_data + 1 if idx >= self._result_set.length: iterator.stamp = -1 return (False, iterator) else: iterator.user_data = idx return (True, iterator) def do_get_flags(self): return Gtk.TreeModelFlags.ITERS_PERSIST | Gtk.TreeModelFlags.LIST_ONLY def do_iter_children(self, iterator): return (False, iterator) def do_iter_has_child(self, iterator): return False def do_iter_parent(self, iterator): return (False, Gtk.TreeIter()) def do_drag_data_get(self, path, selection): uid = self[path][ListModel.COLUMN_UID] target_atom = selection.get_target() target_name = target_atom.name() if target_name == 'text/uri-list': # Only get a new temp path if we have a new file, the frame # requests a path many times and if we give it a new path it # ends up with a broken path if uid != self._temp_drag_file_uid: # Get hold of a reference so the temp file doesn't get deleted self._temp_drag_file_path = model.get_file(uid) self._temp_drag_file_uid = uid logging.debug('putting %r in selection', self._temp_drag_file_path) selection.set(target_atom, 8, self._temp_drag_file_path) return True elif target_name == 'journal-object-id': # uid is unicode but Gtk.SelectionData.set() needs str selection.set(target_atom, 8, str(uid)) return True return False def set_selected(self, uid, value): if value: self._selected.append(uid) else: self._selected.remove(uid) def is_selected(self, uid): return uid in self._selected def get_selected_items(self): return self._selected def restore_selection(self, selected): self._selected = selected def select_all(self): self._selected = self._all_ids[:] def select_none(self): self._selected = []
gpl-3.0
-7,650,490,496,236,193,000
32.220126
79
0.591064
false
3.886681
false
false
false
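do_get_value above memoizes the last row it materialized (_last_requested_index / _cached_row) so repeated GTK redraws of the same row skip the D-Bus and disk round trip. A stripped-down sketch of that one-entry cache; the backend read is a stand-in lambda, not the Sugar datastore API:

class RowCache:
    def __init__(self, read_row):
        self._read_row = read_row      # expensive backend fetch
        self._last_index = None
        self._last_row = None

    def get(self, index, column):
        if index != self._last_index:  # cache miss: hit the backend once
            self._last_row = self._read_row(index)
            self._last_index = index
        return self._last_row[column]

cache = RowCache(lambda i: ["uid-%d" % i, "Title %d" % i])
assert cache.get(3, 1) == "Title 3"
assert cache.get(3, 0) == "uid-3"      # served from the cached row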
MuffinMedic/znc-weblog
weblog.py
1
5161
import znc import os def is_safe_path(basedir, path): return os.path.abspath(path).startswith(basedir) class weblog(znc.Module): module_types = [znc.CModInfo.GlobalModule] description = "Allowings viewing of log files from the ZNC webadmin" wiki_page = "Weblog" def OnLoad(self, args, message): return True def WebRequiresLogin(self): return True def WebRequiresAdmin(self): return False def GetWebMenuTitle(self): return "Log Viewer" def OnWebRequest(self, sock, page, tmpl): user = sock.GetUser() dir = sock.GetParam('dir', False) if page == "index": if sock.GetRawParam('scope', True): scope = sock.GetRawParam('scope', True) self.setscope(scope, sock, tmpl) try: self.listdir(tmpl, dir, sock) except KeyError: row = tmpl.AddRow("ErrorLoop") row["error"] = "No scope set. Please set one above." elif page == "log" or page == "raw": self.viewlog(tmpl, dir, sock, page) self.getscopes(sock, tmpl) return True def listdir(self, tmpl, dir, sock): base = self.getbase(sock) try: dir_list = sorted(os.listdir(base + dir)) self.breadcrumbs(tmpl, dir, False) if len(dir_list) > 0: for item in dir_list: row = tmpl.AddRow("ListLoop") rel = dir + '/' + item if dir else item path = base + rel if os.path.isfile(path): url = 'log?dir=' + rel.replace('#', '%23') size = str(os.path.getsize(path) >> 10) + " KB" elif os.path.isdir(path): url = '?dir=' + rel.replace('#', '%23') size = len([name for name in os.listdir(path)]) row["scope"] = url row["item"] = item row["size"] = str(size) else: row = tmpl.AddRow("ErrorLoop") row["error"] = "Directory empty." except FileNotFoundError: row = tmpl.AddRow("ErrorLoop") row["error"] = "Directory does not exist. Please make sure you have the log module enabled and that you are attempting to access logs at the appropriate level (global, user, or network)." def viewlog(self, tmpl, dir, sock, page): base = self.getbase(sock) if not is_safe_path(base, base + dir): if page == "raw": row = tmpl.AddRow("LogLoop") row['log'] = "Error: invalid directory provided." return row = tmpl.AddRow("ErrorLoop") row["error"] = "Invalid directory provided." 
return path = base + dir row = tmpl.AddRow("LogLoop") with open(path, 'r', encoding='utf8') as log: log = log.read() if page == "raw": log = log.replace('<', '&lt;').replace('>', '&gt;') row['log'] = log if page == "log": self.breadcrumbs(tmpl, dir, True) row['raw'] = 'raw?dir=' + dir.replace('#', '%23') def breadcrumbs(self, tmpl, dir, islog): folders = dir.split('/') crumbs = ['<a href="">logs / </a>'] row = tmpl.AddRow("BreadcrumbLoop") row["crumbtext"] = "logs" row["crumburl"] = "" for i in range(0, len(folders)): if folders[i]: row = tmpl.AddRow("BreadcrumbLoop") row["crumbtext"] = folders[i] url = '/'.join(folders[0:i+1]) url = url.replace('#', '%23') row["crumburl"] = url if i == len(folders) - 1 and islog: row["islog"] = "True" def getbase(self, sock): base = znc.CZNC.Get().GetZNCPath() user = sock.GetUser() scope = self.nv[user] if scope == "Global": base = base + '/moddata/log/' + user + '/' elif scope == "User": base = base + '/users/' + user + '/moddata/log/' else: base = base + '/users/' + user + '/networks/' + self.nv[user] + '/moddata/log/' return base def getscopes(self, sock, tmpl): user_string = sock.GetUser() user = znc.CZNC.Get().FindUser(user_string) networks = user.GetNetworks() net_array = [] for network in networks: net_array.append(network.GetName()) net_array = sorted(net_array) net_array.insert(0, 'User'); net_array.insert(0, 'Global') for net in net_array: row = tmpl.AddRow("ScopeLoop") try: if net == self.nv[user_string]: row["active"] = "True" except KeyError: pass row["network"] = net def setscope(self, scope, sock, tmpl): user = sock.GetUser() self.nv[user] = scope row = tmpl.AddRow("MessageLoop") row["message"] = "Scope successfully set."
gpl-3.0
4,999,262,817,996,553,000
32.732026
199
0.496803
false
3.939695
false
false
false
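is_safe_path above is a plain startswith prefix test, which treats "/logs-evil" as being inside "/logs" and does not resolve symlinks or ".." segments. A hardened variant using resolved paths and os.path.commonpath, offered as a suggested replacement rather than what the module currently ships:

import os

def is_safe_path(basedir, path):
    # resolve symlinks and '..' segments before comparing
    base = os.path.realpath(basedir)
    target = os.path.realpath(path)
    return os.path.commonpath([base, target]) == base

assert is_safe_path("/tmp/logs", "/tmp/logs/net/chan.log")
assert not is_safe_path("/tmp/logs", "/tmp/logs/../secrets")
assert not is_safe_path("/tmp/logs", "/tmp/logs-evil/x.log")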
CalvinNeo/EasyMLPlatform
py/graphic/tree.py
1
4067
#coding:utf8 import numpy as np import math import pylab as pl import matplotlib.cm as cm import matplotlib.mlab as mlab import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import json class GraphTree: def __init__(self): self.jsonobj = {} self.leafNode = dict(boxstyle = 'round4',fc = '0.8') self.branchNode = dict(boxstyle = 'sawtooth',fc = '0.8') self.arrow = dict(arrowstyle = '<-') self.depth = 0 self.leafcount = 0 def get_depth_leafcount(self,root): current_node = root.keys()[0] #name of choice node(string) branch_dict = root[current_node] maxdepth, thisdepth, thisleafcount = 0,0,0 for current_node in branch_dict.keys(): # print current_node,type(branch_dict[current_node]).__name__ if type(branch_dict[current_node]).__name__ == 'dict': temp = self.get_depth_leafcount(branch_dict[current_node]) thisdepth = 1 + temp[0] thisleafcount += temp[1] else: thisdepth = 1 thisleafcount += 1 if thisdepth > maxdepth: maxdepth = thisdepth return maxdepth,thisleafcount def load(self,strjson): self.jsonobj = dict(strjson) self.depth,self.leafcount = self.get_depth_leafcount(self.jsonobj) def plotMidText(self, cntrPt, parentPt, txtString): xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0] yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1] self.ax1.text(xMid, yMid, txtString) def plotNode(self, nodeTxt, cntrPt, parentPt, nodeType): self.ax1.annotate(nodeTxt, xy = parentPt, xycoords = 'axes fraction', xytext = cntrPt, \ textcoords = 'axes fraction', va = 'center', ha = 'center', bbox = nodeType, arrowprops = self.arrow) def plotTree(self, myTree, parentPt, nodeTxt): depth, leaves = self.get_depth_leafcount(myTree) current_node = myTree.keys()[0] cntrPt = (self.xOff + (1.0 + leaves) / 2.0 / self.leafcount, self.yOff) self.plotMidText(cntrPt, parentPt, nodeTxt) self.plotNode(current_node, cntrPt, parentPt, self.branchNode) branch_dict = myTree[current_node] self.yOff -= 1.0 / self.depth for current_node in branch_dict.keys(): if type(branch_dict[current_node]).__name__ == 'dict': self.plotTree(branch_dict[current_node], cntrPt, str(current_node)) else: self.xOff += 1.0 / self.leafcount self.plotNode(branch_dict[current_node], (self.xOff, self.yOff), cntrPt, self.leafNode) self.plotMidText((self.xOff, self.yOff), cntrPt, str(current_node)) self.yOff += 1.0 / self.depth def createPlot(self, show = True, save = ''): fig = plt.figure(1, facecolor = 'white') fig.clf() axprops = dict(xticks = [], yticks = []) self.ax1 = plt.subplot(111,frameon = False, **axprops) self.xOff, self.yOff = -0.5 / self.leafcount, 1.0 self.plotTree(self.jsonobj, (0.5,1.0), '') import StringIO, urllib, base64 if show: plt.show() else: imgdata = StringIO.StringIO() fig.savefig(imgdata, format='png') imgdata.seek(0) # rewind the data uri = 'data:image/png;base64,' + urllib.quote(base64.b64encode(imgdata.buf)) imgdata.close() return uri def showPlot(self): plt.show() if __name__ == '__main__': tr = GraphTree() # aa = '{"no surfacing":{"0":"no","1":{"flippers":{"0":"no","1":"yes"}}}}' # tr.load(json.loads(aa)) #JSON can't have non-string key aa = {"aged":{"0":"no","1":{"male":{"0":"no","1":"yes"}}}} # aa = {'water': {0: 1, 1: {'foot': {0: "'no'", 1: "'yes'"}}}} print dict(aa) # aa = {"no surfacing":{0:"no",1:{"flippers":{0:"no",1:"yes"}}}} # print dict(aa) tr.load(aa) print tr.leafcount,tr.depth tr.createPlot(show=True)
apache-2.0
7,361,431,833,645,096,000
38.485437
114
0.572412
false
3.238057
false
false
false
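get_depth_leafcount walks the nested-dict tree once and returns both the maximum depth and the number of leaves; the module is Python 2, where dict.keys()[0] works. The same recursion in Python 3, using next(iter(...)) instead:

def depth_and_leaves(tree):
    # tree looks like {decision_label: {branch_value: subtree_or_leaf_label}}
    root = next(iter(tree))
    max_depth, leaves = 0, 0
    for branch in tree[root].values():
        if isinstance(branch, dict):
            depth, count = depth_and_leaves(branch)
            depth += 1
        else:
            depth, count = 1, 1
        leaves += count
        max_depth = max(max_depth, depth)
    return max_depth, leaves

tree = {"aged": {"0": "no", "1": {"male": {"0": "no", "1": "yes"}}}}
print(depth_and_leaves(tree))  # -> (2, 3)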
deplinenoise/rlaunch
vbcc-driver.py
1
1297
#! /usr/bin/python import sys import os import os.path import subprocess import re line_re = re.compile(r'^(warning|error) (\d+) in line (\d+) of "([^"]*)":\s*(.*)$') def fix_fn(root_dir, fn): # If there are path separators in the filename, assume the path is valid if fn.find(os.sep) != -1: return fn if os.path.exists(fn): return fn full_path = os.path.join(root_dir, fn) if os.path.exists(full_path): return full_path return fn def munge(root_dir, line): m = re.match(line_re, line) if not m: return line.strip() fn = fix_fn(root_dir, m.group(4)) return '%s(%s) : %s %s: %s' % (fn, m.group(3), m.group(1), m.group(2), m.group(5)) if __name__ == '__main__': vbcc_root = os.environ.get('VBCC') if not vbcc_root: sys.stderr.write('VBCC environment variable not set') sys.exit(1) vc_bin = os.path.join(vbcc_root, 'bin' + os.sep + 'vc') if os.name == 'nt': vc_bin += '.exe' root_dir = '.' for arg in sys.argv[1:]: if arg.endswith('.c'): root_dir, dummy = os.path.split(arg) vc = subprocess.Popen( args = sys.argv[1:], executable = vc_bin, universal_newlines=True, stdin = None, stdout = subprocess.PIPE, stderr = subprocess.STDOUT) for line in vc.stdout: sys.stdout.write(munge(root_dir, line)) sys.stdout.write('\n')
gpl-3.0
2,088,134,103,908,204,300
19.587302
83
0.621434
false
2.548134
false
false
false
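The driver's real work is the diagnostic rewrite in munge: vbcc emits lines like 'warning 65 in line 12 of "main.c": ...' and IDE-style tooling expects 'main.c(12) : warning 65: ...'. A quick standalone check of that translation with the same regex; the sample diagnostics are invented for the demo:

import re

line_re = re.compile(r'^(warning|error) (\d+) in line (\d+) of "([^"]*)":\s*(.*)$')

def munge(line):
    m = line_re.match(line)
    if not m:
        return line.strip()   # non-diagnostic output passes through
    kind, num, lineno, fn, msg = m.groups()
    return '%s(%s) : %s %s: %s' % (fn, lineno, kind, num, msg)

print(munge('warning 65 in line 12 of "main.c": implicit declaration'))
# -> main.c(12) : warning 65: implicit declaration
print(munge('plain output passes through'))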
thefab/tornadis
tornadis/pool.py
1
7269
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is part of tornadis library released under the MIT license. # See the LICENSE file for more information. import tornado.gen import tornado.ioloop import tornado.locks import logging import functools from collections import deque from tornadis.client import Client from tornadis.utils import ContextManagerFuture from tornadis.exceptions import ClientError LOG = logging.getLogger(__name__) class ClientPool(object): """High level object to deal with a pool of redis clients.""" def __init__(self, max_size=-1, client_timeout=-1, autoclose=False, **client_kwargs): """Constructor. Args: max_size (int): max size of the pool (-1 means "no limit"). client_timeout (int): timeout in seconds of a connection released to the pool (-1 means "no timeout"). autoclose (boolean): automatically disconnect released connections with lifetime > client_timeout (test made every client_timeout/10 seconds). client_kwargs (dict): Client constructor arguments. """ self.max_size = max_size self.client_timeout = client_timeout self.client_kwargs = client_kwargs self.__ioloop = client_kwargs.get('ioloop', tornado.ioloop.IOLoop.instance()) self.autoclose = autoclose self.__pool = deque() if self.max_size != -1: self.__sem = tornado.locks.Semaphore(self.max_size) else: self.__sem = None self.__autoclose_periodic = None if self.autoclose and self.client_timeout > 0: every = int(self.client_timeout) * 100 if int(tornado.version[0]) >= 5: cb = tornado.ioloop.PeriodicCallback(self._autoclose, every) else: cb = tornado.ioloop.PeriodicCallback(self._autoclose, every, self.__ioloop) self.__autoclose_periodic = cb self.__autoclose_periodic.start() def _get_client_from_pool_or_make_it(self): try: while True: client = self.__pool.popleft() if client.is_connected(): if self._is_expired_client(client): client.disconnect() continue break except IndexError: client = self._make_client() return (True, client) return (False, client) @tornado.gen.coroutine def get_connected_client(self): """Gets a connected Client object. If max_size is reached, this method will block until a new client object is available. Returns: A Future object with connected Client instance as a result (or ClientError if there was a connection problem) """ if self.__sem is not None: yield self.__sem.acquire() client = None newly_created, client = self._get_client_from_pool_or_make_it() if newly_created: res = yield client.connect() if not res: LOG.warning("can't connect to %s", client.title) raise tornado.gen.Return( ClientError("can't connect to %s" % client.title)) raise tornado.gen.Return(client) def get_client_nowait(self): """Gets a Client object (not necessary connected). If max_size is reached, this method will return None (and won't block). Returns: A Client instance (not necessary connected) as result (or None). 
""" if self.__sem is not None: if self.__sem._value == 0: return None self.__sem.acquire() _, client = self._get_client_from_pool_or_make_it() return client def _autoclose(self): newpool = deque() try: while True: client = self.__pool.popleft() if client.is_connected(): if self._is_expired_client(client): client.disconnect() else: newpool.append(client) except IndexError: self.__pool = newpool def _is_expired_client(self, client): if self.client_timeout != -1 and client.is_connected(): delta = client.get_last_state_change_timedelta() if delta.total_seconds() >= self.client_timeout: return True return False def connected_client(self): """Returns a ContextManagerFuture to be yielded in a with statement. Returns: A ContextManagerFuture object. Examples: >>> with (yield pool.connected_client()) as client: # client is a connected tornadis.Client instance # it will be automatically released to the pool thanks to # the "with" keyword reply = yield client.call("PING") """ future = self.get_connected_client() cb = functools.partial(self._connected_client_release_cb, future) return ContextManagerFuture(future, cb) def _connected_client_release_cb(self, future=None): client = future.result() self.release_client(client) def release_client(self, client): """Releases a client object to the pool. Args: client: Client object. """ if isinstance(client, Client): if not self._is_expired_client(client): LOG.debug('Client is not expired. Adding back to pool') self.__pool.append(client) elif client.is_connected(): LOG.debug('Client is expired and connected. Disconnecting') client.disconnect() if self.__sem is not None: self.__sem.release() def destroy(self): """Disconnects all pooled client objects.""" while True: try: client = self.__pool.popleft() if isinstance(client, Client): client.disconnect() except IndexError: break @tornado.gen.coroutine def preconnect(self, size=-1): """(pre)Connects some or all redis clients inside the pool. Args: size (int): number of redis clients to build and to connect (-1 means all clients if pool max_size > -1) Raises: ClientError: when size == -1 and pool max_size == -1 """ if size == -1 and self.max_size == -1: raise ClientError("size=-1 not allowed with pool max_size=-1") limit = min(size, self.max_size) if size != -1 else self.max_size clients = yield [self.get_connected_client() for _ in range(0, limit)] for client in clients: self.release_client(client) def _make_client(self): """Makes and returns a Client object.""" kwargs = self.client_kwargs client = Client(**kwargs) return client
mit
-2,969,162,958,521,919,500
35.164179
79
0.554959
false
4.61817
false
false
false
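The connected_client docstring above already shows the intended call pattern. A fuller coroutine sketch of pool usage; this assumes a reachable local redis-server and the tornadis package, and the host and port values are placeholders:

import tornado.gen
import tornado.ioloop
from tornadis import ClientPool

pool = ClientPool(max_size=10, client_timeout=60, autoclose=True,
                  host="127.0.0.1", port=6379)

@tornado.gen.coroutine
def ping_once():
    # the context manager releases the client back to the pool on exit
    with (yield pool.connected_client()) as client:
        reply = yield client.call("PING")
    raise tornado.gen.Return(reply)

if __name__ == "__main__":
    print(tornado.ioloop.IOLoop.instance().run_sync(ping_once))
    pool.destroy()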
wkerzendorf/wsynphot
wsynphot/base.py
1
15987
# defining the base filter curve classes import os from scipy import interpolate from wsynphot.spectrum1d import SKSpectrum1D as Spectrum1D import pandas as pd from wsynphot.io.cache_filters import load_filter_index, load_transmission_data from astropy import units as u, constants as const from astropy import utils import numpy as np from wsynphot.calibration import get_vega_calibration_spectrum def calculate_filter_flux_density(spectrum, filter): """ Calculate the average flux through the filter by evaluating the integral ..math:: f_lambda = \\frac{\\int_}{} Parameters ---------- spectrum: ~specutils.Spectrum1D spectrum object filter: ~wsynphot.FilterCurve :return: """ filtered_spectrum = filter * spectrum filter_flux_density = np.trapz(filtered_spectrum.flux * filtered_spectrum.wavelength, filtered_spectrum.wavelength) return filter_flux_density def calculate_vega_magnitude(spectrum, filter): filter_flux_density = calculate_filter_flux_density(spectrum, filter) wavelength_delta = filter.calculate_wavelength_delta() filtered_f_lambda = (filter_flux_density / wavelength_delta) zp_vega_f_lambda = filter.zp_vega_f_lambda return -2.5 * np.log10(filtered_f_lambda / zp_vega_f_lambda) def calculate_ab_magnitude(spectrum, filter): filtered_f_lambda = (calculate_filter_flux_density(spectrum, filter) / filter.calculate_wavelength_delta()) return -2.5 * np.log10(filtered_f_lambda / filter.zp_ab_f_lambda) def list_filters(): """ List available filter sets along with their properties """ return load_filter_index() class BaseFilterCurve(object): """ Basic filter curve class Parameters ---------- wavelength: ~astropy.units.Quantity wavelength for filter curve transmission_lambda: numpy.ndarray transmission_lambda for filter curve interpolation_kind: str allowed interpolation kinds given in scipy.interpolate.interp1d """ @classmethod def load_filter(cls, filter_id=None, interpolation_kind='linear'): """ Parameters ---------- filter_id: str or None if None is provided will return a DataFrame of all filters interpolation_kind: str see scipy.interpolation.interp1d """ if filter_id is None: return list_filters() else: filter = load_transmission_data(filter_id) wavelength_unit = 'angstrom' wavelength = filter['Wavelength'].values * u.Unit(wavelength_unit) return cls(wavelength, filter['Transmission'].values, interpolation_kind=interpolation_kind, filter_id=filter_id) def __init__(self, wavelength, transmission_lambda, interpolation_kind='linear', filter_id=None): if not hasattr(wavelength, 'unit'): raise ValueError('the wavelength needs to be a astropy quantity') self.wavelength = wavelength self.transmission_lambda = transmission_lambda self.interpolation_object = interpolate.interp1d(self.wavelength, self.transmission_lambda, kind=interpolation_kind, bounds_error=False, fill_value=0.0) self.filter_id = filter_id def __mul__(self, other): if not hasattr(other, 'flux') or not hasattr(other, 'wavelength'): raise ValueError('requiring a specutils.Spectrum1D-like object that' 'has attributes "flux" and "wavelength"') #new_wavelength = np.union1d(other.wavelength.to(self.wavelength.unit).value, # self.wavelength.value) * self.wavelength.unit transmission = self.interpolate(other.wavelength) return Spectrum1D.from_array(other.wavelength, transmission * other.flux) def __rmul__(self, other): return self.__mul__(other) @utils.lazyproperty def lambda_pivot(self): """ Calculate the pivotal wavelength as defined in Bessell & Murphy 2012 .. 
math:: \\lambda_\\textrm{pivot} = \\sqrt{ \\frac{\\int S(\\lambda)\\lambda d\\lambda}{\\int \\frac{S(\\lambda)}{\\lambda}}}\\\\ <f_\\nu> = <f_\\lambda>\\frac{\\lambda_\\textrm{pivot}^2}{c} """ return np.sqrt((np.trapz(self.transmission_lambda * self.wavelength, self.wavelength)/ (np.trapz(self.transmission_lambda / self.wavelength, self.wavelength)))) @utils.lazyproperty def wavelength_start(self): return self.get_wavelength_start() @utils.lazyproperty def wavelength_end(self): return self.get_wavelength_end() @utils.lazyproperty def zp_ab_f_lambda(self): return (self.zp_ab_f_nu * const.c / self.lambda_pivot**2).to( 'erg/s/cm^2/Angstrom', u.spectral()) @utils.lazyproperty def zp_ab_f_nu(self): return (3631 * u.Jy).to('erg/s/cm^2/Hz') @utils.lazyproperty def zp_vega_f_lambda(self): return (calculate_filter_flux_density(get_vega_calibration_spectrum(), self) / self.calculate_wavelength_delta()) def interpolate(self, wavelength): """ Interpolate the filter onto new wavelength grid Parameters ---------- wavelength: ~astropy.units.Quantity wavelength grid to interpolate on """ converted_wavelength = wavelength.to(self.wavelength.unit) return self.interpolation_object(converted_wavelength) def _calculuate_flux_density(self, wavelength, flux): return _calculcate_filter_flux_density(flux, self) def calculate_flux_density(self, spectrum): return calculate_filter_flux_density(spectrum, self) def calculate_f_lambda(self, spectrum): return (self.calculate_flux_density(spectrum) / self.calculate_wavelength_delta()) def calculate_wavelength_delta(self): """ Calculate the Integral :math:`\integral :return: """ return np.trapz(self.transmission_lambda * self.wavelength, self.wavelength) def calculate_weighted_average_wavelength(self): """ Calculate integral :math:`\\frac{\\int S(\\lambda) \\lambda d\\lambda}{\\int S(\\lambda) d\\lambda}` Returns : ~astropy.units.Quantity """ return (np.trapz(self.transmission_lambda * self.wavelength, self.wavelength) / self.calculate_wavelength_delta()) def calculate_vega_magnitude(self, spectrum): __doc__ = calculate_vega_magnitude.__doc__ return calculate_vega_magnitude(spectrum, self) def calculate_ab_magnitude(self, spectrum): __doc__ = calculate_ab_magnitude.__doc__ return calculate_ab_magnitude(spectrum, self) def convert_ab_magnitude_to_f_lambda(self, mag): return 10**(-0.4*mag) * self.zp_ab_f_lambda def convert_vega_magnitude_to_f_lambda(self, mag): return 10**(-0.4*mag) * self.zp_vega_f_lambda def plot(self, ax, scale_max=None, make_label=True, plot_kwargs={}, format_filter_id=None): if scale_max is not None: if hasattr(scale_max, 'unit'): scale_max = scale_max.value transmission = (self.transmission_lambda * scale_max / self.transmission_lambda.max()) else: transmission = self.transmission_lambda ax.plot(self.wavelength, transmission, **plot_kwargs) ax.set_xlabel('Wavelength [{0}]'.format( self.wavelength.unit.to_string(format='latex'))) ax.set_ylabel('Transmission [1]') if make_label==True and self.filter_id is not None: if format_filter_id is not None: filter_id = format_filter_id(self.filter_id) else: filter_id = self.filter_id text_x = (self.lambda_pivot).value text_y = transmission.max()/2 ax.text(text_x, text_y, filter_id, horizontalalignment='center', verticalalignment='center', bbox=dict(facecolor='white', alpha=0.5)) def get_wavelength_start(self, threshold=0.01): norm_cum_sum = (np.cumsum(self.transmission_lambda) / np.sum(self.transmission_lambda)) return self.wavelength[norm_cum_sum.searchsorted(threshold)] def get_wavelength_end(self, 
threshold=0.01): norm_cum_sum = (np.cumsum(self.transmission_lambda) / np.sum(self.transmission_lambda)) return self.wavelength[norm_cum_sum.searchsorted(1 - threshold)] class FilterCurve(BaseFilterCurve): def __repr__(self): if self.filter_id is None: filter_id = "{0:x}".format(self.__hash__()) else: filter_id = self.filter_id return "FilterCurve <{0}>".format(filter_id) class FilterSet(object): """ A set of filters Parameters ---------- filter_set: ~list a list of strings or a list of filters interpolation_kind: ~str scipy interpolaton kinds """ def __init__(self, filter_set, interpolation_kind='linear'): if hasattr(filter_set[0], 'wavelength'): self.filter_set = filter_set else: self.filter_set = [FilterCurve.load_filter(filter_id, interpolation_kind= interpolation_kind) for filter_id in filter_set] def __iter__(self): self.current_filter_idx = 0 return self def __next__(self): try: item = self.filter_set[self.current_filter_idx] except IndexError: raise StopIteration self.current_filter_idx += 1 return item next = __next__ def __getitem__(self, item): return self.filter_set.__getitem__(item) def __repr__(self): return "<{0} \n{1}>".format(self.__class__.__name__, '\n'.join( [item.filter_id for item in self.filter_set])) @property def lambda_pivot(self): return u.Quantity([item.lambda_pivot for item in self]) def calculate_f_lambda(self, spectrum): return u.Quantity( [item.calculate_f_lambda(spectrum) for item in self.filter_set]) def calculate_ab_magnitudes(self, spectrum): mags = [item.calculate_ab_magnitude(spectrum) for item in self.filter_set] return mags def calculate_vega_magnitudes(self, spectrum): mags = [item.calculate_vega_magnitude(spectrum) for item in self.filter_set] return mags def convert_ab_magnitudes_to_f_lambda(self, magnitudes): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambdas = [filter.convert_ab_magnitude_to_f_lambda(mag) for filter, mag in zip(self.filter_set, magnitudes)] return u.Quantity(f_lambdas) def convert_ab_magnitude_uncertainties_to_f_lambda_uncertainties( self, magnitudes, magnitude_uncertainties): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambda_positive_uncertainties = u.Quantity( [filter.convert_ab_magnitude_to_f_lambda(mag + mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties, )]) f_lambda_negative_uncertainties = u.Quantity( [filter.convert_ab_magnitude_to_f_lambda(mag - mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties)]) return np.abs(u.Quantity((f_lambda_positive_uncertainties, f_lambda_negative_uncertainties)) - self.convert_ab_magnitudes_to_f_lambda(magnitudes)) def convert_vega_magnitude_uncertainties_to_f_lambda_uncertainties( self, magnitudes, magnitude_uncertainties): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambda_positive_uncertainties = u.Quantity( [filter.convert_vega_magnitude_to_f_lambda(mag + mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties, )]) f_lambda_negative_uncertainties = u.Quantity( [filter.convert_vega_magnitude_to_f_lambda(mag - mag_uncertainty) for filter, mag, mag_uncertainty in zip( self.filter_set, magnitudes, magnitude_uncertainties)]) return 
np.abs(u.Quantity((f_lambda_positive_uncertainties, f_lambda_negative_uncertainties)) - self.convert_vega_magnitudes_to_f_lambda(magnitudes)) def convert_vega_magnitudes_to_f_lambda(self, magnitudes): if len(magnitudes) != len(self.filter_set): raise ValueError("Filter set and magnitudes need to have the same " "number of items") f_lambdas = [filter.convert_vega_magnitude_to_f_lambda(mag) for filter, mag in zip(self.filter_set, magnitudes)] return u.Quantity(f_lambdas) def plot_spectrum(self, spectrum, ax, make_labels=True, spectrum_plot_kwargs={}, filter_plot_kwargs={}, filter_color_list=None, format_filter_id=None): """ plot a spectrum with the given filters spectrum: ax: make_labels: :return: """ ax.plot(spectrum.wavelength, spectrum.flux, **spectrum_plot_kwargs) for i, filter in enumerate(self.filter_set): filter_scale = filter.calculate_f_lambda(spectrum) if filter_color_list is not None: filter_plot_kwargs['color'] = filter_color_list[i] filter.plot(ax, scale_max=filter_scale, make_label=make_labels, plot_kwargs=filter_plot_kwargs, format_filter_id=format_filter_id) class MagnitudeSet(FilterSet): def __init__(self, filter_set, magnitudes, magnitude_uncertainties=None, interpolation_kind='linear'): super(MagnitudeSet, self).__init__(filter_set, interpolation_kind= interpolation_kind) self.magnitudes = np.array(magnitudes) self.magnitude_uncertainties = np.array(magnitude_uncertainties) def __repr__(self): mag_str = '{0} {1:.4f} +/- {2:.4f}' mag_data = [] for i, filter in enumerate(self.filter_set): unc = (np.nan if self.magnitude_uncertainties is None else self.magnitude_uncertainties[i]) mag_data.append(mag_str.format(filter.filter_id, self.magnitudes[i], unc)) return "<{0} \n{1}>".format(self.__class__.__name__, '\n'.join(mag_data))
bsd-3-clause
6,882,483,729,775,752,000
33.454741
108
0.585851
false
4.021887
false
false
false
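lambda_pivot above is sqrt( integral of S(lambda)*lambda d_lambda / integral of S(lambda)/lambda d_lambda ), following Bessell & Murphy 2012. For a narrow box filter that should land near the band center; a toy numpy check, no wsynphot required:

import numpy as np

wavelength = np.linspace(4000.0, 6000.0, 2001)          # Angstrom
transmission = ((wavelength > 4900) & (wavelength < 5100)).astype(float)

num = np.trapz(transmission * wavelength, wavelength)
den = np.trapz(transmission / wavelength, wavelength)
lambda_pivot = np.sqrt(num / den)
print(lambda_pivot)   # ~5000, the center of the 4900-5100 box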
ChrisTimperley/PythonCGum
cgum/program.py
1
5861
from cgum.basic import * from cgum.utility import FNULL from pprint import pprint import cgum.statement as statement import cgum.expression as expression import cgum.preprocessor as preprocessor import cgum.typs as typs from subprocess import Popen, CalledProcessError import os.path import json import tempfile import codecs # TODO: Probe class Asm(Node): CODE = "260800" LABEL = "Asm" def __init__(self, pos, length, label, children): assert label is None super().__init__(pos, length, label, children) class Label(Node): CODE = "270100" LABEL = "Label" def __init__(self, pos, length, label, children): assert label is None assert len(children) in [1, 2] assert isinstance(children[0], GenericString) super().__init__(pos, length, label, children) def name(self): return self.__children[0].to_s() def statement(self): children = self.children() if len(children) == 2: return children[1] return None class FunctionParameter(Node): CODE = "220100" LABEL = "ParameterType" def __init__(self, pos, length, label, children): assert label is None assert len(children) <= 2 # Find the optional type and name of this parameter tmp = children.copy() self.__typ = \ tmp.pop(0) if (tmp and isinstance(tmp[0], typs.FullType)) else None self.__name = tmp.pop(0) if tmp else None assert self.__typ is None or isinstance(self.__typ, typs.FullType) assert self.__name is None or isinstance(self.__name, GenericString) super().__init__(pos, length, label, children) def is_incomplete(self): return self.name() is None def typ(self): return self.__typ.to_s() if self.__typ else None def name(self): return self.__name.to_s() if self.__name else None class FunctionParameters(Node): CODE = "200000" LABEL = "ParamList" def __init__(self, pos, length, label, children): assert label is None assert all([isinstance(c, FunctionParameter) for c in children]) super().__init__(pos, length, label, children) def parameters(self): return self.__children class FunctionDefinition(Node): CODE = "380000" LABEL = "Definition" @staticmethod def from_json(jsn): return FunctionDefinition(jsn['pos'], name, params, block, storage, dots) def __init__(self, pos, length, label, children): assert len(children) >= 3 and len(children) <= 5 tmp = children.copy() self.__storage = \ tmp.pop(0) if isinstance(tmp[0], typs.Storage) else None self.__parameters = tmp.pop(0) self.__dots = \ tmp.pop(0) if isinstance(tmp[0], typs.DotsParameter) else None self.__name = tmp.pop(0) self.__block = tmp.pop(0) assert isinstance(self.__parameters, FunctionParameters) assert self.__dots is None or \ isinstance(self.__dots, typs.DotsParameter) assert self.__storage is None or \ isinstance(self.__storage, typs.Storage) assert isinstance(self.__name, GenericString) assert isinstance(self.__block, statement.Block) super().__init__(pos, length, label, children) def name(self): return self.__name def parameters(self): return self.__parameters def block(self): return self.__block def storage(self): return self.__storage def dots(self): return self.__dots def is_variadic(self): return not (self.dots() is None) # Used to mark the end of the program! class FinalDef(Token): CODE = "450800" LABEL = "FinalDef" # Represents the root AST node for a program # For now we just get all the "components" of a program and worry about what # kind of components they might be later. 
# # Throw away the FinalDef class Program(Node): CODE = "460000" LABEL = "Program" # Generates an AST for a given source code file, using GumTree and CGum @staticmethod def from_source_file(fn): tmp_f = tempfile.NamedTemporaryFile() Program.parse_to_json_file(fn, tmp_f) return Program.from_json_file(tmp_f.name) # Parses a JSON CGum AST, stored in a file at a specified location, into an # equivalent, Python representation @staticmethod def from_json_file(fn): #print("Attempting to read CGum AST from a JSON file: %s" % fn) assert os.path.isfile(fn), "file not found" with codecs.open(fn, 'r', 'utf-8') as f: program = Node.from_json(json.load(f)['root']) #print("Finished converting CGum AST from JSON into Python") program.renumber() return program def __init__(self, pos, length, label, children): assert label is None assert len(children) >= 1 assert isinstance(children[-1], FinalDef) children.pop() super().__init__(pos, length, label, children) @staticmethod def parse_to_json_file(src_fn, jsn_f): with tempfile.TemporaryFile() as f_err: cmd = "gumtree parse \"%s\"" % src_fn p = Popen(cmd, shell=True, stdin=FNULL, stdout=jsn_f, stderr=f_err) code = p.wait() # read the contents of the standard error f_err.seek(0) err = str(f_err.read())[2:-1] # ensure the exit status was zero if code != 0: raise Exception("ERROR [PyCGum/parse_to_json_file]: unexpected exit code - %s" % err) # run-time exceptions can occur whilst still returning an exit status # of zero elif err.startswith("java.lang.RuntimeException:"): raise Exception("ERROR [PyCGum/parse_to_json_file]: %s" % err)
mit
3,467,776,033,047,293,000
31.743017
103
0.613888
false
3.866095
false
false
false
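parse_to_json_file shells out to gumtree and checks both the exit status and stderr, since gumtree can hit a Java RuntimeException yet still exit 0. The same capture-and-check pattern with subprocess.run (Python 3.7+); this is a sketch of the pattern, not the project's own code:

import subprocess

def parse_to_json(src_fn):
    proc = subprocess.run(["gumtree", "parse", src_fn],
                          capture_output=True, text=True)
    if proc.returncode != 0:
        raise RuntimeError("gumtree exited with %d: %s"
                           % (proc.returncode, proc.stderr))
    if proc.stderr.startswith("java.lang.RuntimeException:"):
        # gumtree can crash at run time yet still exit 0
        raise RuntimeError(proc.stderr)
    return proc.stdout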
greenonion/pytvd
tvdip.py
1
8146
""" tvdip.py ~~~~~~~~ This module is a direct port of the original [1] tvdip Matlab script into NumPy. [1] M.A. Little, Nick S. Jones (2010) "Sparse Bayesian Step-Filtering for High- Throughput Analysis of Molecular Machine Dynamics", in 2010 IEEE International Conference on Acoustics, Speech and Signal Processing, 2010, ICASSP 2010 Proceedings. """ import numpy as np import scipy as Sci from scipy import sparse from scipy.sparse import linalg import sys def tvdiplmax(y): """Calculate the value of lambda so that if lambda >= lambdamax, the TVD functional solved by TVDIP is minimized by the trivial constant solution x = mean(y). This can then be used to determine a useful range of values of lambda, for example. Args: y: Original signal to denoise, size N x 1. Returns: lambdamax: Value of lambda at which x = mean(y) is the output of the TVDIP function. """ N = y.size M = N - 1 # Construct sparse operator matrices I1 = sparse.eye(M) O1 = sparse.dia_matrix((M, 1)) D = sparse.hstack([I1, O1]) - sparse.hstack([O1, I1]) DDT = D.dot(D.conj().T) Dy = D.dot(y) lambdamax = np.absolute(linalg.spsolve(DDT, Dy)).max(0) return lambdamax def tvdip(y, lambdas, display=1, stoptol=1e-3, maxiter=60): """Performs discrete total variation denoising (TVD) using a primal-dual interior-point solver. It minimizes the following discrete functional: E=(1/2)||y-x||_2^2+lambda*||Dx||_1 over the variable x, given the input signal y, according to each value of the regularization parametero lambda > 0. D is the first difference matrix. Uses hot-restarts from each value of lambda to speed up convergence for subsequent values: best use of the feature is made by ensuring that the chosen lambda values are close to each other. Args: y: Original signal to denoise, size N x 1. lambdas: A vector of positive regularization parameters, size L x 1. TVD will be applied to each value in the vector. display: (Optional) Set to 0 to turn off progress display, 1 to turn on. Defaults to 1. stoptol: (Optional) Precision as determined by duality gap tolerance, if not specified defaults to 1e-3. maxiter: (Optional) Maximum interior-point iterations, if not specified defaults to 60. Returns: x: Denoised output signal for each value of lambda, size N x L. E: Objective functional at minimum for each lamvda, size L x 1. s: Optimization result, 1 = solved, 0 = maximum iterations exceeded before reaching duality gap tolerance, size L x 1. lambdamax: Maximum value of lambda for the given y. If lambda >= lambdamax, the output is the trivial constant solution x = mean(y). Example: >>> import numpy as np >>> import tvdip as tv >>> # Find the value of lambda greater than which the TVD solution is >>> # just the mean. >>> lmax = tv.tvdiplmax(y) >>> # Perform TV denoising for lambda across a range of values up to a >>> # small fraction of the maximum found above. 
>>> lratio = np.array([1e-4, 1e-3, 1e-2, 1e-1]) >>> x, E, status, l_max = tv.tvdip(y, lmax*lratio, True, 1e-3) >>> plot(x[:,0]) """ # Search tuning parameters ALPHA = 0.01 # Backtracking linesearch parameter (0,0.5] BETA = 0.5 # Backtracking linesearch parameter (0,1) MAXLSITER = 20 # Max iterations of backtracking linesearch MU = 2 # t update N = y.size # Length of input signal y M = N - 1 # Size of Dx # Construct sparse operator matrices I1 = sparse.eye(M) O1 = sparse.dia_matrix((M, 1)) D = sparse.hstack([I1, O1]) - sparse.hstack([O1, I1]) DDT = D.dot(D.conj().T) Dy = D.dot(y) # Find max value of lambda lambdamax = (np.absolute(linalg.spsolve(DDT, Dy))).max(0) if display: print "lambda_max=%5.2e" % lambdamax L = lambdas.size x = np.zeros((N, L)) s = np.zeros((L, 1)) E = np.zeros((L, 1)) # Optimization variables set up once at the start z = np.zeros((M, 1)) mu1 = np.ones((M, 1)) mu2 = np.ones((M, 1)) # Work through each value of lambda, with hot-restart on optimization # variables for idx, l in enumerate(lambdas): t = 1e-10 step = np.inf f1 = z - l f2 = -z - l # Main optimization loop s[idx] = 1 if display: print "Solving for lambda={0:5.2e}, lambda/lambda_max={1:5.2e}".format(l, l/lambdamax) print "Iter# primal Dual Gap" for iters in xrange(maxiter): DTz = (z.conj().T * D).conj().T DDTz = D.dot(DTz) w = Dy - (mu1 - mu2) # Calculate objectives and primal-dual gap pobj1 = 0.5*w.conj().T.dot(linalg.spsolve(DDT,w))+l*(np.sum(mu1+mu2)) pobj2 = 0.5*DTz.conj().T.dot(DTz)+l*np.sum(np.absolute(Dy-DDTz)) pobj = np.minimum(pobj1, pobj2) dobj = -0.5*DTz.conj().T.dot(DTz) + Dy.conj().T.dot(z) gap = pobj - dobj if display: print "{:5d} {:7.2e} {:7.2e} {:7.2e}".format(iters, pobj[0, 0], dobj[0, 0], gap[0, 0]) # Test duality gap stopping criterion if gap <= stoptol: s[idx] = 1 break if step >= 0.2: t = np.maximum(2*M*MU/gap, 1.2*t) # Do Newton step rz = DDTz - w Sdata = (mu1/f1 + mu2/f2) S = DDT-sparse.csc_matrix((Sdata.reshape(Sdata.size), (np.arange(M), np.arange(M)))) r = -DDTz + Dy + (1/t)/f1 - (1/t)/f2 dz = linalg.spsolve(S, r).reshape(r.size, 1) dmu1 = -(mu1+((1/t)+dz*mu1)/f1) dmu2 = -(mu2+((1/t)-dz*mu2)/f2) resDual = rz.copy() resCent = np.vstack((-mu1*f1-1/t, -mu2*f2-1/t)) residual = np.vstack((resDual, resCent)) # Perform backtracking linesearch negIdx1 = dmu1 < 0 negIdx2 = dmu2 < 0 step = 1 if np.any(negIdx1): step = np.minimum(step, 0.99*(-mu1[negIdx1]/dmu1[negIdx1]).min(0)) if np.any(negIdx2): step = np.minimum(step, 0.99*(-mu2[negIdx2]/dmu2[negIdx2]).min(0)) for _ in xrange(MAXLSITER): newz = z + step*dz newmu1 = mu1 + step*dmu1 newmu2 = mu2 + step*dmu2 newf1 = newz - l newf2 = -newz - l # Update residuals newResDual = DDT.dot(newz) - Dy + newmu1 - newmu2 newResCent = np.vstack((-newmu1*newf1-1/t, -newmu2*newf2-1/t)) newResidual = np.vstack((newResDual, newResCent)) if (np.maximum(newf1.max(0), newf2.max(0)) < 0 and (Sci.linalg.norm(newResidual) <= (1-ALPHA*step)*Sci.linalg.norm(residual))): break step = BETA * step # Update primal and dual optimization parameters z = newz mu1 = newmu1 mu2 = newmu2 f1 = newf1 f2 = newf2 x[:, idx] = (y-D.conj().T.dot(z)).reshape(x.shape[0]) xval = x[:, idx].reshape(x.shape[0], 1) E[idx] = 0.5*np.sum((y-xval)**2)+l*np.sum(np.absolute(D.dot(xval))) # We may have a close solution that does not satisfy the duality gap if iters >= maxiter: s[idx] = 0 if display: if s[idx]: print("Solved to precision of duality gap %5.2e") % gap else: print("Max iterations exceeded - solution may be inaccurate") return x, E, s, lambdamax
gpl-2.0
-8,849,415,337,858,050,000
33.811966
98
0.545912
false
3.385702
false
false
false
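tvdiplmax solves (D D^T) z = D y and takes max(|z|); for any lambda at or above that value the TVD minimizer collapses to the constant mean(y). A dense-matrix version of the same computation on a toy signal, using plain numpy instead of scipy.sparse for readability:

import numpy as np

def lambdamax_dense(y):
    # first-difference operator D, size (N-1) x N; row i gives y[i] - y[i+1]
    N = y.size
    D = np.eye(N - 1, N) - np.eye(N - 1, N, k=1)
    z = np.linalg.solve(D @ D.T, D @ y)
    return np.abs(z).max()

rng = np.random.default_rng(0)
y = np.cumsum(rng.standard_normal(50))   # random-walk test signal
print(lambdamax_dense(y))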
tobykurien/MakerDroid
assetsrc/public.mp3/fabmetheus_utilities/fabmetheus_tools/interpret_plugins/gts.py
1
4561
""" This page is in the table of contents. The gts.py script is an import translator plugin to get a carving from an gts file. An import plugin is a script in the interpret_plugins folder which has the function getCarving. It is meant to be run from the interpret tool. To ensure that the plugin works on platforms which do not handle file capitalization properly, give the plugin a lower case name. The getCarving function takes the file name of an gts file and returns the carving. The GNU Triangulated Surface (.gts) format is described at: http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE Quoted from http://gts.sourceforge.net/reference/gts-surfaces.html#GTS-SURFACE-WRITE "All the lines beginning with GTS_COMMENTS (#!) are ignored. The first line contains three unsigned integers separated by spaces. The first integer is the number of vertices, nv, the second is the number of edges, ne and the third is the number of faces, nf. Follows nv lines containing the x, y and z coordinates of the vertices. Follows ne lines containing the two indices (starting from one) of the vertices of each edge. Follows nf lines containing the three ordered indices (also starting from one) of the edges of each face. The format described above is the least common denominator to all GTS files. Consistent with an object-oriented approach, the GTS file format is extensible. Each of the lines of the file can be extended with user-specific attributes accessible through the read() and write() virtual methods of each of the objects written (surface, vertices, edges or faces). When read with different object classes, these extra attributes are just ignored." This example gets a carving for the gts file Screw Holder Bottom.gts. This example is run in a terminal in the folder which contains Screw Holder Bottom.gts and gts.py. > python Python 2.5.1 (r251:54863, Sep 22 2007, 01:43:31) [GCC 4.2.1 (SUSE Linux)] on linux2 Type "help", "copyright", "credits" or "license" for more information. >>> import gts >>> gts.getCarving() [11.6000003815, 10.6837882996, 7.80209827423 .. many more lines of the carving .. """ from __future__ import absolute_import #Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module. import __init__ from fabmetheus_utilities.vector3 import Vector3 from fabmetheus_utilities import gcodec from fabmetheus_utilities.solids import triangle_mesh __author__ = "Enrique Perez ([email protected])" __credits__ = 'Nophead <http://hydraraptor.blogspot.com/>\nArt of Illusion <http://www.artofillusion.org/>' __date__ = "$Date: 2008/21/04 $" __license__ = "GPL 3.0" def getFromGNUTriangulatedSurfaceText( gnuTriangulatedSurfaceText, triangleMesh ): "Initialize from a GNU Triangulated Surface Text." 
if gnuTriangulatedSurfaceText == '': return None lines = gcodec.getTextLines( gnuTriangulatedSurfaceText ) linesWithoutComments = [] for line in lines: if len( line ) > 0: firstCharacter = line[ 0 ] if firstCharacter != '#' and firstCharacter != '!': linesWithoutComments.append( line ) splitLine = linesWithoutComments[ 0 ].split() numberOfVertices = int( splitLine[ 0 ] ) numberOfEdges = int( splitLine[ 1 ] ) numberOfFaces = int( splitLine[ 2 ] ) faceTriples = [] for vertexIndex in xrange( numberOfVertices ): line = linesWithoutComments[ vertexIndex + 1 ] splitLine = line.split() vertex = Vector3( float( splitLine[ 0 ] ), float( splitLine[ 1 ] ), float( splitLine[ 2 ] ) ) triangleMesh.vertices.append( vertex ) edgeStart = numberOfVertices + 1 for edgeIndex in xrange( numberOfEdges ): line = linesWithoutComments[ edgeIndex + edgeStart ] splitLine = line.split() vertexIndexes = [] for word in splitLine[ : 2 ]: vertexIndexes.append( int( word ) - 1 ) edge = triangle_mesh.Edge().getFromVertexIndexes( edgeIndex, vertexIndexes ) triangleMesh.edges.append( edge ) faceStart = edgeStart + numberOfEdges for faceIndex in xrange( numberOfFaces ): line = linesWithoutComments[ faceIndex + faceStart ] splitLine = line.split() edgeIndexes = [] for word in splitLine[ : 3 ]: edgeIndexes.append( int( word ) - 1 ) face = triangle_mesh.Face().getFromEdgeIndexes( edgeIndexes, triangleMesh.edges, faceIndex ) triangleMesh.faces.append( face ) return triangleMesh def getCarving( fileName ): "Get the carving for the gts file." return getFromGNUTriangulatedSurfaceText( gcodec.getFileText( fileName ), triangle_mesh.TriangleMesh() )
gpl-3.0
1,615,288,114,955,321,900
48.043011
441
0.754221
false
3.497699
false
false
false
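The loader above drops '#' and '!' comment lines, reads nv, ne and nf from the first data line, then consumes vertices, edges and faces in that order. A minimal standalone parser for just the counts and the vertex block; the sample surface is inlined for the demo:

def parse_gts_vertices(text):
    lines = [ln.strip() for ln in text.splitlines() if ln.strip()]
    lines = [ln for ln in lines if ln[0] not in '#!']
    nv, ne, nf = (int(w) for w in lines[0].split()[:3])
    vertices = [tuple(float(w) for w in lines[1 + i].split()[:3])
                for i in range(nv)]
    return nv, ne, nf, vertices

sample = """#! GTS surface
3 3 1
0 0 0
1 0 0
0 1 0
1 2
2 3
3 1
1 2 3
"""
print(parse_gts_vertices(sample))  # (3, 3, 1, [(0.0, 0.0, 0.0), ...])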
DarkFenX/Pyfa
eos/saveddata/targetProfile.py
1
20115
# =============================================================================== # Copyright (C) 2014 Ryan Holmes # # This file is part of eos. # # eos is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # eos is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with eos. If not, see <http://www.gnu.org/licenses/>. # =============================================================================== import math import re from collections import OrderedDict from logbook import Logger from sqlalchemy.orm import reconstructor import eos.db pyfalog = Logger(__name__) BUILTINS = OrderedDict([ # 0 is taken by ideal target profile, composed manually in one of TargetProfile methods (-1, ('Uniform (25%)', 0.25, 0.25, 0.25, 0.25)), (-2, ('Uniform (50%)', 0.50, 0.50, 0.50, 0.50)), (-3, ('Uniform (75%)', 0.75, 0.75, 0.75, 0.75)), (-4, ('Uniform (90%)', 0.90, 0.90, 0.90, 0.90)), (-5, ('[T1 Resist]Shield', 0.0, 0.20, 0.40, 0.50)), (-6, ('[T1 Resist]Armor', 0.50, 0.45, 0.25, 0.10)), (-7, ('[T1 Resist]Hull', 0.33, 0.33, 0.33, 0.33)), (-8, ('[T1 Resist]Shield (+T2 DCU)', 0.125, 0.30, 0.475, 0.562)), (-9, ('[T1 Resist]Armor (+T2 DCU)', 0.575, 0.532, 0.363, 0.235)), (-10, ('[T1 Resist]Hull (+T2 DCU)', 0.598, 0.598, 0.598, 0.598)), (-11, ('[T2 Resist]Amarr (Shield)', 0.0, 0.20, 0.70, 0.875)), (-12, ('[T2 Resist]Amarr (Armor)', 0.50, 0.35, 0.625, 0.80)), (-13, ('[T2 Resist]Caldari (Shield)', 0.20, 0.84, 0.76, 0.60)), (-14, ('[T2 Resist]Caldari (Armor)', 0.50, 0.8625, 0.625, 0.10)), (-15, ('[T2 Resist]Gallente (Shield)', 0.0, 0.60, 0.85, 0.50)), (-16, ('[T2 Resist]Gallente (Armor)', 0.50, 0.675, 0.8375, 0.10)), (-17, ('[T2 Resist]Minmatar (Shield)', 0.75, 0.60, 0.40, 0.50)), (-18, ('[T2 Resist]Minmatar (Armor)', 0.90, 0.675, 0.25, 0.10)), (-19, ('[NPC][Asteroid]Angel Cartel', 0.54, 0.42, 0.37, 0.32)), (-20, ('[NPC][Asteroid]Blood Raiders', 0.34, 0.39, 0.45, 0.52)), (-21, ('[NPC][Asteroid]Guristas', 0.55, 0.35, 0.3, 0.48)), (-22, ('[NPC][Asteroid]Rogue Drones', 0.35, 0.38, 0.44, 0.49)), (-23, ('[NPC][Asteroid]Sanshas Nation', 0.35, 0.4, 0.47, 0.53)), (-24, ('[NPC][Asteroid]Serpentis', 0.49, 0.38, 0.29, 0.51)), (-25, ('[NPC][Deadspace]Angel Cartel', 0.59, 0.48, 0.4, 0.32)), (-26, ('[NPC][Deadspace]Blood Raiders', 0.31, 0.39, 0.47, 0.56)), (-27, ('[NPC][Deadspace]Guristas', 0.57, 0.39, 0.31, 0.5)), (-28, ('[NPC][Deadspace]Rogue Drones', 0.42, 0.42, 0.47, 0.49)), (-29, ('[NPC][Deadspace]Sanshas Nation', 0.31, 0.39, 0.47, 0.56)), (-30, ('[NPC][Deadspace]Serpentis', 0.49, 0.38, 0.29, 0.56)), (-31, ('[NPC][Mission]Amarr Empire', 0.34, 0.38, 0.42, 0.46)), (-32, ('[NPC][Mission]Caldari State', 0.51, 0.38, 0.3, 0.51)), (-33, ('[NPC][Mission]CONCORD', 0.47, 0.46, 0.47, 0.47)), (-34, ('[NPC][Mission]Gallente Federation', 0.51, 0.38, 0.31, 0.52)), (-35, ('[NPC][Mission]Khanid', 0.51, 0.42, 0.36, 0.4)), (-36, ('[NPC][Mission]Minmatar Republic', 0.51, 0.46, 0.41, 0.35)), (-37, ('[NPC][Mission]Mordus Legion', 0.32, 0.48, 0.4, 0.62)), (-38, ('[NPC][Other]Sleeper', 0.61, 0.61, 0.61, 0.61)), (-39, ('[NPC][Other]Sansha Incursion', 0.65, 0.63, 0.64, 0.65)), (-40, ('[NPC][Burner]Cruor 
(Blood Raiders)', 0.8, 0.73, 0.69, 0.67)), (-41, ('[NPC][Burner]Dramiel (Angel)', 0.35, 0.48, 0.61, 0.68)), (-42, ('[NPC][Burner]Daredevil (Serpentis)', 0.69, 0.59, 0.59, 0.43)), (-43, ('[NPC][Burner]Succubus (Sanshas Nation)', 0.35, 0.48, 0.61, 0.68)), (-44, ('[NPC][Burner]Worm (Guristas)', 0.48, 0.58, 0.69, 0.74)), (-45, ('[NPC][Burner]Enyo', 0.58, 0.72, 0.86, 0.24)), (-46, ('[NPC][Burner]Hawk', 0.3, 0.86, 0.79, 0.65)), (-47, ('[NPC][Burner]Jaguar', 0.78, 0.65, 0.48, 0.56)), (-48, ('[NPC][Burner]Vengeance', 0.66, 0.56, 0.75, 0.86)), (-49, ('[NPC][Burner]Ashimmu (Blood Raiders)', 0.8, 0.76, 0.68, 0.7)), (-50, ('[NPC][Burner]Talos', 0.68, 0.59, 0.59, 0.43)), (-51, ('[NPC][Burner]Sentinel', 0.58, 0.45, 0.52, 0.66)), # Source: ticket #2067 (-52, ('[NPC][Invasion]Invading Precursor Entities', 0.422, 0.367, 0.453, 0.411)), (-53, ('[NPC][Invasion]Retaliating Amarr Entities', 0.360, 0.310, 0.441, 0.602)), (-54, ('[NPC][Invasion]Retaliating Caldari Entities', 0.287, 0.610, 0.487, 0.401)), (-55, ('[NPC][Invasion]Retaliating Gallente Entities', 0.383, 0.414, 0.578, 0.513)), (-56, ('[NPC][Invasion]Retaliating Minmatar Entities', 0.620, 0.422, 0.355, 0.399)), (-57, ('[NPC][Abyssal][Dark Matter All Tiers]Drones', 0.439, 0.522, 0.529, 0.435)), (-58, ('[NPC][Abyssal][Dark Matter All Tiers]Overmind', 0.626, 0.576, 0.612, 0.624)), (-59, ('[NPC][Abyssal][Dark Matter All Tiers]Seeker', 0.082, 0.082, 0.082, 0.082)), (-60, ('[NPC][Abyssal][Dark Matter All Tiers]Triglavian', 0.477, 0.401, 0.449, 0.37)), (-61, ('[NPC][Abyssal][Dark Matter All Tiers]Drifter', 0.403, 0.403, 0.403, 0.403)), (-62, ('[NPC][Abyssal][Dark Matter All Tiers]Sleeper', 0.435, 0.435, 0.435, 0.435)), (-63, ('[NPC][Abyssal][Dark Matter All Tiers]All', 0.507, 0.477, 0.502, 0.493)), (-64, ('[NPC][Abyssal][Electrical T1/T2]Drones', 0.323, 0.522, 0.529, 0.435)), (-65, ('[NPC][Abyssal][Electrical T1/T2]Overmind', 0.521, 0.576, 0.612, 0.624)), (-66, ('[NPC][Abyssal][Electrical T1/T2]Seeker', 0, 0.082, 0.082, 0.082)), (-67, ('[NPC][Abyssal][Electrical T1/T2]Triglavian', 0.333, 0.401, 0.449, 0.37)), (-68, ('[NPC][Abyssal][Electrical T1/T2]Drifter', 0.267, 0.403, 0.403, 0.403)), (-69, ('[NPC][Abyssal][Electrical T1/T2]Sleeper', 0.329, 0.435, 0.435, 0.435)), (-70, ('[NPC][Abyssal][Electrical T1/T2]All', 0.385, 0.477, 0.502, 0.493)), (-71, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Drones', 0.255, 0.522, 0.529, 0.435)), (-72, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Overmind', 0.457, 0.576, 0.612, 0.624)), (-73, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Seeker', 0, 0.082, 0.082, 0.082)), (-74, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Triglavian', 0.241, 0.401, 0.449, 0.37)), (-75, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Drifter', 0.184, 0.403, 0.403, 0.403)), (-76, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]Sleeper', 0.268, 0.435, 0.435, 0.435)), (-77, ('[NPC][Abyssal][Electrical T3 (Some T5 Rooms)]All', 0.313, 0.477, 0.502, 0.493)), (-78, ('[NPC][Abyssal][Electrical T4/T5]Drones', 0.193, 0.522, 0.529, 0.435)), (-79, ('[NPC][Abyssal][Electrical T4/T5]Overmind', 0.398, 0.576, 0.612, 0.624)), (-80, ('[NPC][Abyssal][Electrical T4/T5]Seeker', 0, 0.082, 0.082, 0.082)), (-81, ('[NPC][Abyssal][Electrical T4/T5]Triglavian', 0.183, 0.401, 0.449, 0.37)), (-82, ('[NPC][Abyssal][Electrical T4/T5]Drifter', 0.107, 0.403, 0.403, 0.403)), (-83, ('[NPC][Abyssal][Electrical T4/T5]Sleeper', 0.215, 0.435, 0.435, 0.435)), (-84, ('[NPC][Abyssal][Electrical T4/T5]All', 0.25, 0.477, 0.502, 0.493)), (-85, ('[NPC][Abyssal][Firestorm 
T1/T2]Drones', 0.461, 0.425, 0.541, 0.443)), (-86, ('[NPC][Abyssal][Firestorm T1/T2]Overmind', 0.65, 0.469, 0.625, 0.633)), (-87, ('[NPC][Abyssal][Firestorm T1/T2]Seeker', 0.084, 0, 0.084, 0.084)), (-88, ('[NPC][Abyssal][Firestorm T1/T2]Triglavian', 0.534, 0.266, 0.484, 0.366)), (-89, ('[NPC][Abyssal][Firestorm T1/T2]Drifter', 0.422, 0.282, 0.422, 0.422)), (-90, ('[NPC][Abyssal][Firestorm T1/T2]Sleeper', 0.512, 0.402, 0.512, 0.512)), (-91, ('[NPC][Abyssal][Firestorm T1/T2]All', 0.541, 0.365, 0.524, 0.504)), (-92, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Drones', 0.461, 0.36, 0.541, 0.443)), (-93, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Overmind', 0.65, 0.391, 0.625, 0.633)), (-94, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Seeker', 0.084, 0, 0.084, 0.084)), (-95, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Triglavian', 0.534, 0.161, 0.484, 0.366)), (-96, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Drifter', 0.422, 0.196, 0.422, 0.422)), (-97, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]Sleeper', 0.512, 0.337, 0.512, 0.512)), (-98, ('[NPC][Abyssal][Firestorm T3 (Some T5 Rooms)]All', 0.541, 0.284, 0.524, 0.504)), (-99, ('[NPC][Abyssal][Firestorm T4/T5]Drones', 0.461, 0.305, 0.541, 0.443)), (-100, ('[NPC][Abyssal][Firestorm T4/T5]Overmind', 0.65, 0.323, 0.625, 0.633)), (-101, ('[NPC][Abyssal][Firestorm T4/T5]Seeker', 0.084, 0, 0.084, 0.084)), (-102, ('[NPC][Abyssal][Firestorm T4/T5]Triglavian', 0.534, 0.082, 0.484, 0.366)), (-103, ('[NPC][Abyssal][Firestorm T4/T5]Drifter', 0.422, 0.114, 0.422, 0.422)), (-104, ('[NPC][Abyssal][Firestorm T4/T5]Sleeper', 0.512, 0.276, 0.512, 0.512)), (-105, ('[NPC][Abyssal][Firestorm T4/T5]All', 0.541, 0.214, 0.524, 0.504)), (-106, ('[NPC][Abyssal][Exotic T1/T2]Drones', 0.439, 0.522, 0.417, 0.435)), (-107, ('[NPC][Abyssal][Exotic T1/T2]Overmind', 0.626, 0.576, 0.496, 0.624)), (-108, ('[NPC][Abyssal][Exotic T1/T2]Seeker', 0.082, 0.082, 0, 0.082)), (-109, ('[NPC][Abyssal][Exotic T1/T2]Triglavian', 0.477, 0.401, 0.284, 0.37)), (-110, ('[NPC][Abyssal][Exotic T1/T2]Drifter', 0.403, 0.403, 0.267, 0.403)), (-111, ('[NPC][Abyssal][Exotic T1/T2]Sleeper', 0.435, 0.435, 0.329, 0.435)), (-112, ('[NPC][Abyssal][Exotic T1/T2]All', 0.507, 0.477, 0.373, 0.493)), (-113, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Drones', 0.439, 0.522, 0.351, 0.435)), (-114, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Overmind', 0.626, 0.576, 0.419, 0.624)), (-115, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Seeker', 0.082, 0.082, 0, 0.082)), (-116, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Triglavian', 0.477, 0.401, 0.176, 0.37)), (-117, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Drifter', 0.403, 0.403, 0.184, 0.403)), (-118, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]Sleeper', 0.435, 0.435, 0.268, 0.435)), (-119, ('[NPC][Abyssal][Exotic T3 (Some T5 Rooms)]All', 0.507, 0.477, 0.293, 0.493)), (-120, ('[NPC][Abyssal][Exotic T4/T5]Drones', 0.439, 0.522, 0.293, 0.435)), (-121, ('[NPC][Abyssal][Exotic T4/T5]Overmind', 0.626, 0.576, 0.344, 0.624)), (-122, ('[NPC][Abyssal][Exotic T4/T5]Seeker', 0.082, 0.082, 0, 0.082)), (-123, ('[NPC][Abyssal][Exotic T4/T5]Triglavian', 0.477, 0.401, 0.107, 0.37)), (-124, ('[NPC][Abyssal][Exotic T4/T5]Drifter', 0.403, 0.403, 0.107, 0.403)), (-125, ('[NPC][Abyssal][Exotic T4/T5]Sleeper', 0.435, 0.435, 0.215, 0.435)), (-126, ('[NPC][Abyssal][Exotic T4/T5]All', 0.507, 0.477, 0.223, 0.493)), (-127, ('[NPC][Abyssal][Gamma T1/T2]Drones', 0.449, 0.54, 0.549, 0.336)), (-128, ('[NPC][Abyssal][Gamma T1/T2]Overmind', 0.6, 0.557, 0.601, 0.504)), (-129, 
('[NPC][Abyssal][Gamma T1/T2]Seeker', 0.085, 0.085, 0.085, 0)), (-130, ('[NPC][Abyssal][Gamma T1/T2]Triglavian', 0.463, 0.392, 0.447, 0.193)), (-131, ('[NPC][Abyssal][Gamma T1/T2]Drifter', 0.428, 0.428, 0.428, 0.287)), (-132, ('[NPC][Abyssal][Gamma T1/T2]Sleeper', 0.435, 0.435, 0.435, 0.329)), (-133, ('[NPC][Abyssal][Gamma T1/T2]All', 0.493, 0.472, 0.5, 0.362)), (-134, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Drones', 0.449, 0.54, 0.549, 0.264)), (-135, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Overmind', 0.6, 0.557, 0.601, 0.428)), (-136, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Seeker', 0.085, 0.085, 0.085, 0)), (-137, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Triglavian', 0.463, 0.392, 0.447, 0.071)), (-138, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Drifter', 0.428, 0.428, 0.428, 0.2)), (-139, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]Sleeper', 0.435, 0.435, 0.435, 0.268)), (-140, ('[NPC][Abyssal][Gamma T3 (Some T5 Rooms)]All', 0.493, 0.472, 0.5, 0.28)), (-141, ('[NPC][Abyssal][Gamma T4/T5]Drones', 0.449, 0.54, 0.549, 0.197)), (-142, ('[NPC][Abyssal][Gamma T4/T5]Overmind', 0.6, 0.557, 0.601, 0.356)), (-143, ('[NPC][Abyssal][Gamma T4/T5]Seeker', 0.085, 0.085, 0.085, 0)), (-144, ('[NPC][Abyssal][Gamma T4/T5]Triglavian', 0.463, 0.392, 0.447, 0.029)), (-145, ('[NPC][Abyssal][Gamma T4/T5]Drifter', 0.428, 0.428, 0.428, 0.117)), (-146, ('[NPC][Abyssal][Gamma T4/T5]Sleeper', 0.435, 0.435, 0.435, 0.215)), (-147, ('[NPC][Abyssal][Gamma T4/T5]All', 0.493, 0.472, 0.5, 0.21))]) class TargetProfile: # also determined import/export order - VERY IMPORTANT DAMAGE_TYPES = ('em', 'thermal', 'kinetic', 'explosive') _idealTarget = None _builtins = None def __init__(self, *args, **kwargs): self.builtin = False self.update(*args, **kwargs) @reconstructor def init(self): self.builtin = False def update(self, emAmount=0, thermalAmount=0, kineticAmount=0, explosiveAmount=0, maxVelocity=None, signatureRadius=None, radius=None): self.emAmount = emAmount self.thermalAmount = thermalAmount self.kineticAmount = kineticAmount self.explosiveAmount = explosiveAmount self._maxVelocity = maxVelocity self._signatureRadius = signatureRadius self._radius = radius @classmethod def getBuiltinList(cls): if cls._builtins is None: cls.__generateBuiltins() return list(cls._builtins.values()) @classmethod def getBuiltinById(cls, id): if cls._builtins is None: cls.__generateBuiltins() return cls._builtins.get(id) @classmethod def __generateBuiltins(cls): cls._builtins = OrderedDict() for id, data in BUILTINS.items(): rawName = data[0] data = data[1:] profile = TargetProfile(*data) profile.ID = id profile.rawName = rawName profile.builtin = True cls._builtins[id] = profile @classmethod def getIdeal(cls): if cls._idealTarget is None: cls._idealTarget = cls( emAmount=0, thermalAmount=0, kineticAmount=0, explosiveAmount=0, maxVelocity=0, signatureRadius=None, radius=0) cls._idealTarget.rawName = 'Ideal Target' cls._idealTarget.ID = 0 cls._idealTarget.builtin = True return cls._idealTarget @property def maxVelocity(self): return self._maxVelocity or 0 @maxVelocity.setter def maxVelocity(self, val): self._maxVelocity = val @property def signatureRadius(self): if self._signatureRadius is None or self._signatureRadius == -1: return math.inf return self._signatureRadius @signatureRadius.setter def signatureRadius(self, val): if val is not None and math.isinf(val): val = None self._signatureRadius = val @property def radius(self): return self._radius or 0 @radius.setter def radius(self, val): self._radius = val @classmethod def 
importPatterns(cls, text): lines = re.split('[\n\r]+', text) patterns = [] numPatterns = 0 # When we import damage profiles, we create new ones and update old ones. To do this, get a list of current # patterns to allow lookup lookup = {} current = eos.db.getTargetProfileList() for pattern in current: lookup[pattern.rawName] = pattern for line in lines: try: if line.strip()[0] == "#": # comments continue line = line.split('#', 1)[0] # allows for comments type, data = line.rsplit('=', 1) type, data = type.strip(), [d.strip() for d in data.split(',')] except (KeyboardInterrupt, SystemExit): raise except: pyfalog.warning("Data isn't in correct format, continue to next line.") continue if type not in ("TargetProfile", "TargetResists"): continue numPatterns += 1 name, dataRes, dataMisc = data[0], data[1:5], data[5:8] fields = {} for index, val in enumerate(dataRes): val = float(val) if val else 0 if math.isinf(val): val = 0 try: assert 0 <= val <= 100 fields["%sAmount" % cls.DAMAGE_TYPES[index]] = val / 100 except (KeyboardInterrupt, SystemExit): raise except: pyfalog.warning("Caught unhandled exception in import patterns.") continue if len(dataMisc) == 3: for index, val in enumerate(dataMisc): try: fieldName = ("maxVelocity", "signatureRadius", "radius")[index] except IndexError: break val = float(val) if val else 0 if fieldName != "signatureRadius" and math.isinf(val): val = 0 fields[fieldName] = val if len(fields) in (4, 7): # Avoid possible blank lines if name.strip() in lookup: pattern = lookup[name.strip()] pattern.update(**fields) eos.db.save(pattern) else: pattern = TargetProfile(**fields) pattern.rawName = name.strip() eos.db.save(pattern) patterns.append(pattern) eos.db.commit() return patterns, numPatterns EXPORT_FORMAT = "TargetProfile = %s,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f,%.1f\n" @classmethod def exportPatterns(cls, *patterns): out = "# Exported from pyfa\n#\n" out += "# Values are in following format:\n" out += "# TargetProfile = [name],[EM %],[Thermal %],[Kinetic %],[Explosive %],[Max velocity m/s],[Signature radius m],[Radius m]\n\n" for dp in patterns: out += cls.EXPORT_FORMAT % ( dp.rawName, dp.emAmount * 100, dp.thermalAmount * 100, dp.kineticAmount * 100, dp.explosiveAmount * 100, dp.maxVelocity, dp.signatureRadius, dp.radius ) return out.strip() @property def name(self): return self.rawName @property def fullName(self): categories, tail = self.__parseRawName() return '{}{}'.format(''.join('[{}]'.format(c) for c in categories), tail) @property def shortName(self): return self.__parseRawName()[1] @property def hierarchy(self): return self.__parseRawName()[0] def __parseRawName(self): hierarchy = [] remainingName = self.rawName.strip() if self.rawName else '' while True: start, end = remainingName.find('['), remainingName.find(']') if start == -1 or end == -1: return hierarchy, remainingName splitter = remainingName.find('|') if splitter != -1 and splitter == start - 1: return hierarchy, remainingName[1:] hierarchy.append(remainingName[start + 1:end]) remainingName = remainingName[end + 1:].strip() def __deepcopy__(self, memo): p = TargetProfile( self.emAmount, self.thermalAmount, self.kineticAmount, self.explosiveAmount, self._maxVelocity, self._signatureRadius, self._radius) p.rawName = "%s copy" % self.rawName return p
gpl-3.0
6,335,731,424,209,932,000
48.666667
141
0.554412
false
2.653344
false
false
false
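Illustrative sketch (not part of the dataset record above): the TargetProfile class in this record parses raw names such as '[NPC][Burner]Enyo' into a category hierarchy plus a short name via __parseRawName. A standalone re-implementation under that reading, with the function name parse_raw_name invented for illustration:

# Hedged re-implementation of TargetProfile.__parseRawName, for illustration only.
def parse_raw_name(raw_name):
    hierarchy = []
    remaining = raw_name.strip() if raw_name else ''
    while True:
        start, end = remaining.find('['), remaining.find(']')
        if start == -1 or end == -1:
            return hierarchy, remaining
        # Mirror the original's '|' escape: a pipe directly before '['
        # stops hierarchy parsing and drops the leading character.
        splitter = remaining.find('|')
        if splitter != -1 and splitter == start - 1:
            return hierarchy, remaining[1:]
        hierarchy.append(remaining[start + 1:end])
        remaining = remaining[end + 1:].strip()

assert parse_raw_name('[NPC][Burner]Enyo') == (['NPC', 'Burner'], 'Enyo')
assert parse_raw_name('Uniform (25%)') == ([], 'Uniform (25%)')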
napalm-automation/napalm-yang
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs_/bandwidth_constraints/__init__.py
1
12844
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import state class bandwidth_constraints(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth-constraints. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: This container defines bandwidth-constraints. For DS-TE, the existing Maximum Reservable link bandwidth parameter is retained, but its semantics is generalized and interpreted as the aggregate bandwidth constraint across all Class-Types """ __slots__ = ("_path_helper", "_extmethods", "__state") _yang_name = "bandwidth-constraints" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "isis", "levels", "level", "link-state-database", "lsp", "tlvs", "tlv", "mt-isn", "neighbors", "neighbor", "subTLVs", "subTLVs", "bandwidth-constraints", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container) YANG Description: State parameters of IS Extended Reachability sub-TLV 22. 
""" return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: State parameters of IS Extended Reachability sub-TLV 22. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) _pyangbind_elements = OrderedDict([("state", state)]) from . import state class bandwidth_constraints(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/mt-isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth-constraints. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: This container defines bandwidth-constraints. 
For DS-TE, the existing Maximum Reservable link bandwidth parameter is retained, but its semantics is generalized and interpreted as the aggregate bandwidth constraint across all Class-Types """ __slots__ = ("_path_helper", "_extmethods", "__state") _yang_name = "bandwidth-constraints" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "isis", "levels", "level", "link-state-database", "lsp", "tlvs", "tlv", "mt-isn", "neighbors", "neighbor", "subTLVs", "subTLVs", "bandwidth-constraints", ] def _get_state(self): """ Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container) YANG Description: State parameters of IS Extended Reachability sub-TLV 22. """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/mt_isn/neighbors/neighbor/subTLVs/subTLVs/bandwidth_constraints/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: State parameters of IS Extended Reachability sub-TLV 22. 
""" if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) _pyangbind_elements = OrderedDict([("state", state)])
apache-2.0
1,062,767,270,325,623,600
37.570571
375
0.583775
false
4.37317
true
false
false
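Illustrative sketch (not part of the dataset record above): the generated bandwidth_constraints container builds its instance path by recursing through _parent and falling back to a hard-coded schema path at the root. A toy analogue with invented class and variable names:

class Node(object):
    """Toy analogue of a pyangbind-generated container's _path() logic."""

    def __init__(self, yang_name, parent=None):
        self._yang_name = yang_name
        self._parent = parent

    def _path(self):
        # Recurse toward the root, then append this node's YANG name.
        if self._parent is not None:
            return self._parent._path() + [self._yang_name]
        return [self._yang_name]

root = Node('network-instances')
leaf = Node('network-instance', parent=root)
assert leaf._path() == ['network-instances', 'network-instance']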
sserrot/champion_relationships
venv/Lib/site-packages/ipykernel/inprocess/blocking.py
1
3068
""" Implements a fully blocking kernel client. Useful for test suites and blocking terminal interfaces. """ #----------------------------------------------------------------------------- # Copyright (C) 2012 The IPython Development Team # # Distributed under the terms of the BSD License. The full license is in # the file COPYING.txt, distributed as part of this software. #----------------------------------------------------------------------------- import sys try: from queue import Queue, Empty # Py 3 except ImportError: from Queue import Queue, Empty # Py 2 # IPython imports from traitlets import Type # Local imports from .channels import ( InProcessChannel, ) from .client import InProcessKernelClient class BlockingInProcessChannel(InProcessChannel): def __init__(self, *args, **kwds): super(BlockingInProcessChannel, self).__init__(*args, **kwds) self._in_queue = Queue() def call_handlers(self, msg): self._in_queue.put(msg) def get_msg(self, block=True, timeout=None): """ Gets a message if there is one that is ready. """ if timeout is None: # Queue.get(timeout=None) has stupid uninteruptible # behavior, so wait for a week instead timeout = 604800 return self._in_queue.get(block, timeout) def get_msgs(self): """ Get all messages that are currently ready. """ msgs = [] while True: try: msgs.append(self.get_msg(block=False)) except Empty: break return msgs def msg_ready(self): """ Is there a message that has been received? """ return not self._in_queue.empty() class BlockingInProcessStdInChannel(BlockingInProcessChannel): def call_handlers(self, msg): """ Overridden for the in-process channel. This methods simply calls raw_input directly. """ msg_type = msg['header']['msg_type'] if msg_type == 'input_request': _raw_input = self.client.kernel._sys_raw_input prompt = msg['content']['prompt'] print(prompt, end='', file=sys.__stdout__) sys.__stdout__.flush() self.client.input(_raw_input()) class BlockingInProcessKernelClient(InProcessKernelClient): # The classes to use for the various channels. shell_channel_class = Type(BlockingInProcessChannel) iopub_channel_class = Type(BlockingInProcessChannel) stdin_channel_class = Type(BlockingInProcessStdInChannel) def wait_for_ready(self): # Wait for kernel info reply on shell channel while True: msg = self.shell_channel.get_msg(block=True) if msg['msg_type'] == 'kernel_info_reply': self._handle_kernel_info_reply(msg) break # Flush IOPub channel while True: try: msg = self.iopub_channel.get_msg(block=True, timeout=0.2) print(msg['msg_type']) except Empty: break
mit
1,264,620,225,133,396,200
31.989247
78
0.58605
false
4.27894
false
false
false
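Illustrative sketch (not part of the dataset record above): BlockingInProcessChannel.get_msg swaps timeout=None for a very long finite timeout, because Queue.get(block=True, timeout=None) cannot be interrupted on some platforms. A standalone version of the same idea, with invented names:

from queue import Queue, Empty

def get_msg(queue, block=True, timeout=None):
    if timeout is None:
        # A week-long finite timeout keeps the blocking wait interruptible.
        timeout = 604800
    return queue.get(block, timeout)

q = Queue()
q.put({'msg_type': 'status'})
assert get_msg(q)['msg_type'] == 'status'
try:
    get_msg(q, block=False)
except Empty:
    pass  # queue drained, as expected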
alfa-addon/addon
plugin.video.alfa/channels/vi2.py
1
11788
# -*- coding: utf-8 -*- # -*- Channel Vi2.co -*- # -*- Created for Alfa-addon -*- # -*- By the Alfa Develop Group -*- import sys PY3 = False if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int import re import base64 from channelselector import get_thumb from core import httptools from core import jsontools from core import scrapertools from core import servertools from core import tmdb from lib import jsunpack from core.item import Item from channels import filtertools from channels import autoplay from platformcode import config, logger IDIOMAS = {'Latino': 'LAT', 'Español':'CAST', 'Subtitulado': 'VOSE', 'VO': 'VO'} list_language = list(IDIOMAS.values()) list_quality = ['Full HD 1080p', 'HDRip', 'DVDScreener', '720p', 'Ts Screener hq', 'HD Real 720p', 'DVDRip', 'BluRay-1080p', 'BDremux-1080p'] list_servers = [ 'directo', 'openload', 'rapidvideo', 'jawcloud', 'cloudvideo', 'upvid', 'vevio', 'gamovideo' ] host = 'http://vi2.co' def mainlist(item): logger.info() autoplay.init(item.channel, list_servers, list_quality) itemlist = [] itemlist.append(Item(channel=item.channel, title='Peliculas', action='select_menu', type='peliculas', thumbnail= get_thumb('movies', auto=True))) # itemlist.append(Item(channel=item.channel, title='Series', url=host+'serie', action='select_menu', type='series', # thumbnail= get_thumb('tvshows', auto=True))) autoplay.show_option(item.channel, itemlist) return itemlist def select_menu(item): logger.info() itemlist=[] url = host + '/%s/es/' % item.type itemlist.append(Item(channel=item.channel, title='Streaming', action='sub_menu', thumbnail=get_thumb('all', auto=True), type=item.type)) itemlist.append(Item(channel=item.channel, title='Torrent', action='sub_menu', thumbnail=get_thumb('all', auto=True), type=item.type)) itemlist.append(Item(channel=item.channel, title='Generos', action='section', url=url, thumbnail=get_thumb('genres', auto=True), type='all')) itemlist.append(Item(channel=item.channel, title='Por Año', action='section', url=url, thumbnail=get_thumb('year', auto=True), type='all')) return itemlist def sub_menu(item): logger.info() itemlist = [] url = host + '/%s/es/ajax/1/' % item.type link_type = item.title.lower() if link_type == 'streaming': link_type = 'flash' movies_options = ['Todas', 'Castellano', 'Latino', 'VOSE'] tv_options = ['Ultimas', 'Ultimas Castellano', 'Ultimas Latino', 'Ultimas VOSE'] if item.type == 'peliculas': title = movies_options thumb_1 = 'all' else: thumb_1 = 'last' title = tv_options itemlist.append(Item(channel=item.channel, title=title[0], url=url+'?q=%s' % link_type, action='list_all', thumbnail=get_thumb(thumb_1, auto=True), type=item.type, link_type=link_type)) itemlist.append(Item(channel=item.channel, title=title[1], url=url + '?q=%s+espanol' % link_type, action='list_all', thumbnail=get_thumb('cast', auto=True), type=item.type, send_lang='Español', link_type=link_type)) itemlist.append(Item(channel=item.channel, title=title[2], url=url + '?q=%s+latino' % link_type, action='list_all', thumbnail=get_thumb('lat', auto=True), type=item.type, send_lang='Latino', link_type=link_type)) itemlist.append(Item(channel=item.channel, title=title[3], url=url + '?q=%s+subtitulado' % link_type, action='list_all', thumbnail=get_thumb('vose', auto=True), type=item.type, send_lang='VOSE', link_type=link_type)) itemlist.append(Item(channel=item.channel, title="Buscar", action="search", url=url + '?q=', thumbnail=get_thumb("search", auto=True), type=item.type, link_type=link_type)) return itemlist def 
get_source(url, referer=None): logger.info() if referer is None: data = httptools.downloadpage(url).data else: data = httptools.downloadpage(url, headers={'Referer':referer}).data data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data) return data def section(item): logger.info() itemlist=[] excluded = ['latino', 'español', 'subtitulado', 'v.o.', 'streaming', 'torrent'] full_data = get_source(item.url) data = scrapertools.find_single_match(full_data, 'toptags-container(.*?)<div class="android-more-section">') patron = 'href="([^"]+)">([^<]+)<' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedurl, scrapedtitle in matches: title = scrapedtitle url = host+scrapedurl.replace('/?','/ajax/1/?') if (item.title=='Generos' and title.lower() not in excluded and not title.isdigit()) or (item.title=='Por Año' and title.isdigit()): itemlist.append(Item(channel=item.channel, url=url, title=title, action='list_all', type=item.type)) return itemlist def list_all(item): from core import jsontools logger.info() itemlist = [] listed =[] quality='' infoLabels = {} json_data= jsontools.load(get_source(item.url)) data = json_data['render'] data = re.sub(r'\n|\r|\t|&nbsp;|<br>|\s{2,}', "", data) #if item.type == 'peliculas': patron = '<img\s*class="cover"[^>]+src="([^"]+)"\s*data-id="\d+"\s*' patron +='alt="Ver\s*([^\(]+)(.*?)">\s*' patron += '<div\s*class="mdl-card__menu">\s*<a\s*class="clean-link"\s*href="([^"]+)">' patron += '.*?<span\s*class="link-size">([^<]*)<' matches = re.compile(patron, re.DOTALL).findall(data) for scrapedthumbnail, scrapedtitle, extra_info, scrapedurl , size in matches: if item.send_lang != '': lang = item.send_lang else: lang = '' year='-' extra_info = extra_info.replace('(', '|').replace('[','|').replace(')','').replace(']','') extra_info = extra_info.split('|') for info in extra_info: info = info.strip() if 'Rip' in info or '1080' in info or '720' in info or 'Screener' in info: quality = info if 'ingl' in info.lower(): info = 'VO' if info in IDIOMAS: lang = info elif info.isdigit(): year = info if lang in IDIOMAS: lang = IDIOMAS[lang] title = '%s' % scrapedtitle.strip() if not config.get_setting('unify'): if year.isdigit(): title = '%s [%s]' % (title, year) if quality != '': title = '%s [%s]' % (title, quality) if lang != '': title = '%s [%s]' % (title, lang) thumbnail = host+scrapedthumbnail url = host+scrapedurl if item.type == 'series': season, episode = scrapertools.find_single_match(scrapedtitle, '(\d+)x(\d+)') infoLabels['season'] = season infoLabels['episode'] = episode else: infoLabels['year'] = year if title not in listed: new_item = Item(channel=item.channel, title=title, url=url, action='findvideos', thumbnail=thumbnail, type=item.type, language = lang, quality=quality, link_type=item.link_type, torrent_data= size, infoLabels = infoLabels ) if item.type == 'peliculas' or item.type == 'all': new_item.contentTitle = scrapedtitle else: scrapedtitle = scrapedtitle.split(' - ') new_item.contentSerieName = scrapedtitle[0] itemlist.append(new_item) listed.append(title) tmdb.set_infoLabels(itemlist, seekTmdb=True) itemlist.sort(key=lambda it: it.title) # Paginación if json_data['next']: actual_page = scrapertools.find_single_match(item.url, 'ajax/(\d+)/') next_page =int(actual_page) + 1 url_next_page = item.url.replace('ajax/%s' % actual_page, 'ajax/%s' % next_page) itemlist.append(item.clone(title="Siguiente >>", url=url_next_page, type=item.type, action='list_all', send_lang=item.send_lang)) return itemlist def findvideos(item): logger.info() import base64 
itemlist = [] server = '' data = get_source(item.url) pre_url = scrapertools.find_single_match(data, 'class="inside-link" href="([^"]+)".*?<button type="button"') data = get_source(host+pre_url) patron = 'data-video="([^"]+)"' matches = re.compile(patron, re.DOTALL).findall(data) lang = item.language quality = item.quality for url in matches: title = '' link_type = '' server = '' url = base64.b64decode(url.encode('utf8')).decode('utf8') if 'torrent' in url: if item.link_type == 'torrent' or item.type == 'all': server = 'torrent' link_type = 'torrent' title = ' [%s]' % item.torrent_data elif 'torrent' not in url: link_type = 'flash' if link_type == item.link_type.lower() or item.type == 'all': itemlist.append(Item(channel=item.channel, url=url, title='%s'+title, action='play', server=server, language=lang, quality=quality, infoLabels=item.infoLabels)) itemlist = servertools.get_servers_itemlist(itemlist, lambda x: x.title % x.server.capitalize()) # Requerido para FilterTools itemlist = filtertools.get_links(itemlist, item, list_language) # Requerido para AutoPlay autoplay.start(itemlist, item) itemlist = sorted(itemlist, key=lambda it: it.language) if item.contentType != 'episode': if config.get_videolibrary_support() and len(itemlist) > 0 and item.extra != 'findvideos': itemlist.append( Item(channel=item.channel, title='[COLOR yellow]Añadir esta pelicula a la videoteca[/COLOR]', url=item.url, action="add_pelicula_to_library", extra="findvideos", contentTitle=item.contentTitle)) return itemlist def search(item, texto): logger.info() texto = texto.replace(" ", "+") url = '%spelicula+%s+%s&o=2' % (item.url, texto, item.link_type) #Parche busqueda global (solo vale para peliculas en streaming) if not item.url: item.type = 'peliculas' item.link_type = 'flash' ajax = '%s/%s/es/ajax/1/' % (host, item.type) url = '%s?q=%s+%s+%s&o=2' % (ajax, item.type, texto, item.link_type) item.url = url try: return list_all(item) except: import sys for line in sys.exc_info(): logger.error("%s" % line) return []
gpl-3.0
-5,339,963,962,902,820,000
34.138037
140
0.54155
false
3.665526
false
false
false
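Illustrative sketch (not part of the dataset record above): findvideos in this record base64-decodes each data-video attribute before classifying the link as torrent or streaming ('flash'). The sample HTML and URL below are invented:

import base64
import re

html = '<div class="player" data-video="aHR0cDovL2V4YW1wbGUuY29tL3ZpZGVv"></div>'
for encoded in re.findall(r'data-video="([^"]+)"', html):
    # Attributes hold base64-encoded URLs; decode, then classify the link.
    url = base64.b64decode(encoded.encode('utf8')).decode('utf8')
    link_type = 'torrent' if 'torrent' in url else 'flash'
    print(url, link_type)  # http://example.com/video flash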
Aravinthu/odoo
addons/mrp_repair/models/mrp_repair.py
4
32545
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from datetime import datetime from odoo import api, fields, models, _ from odoo.addons import decimal_precision as dp from odoo.exceptions import UserError, ValidationError from odoo.tools import float_compare class StockMove(models.Model): _inherit = 'stock.move' repair_id = fields.Many2one('mrp.repair') class Repair(models.Model): _name = 'mrp.repair' _description = 'Repair Order' _inherit = ['mail.thread', 'mail.activity.mixin'] _order = 'create_date desc' @api.model def _default_stock_location(self): warehouse = self.env['stock.warehouse'].search([], limit=1) if warehouse: return warehouse.lot_stock_id.id return False name = fields.Char( 'Repair Reference', default=lambda self: self.env['ir.sequence'].next_by_code('mrp.repair'), copy=False, required=True, states={'confirmed': [('readonly', True)]}) product_id = fields.Many2one( 'product.product', string='Product to Repair', readonly=True, required=True, states={'draft': [('readonly', False)]}) product_qty = fields.Float( 'Product Quantity', default=1.0, digits=dp.get_precision('Product Unit of Measure'), readonly=True, required=True, states={'draft': [('readonly', False)]}) product_uom = fields.Many2one( 'product.uom', 'Product Unit of Measure', readonly=True, required=True, states={'draft': [('readonly', False)]}) partner_id = fields.Many2one( 'res.partner', 'Customer', index=True, states={'confirmed': [('readonly', True)]}, help='Choose partner for whom the order will be invoiced and delivered.') address_id = fields.Many2one( 'res.partner', 'Delivery Address', domain="[('parent_id','=',partner_id)]", states={'confirmed': [('readonly', True)]}) default_address_id = fields.Many2one('res.partner', compute='_compute_default_address_id') state = fields.Selection([ ('draft', 'Quotation'), ('cancel', 'Cancelled'), ('confirmed', 'Confirmed'), ('under_repair', 'Under Repair'), ('ready', 'Ready to Repair'), ('2binvoiced', 'To be Invoiced'), ('invoice_except', 'Invoice Exception'), ('done', 'Repaired')], string='Status', copy=False, default='draft', readonly=True, track_visibility='onchange', help="* The \'Draft\' status is used when a user is encoding a new and unconfirmed repair order.\n" "* The \'Confirmed\' status is used when a user confirms the repair order.\n" "* The \'Ready to Repair\' status is used to start to repairing, user can start repairing only after repair order is confirmed.\n" "* The \'To be Invoiced\' status is used to generate the invoice before or after repairing done.\n" "* The \'Done\' status is set when repairing is completed.\n" "* The \'Cancelled\' status is used when user cancel repair order.") location_id = fields.Many2one( 'stock.location', 'Current Location', default=_default_stock_location, index=True, readonly=True, required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}) location_dest_id = fields.Many2one( 'stock.location', 'Delivery Location', readonly=True, required=True, states={'draft': [('readonly', False)], 'confirmed': [('readonly', True)]}) lot_id = fields.Many2one( 'stock.production.lot', 'Lot/Serial', domain="[('product_id','=', product_id)]", help="Products repaired are all belonging to this lot", oldname="prodlot_id") guarantee_limit = fields.Date('Warranty Expiration', states={'confirmed': [('readonly', True)]}) operations = fields.One2many( 'mrp.repair.line', 'repair_id', 'Parts', copy=True, readonly=True, states={'draft': [('readonly', False)]}) pricelist_id = 
fields.Many2one( 'product.pricelist', 'Pricelist', default=lambda self: self.env['product.pricelist'].search([], limit=1).id, help='Pricelist of the selected partner.') partner_invoice_id = fields.Many2one('res.partner', 'Invoicing Address') invoice_method = fields.Selection([ ("none", "No Invoice"), ("b4repair", "Before Repair"), ("after_repair", "After Repair")], string="Invoice Method", default='none', index=True, readonly=True, required=True, states={'draft': [('readonly', False)]}, help='Selecting \'Before Repair\' or \'After Repair\' will allow you to generate invoice before or after the repair is done respectively. \'No invoice\' means you don\'t want to generate invoice for this repair order.') invoice_id = fields.Many2one( 'account.invoice', 'Invoice', copy=False, readonly=True, track_visibility="onchange") move_id = fields.Many2one( 'stock.move', 'Move', copy=False, readonly=True, track_visibility="onchange", help="Move created by the repair order") fees_lines = fields.One2many( 'mrp.repair.fee', 'repair_id', 'Operations', copy=True, readonly=True, states={'draft': [('readonly', False)]}) internal_notes = fields.Text('Internal Notes') quotation_notes = fields.Text('Quotation Notes') company_id = fields.Many2one( 'res.company', 'Company', default=lambda self: self.env['res.company']._company_default_get('mrp.repair')) invoiced = fields.Boolean('Invoiced', copy=False, readonly=True) repaired = fields.Boolean('Repaired', copy=False, readonly=True) amount_untaxed = fields.Float('Untaxed Amount', compute='_amount_untaxed', store=True) amount_tax = fields.Float('Taxes', compute='_amount_tax', store=True) amount_total = fields.Float('Total', compute='_amount_total', store=True) tracking = fields.Selection('Product Tracking', related="product_id.tracking") @api.one @api.depends('partner_id') def _compute_default_address_id(self): if self.partner_id: self.default_address_id = self.partner_id.address_get(['contact'])['contact'] @api.one @api.depends('operations.price_subtotal', 'invoice_method', 'fees_lines.price_subtotal', 'pricelist_id.currency_id') def _amount_untaxed(self): total = sum(operation.price_subtotal for operation in self.operations) total += sum(fee.price_subtotal for fee in self.fees_lines) self.amount_untaxed = self.pricelist_id.currency_id.round(total) @api.one @api.depends('operations.price_unit', 'operations.product_uom_qty', 'operations.product_id', 'fees_lines.price_unit', 'fees_lines.product_uom_qty', 'fees_lines.product_id', 'pricelist_id.currency_id', 'partner_id') def _amount_tax(self): val = 0.0 for operation in self.operations: if operation.tax_id: tax_calculate = operation.tax_id.compute_all(operation.price_unit, self.pricelist_id.currency_id, operation.product_uom_qty, operation.product_id, self.partner_id) for c in tax_calculate['taxes']: val += c['amount'] for fee in self.fees_lines: if fee.tax_id: tax_calculate = fee.tax_id.compute_all(fee.price_unit, self.pricelist_id.currency_id, fee.product_uom_qty, fee.product_id, self.partner_id) for c in tax_calculate['taxes']: val += c['amount'] self.amount_tax = val @api.one @api.depends('amount_untaxed', 'amount_tax') def _amount_total(self): self.amount_total = self.pricelist_id.currency_id.round(self.amount_untaxed + self.amount_tax) _sql_constraints = [ ('name', 'unique (name)', 'The name of the Repair Order must be unique!'), ] @api.onchange('product_id') def onchange_product_id(self): self.guarantee_limit = False self.lot_id = False if self.product_id: self.product_uom = self.product_id.uom_id.id 
@api.onchange('product_uom') def onchange_product_uom(self): res = {} if not self.product_id or not self.product_uom: return res if self.product_uom.category_id != self.product_id.uom_id.category_id: res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')} self.product_uom = self.product_id.uom_id.id return res @api.onchange('location_id') def onchange_location_id(self): self.location_dest_id = self.location_id.id @api.onchange('partner_id') def onchange_partner_id(self): if not self.partner_id: self.address_id = False self.partner_invoice_id = False self.pricelist_id = self.env['product.pricelist'].search([], limit=1).id else: addresses = self.partner_id.address_get(['delivery', 'invoice', 'contact']) self.address_id = addresses['delivery'] or addresses['contact'] self.partner_invoice_id = addresses['invoice'] self.pricelist_id = self.partner_id.property_product_pricelist.id @api.multi def button_dummy(self): # TDE FIXME: this button is very interesting return True @api.multi def action_repair_cancel_draft(self): if self.filtered(lambda repair: repair.state != 'cancel'): raise UserError(_("Repair must be canceled in order to reset it to draft.")) self.mapped('operations').write({'state': 'draft'}) return self.write({'state': 'draft'}) def action_validate(self): self.ensure_one() precision = self.env['decimal.precision'].precision_get('Product Unit of Measure') available_qty = self.env['stock.quant']._get_available_quantity(self.product_id, self.location_id, self.lot_id, strict=True) if float_compare(available_qty, self.product_qty, precision_digits=precision) >= 0: return self.action_repair_confirm() else: return { 'name': _('Insufficient Quantity'), 'view_type': 'form', 'view_mode': 'form', 'res_model': 'stock.warn.insufficient.qty.repair', 'view_id': self.env.ref('mrp_repair.stock_warn_insufficient_qty_repair_form_view').id, 'type': 'ir.actions.act_window', 'context': { 'default_product_id': self.product_id.id, 'default_location_id': self.location_id.id, 'default_repair_id': self.id }, 'target': 'new' } @api.multi def action_repair_confirm(self): """ Repair order state is set to 'To be invoiced' when invoice method is 'Before repair' else state becomes 'Confirmed'. 
@param *arg: Arguments @return: True """ if self.filtered(lambda repair: repair.state != 'draft'): raise UserError(_("Can only confirm draft repairs.")) before_repair = self.filtered(lambda repair: repair.invoice_method == 'b4repair') before_repair.write({'state': '2binvoiced'}) to_confirm = self - before_repair to_confirm_operations = to_confirm.mapped('operations') to_confirm_operations.write({'state': 'confirmed'}) to_confirm.write({'state': 'confirmed'}) return True @api.multi def action_repair_cancel(self): if self.filtered(lambda repair: repair.state == 'done'): raise UserError(_("Cannot cancel completed repairs.")) if any(repair.invoiced for repair in self): raise UserError(_('Repair order is already invoiced.')) self.mapped('operations').write({'state': 'cancel'}) return self.write({'state': 'cancel'}) @api.multi def action_send_mail(self): self.ensure_one() template_id = self.env.ref('mrp_repair.mail_template_mrp_repair_quotation').id ctx = { 'default_model': 'mrp.repair', 'default_res_id': self.id, 'default_use_template': bool(template_id), 'default_template_id': template_id, 'default_composition_mode': 'comment' } return { 'type': 'ir.actions.act_window', 'view_type': 'form', 'view_mode': 'form', 'res_model': 'mail.compose.message', 'target': 'new', 'context': ctx, } @api.multi def print_repair_order(self): return self.env.ref('mrp_repair.action_report_mrp_repair_order').report_action(self) def action_repair_invoice_create(self): for repair in self: repair.action_invoice_create() if repair.invoice_method == 'b4repair': repair.action_repair_ready() elif repair.invoice_method == 'after_repair': repair.write({'state': 'done'}) return True @api.multi def action_invoice_create(self, group=False): """ Creates invoice(s) for repair order. @param group: It is set to true when group invoice is to be generated. @return: Invoice Ids. 
""" res = dict.fromkeys(self.ids, False) invoices_group = {} InvoiceLine = self.env['account.invoice.line'] Invoice = self.env['account.invoice'] for repair in self.filtered(lambda repair: repair.state not in ('draft', 'cancel') and not repair.invoice_id): if not repair.partner_id.id and not repair.partner_invoice_id.id: raise UserError(_('You have to select a Partner Invoice Address in the repair form!')) comment = repair.quotation_notes if repair.invoice_method != 'none': if group and repair.partner_invoice_id.id in invoices_group: invoice = invoices_group[repair.partner_invoice_id.id] invoice.write({ 'name': invoice.name + ', ' + repair.name, 'origin': invoice.origin + ', ' + repair.name, 'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''), }) else: if not repair.partner_id.property_account_receivable_id: raise UserError(_('No account defined for partner "%s".') % repair.partner_id.name) invoice = Invoice.create({ 'name': repair.name, 'origin': repair.name, 'type': 'out_invoice', 'account_id': repair.partner_id.property_account_receivable_id.id, 'partner_id': repair.partner_invoice_id.id or repair.partner_id.id, 'currency_id': repair.pricelist_id.currency_id.id, 'comment': repair.quotation_notes, 'fiscal_position_id': repair.partner_id.property_account_position_id.id }) invoices_group[repair.partner_invoice_id.id] = invoice repair.write({'invoiced': True, 'invoice_id': invoice.id}) for operation in repair.operations: if operation.type == 'add': if group: name = repair.name + '-' + operation.name else: name = operation.name if operation.product_id.property_account_income_id: account_id = operation.product_id.property_account_income_id.id elif operation.product_id.categ_id.property_account_income_categ_id: account_id = operation.product_id.categ_id.property_account_income_categ_id.id else: raise UserError(_('No account defined for product "%s".') % operation.product_id.name) invoice_line = InvoiceLine.create({ 'invoice_id': invoice.id, 'name': name, 'origin': repair.name, 'account_id': account_id, 'quantity': operation.product_uom_qty, 'invoice_line_tax_ids': [(6, 0, [x.id for x in operation.tax_id])], 'uom_id': operation.product_uom.id, 'price_unit': operation.price_unit, 'price_subtotal': operation.product_uom_qty * operation.price_unit, 'product_id': operation.product_id and operation.product_id.id or False }) operation.write({'invoiced': True, 'invoice_line_id': invoice_line.id}) for fee in repair.fees_lines: if group: name = repair.name + '-' + fee.name else: name = fee.name if not fee.product_id: raise UserError(_('No product defined on Fees!')) if fee.product_id.property_account_income_id: account_id = fee.product_id.property_account_income_id.id elif fee.product_id.categ_id.property_account_income_categ_id: account_id = fee.product_id.categ_id.property_account_income_categ_id.id else: raise UserError(_('No account defined for product "%s".') % fee.product_id.name) invoice_line = InvoiceLine.create({ 'invoice_id': invoice.id, 'name': name, 'origin': repair.name, 'account_id': account_id, 'quantity': fee.product_uom_qty, 'invoice_line_tax_ids': [(6, 0, [x.id for x in fee.tax_id])], 'uom_id': fee.product_uom.id, 'product_id': fee.product_id and fee.product_id.id or False, 'price_unit': fee.price_unit, 'price_subtotal': fee.product_uom_qty * fee.price_unit }) fee.write({'invoiced': True, 'invoice_line_id': invoice_line.id}) invoice.compute_taxes() res[repair.id] = invoice.id return res @api.multi def 
action_created_invoice(self): self.ensure_one() return { 'name': _('Invoice created'), 'type': 'ir.actions.act_window', 'view_mode': 'form', 'res_model': 'account.invoice', 'view_id': self.env.ref('account.invoice_form').id, 'target': 'current', 'res_id': self.invoice_id.id, } def action_repair_ready(self): self.mapped('operations').write({'state': 'confirmed'}) return self.write({'state': 'ready'}) @api.multi def action_repair_start(self): """ Writes repair order state to 'Under Repair' @return: True """ if self.filtered(lambda repair: repair.state not in ['confirmed', 'ready']): raise UserError(_("Repair must be confirmed before starting reparation.")) self.mapped('operations').write({'state': 'confirmed'}) return self.write({'state': 'under_repair'}) @api.multi def action_repair_end(self): """ Writes repair order state to 'To be invoiced' if invoice method is After repair else state is set to 'Ready'. @return: True """ if self.filtered(lambda repair: repair.state != 'under_repair'): raise UserError(_("Repair must be under repair in order to end reparation.")) for repair in self: repair.write({'repaired': True}) vals = {'state': 'done'} vals['move_id'] = repair.action_repair_done().get(repair.id) if not repair.invoiced and repair.invoice_method == 'after_repair': vals['state'] = '2binvoiced' repair.write(vals) return True @api.multi def action_repair_done(self): """ Creates stock move for operation and stock move for final product of repair order. @return: Move ids of final products """ if self.filtered(lambda repair: not repair.repaired): raise UserError(_("Repair must be repaired in order to make the product moves.")) res = {} Move = self.env['stock.move'] for repair in self: moves = self.env['stock.move'] for operation in repair.operations: move = Move.create({ 'name': repair.name, 'product_id': operation.product_id.id, 'product_uom_qty': operation.product_uom_qty, 'product_uom': operation.product_uom.id, 'partner_id': repair.address_id.id, 'location_id': operation.location_id.id, 'location_dest_id': operation.location_dest_id.id, 'move_line_ids': [(0, 0, {'product_id': operation.product_id.id, 'lot_id': operation.lot_id.id, 'product_uom_qty': 0, # bypass reservation here 'product_uom_id': operation.product_uom.id, 'qty_done': operation.product_uom_qty, 'package_id': False, 'result_package_id': False, 'location_id': operation.location_id.id, #TODO: owner stuff 'location_dest_id': operation.location_dest_id.id,})], 'repair_id': repair.id, 'origin': repair.name, }) moves |= move operation.write({'move_id': move.id, 'state': 'done'}) move = Move.create({ 'name': repair.name, 'product_id': repair.product_id.id, 'product_uom': repair.product_uom.id or repair.product_id.uom_id.id, 'product_uom_qty': repair.product_qty, 'partner_id': repair.address_id.id, 'location_id': repair.location_id.id, 'location_dest_id': repair.location_dest_id.id, 'move_line_ids': [(0, 0, {'product_id': repair.product_id.id, 'lot_id': repair.lot_id.id, 'product_uom_qty': 0, # bypass reservation here 'product_uom_id': repair.product_uom.id or repair.product_id.uom_id.id, 'qty_done': repair.product_qty, 'package_id': False, 'result_package_id': False, 'location_id': repair.location_id.id, #TODO: owner stuff 'location_dest_id': repair.location_dest_id.id,})], 'repair_id': repair.id, 'origin': repair.name, }) consumed_lines = moves.mapped('move_line_ids') produced_lines = move.move_line_ids moves |= move moves._action_done() produced_lines.write({'consume_line_ids': [(6, 0, consumed_lines.ids)]}) res[repair.id] = 
move.id return res class RepairLine(models.Model): _name = 'mrp.repair.line' _description = 'Repair Line' name = fields.Char('Description', required=True) repair_id = fields.Many2one( 'mrp.repair', 'Repair Order Reference', index=True, ondelete='cascade') type = fields.Selection([ ('add', 'Add'), ('remove', 'Remove')], 'Type', required=True) product_id = fields.Many2one('product.product', 'Product', required=True) invoiced = fields.Boolean('Invoiced', copy=False, readonly=True) price_unit = fields.Float('Unit Price', required=True, digits=dp.get_precision('Product Price')) price_subtotal = fields.Float('Subtotal', compute='_compute_price_subtotal', digits=0) tax_id = fields.Many2many( 'account.tax', 'repair_operation_line_tax', 'repair_operation_line_id', 'tax_id', 'Taxes') product_uom_qty = fields.Float( 'Quantity', default=1.0, digits=dp.get_precision('Product Unit of Measure'), required=True) product_uom = fields.Many2one( 'product.uom', 'Product Unit of Measure', required=True) invoice_line_id = fields.Many2one( 'account.invoice.line', 'Invoice Line', copy=False, readonly=True) location_id = fields.Many2one( 'stock.location', 'Source Location', index=True, required=True) location_dest_id = fields.Many2one( 'stock.location', 'Dest. Location', index=True, required=True) move_id = fields.Many2one( 'stock.move', 'Inventory Move', copy=False, readonly=True) lot_id = fields.Many2one('stock.production.lot', 'Lot/Serial') state = fields.Selection([ ('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')], 'Status', default='draft', copy=False, readonly=True, required=True, help='The status of a repair line is set automatically to the one of the linked repair order.') @api.constrains('lot_id', 'product_id') def constrain_lot_id(self): for line in self.filtered(lambda x: x.product_id.tracking != 'none' and not x.lot_id): raise ValidationError(_("Serial number is required for operation line with product '%s'") % (line.product_id.name)) @api.one @api.depends('price_unit', 'repair_id', 'product_uom_qty', 'product_id', 'repair_id.invoice_method') def _compute_price_subtotal(self): taxes = self.tax_id.compute_all(self.price_unit, self.repair_id.pricelist_id.currency_id, self.product_uom_qty, self.product_id, self.repair_id.partner_id) self.price_subtotal = taxes['total_excluded'] @api.onchange('type', 'repair_id') def onchange_operation_type(self): """ On change of operation type it sets source location, destination location and to invoice field. @param product: Changed operation type. @param guarantee_limit: Guarantee limit of current record. @return: Dictionary of values. """ if not self.type: self.location_id = False self.location_dest_id = False elif self.type == 'add': self.onchange_product_id() args = self.repair_id.company_id and [('company_id', '=', self.repair_id.company_id.id)] or [] warehouse = self.env['stock.warehouse'].search(args, limit=1) self.location_id = warehouse.lot_stock_id self.location_dest_id = self.env['stock.location'].search([('usage', '=', 'production')], limit=1).id else: self.price_unit = 0.0 self.tax_id = False self.location_id = self.env['stock.location'].search([('usage', '=', 'production')], limit=1).id self.location_dest_id = self.env['stock.location'].search([('scrap_location', '=', True)], limit=1).id @api.onchange('repair_id', 'product_id', 'product_uom_qty') def onchange_product_id(self): """ On change of product it sets product quantity, tax account, name, uom of product, unit price and price subtotal. 
""" partner = self.repair_id.partner_id pricelist = self.repair_id.pricelist_id if not self.product_id or not self.product_uom_qty: return if self.product_id: if partner: self.name = self.product_id.with_context(lang=partner.lang).display_name else: self.name = self.product_id.display_name self.product_uom = self.product_id.uom_id.id if self.type != 'remove': if partner and self.product_id: self.tax_id = partner.property_account_position_id.map_tax(self.product_id.taxes_id, self.product_id, partner).ids warning = False if not pricelist: warning = { 'title': _('No Pricelist!'), 'message': _('You have to select a pricelist in the Repair form !\n Please set one before choosing a product.')} else: price = pricelist.get_product_price(self.product_id, self.product_uom_qty, partner) if price is False: warning = { 'title': _('No valid pricelist line found !'), 'message': _("Couldn't find a pricelist line matching this product and quantity.\nYou have to change either the product, the quantity or the pricelist.")} else: self.price_unit = price if warning: return {'warning': warning} class RepairFee(models.Model): _name = 'mrp.repair.fee' _description = 'Repair Fees Line' repair_id = fields.Many2one( 'mrp.repair', 'Repair Order Reference', index=True, ondelete='cascade', required=True) name = fields.Char('Description', index=True, required=True) product_id = fields.Many2one('product.product', 'Product') product_uom_qty = fields.Float('Quantity', digits=dp.get_precision('Product Unit of Measure'), required=True, default=1.0) price_unit = fields.Float('Unit Price', required=True) product_uom = fields.Many2one('product.uom', 'Product Unit of Measure', required=True) price_subtotal = fields.Float('Subtotal', compute='_compute_price_subtotal', digits=0) tax_id = fields.Many2many('account.tax', 'repair_fee_line_tax', 'repair_fee_line_id', 'tax_id', 'Taxes') invoice_line_id = fields.Many2one('account.invoice.line', 'Invoice Line', copy=False, readonly=True) invoiced = fields.Boolean('Invoiced', copy=False, readonly=True) @api.one @api.depends('price_unit', 'repair_id', 'product_uom_qty', 'product_id') def _compute_price_subtotal(self): taxes = self.tax_id.compute_all(self.price_unit, self.repair_id.pricelist_id.currency_id, self.product_uom_qty, self.product_id, self.repair_id.partner_id) self.price_subtotal = taxes['total_excluded'] @api.onchange('repair_id', 'product_id', 'product_uom_qty') def onchange_product_id(self): """ On change of product it sets product quantity, tax account, name, uom of product, unit price and price subtotal. """ if not self.product_id: return partner = self.repair_id.partner_id pricelist = self.repair_id.pricelist_id if partner and self.product_id: self.tax_id = partner.property_account_position_id.map_tax(self.product_id.taxes_id, self.product_id, partner).ids if self.product_id: self.name = self.product_id.display_name self.product_uom = self.product_id.uom_id.id warning = False if not pricelist: warning = { 'title': _('No Pricelist!'), 'message': _('You have to select a pricelist in the Repair form !\n Please set one before choosing a product.')} else: price = pricelist.get_product_price(self.product_id, self.product_uom_qty, partner) if price is False: warning = { 'title': _('No valid pricelist line found !'), 'message': _("Couldn't find a pricelist line matching this product and quantity.\nYou have to change either the product, the quantity or the pricelist.")} else: self.price_unit = price if warning: return {'warning': warning}
agpl-3.0
-5,396,696,964,052,102,000
48.161631
227
0.570103
false
4.089595
false
false
false