| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5–92 | stringlengths 4–221 | stringclasses (19 values) | stringlengths 4–6 | stringlengths 766–896k | stringclasses (15 values) | int64 -9,223,277,421,539,062,000 to 9,223,102,107B | float64 6.51–99.9 | int64 32–997 | float64 0.25–0.96 | bool (1 class) | float64 1.5–13.6 | bool (2 classes) | bool (2 classes) | bool (1 class) |
wojciechpolak/webxiangpianbu | tools/staticgen.py | 1 | 12710 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# WebXiangpianbu Copyright (C) 2014, 2015 Wojciech Polak
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import os
import sys
import glob
import getopt
import shutil
import signal
import codecs
from datetime import datetime
from django.utils import six
from django.utils.six.moves import urllib, SimpleHTTPServer, socketserver
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
os.environ['DJANGO_SETTINGS_MODULE'] = 'webxiang.settings'
sys.path.insert(0, os.path.join(SITE_ROOT, '../'))
import django
if hasattr(django, 'setup'):
django.setup()
from django.conf import settings
try:
from django.shortcuts import render
except ImportError as e:
print(e)
print("Copy `webxiang/settings_sample.py` to " \
"`webxiang/settings.py` and modify it to your needs.")
sys.exit(1)
from django.core.urlresolvers import set_urlconf, set_script_prefix
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.utils import translation
from django.utils.translation import ugettext as _
from webxiang import webxiang
__generated = set()
__items_no = 0
def main():
opts = {
'verbose': 1,
'output_dir': None,
'album_dir': os.path.abspath(getattr(settings, 'ALBUM_DIR', 'albums')),
'photo_dir': os.path.abspath(getattr(settings, 'WEBXIANG_PHOTOS_ROOT', '')),
'root': '/',
'assets_url': getattr(settings, 'STATIC_URL', 'assets/'),
'photos_url': getattr(settings, 'WEBXIANG_PHOTOS_URL', 'data/'),
'names': 'index',
'lang': 'en',
'quick': False,
'copy': False,
'serve': None,
'port': 8000,
}
try:
gopts, args = getopt.getopt(sys.argv[1:], 'v:yl:sp:',
['help',
'verbose=',
'lang=',
'output-dir=',
'album-dir=',
'photo-dir=',
'root=',
'assets-url=',
'photos-url=',
'copy',
'quick=',
'serve=',
'port=',
])
for o, arg in gopts:
if o == '--help':
raise getopt.GetoptError('')
elif o in ('-v', '--verbose'):
opts['verbose'] = int(arg)
elif o == '--output-dir':
opts['output_dir'] = arg
elif o == '--album-dir':
opts['album_dir'] = os.path.abspath(arg)
settings.ALBUM_DIR = opts['album_dir']
elif o == '--photo-dir':
opts['photo_dir'] = os.path.abspath(arg)
elif o == '--root':
if not arg.endswith('/'):
arg += '/'
opts['root'] = arg
elif o == '--assets-url':
if not arg.endswith('/'):
arg += '/'
opts['assets_url'] = arg
elif o == '--photos-url':
if not arg.endswith('/'):
arg += '/'
opts['photos_url'] = arg
elif o in ('-l', '--lang'):
opts['lang'] = arg
elif o == '--copy':
opts['copy'] = True
elif o in ('-s', '--serve'):
opts['serve'] = arg
elif o in ('-p', '--port'):
opts['port'] = int(arg)
elif o == '--quick': # a quick shortcut
arg = os.path.expanduser(arg).rstrip('/')
opts['quick'] = arg
args = [os.path.basename(arg)]
if len(args):
opts['names'] = args[0]
if len(args) > 1:
opts['output_dir'] = args[1]
else:
opts['names'] = 'index'
except getopt.GetoptError:
print("Usage: %s [OPTION...] [ALBUM-NAME1,NAME2]" % sys.argv[0])
print("%s -- album static HTML generator" % sys.argv[0])
print("""
Options Default values
-v, --verbose [%(verbose)s]
--output-dir [output-DATETIME/]
--album-dir [%(album_dir)s]
--photo-dir [%(photo_dir)s]
--root [%(root)s]
--assets-url [%(assets_url)s]
--photos-url [%(photos_url)s]
-l, --lang [%(lang)s]
--copy [%(copy)s]
--quick [folder's name]
-s, --serve [output dir]
-p, --port [%(port)s]
""" % opts)
sys.exit(1)
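# Typical invocations (hypothetical album names and paths):
# python staticgen.py --quick ~/photos/holiday
# python staticgen.py --copy --output-dir out/ index,holiday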
signal.signal(signal.SIGTERM, lambda signum, frame: __quit_app())
signal.signal(signal.SIGINT, lambda signum, frame: __quit_app())
if opts['serve']:
serve(opts, opts['serve'])
sys.exit(0)
if opts['lang']:
if opts['verbose'] > 1:
print('Switching language to %s' % opts['lang'])
translation.activate(opts['lang'])
set_urlconf('webxiang.urls_static')
set_script_prefix(opts['root'])
root_dir = opts['output_dir'] and os.path.abspath(
os.path.expanduser(opts['output_dir'])) or \
'output-%s' % datetime.now().strftime('%Y%m%d-%H%M%S')
output_dir = os.path.join(root_dir, opts['root'].lstrip('/'))
if opts['quick']:
arg = opts['quick']
arg_basename = os.path.basename(arg)
opts['assets_url'] = '%s/assets/' % arg_basename
opts['photos_url'] = '%s/data/' % arg_basename
opts['album_dir'] = os.path.abspath(arg + '/')
opts['photo_dir'] = opts['album_dir']
settings.ALBUM_DIR = opts['album_dir']
opts['assets_url'] = urllib.parse.urljoin(opts['root'], opts['assets_url'])
opts['photos_url'] = urllib.parse.urljoin(opts['root'], opts['photos_url'])
settings.WEBXIANG_PHOTOS_URL = opts['photos_url']
try:
if not os.path.exists(output_dir):
print('Creating directory "%s"' % output_dir)
os.makedirs(output_dir)
except OSError: # the directory may already exist
pass
if not opts['photos_url'].startswith('http'):
photos_url = opts['photos_url'].\
replace(opts['root'], '', 1).lstrip('/')
photos_url = os.path.join(output_dir, photos_url)
if opts['copy']:
print('Copying photos "%s" into "%s"' % \
(opts['photo_dir'].rstrip('/'), photos_url))
try:
if not os.path.exists(photos_url):
os.makedirs(photos_url)
__copytree(opts['photo_dir'].rstrip('/'), photos_url)
except Exception as e:
print('Copying photos', e)
else:
print('Linking photos: ln -s %s %s' % \
(opts['photo_dir'].rstrip('/'), photos_url.rstrip('/')))
try:
d = os.path.dirname(photos_url.rstrip('/'))
if not os.path.exists(d):
os.makedirs(d)
os.symlink(opts['photo_dir'].rstrip('/'),
photos_url.rstrip('/'))
except Exception as e:
print('Linking photos', e)
print('Copying assets (JS, CSS, etc.) into "%s"' % \
os.path.join(root_dir, opts['assets_url'].lstrip('/')))
try:
__copytree(settings.STATIC_ROOT,
os.path.join(root_dir,
opts['assets_url'].lstrip('/')))
except Exception as e:
print('Copying assets', e)
print('Generating static pages.')
for album_name in opts['names'].split(','):
__gen_html_album(opts, album_name, output_dir=output_dir)
print('Finished %s' % output_dir)
print('Done. Created %d files.' % __items_no)
if opts['serve'] is not False:
serve(opts, root_dir)
def __quit_app(code=0):
print()
sys.exit(code)
def serve(opts, root_dir=None):
class SimpleServer(six.moves.socketserver.TCPServer):
allow_reuse_address = True
if root_dir:
os.chdir(root_dir)
httpd = SimpleServer(('localhost', opts['port']),
six.moves.SimpleHTTPServer.SimpleHTTPRequestHandler)
print('Serving at %s%s' % ('localhost:%d' % opts['port'], opts['root']))
print('Quit the server with CONTROL-C.')
httpd.serve_forever()
def __gen_html_album(opts, album_name, output_dir='.', page=1):
global __generated, __items_no
entry_id = '%s:%s' % (album_name, page)
if entry_id in __generated:
return
__generated.add(entry_id)
if page == 1:
print(album_name, end=' ')
data = webxiang.get_data(album=album_name, page=page)
if not data:
return
tpl = data['meta'].get('template') or 'default.html'
if not tpl.endswith('.html'):
tpl += '.html'
data['STATIC_URL'] = opts['assets_url']
try:
html = render_to_string(tpl, data)
except TemplateDoesNotExist:
html = render_to_string('default.html', data)
if page > 1:
output_file = os.path.join(output_dir, album_name,
_('page-%(number)s.html') % {'number': page})
else:
output_file = os.path.join(output_dir, album_name, 'index.html')
if opts['verbose'] > 1:
print('writing %s' % output_file)
elif opts['verbose'] == 1:
sys.stdout.write('.')
sys.stdout.flush()
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
f = codecs.open(output_file, 'w', 'utf-8')
f.write(str(html))
f.close()
__items_no += 1
# symlink '/index.html' to '/index/index.html'
if album_name == 'index':
os.symlink('index/index.html',
os.path.join(output_dir, 'index.html'))
for i in data['entries'].paginator.page_range_limited:
__gen_html_album(opts, album_name, output_dir=output_dir, page=i)
for entry in data['entries']:
if 'album' in entry:
__gen_html_album(opts, entry['album'], output_dir)
else:
__gen_html_photo(opts, album_name,
'%s/' % entry['index'], output_dir)
def __gen_html_photo(opts, album_name, entry_idx, output_dir='.'):
global __generated, __items_no
entry_id = '%s/%s' % (album_name, entry_idx)
if entry_id in __generated:
return
__generated.add(entry_id)
photo_idx = entry_idx.split('/')[0]
data = webxiang.get_data(album=album_name, photo=entry_idx)
if not data:
return
tpl = data['meta'].get('template') or 'default.html'
if not tpl.endswith('.html'):
tpl += '.html'
data['STATIC_URL'] = opts['assets_url']
try:
html = render_to_string(tpl, data)
except TemplateDoesNotExist:
html = render_to_string('default.html', data)
try:
os.makedirs(os.path.join(output_dir, album_name))
except OSError: # the directory may already exist
pass
entry = data['entries'][int(photo_idx) - 1]
if 'slug' in entry:
photo_name = '%s/%s.html' % (photo_idx, entry['slug'])
else:
photo_name = '%s.html' % photo_idx
output_file = os.path.join(output_dir, album_name, photo_name)
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
if opts['verbose'] > 1:
print('writing %s' % output_file)
elif opts['verbose'] == 1:
sys.stdout.write('.')
sys.stdout.flush()
f = codecs.open(output_file, 'w', 'utf-8')
f.write(str(html))
f.close()
__items_no += 1
def __copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
if __name__ == '__main__':
main()
| gpl-3.0 | 6,789,874,336,057,884,000 | 31.589744 | 84 | 0.525649 | false | 3.712033 | false | false | false |
bmng-dev/PyBitmessage | src/inventory.py | 1 | 10082 | import collections
import Queue
import time
from threading import enumerate as threadingEnumerate
from threading import RLock, current_thread
from helper_sql import SqlBulkExecute, sqlExecute, sqlQuery
from singleton import Singleton
@Singleton
class Inventory(collections.MutableMapping):
def __init__(self):
super(self.__class__, self).__init__()
self._inventory = {} # of objects (like msg payloads and pubkey payloads); does not include protocol headers (the first 24 bytes of each packet).
self.numberOfInventoryLookupsPerformed = 0
self._streams = collections.defaultdict(set) # key = streamNumber, value = a set which holds the inventory object hashes that we are aware of. This is used whenever we receive an inv message from a peer, to check which items are new to us. We don't delete things out of it; instead, the singleCleaner thread clears and refills it every couple of hours.
self.lock = RLock() # Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
self.InventoryItem = collections.namedtuple('InventoryItem', 'type stream payload expires tag')
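# A stored value is an InventoryItem namedtuple; a hypothetical example:
# InventoryItem(type=1, stream=1, payload='...', expires=1455555555, tag='')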
def __contains__(self, hash):
with self.lock:
self.numberOfInventoryLookupsPerformed += 1
if hash in self._inventory:
return True
return bool(sqlQuery('SELECT 1 FROM inventory WHERE hash=?', hash))
def __getitem__(self, hash):
with self.lock:
if hash in self._inventory:
return self._inventory[hash]
rows = sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE hash=?', hash)
if not rows:
raise KeyError(hash)
return self.InventoryItem(*rows[0])
def __setitem__(self, hash, value):
with self.lock:
value = self.InventoryItem(*value)
self._inventory[hash] = value
self._streams[value.stream].add(hash)
def __delitem__(self, hash):
raise NotImplementedError
def __iter__(self):
with self.lock:
hashes = self._inventory.keys()[:]
hashes += (x for x, in sqlQuery('SELECT hash FROM inventory'))
return hashes.__iter__()
def __len__(self):
with self.lock:
return len(self._inventory) + sqlQuery('SELECT count(*) FROM inventory')[0][0]
def by_type_and_tag(self, type, tag):
with self.lock:
values = [value for value in self._inventory.values() if value.type == type and value.tag == tag]
values += (self.InventoryItem(*value) for value in sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE objecttype=? AND tag=?', type, tag))
return values
def hashes_by_stream(self, stream):
with self.lock:
s = self._streams[stream]
if not s:
s.update((inv_vector for inv_vector, in sqlQuery('SELECT hash FROM inventory WHERE streamnumber=? AND expirestime>?', stream, int(time.time()) - 3600)))
return s
def unexpired_hashes_by_stream(self, stream):
with self.lock:
t = int(time.time())
hashes = [x for x, value in self._inventory.items() if value.stream == stream and value.expires > t]
hashes += (payload for payload, in sqlQuery('SELECT hash FROM inventory WHERE streamnumber=? AND expirestime>?', stream, t))
return hashes
def flush(self):
with self.lock: # If you use both the inventoryLock and the sqlLock, always use the inventoryLock OUTSIDE of the sqlLock.
with SqlBulkExecute() as sql:
for objectHash, value in self._inventory.items():
sql.execute('INSERT INTO inventory VALUES (?, ?, ?, ?, ?, ?)', objectHash, *value)
self._inventory.clear()
def clean(self):
with self.lock:
sqlExecute('DELETE FROM inventory WHERE expirestime<?',int(time.time()) - (60 * 60 * 3))
self._streams.clear()
for objectHash, value in self.items():
self._streams[value.stream].add(objectHash)
class PendingDownloadQueue(Queue.Queue):
# keeps track of objects that have been advertised to us but that we haven't downloaded yet
maxWait = 300
def __init__(self, maxsize=0):
Queue.Queue.__init__(self, maxsize)
self.stopped = False
self.pending = {}
self.lock = RLock()
def task_done(self, hashId):
Queue.Queue.task_done(self)
try:
with self.lock:
del self.pending[hashId]
except KeyError:
pass
def get(self, block=True, timeout=None):
retval = Queue.Queue.get(self, block, timeout)
# no exception was raised
if not self.stopped:
with self.lock:
self.pending[retval] = time.time()
return retval
def clear(self):
with self.lock:
newPending = {}
for hashId in self.pending:
if self.pending[hashId] + PendingDownloadQueue.maxWait > time.time():
newPending[hashId] = self.pending[hashId]
self.pending = newPending
@staticmethod
def totalSize():
size = 0
for thread in threadingEnumerate():
if thread.isAlive() and hasattr(thread, 'downloadQueue'):
size += thread.downloadQueue.qsize() + len(thread.downloadQueue.pending)
return size
@staticmethod
def stop():
for thread in threadingEnumerate():
if thread.isAlive() and hasattr(thread, 'downloadQueue'):
thread.downloadQueue.stopped = True
with thread.downloadQueue.lock:
thread.downloadQueue.pending = {}
class PendingUploadDeadlineException(Exception):
pass
@Singleton
class PendingUpload(object):
# keeps track of objects that we have created but haven't distributed yet
def __init__(self):
super(self.__class__, self).__init__()
self.lock = RLock()
self.hashes = {}
# end by this time in any case
self.deadline = 0
self.maxLen = 0
# during shutdown, wait up to 20 seconds to finish uploading
self.shutdownWait = 20
# forget tracking objects after 60 seconds
self.objectWait = 60
# wait 10 seconds between clears
self.clearDelay = 10
self.lastCleared = time.time()
def add(self, objectHash = None):
with self.lock:
# add a new object into existing thread lists
if objectHash:
if objectHash not in self.hashes:
self.hashes[objectHash] = {'created': time.time(), 'sendCount': 0, 'peers': []}
for thread in threadingEnumerate():
if thread.isAlive() and hasattr(thread, 'peer') and \
thread.peer not in self.hashes[objectHash]['peers']:
self.hashes[objectHash]['peers'].append(thread.peer)
# add all objects into the current thread
else:
for objectHash in self.hashes:
if current_thread().peer not in self.hashes[objectHash]['peers']:
self.hashes[objectHash]['peers'].append(current_thread().peer)
def len(self):
self.clearHashes()
with self.lock:
return sum(1
for x in self.hashes if (self.hashes[x]['created'] + self.objectWait < time.time() or
self.hashes[x]['sendCount'] == 0))
def _progress(self):
with self.lock:
return float(sum(len(self.hashes[x]['peers'])
for x in self.hashes if (self.hashes[x]['created'] + self.objectWait < time.time()) or
self.hashes[x]['sendCount'] == 0))
def progress(self, raiseDeadline=True):
if self.maxLen < self._progress():
self.maxLen = self._progress()
if self.deadline < time.time():
if self.deadline > 0 and raiseDeadline:
raise PendingUploadDeadlineException
self.deadline = time.time() + 20
try:
return 1.0 - self._progress() / self.maxLen
except ZeroDivisionError:
return 1.0
def clearHashes(self, objectHash=None):
if objectHash is None:
if self.lastCleared > time.time() - self.clearDelay:
return
objects = self.hashes.keys()
else:
objects = objectHash,
with self.lock:
for i in objects:
try:
if self.hashes[i]['sendCount'] > 0 and (
len(self.hashes[i]['peers']) == 0 or
self.hashes[i]['created'] + self.objectWait < time.time()):
del self.hashes[i]
except KeyError:
pass
self.lastCleared = time.time()
def delete(self, objectHash=None):
if not hasattr(current_thread(), 'peer'):
return
if objectHash is None:
return
with self.lock:
try:
if objectHash in self.hashes and current_thread().peer in self.hashes[objectHash]['peers']:
self.hashes[objectHash]['sendCount'] += 1
self.hashes[objectHash]['peers'].remove(current_thread().peer)
except KeyError:
pass
self.clearHashes(objectHash)
def stop(self):
with self.lock:
self.hashes = {}
def threadEnd(self):
with self.lock:
for objectHash in self.hashes:
try:
if current_thread().peer in self.hashes[objectHash]['peers']:
self.hashes[objectHash]['peers'].remove(current_thread().peer)
except KeyError:
pass
self.clearHashes()
| mit | 2,504,208,822,498,780,700 | 39.48996 | 361 | 0.583515 | false | 4.443367 | false | false | false |
kaiyou/pyircbot | src/pyircbot/behavior.py | 1 | 6125 | #!/usr/bin/python
#
# PyIRCBot
# Copyright (C) Pierre Jaury 2011 <[email protected]>
#
# PyIRCBot is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ognbot is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from core import BotProtocol, botcommand
from twisted.internet.defer import Deferred
from twisted.internet.task import LoopingCall
from twisted.python import log
import shelve
import new
class LoggingBotProtocol(BotProtocol):
'''
I am a bot protocol which is able to log commands and messages to
a file-like object.
'''
def privmsg (self, user, channel, message):
log.msg ('incoming %s %s %s' % (user, channel, message))
super (LoggingBotProtocol, self).privmsg (user, channel, message)
def msg (self, channel, message):
log.msg ('outgoing %s %s' % (channel, message))
super (LoggingBotProtocol, self).msg (channel, message)
def command (self, out, command, *args):
log.msg ('command %s %s' % (command, ' '.join (args)))
super (LoggingBotProtocol, self).command (out, command, *args)
class AsynchronousCallBotProtocol(BotProtocol):
'''
I am a bot protocol which implements asynchronous queries to other bots
or services (even users if really needed for a check or anything)
For every actor I can interact with, you have to provide me with a
reference handshake, so that I know when they are finished talking
For instance, if one service called DummyServ replies 'Pong!' to the
message 'ping', just add {'DummyServ': ('ping', 'Pong!')} to your factory
and I will be able to interact with it (him).
I maintain a pool of pending requests for every actor. When an actor is
finished talking, I simply fire your callback and execute the next
pending request.
'''
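# A minimal configuration sketch (hypothetical actor name). Note that the
# handshake query is sent line by line, so a list of lines is the safest
# form:
#
# factory.sync = {'DummyServ': (['ping'], 'Pong!')}
#
# _addjob('DummyServ', 'some command') then sends 'some command' followed
# by 'ping' and returns a deferred that fires with every line DummyServ
# sends back before its 'Pong!' reply.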
def _sync (self, user, channel, message):
'''
This is called when a message is received from one of the actors
I am connected to
'''
if self._job[channel]:
query, stop = self.factory.sync[channel]
if not message == stop:
self._buffer[channel].append (message)
else:
self._job[channel].callback (self._buffer[channel])
self._buffer[channel] = []
self._nextjob (channel)
def _nextjob (self, channel):
'''
This is called to trigger the next job in the pool if available
'''
if len(self._pool[channel]) > 0:
query, stop = self.factory.sync[channel]
d, message = self._pool[channel].pop (0)
self.msg (channel, message)
for line in query:
self.msg (channel, line)
self._buffer[channel] = []
self._job[channel] = d
else:
self._job[channel] = None
def _addjob (self, channel, message):
'''
Use this method to queue a new request message for the actor
channel; the returned deferred fires with the buffered reply lines
'''
d = Deferred ()
self._pool[channel].append ((d, message))
if not self._job[channel]:
self._nextjob (channel)
return d
def connectionMade (self):
'''
Initialization of specific attributes
'''
self._pool = dict([(key, []) for key in self.factory.sync])
self._job = dict([(key, None) for key in self.factory.sync])
self._buffer = dict([(key, []) for key in self.factory.sync])
super(AsynchronousCallBotProtocol, self).connectionMade ()
def _handle (self, user, channel, message, wrap = False):
'''
Triggers the _sync method if necessary
'''
if channel in self.factory.sync:
self._sync (user, channel, message)
return super(AsynchronousCallBotProtocol, self)._handle (user, channel, message, wrap)
class AliasBotProtocol (BotProtocol):
'''
I am a bot protocol which implement command aliases
'''
def connectionMade (self):
'''
Initialization of specific attributes
'''
self._aliases = {}
self._aliases = shelve.open('aliases.db', flag='c', protocol=None,
writeback=True)
loop = LoopingCall (self._aliases.sync)
loop.start (10)
super(AliasBotProtocol, self).connectionMade ()
@botcommand
def setAlias (self, flow, out, user, channel, name, *command):
'''
\x02setAlias\x02 <name> <command line>
Saves the given command line as responding to the specified name
Every '=>' in the command line will be replaced by the piping pattern
Arguments to the alias can be retrieved using %(0)s, %(1)s, etc.
\x02Aliases shall not be piped to other commands for now.\x02
'''
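# Hypothetical example: "setAlias greet say Hello %(0)s" makes
# "greet world" expand to "say Hello world".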
if name in dir (self) or name.startswith ('_'):
out.append ('\x02Error\x02: illegal alias name')
else:
command = ' '.join (command).replace ('=>', '->')
self._aliases[name] = command
out.append ('\x02Saved %s as\x02: %s' % (name, command))
@botcommand
def listAliases (self, flow, out, user, channel):
'''
\x02listAliases\x02
Lists currently defined aliases
'''
if len (self._aliases.keys ()) == 0:
out.append ('\x02Notice\x02 No alias is currently defined')
for name, command in self._aliases.items ():
out.append ('\x02%s:\x02 %s' % (name, command))
@botcommand
def delAlias (self, flow, out, user, channel, name):
'''
\x02delAlias\x02 <name>
Deletes the specified alias
'''
if name not in self._aliases:
out.append ('\x02Warning\x02 Unknown alias %s' % name)
return
out.append ('Deleted alias \x02%s\x02' % name)
del self._aliases[name]
def _check (self, user, channel, command, args):
return (super(AliasBotProtocol, self)._check (user, channel, command, args)
or command in self._aliases)
def __getattr__ (self, name):
if name in self._aliases:
def f (self, flow, out, user, channel, *args):
args = dict (zip (map (str, range (len (args))), args))
d = self._handle (user, channel, self._aliases[name] % args, True)
d.callback (flow)
return d
return new.instancemethod (f, self, self.__class__)
| gpl-3.0 | 1,358,964,609,801,808,000 | 33.217877 | 88 | 0.691429 | false | 3.312601 | false | false | false |
tomchadwin/qgis2web | qgis2web/maindialog.py | 1 | 39316 | # -*- coding: utf-8 -*-
# qgis-ol3 Creates OpenLayers map from QGIS layers
# Copyright (C) 2014 Victor Olaya ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
from collections import defaultdict, OrderedDict
import webbrowser
# This import is to enable SIP API V2
# noinspection PyUnresolvedReferences
from qgis.core import (Qgis,
QgsWkbTypes,
QgsProject,
QgsMapLayer,
QgsVectorLayer,
QgsNetworkAccessManager,
QgsMessageLog)
# noinspection PyUnresolvedReferences
from qgis.PyQt.QtCore import (QObject,
QSettings,
pyqtSignal,
pyqtSlot,
QUrl,
QRect,
QByteArray,
QEvent,
Qt)
from qgis.PyQt.QtGui import (QIcon)
from qgis.PyQt.QtWidgets import (QAction,
QAbstractItemView,
QDialog,
QHBoxLayout,
QTreeWidgetItem,
QComboBox,
QListWidget,
QCheckBox,
QToolButton,
QWidget,
QTextBrowser)
from qgis.PyQt.uic import loadUiType
from qgis.PyQt.QtWebKitWidgets import QWebView, QWebInspector, QWebPage
from qgis.PyQt.QtWebKit import QWebSettings
import traceback
from . import utils
from qgis2web.configparams import (getParams,
specificParams,
specificOptions)
from qgis2web.olwriter import OpenLayersWriter
from qgis2web.leafletWriter import LeafletWriter
from qgis2web.mapboxWriter import MapboxWriter
from qgis2web.writerRegistry import (WRITER_REGISTRY)
from qgis2web.exporter import (EXPORTER_REGISTRY)
from qgis2web.feedbackDialog import FeedbackDialog
from qgis.gui import QgsColorButton
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
webkit_available = True
FORM_CLASS, _ = loadUiType(os.path.join(
os.path.dirname(__file__), 'ui_maindialog.ui'))
class MainDialog(QDialog, FORM_CLASS):
"""The main dialog of QGIS2Web plugin."""
items = {}
def __init__(self, iface, parent=None):
super(MainDialog, self).__init__(parent)
QDialog.__init__(self)
self.setupUi(self)
self.iface = iface
self.previewUrl = None
self.layer_search_combo = None
self.layer_filter_select = None
self.exporter_combo = None
self.feedback = FeedbackDialog(self)
self.feedback.setModal(True)
stgs = QSettings()
self.restoreGeometry(stgs.value("qgis2web/MainDialogGeometry",
QByteArray(), type=QByteArray))
self.verticalLayout_2.addStretch()
self.horizontalLayout_6.addStretch()
if stgs.value("qgis2web/previewOnStartup", Qt.Checked) == Qt.Checked:
self.previewOnStartup.setCheckState(Qt.Checked)
else:
self.previewOnStartup.setCheckState(Qt.Unchecked)
if stgs.value("qgis2web/closeFeedbackOnSuccess",
Qt.Checked) == Qt.Checked:
self.closeFeedbackOnSuccess.setCheckState(Qt.Checked)
else:
self.closeFeedbackOnSuccess.setCheckState(Qt.Unchecked)
self.previewFeatureLimit.setText(
stgs.value("qgis2web/previewFeatureLimit", "1000"))
self.appearanceParams.setSelectionMode(
QAbstractItemView.SingleSelection)
self.preview = None
if webkit_available:
widget = QWebView()
self.preview = widget
try:
# if os.environ["TRAVIS"]:
self.preview.setPage(WebPage())
except Exception:
print("Failed to set custom webpage")
webview = self.preview.page()
webview.setNetworkAccessManager(QgsNetworkAccessManager.instance())
self.preview.settings().setAttribute(
QWebSettings.DeveloperExtrasEnabled, True)
self.preview.settings().setAttribute(
QWebSettings.DnsPrefetchEnabled, True)
else:
widget = QTextBrowser()
widget.setText(self.tr('Preview is not available since QtWebKit '
'dependency is missing on your system'))
self.right_layout.insertWidget(0, widget)
self.populateConfigParams(self)
self.populate_layers_and_groups(self)
self.populateLayerSearch()
self.populateAttrFilter()
writer = WRITER_REGISTRY.createWriterFromProject()
self.setStateToWriter(writer)
self.exporter = EXPORTER_REGISTRY.createFromProject()
self.exporter_combo.setCurrentIndex(
self.exporter_combo.findText(self.exporter.name()))
self.exporter_combo.currentIndexChanged.connect(
self.exporterTypeChanged)
self.toggleOptions()
if webkit_available:
if self.previewOnStartup.checkState() == Qt.Checked:
self.autoUpdatePreview()
self.buttonPreview.clicked.connect(self.previewMap)
else:
self.buttonPreview.setDisabled(True)
QgsProject.instance().cleared.connect(self.reject)
self.layersTree.model().dataChanged.connect(self.populateLayerSearch)
self.layersTree.model().dataChanged.connect(self.populateAttrFilter)
self.ol3.clicked.connect(self.changeFormat)
self.leaflet.clicked.connect(self.changeFormat)
self.mapbox.clicked.connect(self.changeFormat)
self.buttonExport.clicked.connect(self.saveMap)
helpText = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"helpFile.md")
self.helpField.setSource(QUrl.fromLocalFile(helpText))
if webkit_available:
self.devConsole = QWebInspector(self.preview)
self.devConsole.setFixedHeight(0)
self.devConsole.setObjectName("devConsole")
self.devConsole.setPage(self.preview.page())
self.devConsole.hide()
self.right_layout.insertWidget(1, self.devConsole)
self.filter = devToggleFilter()
self.filter.devToggle.connect(self.showHideDevConsole)
self.installEventFilter(self.filter)
self.setModal(False)
@pyqtSlot(bool)
def showHideDevConsole(self, visible):
self.devConsole.setVisible(visible)
def changeFormat(self):
self.autoUpdatePreview()
self.toggleOptions()
def exporterTypeChanged(self):
new_exporter_name = self.exporter_combo.currentText()
try:
self.exporter = [
e for e in EXPORTER_REGISTRY.getExporters()
if e.name() == new_exporter_name][0]()
except Exception:
pass
def currentMapFormat(self):
"""
Returns the currently selected map writer type
"""
return self.getWriterFactory().type()
def getWriterFactory(self):
"""
Returns a factory to create the currently selected map writer
"""
if self.mapFormat.checkedButton() == self.ol3:
return OpenLayersWriter
elif self.mapFormat.checkedButton() == self.leaflet:
return LeafletWriter
elif self.mapFormat.checkedButton() == self.mapbox:
return MapboxWriter
def createWriter(self):
"""
Creates a writer object reflecting the current settings
in the dialog
"""
writer = self.getWriterFactory()()
(writer.layers, writer.groups, writer.popup,
writer.visible, writer.interactive, writer.json,
writer.cluster, writer.getFeatureInfo) = self.getLayersAndGroups()
writer.params = self.getParameters()
return writer
def showErrorMessage(self, error):
"""
Shows an error message in the preview window
"""
html = "<html>"
html += "<head></head>"
html += "<style>body {font-family: sans-serif;}</style>"
html += "<body><h1>Error</h1>"
html += "<p>qgis2web produced an error:</p><code>"
html += error
html += "</code></body></html>"
if self.preview:
self.preview.setHtml(html)
def showFeedbackMessage(self, title, message):
"""
Shows a feedback message in the preview window
"""
html = "<html>"
html += "<head></head>"
html += "<style>body {font-family: sans-serif;}</style>"
html += "<body><h1>{}</h1>".format(title)
html += "<p>{}</p>".format(message)
html += "</body></html>"
if self.preview:
self.preview.setHtml(html)
def toggleOptions(self):
currentWriter = self.getWriterFactory()
for param, value in specificParams.items():
treeParam = self.appearanceParams.findItems(
param, Qt.MatchExactly | Qt.MatchRecursive)[0]
if currentWriter == OpenLayersWriter:
if value == "OL3":
treeParam.setDisabled(False)
if treeParam.combo:
treeParam.combo.setEnabled(True)
else:
treeParam.setDisabled(True)
if treeParam.combo:
treeParam.combo.setEnabled(False)
else:
if value == "OL3":
treeParam.setDisabled(True)
if treeParam.combo:
treeParam.combo.setEnabled(False)
else:
treeParam.setDisabled(False)
if treeParam.combo:
treeParam.combo.setEnabled(True)
for option, value in specificOptions.items():
treeOptions = self.layersTree.findItems(option, Qt.MatchExactly |
Qt.MatchRecursive)
for treeOption in treeOptions:
if currentWriter == OpenLayersWriter:
if value == "OL3":
treeOption.setDisabled(False)
else:
treeOption.setDisabled(True)
else:
if value == "OL3":
treeOption.setDisabled(True)
else:
treeOption.setDisabled(False)
def createPreview(self):
writer = self.createWriter()
return writer.write(self.iface,
dest_folder=utils.tempFolder()).index_file
def shouldAutoPreview(self):
"""
Returns a tuple, with a bool for whether the preview should
automatically be generated, and a string for explanations
as to why the preview cannot be automatically generated
"""
writer = self.createWriter()
total_features = 0
for layer in writer.layers:
if isinstance(layer, QgsVectorLayer):
total_features += layer.featureCount()
if total_features > int(self.previewFeatureLimit.text()):
# Too many features => too slow!
return (False, self.tr('<p>A large number of features are '
'present in the map. Generating the '
'preview may take some time.</p>'
'<p>Click Update Preview to generate the '
'preview anyway.</p>'))
return (True, None)
def autoUpdatePreview(self):
"""
Triggered when a preview will be automatically generated, i.e.
not as a result of the user manually clicking the
Update Preview button.
"""
(auto_preview, message) = self.shouldAutoPreview()
if not auto_preview:
self.showFeedbackMessage(self.tr('Preview Map'), message)
else:
self.previewMap()
def previewMap(self):
preview_file = self.createPreview()
self.loadPreviewFile(preview_file)
def saveMap(self):
writer = self.createWriter()
write_folder = self.exporter.exportDirectory()
if not write_folder:
return
self.feedback.reset()
self.feedback.show()
results = writer.write(self.iface,
dest_folder=write_folder,
feedback=self.feedback)
self.feedback.showFeedback('Success')
if self.closeFeedbackOnSuccess.checkState() == Qt.Checked:
self.feedback.close()
result = self.exporter.postProcess(results, feedback=self.feedback)
if result and (not os.environ.get('CI') and
not os.environ.get('TRAVIS')):
webbrowser.open_new_tab(self.exporter.destinationUrl())
def populate_layers_and_groups(self, dlg):
"""Populate layers on QGIS into our layers and group tree view."""
root_node = QgsProject.instance().layerTreeRoot()
tree_groups = []
tree_layers = root_node.findLayers()
self.layers_item = QTreeWidgetItem()
self.layers_item.setText(0, "Layers and Groups")
self.layersTree.setColumnCount(3)
for tree_layer in tree_layers:
layer = tree_layer.layer()
if (layer.type() != QgsMapLayer.PluginLayer and
(layer.type() != QgsMapLayer.VectorLayer or
layer.wkbType() != QgsWkbTypes.NoGeometry) and
layer.customProperty("ol_layer_type") is None):
try:
# if layer.type() == QgsMapLayer.VectorLayer:
# testDump = layer.renderer().dump()
layer_parent = tree_layer.parent()
if layer_parent.parent() is None:
item = TreeLayerItem(self.iface, layer,
self.layersTree, dlg)
self.layers_item.addChild(item)
else:
if layer_parent not in tree_groups:
tree_groups.append(layer_parent)
except Exception:
QgsMessageLog.logMessage(traceback.format_exc(),
"qgis2web",
level=Qgis.Critical)
for tree_group in tree_groups:
group_name = tree_group.name()
group_layers = [
tree_layer.layer() for tree_layer in tree_group.findLayers()]
item = TreeGroupItem(group_name, group_layers, self.layersTree)
self.layers_item.addChild(item)
self.layersTree.addTopLevelItem(self.layers_item)
self.layersTree.expandAll()
self.layersTree.resizeColumnToContents(0)
self.layersTree.resizeColumnToContents(1)
for i in range(self.layers_item.childCount()):
item = self.layers_item.child(i)
if item.checkState(0) != Qt.Checked:
item.setExpanded(False)
def populateLayerSearch(self):
self.layer_search_combo.clear()
self.layer_search_combo.addItem("None")
(layers, groups, popup, visible, interactive,
json, cluster, getFeatureInfo) = self.getLayersAndGroups()
for count, layer in enumerate(layers):
if layer.type() == layer.VectorLayer:
options = []
fields = layer.fields()
for f in fields:
fieldIndex = fields.indexFromName(f.name())
editorWidget = layer.editorWidgetSetup(fieldIndex).type()
if editorWidget == 'Hidden':
continue
options.append(f.name())
for option in options:
displayStr = layer.name() + ": " + option
self.layer_search_combo.insertItem(0, displayStr)
sln = utils.safeName(layer.name())
self.layer_search_combo.setItemData(
self.layer_search_combo.findText(displayStr),
sln + "_" + str(count))
def populateAttrFilter(self):
self.layer_filter_select.clear()
(layers, groups, popup, visible, interactive,
json, cluster, getFeatureInfo) = self.getLayersAndGroups()
options = []
for count, layer in enumerate(layers):
if layer.type() == layer.VectorLayer:
fields = layer.fields()
for f in fields:
fieldIndex = fields.indexFromName(f.name())
editorWidget = layer.editorWidgetSetup(fieldIndex).type()
if editorWidget == 'Hidden':
continue
if utils.boilType(f.typeName()) in ["int", "str", "real",
"date", "bool",
"time", "datetime"]:
options.append([f.name() + ": " +
utils.boilType(f.typeName()),
layer.name()])
preCleanOptions = {}
for entry in options:
if entry[0] not in list(preCleanOptions.keys()):
preCleanOptions[entry[0]] = ": " + entry[1]
else:
preCleanOptions[entry[0]] = "| ".join(
[preCleanOptions[entry[0]], entry[1]])
options = []
for key, value in preCleanOptions.items():
options.append(key + value)
cleanOptions = list(set(options))
for option in cleanOptions:
self.layer_filter_select.insertItem(0, option)
def configureExporter(self):
self.exporter.configure()
def populateConfigParams(self, dlg):
""" Populates the dialog with option items and widgets """
self.items = defaultdict(dict)
tree = dlg.appearanceParams
configure_export_action = QAction('...', self)
configure_export_action.triggered.connect(self.configureExporter)
params = getParams(configure_exporter_action=configure_export_action)
for group, settings in params.items():
if group != "Data export":
item = QTreeWidgetItem()
item.setText(0, group)
for param, value in settings.items():
subitem = self.createOptionItem(tree_widget=tree,
parent_item=item,
parameter=param,
default_value=value)
item.addChild(subitem)
self.items[group][param] = subitem
self.appearanceParams.addTopLevelItem(item)
item.sortChildren(0, Qt.AscendingOrder)
self.appearanceParams.expandAll()
self.appearanceParams.resizeColumnToContents(0)
self.appearanceParams.resizeColumnToContents(1)
self.layer_search_combo.removeItem(1)
self.layer_filter_select.takeItem(1)
# configure export params in separate tab
exportTree = dlg.exportParams
for group, settings in params.items():
if group == "Data export":
item = QTreeWidgetItem()
item.setText(0, group)
for param, value in settings.items():
subitem = self.createOptionItem(tree_widget=exportTree,
parent_item=item,
parameter=param,
default_value=value)
item.addChild(subitem)
self.items[group][param] = subitem
self.exportParams.addTopLevelItem(item)
item.sortChildren(0, Qt.AscendingOrder)
self.exportParams.expandAll()
self.exportParams.resizeColumnToContents(0)
self.exportParams.resizeColumnToContents(1)
def createOptionItem(self, tree_widget, parent_item,
parameter, default_value):
"""create the tree item corresponding to an option parameter"""
action = None
if isinstance(default_value, dict):
action = default_value['action']
default_value = default_value['option']
subitem = TreeSettingItem(parent_item, tree_widget,
parameter, default_value, action)
if parameter == 'Layer search':
self.layer_search_combo = subitem.combo
if parameter == 'Attribute filter':
self.layer_filter_select = subitem.list
elif parameter == 'Exporter':
self.exporter_combo = subitem.combo
return subitem
def setStateToWriter(self, writer):
"""
Sets the dialog state to match the specified writer
"""
self.selectMapFormat(writer)
self.setStateToParams(writer.params)
def setStateToParams(self, params):
"""
Sets the dialog state to match the specified parameters
"""
for group, settings in self.items.items():
for param, item in settings.items():
value = params[group][param]
item.setValue(value)
def selectMapFormat(self, writer):
"""
Updates dialog state to match the specified writer format
"""
self.ol3.setChecked(isinstance(writer, OpenLayersWriter))
self.leaflet.setChecked(isinstance(writer, LeafletWriter))
self.mapbox.setChecked(isinstance(writer, MapboxWriter))
def loadPreviewFile(self, file):
"""
Loads a web based preview from a local file path
"""
self.previewUrl = QUrl.fromLocalFile(file)
if self.preview:
self.preview.settings().clearMemoryCaches()
self.preview.setUrl(self.previewUrl)
def getParameters(self):
parameters = defaultdict(dict)
for group, settings in self.items.items():
for param, item in settings.items():
if param in ('Widget Icon', 'Widget Background'):
parameters[group][param] = item._value.color().name()
else:
parameters[group][param] = item.value()
if param == "Layer search":
parameters["Appearance"]["Search layer"] = (
self.layer_search_combo.itemData(
self.layer_search_combo.currentIndex()))
if param == "Attribute filter":
parameters["Appearance"]["Attribute filter"] = (
self.layer_filter_select.selectedItems())
return parameters
def saveParameters(self):
"""
Saves current dialog state to project
"""
WRITER_REGISTRY.saveWriterToProject(self.createWriter())
EXPORTER_REGISTRY.writeToProject(self.exporter)
def getLayersAndGroups(self):
layers = []
groups = {}
popup = []
visible = []
interactive = []
json = []
cluster = []
getFeatureInfo = []
for i in range(self.layers_item.childCount()):
item = self.layers_item.child(i)
if isinstance(item, TreeLayerItem):
if item.checkState(0) == Qt.Checked:
layers.append(item.layer)
popup.append(item.popup)
visible.append(item.visible)
interactive.append(item.interactive)
json.append(item.json)
cluster.append(item.cluster)
getFeatureInfo.append(item.getFeatureInfo)
else:
group = item.name
groupLayers = []
if item.checkState(0) != Qt.Checked:
continue
for layer in item.layers:
groupLayers.append(layer)
layers.append(layer)
popup.append({})
if item.visible:
visible.append(True)
else:
visible.append(False)
if item.interactive:
interactive.append(True)
else:
interactive.append(False)
if hasattr(item, "json") and item.json:
json.append(True)
else:
json.append(False)
if hasattr(item, "cluster") and item.cluster:
cluster.append(True)
else:
cluster.append(False)
if hasattr(item, "getFeatureInfo") and item.getFeatureInfo:
getFeatureInfo.append(True)
else:
getFeatureInfo.append(False)
groups[group] = groupLayers[::-1]
return (layers[::-1],
groups,
popup[::-1],
visible[::-1],
interactive[::-1],
json[::-1],
cluster[::-1],
getFeatureInfo[::-1])
def reject(self):
self.saveParameters()
(layers, groups, popup, visible, interactive,
json, cluster, getFeatureInfo) = self.getLayersAndGroups()
try:
for layer, pop, vis, inter in zip(layers, popup, visible,
interactive): # 'inter' avoids shadowing the built-in int
attrDict = {}
for attr in pop:
attrDict['attr'] = pop[attr]
layer.setCustomProperty("qgis2web/popup/" + attr,
pop[attr])
layer.setCustomProperty("qgis2web/Visible", vis)
layer.setCustomProperty("qgis2web/Interactive", int)
except Exception:
pass
QSettings().setValue(
"qgis2web/MainDialogGeometry", self.saveGeometry())
QSettings().setValue("qgis2web/previewOnStartup",
self.previewOnStartup.checkState())
QSettings().setValue("qgis2web/closeFeedbackOnSuccess",
self.closeFeedbackOnSuccess.checkState())
QSettings().setValue("qgis2web/previewFeatureLimit",
self.previewFeatureLimit.text())
QDialog.close(self)
def closeEvent(self, event):
try:
if self.devConsole or self.devConsole.isVisible() and self.preview:
del self.devConsole
del self.preview
self.reject()
event.accept()
except Exception:
pass
class devToggleFilter(QObject):
devToggle = pyqtSignal(bool)
def eventFilter(self, obj, event):
try:
if event.type() == QEvent.KeyPress:
if event.key() == Qt.Key_F12:
self.devToggle.emit(not obj.devConsole.isVisible())
if obj.devConsole.height() != 0:
obj.devConsole.setFixedHeight(0)
else:
obj.devConsole.setFixedHeight(168)
return True
except Exception:
pass
return False
class TreeGroupItem(QTreeWidgetItem):
groupIcon = QIcon(os.path.join(os.path.dirname(__file__), "icons",
"group.gif"))
def __init__(self, name, layers, tree):
QTreeWidgetItem.__init__(self)
self.layers = layers
self.name = name
self.setText(0, name)
self.setIcon(0, self.groupIcon)
self.setCheckState(0, Qt.Checked)
self.visibleItem = QTreeWidgetItem(self)
self.visibleCheck = QCheckBox()
self.visibleCheck.setChecked(True)
self.visibleItem.setText(0, "Visibility")
self.addChild(self.visibleItem)
tree.setItemWidget(self.visibleItem, 1, self.visibleCheck)
self.interactiveItem = QTreeWidgetItem(self)
self.interactiveCheck = QCheckBox()
self.interactiveCheck.setChecked(True)
self.interactiveItem.setText(0, "Popups")
self.addChild(self.interactiveItem)
tree.setItemWidget(self.interactiveItem, 1, self.interactiveCheck)
@property
def visible(self):
return self.visibleCheck.isChecked()
@property
def interactive(self):
return self.interactiveCheck.isChecked()
class TreeLayerItem(QTreeWidgetItem):
layerIcon = QIcon(os.path.join(os.path.dirname(__file__), "icons",
"layer.png"))
def __init__(self, iface, layer, tree, dlg):
QTreeWidgetItem.__init__(self)
self.iface = iface
self.layer = layer
self.setText(0, layer.name())
self.setIcon(0, self.layerIcon)
project = QgsProject.instance()
if project.layerTreeRoot().findLayer(layer.id()).isVisible():
self.setCheckState(0, Qt.Checked)
else:
self.setCheckState(0, Qt.Unchecked)
self.visibleItem = QTreeWidgetItem(self)
self.visibleCheck = QCheckBox()
vis = layer.customProperty("qgis2web/Visible", True)
if vis == 0 or str(vis).lower() == "false":
self.visibleCheck.setChecked(False)
else:
self.visibleCheck.setChecked(True)
self.visibleItem.setText(0, "Visible")
self.addChild(self.visibleItem)
tree.setItemWidget(self.visibleItem, 1, self.visibleCheck)
self.interactiveItem = QTreeWidgetItem(self)
self.interactiveCheck = QCheckBox()
interactive = True # renamed from 'int' to avoid shadowing the built-in
if interactive == 0 or str(interactive).lower() == "false":
self.interactiveCheck.setChecked(False)
else:
self.interactiveCheck.setChecked(True)
self.interactiveItem.setText(0, "Popups")
self.addChild(self.interactiveItem)
tree.setItemWidget(self.interactiveItem, 1, self.interactiveCheck)
if layer.type() == layer.VectorLayer:
if layer.providerType() == 'WFS':
self.jsonItem = QTreeWidgetItem(self)
self.jsonCheck = QCheckBox()
if layer.customProperty("qgis2web/Encode to JSON") == 2:
self.jsonCheck.setChecked(True)
self.jsonItem.setText(0, "Encode to JSON")
self.jsonCheck.stateChanged.connect(self.changeJSON)
self.addChild(self.jsonItem)
tree.setItemWidget(self.jsonItem, 1, self.jsonCheck)
if layer.geometryType() == QgsWkbTypes.PointGeometry:
self.clusterItem = QTreeWidgetItem(self)
self.clusterCheck = QCheckBox()
if layer.customProperty("qgis2web/Cluster") == 2:
self.clusterCheck.setChecked(True)
self.clusterItem.setText(0, "Cluster")
self.clusterCheck.stateChanged.connect(self.changeCluster)
self.addChild(self.clusterItem)
tree.setItemWidget(self.clusterItem, 1, self.clusterCheck)
self.popupItem = QTreeWidgetItem(self)
self.popupItem.setText(0, "Popup fields")
options = []
fields = self.layer.fields()
for f in fields:
fieldIndex = fields.indexFromName(f.name())
editorWidget = layer.editorWidgetSetup(fieldIndex).type()
if editorWidget == 'Hidden':
continue
options.append(f.name())
for option in options:
self.attr = QTreeWidgetItem(self)
self.attrWidget = QComboBox()
self.attrWidget.addItem("no label")
self.attrWidget.addItem("inline label")
self.attrWidget.addItem("header label")
custProp = layer.customProperty("qgis2web/popup/" + option)
if (custProp != "" and custProp is not None):
self.attrWidget.setCurrentIndex(
self.attrWidget.findText(
layer.customProperty("qgis2web/popup/" + option)))
self.attr.setText(1, option)
self.popupItem.addChild(self.attr)
tree.setItemWidget(self.attr, 2, self.attrWidget)
self.addChild(self.popupItem)
else:
if layer.providerType() == 'wms':
self.getFeatureInfoItem = QTreeWidgetItem(self)
self.getFeatureInfoCheck = QCheckBox()
if layer.customProperty("qgis2web/GetFeatureInfo") == 2:
self.getFeatureInfoCheck.setChecked(True)
self.getFeatureInfoItem.setText(0, "Enable GetFeatureInfo?")
self.getFeatureInfoCheck.stateChanged.connect(
self.changeGetFeatureInfo)
self.addChild(self.getFeatureInfoItem)
tree.setItemWidget(self.getFeatureInfoItem, 1,
self.getFeatureInfoCheck)
@property
def popup(self):
popup = []
self.tree = self.treeWidget()
for p in range(self.childCount()):
item = self.child(p).text(1)
if item != "":
popupVal = self.tree.itemWidget(self.child(p), 2).currentText()
pair = (item, popupVal)
popup.append(pair)
popup = OrderedDict(popup)
return popup
@property
def visible(self):
return self.visibleCheck.isChecked()
@property
def interactive(self):
return self.interactiveCheck.isChecked()
@property
def json(self):
try:
return self.jsonCheck.isChecked()
except Exception:
return False
@property
def cluster(self):
try:
return self.clusterCheck.isChecked()
except Exception:
return False
@property
def getFeatureInfo(self):
try:
return self.getFeatureInfoCheck.isChecked()
except Exception:
return False
def changeJSON(self, isJSON):
self.layer.setCustomProperty("qgis2web/Encode to JSON", isJSON)
def changeCluster(self, isCluster):
self.layer.setCustomProperty("qgis2web/Cluster", isCluster)
def changeGetFeatureInfo(self, isGetFeatureInfo):
self.layer.setCustomProperty("qgis2web/GetFeatureInfo",
isGetFeatureInfo)
class TreeSettingItem(QTreeWidgetItem):
def __init__(self, parent, tree, name, value, action=None):
QTreeWidgetItem.__init__(self, parent)
self.parent = parent
self.tree = tree
self.name = name
self._value = value
self.combo = None
self.list = None
self.setText(0, name)
widget = None
if isinstance(value, QgsColorButton):
widget = value
elif isinstance(value, bool):
if value:
self.setCheckState(1, Qt.Checked)
else:
self.setCheckState(1, Qt.Unchecked)
elif isinstance(value, tuple):
self.combo = QComboBox()
self.combo.setSizeAdjustPolicy(0)
for option in value:
self.combo.addItem(option)
widget = self.combo
elif isinstance(value, list):
self.list = QListWidget()
self.list.setSizeAdjustPolicy(0)
self.list.setSelectionMode(QListWidget.MultiSelection)
for option in value:
self.list.addItem(option)
widget = self.list
else:
self.setText(1, str(value)) # Python 3 has no unicode() built-in
if action:
layout = QHBoxLayout()
layout.setMargin(0)
if widget:
layout.addWidget(widget)
button = QToolButton()
button.setDefaultAction(action)
button.setText(action.text())
layout.addWidget(button)
layout.addStretch(1)
widget = QWidget()
widget.setLayout(layout)
if widget:
self.tree.setItemWidget(self, 1, widget)
def setValue(self, value):
if isinstance(value, bool):
if value:
self.setCheckState(1, Qt.Checked)
else:
self.setCheckState(1, Qt.Unchecked)
elif self.combo:
index = self.combo.findText(value)
if index != -1:
self.combo.setCurrentIndex(index)
else:
self.setText(1, str(value))
def value(self):
if isinstance(self._value, bool):
return self.checkState(1) == Qt.Checked
elif isinstance(self._value, (int, float)):
return float(self.text(1))
elif isinstance(self._value, tuple):
return self.combo.currentText()
else:
return self.text(1)
class WebPage(QWebPage):
"""
Makes it possible to use a Python logger to print javascript
console messages
"""
def __init__(self, logger=None, parent=None):
super(WebPage, self).__init__(parent)
def javaScriptConsoleMessage(self, msg, lineNumber, sourceID):
if (msg != ("Unable to get image data from canvas because "
"the canvas has been tainted by cross-origin data.") and
msg != ("Deprecated include of L.Mixin.Events: this property "
"will be removed in future releases, please inherit "
"from L.Evented instead.") and
os.environ.get('CI') and os.environ.get('TRAVIS')):
raise jsException("JS %s:%d\n%s" % (sourceID, lineNumber, msg),
Exception())
class jsException(Exception):
def __init__(self, message, errors):
# Call the base class constructor with the parameters it needs
super(jsException, self).__init__(message)
# Now for your custom code...
self.errors = errors
| gpl-2.0 | -2,166,607,151,134,969,000 | 38.83384 | 79 | 0.555321 | false | 4.630314 | false | false | false |
inean/python-oauth2 | oauth2/__init__.py | 1 | 24530 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
READ_BUFFER_CHUNK_SIZE = 128 * 1024
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
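# e.g. build_authenticate_header('http://photos.example.net/') returns
# {'WWW-Authenticate': 'OAuth realm="http://photos.example.net/"'}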
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
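# For example, to_unicode('caf\xc3\xa9') returns u'caf\xe9', while bytes
# that are not valid utf-8 (e.g. '\xff') raise TypeError.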
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
Raise TypeError if x is a str or if x is an iterable which
contains a str.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_session_handle(length=10):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
user. It's kind of the OAuth client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
session_handle = None
expires_in = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def set_session_handle(self, handle=None):
if handle is not None:
self.session_handle = handle
else:
self.session_handle = generate_session_handle()
def set_expires_in(self, expires):
self.expires_in = expires
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
        # Token expiration. Only send session_handle if the server has set
        # a proper expires_in value (integer > 0).
if self.session_handle is not None and self.expires_in:
data['oauth_session_handle'] = self.session_handle
data['expiresIn'] = self.expires_in
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
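# Illustrative round-trip sketch (placeholder values, not part of the original
# module; the urlencode ordering of the two fields may vary):
#
#     t = Token(key='abc', secret='xyz')
#     s = t.to_string()           # e.g. 'oauth_token_secret=xyz&oauth_token=abc'
#     t2 = Token.from_string(s)   # t2.key == 'abc', t2.secret == 'xyz'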
def setter(attr):
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
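# Note on the decorator above: `setter` turns the decorated function into the
# setter of a property whose getter and deleter simply go through
# self.__dict__. The `@setter` methods on Request below use it so that
# assignments to Request.url and Request.method run normalization logic.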
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
        # Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
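    # For example (illustrative), the parameters {'b': '2', 'a': '1',
    # 'oauth_signature': 'sig'} normalize to 'a=1&b=2': the signature is
    # dropped, pairs are sorted, and '+' becomes '%20'.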
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
hsh = sha()
#pylint: disable-msg=E1103
if self.body and hasattr(self.body, 'tell'):
                # remember the current position
                curpos = self.body.tell()
                while True:
                    # read in 128 KiB chunks
                    chunk = self.body.read(READ_BUFFER_CHUNK_SIZE)
                    if chunk == '':
                        break
                    # update the hash
                    hsh.update(chunk)
                # seek back to where we started
                self.body.seek(curpos)
else:
# default implementation
hsh.update(self.body)
self['oauth_body_hash'] = base64.b64encode(hsh.digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
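# Illustrative end-to-end signing sketch (placeholder credentials and URL,
# not part of the original module):
#
#     consumer = Consumer(key='my-app-key', secret='my-app-secret')
#     token = Token(key='access-token', secret='access-token-secret')
#     req = Request.from_consumer_and_token(consumer, token, 'GET',
#                                           'http://example.com/photos')
#     req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
#     headers = req.to_header()  # {'Authorization': 'OAuth realm="", ...'}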
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
| mit | 6,820,572,739,031,814,000 | 32.37415 | 265 | 0.600163 | false | 4.217675 | false | false | false |
pombredanne/cliques | poll/views.py | 1 | 5047 | from collections import defaultdict
import datetime
import logging
import operator
import random
from django.contrib.auth import get_user_model
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound
from django.utils.timezone import utc
from django.views.generic import CreateView, DetailView
import notify.utils
from poll.models import Vote, Submission, SubmissionForm, Poll
from website.models import Post, UserProfile, Comment
logger = logging.getLogger()
def vote(request, poll_stub, submission_id):
#TODO(pcsforeducation) make this AJAX and POST only.
# if request.method != "POST":
# return HttpResponseBadRequest('Must be a POST')
try:
submission = Submission.objects.get(id=submission_id)
    except Submission.DoesNotExist:
return HttpResponseNotFound("Submission does not exist: {}".format(
submission_id
))
try:
prev_vote = Vote.objects.get(user=request.user)
except Vote.DoesNotExist:
# First vote
Vote(user=request.user, submission=submission).save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
try:
# Switch vote or undo vote
if prev_vote.submission == submission:
# Undo
prev_vote.delete()
else:
# Switch
prev_vote.delete()
Vote(user=request.user, submission=submission).save()
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
except:
logging.exception('Could not switch vote')
raise
def cron(request):
# Get all the votes (may need to improve filtering on poll here).
#TODO(pcsforeducation) support multiple polls
poll = Poll.objects.all()[0]
submissions = defaultdict(int)
votes = Vote.objects.all()
for vote in votes:
submissions[vote.submission.id] += 1
# Eww.
top_submissions = list(reversed(sorted(submissions.iteritems(),
key=operator.itemgetter(1))))
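    # top_submissions is a list of (submission_id, vote_count) pairs sorted
    # by vote count, highest first, e.g. [(12, 5), (7, 3), (3, 3)]
    # (illustrative values).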
logging.info("Top submissions: {}".format(top_submissions))
if top_submissions:
top_votes = top_submissions[0][1]
if top_votes > 0:
# Choose winning vote
winning_submissions = []
for submission in top_submissions:
logging.info("Testing submission: {}, top_votes: {}, equal? {}"
.format(submission, top_votes,
submission[0] == top_votes))
if submission[1] == top_votes:
winning_submissions.append(submission[0])
winning_index = random.randrange(0, len(winning_submissions))
_post_winning_submission(poll, winning_submissions[winning_index])
seven_days_ago = datetime.datetime.utcnow().replace(tzinfo=utc) \
- datetime.timedelta(days=7)
Submission.objects.filter(submitted__lt=seven_days_ago).delete()
return HttpResponse('ok')
def _post_winning_submission(poll, submission_id):
user = UserProfile.objects.get(username=poll.bot_name)
submission = Submission.objects.get(id=submission_id)
post = Post(user=user,
category=poll.category,
title="{}: {}".format(poll.stub, submission.title),
url=submission.url,
type='image')
post.save()
text = poll.winning_text.format(
title=poll.title,
stub=poll.stub,
username=submission.user.username)
comment = Comment(user=user,
post=post,
text=text)
comment.save()
winning_user = UserProfile.objects.get(id=submission.user.id)
winning_user.poll_votes += 1
winning_user.save()
submission.delete()
# Notify the winner they won
notify.utils.notify_users(
user_ids=[winning_user.id],
text="Your {} submission won!".format(poll.title),
link="http://www.slashertraxx.com/post/{}/".format(post.id),
type='comment',
level='info')
class PollDetailView(DetailView):
model = Poll
slug_field = 'stub'
slug_url_kwarg = 'stub'
template_name = 'poll/submission.html'
def get_context_data(self, **kwargs):
context = super(PollDetailView, self).get_context_data(**kwargs)
try:
context['vote'] = Vote.objects.get(user=self.request.user.id)
except Vote.DoesNotExist:
pass
context['form'] = SubmissionForm
return context
class SubmissionFormView(CreateView):
model = Submission
success_url = '/'
fields = ['title', 'url']
# template_name = 'website/post.html'
def form_valid(self, form):
stub = self.kwargs.get('stub')
user_model = get_user_model()
form.instance.user = user_model.objects.get(id=self.request.user.id)
form.instance.poll = Poll.objects.get(stub=stub)
self.object = form.save()
self.success_url = "/poll/{}/".format(stub)
return super(SubmissionFormView, self).form_valid(form)
| apache-2.0 | 8,324,600,533,763,739,000 | 33.806897 | 80 | 0.624728 | false | 4.0376 | false | false | false |
sebastian-software/jasy | jasy/core/Util.py | 1 | 4039 | #
# Jasy - Web Tooling Framework
# Copyright 2010-2012 Zynga Inc.
# Copyright 2013-2014 Sebastian Werner
#
import re
import os
import hashlib
import tempfile
import subprocess
import sys
import shlex
import jasy.core.Console as Console
import jasy.core.Base62 as Base62
def executeCommand(args, failMessage=None, path=None, wrapOutput=True):
"""
Executes the given process and outputs failMessage when errors happen.
    :param args: Shell command to execute, as a string or argument list
    :type args: str or list
:param failMessage: Message for exception when command fails
:type failMessage: str
:param path: Directory path where the command should be executed
:type path: str
:raise Exception: Raises an exception whenever the shell command fails in execution
:type wrapOutput: bool
:param wrapOutput: Whether shell output should be wrapped and returned (and passed through to Console.debug())
"""
if isinstance(args, str):
args = shlex.split(args)
prevpath = os.getcwd()
# Execute in custom directory
if path:
path = os.path.abspath(os.path.expanduser(path))
os.chdir(path)
Console.debug("Executing command: %s", " ".join(args))
Console.indent()
# Using shell on Windows to resolve binaries like "git"
if not wrapOutput:
returnValue = subprocess.call(args, shell=sys.platform == "win32")
result = returnValue
else:
output = tempfile.TemporaryFile(mode="w+t")
returnValue = subprocess.call(args, stdout=output, stderr=output, shell=sys.platform == "win32")
output.seek(0)
result = output.read().strip("\n\r")
output.close()
# Change back to previous path
os.chdir(prevpath)
if returnValue != 0 and failMessage:
raise Exception("Error during executing shell command: %s (%s)" % (failMessage, result))
if wrapOutput:
for line in result.splitlines():
Console.debug(line)
Console.outdent()
return result
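# Usage sketch (illustrative; the git command and path are assumptions, not
# part of Jasy itself):
#
#     revision = executeCommand("git rev-parse HEAD",
#                               failMessage="Could not read Git revision",
#                               path="~/myproject")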
SIPHASH_SUPPORTED = False
try:
import siphash
SIPHASH_SUPPORTED = True
except:
pass
def generateChecksum(key, method="base62"):
"""
Generates a unique SHA1 based hash/checksum encoded as Base62 or Hex depending on the given parameters.
:param key:
:type key: str
:param method:
:type method: str
"""
# Alternative hashing method using SIP keys:
#
# https://github.com/majek/pysiphash (Python library)
# https://github.com/jedisct1/siphash-js (Node/JS library - for Core)
#
# if SIPHASH_SUPPORTED:
# sipkey = ("JASY" * 4).encode("ascii")
# self.__checksum2 = siphash.SipHash_2_4(sipkey).update(self.__key.encode("ascii")).hexdigest()
# print("SIP Checksum: %s" % self.__checksum2.decode("ascii"))
sha1 = hashlib.sha1(key.encode("ascii"))
if method == "base62":
return Base62.encodeArrayToString(sha1.digest())
else:
return sha1.hexdigest()
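# For example, generateChecksum("hello") yields a Base62-encoded SHA1 digest,
# while generateChecksum("hello", method="hex") yields the plain hex form
# "aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d".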
def getKey(data, key, default=None):
"""
Returns the key from the data if available or the given default.
:param data: Data structure to inspect
:type data: dict
:param key: Key to lookup in dictionary
:type key: str
:param default: Default value to return when key is not set
:type default: any
"""
if key in data:
return data[key]
else:
return default
__REGEXP_DASHES = re.compile(r"\-+([\S]+)?")
__REGEXP_HYPHENATE = re.compile(r"([A-Z])")
def __camelizeHelper(match):
result = match.group(1)
return result[0].upper() + result[1:].lower()
def __hyphenateHelper(match):
return "-%s" % match.group(1).lower()
def camelize(str):
"""
Returns a camelized version of the incoming string: foo-bar-baz => fooBarBaz
:param str: Input string
"""
return __REGEXP_DASHES.sub(__camelizeHelper, str)
def hyphenate(str):
"""Returns a hyphenated version of the incoming string: fooBarBaz => foo-bar-baz
:param str: Input string
"""
return __REGEXP_HYPHENATE.sub(__hyphenateHelper, str)
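# Examples:
#
#     camelize("foo-bar")      # -> "fooBar"
#     hyphenate("fooBarBaz")   # -> "foo-bar-baz"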
| mit | 8,754,968,981,034,551,000 | 24.402516 | 114 | 0.657836 | false | 3.719153 | false | false | false |
pozytywnie/webapp-health-monitor | webapp_health_monitor/management/commands/verify.py | 1 | 1307 | import importlib
import sys
from django.apps import apps
from django.core.management.base import BaseCommand
from webapp_health_monitor.verification_suit import VerificationSuit
class Command(BaseCommand):
SUBMODULE_NAME = 'verificators'
def add_arguments(self, parser):
parser.add_argument('--tag', type=str, default=[], action='append', dest='tags')
def handle(self, tags, **kwargs):
submodules = self._get_verificator_modules()
for submodule in submodules:
try:
importlib.import_module(submodule)
except ImportError as e:
if not self._import_error_concerns_verificator(submodule, e):
raise e
result = VerificationSuit(tags).run()
self.stdout.write('{}\n'.format(result.report()))
sys.exit(result.has_failed())
def _get_verificator_modules(self):
for app in apps.get_app_configs():
yield '.'.join([app.module.__name__, self.SUBMODULE_NAME])
def _import_error_concerns_verificator(self, submodule, error):
if sys.version_info >= (3, 0):
return str(error) == "No module named '{}'".format(submodule)
else:
return error.message == "No module named {}".format(
self.SUBMODULE_NAME)
| mit | -4,889,517,402,081,881,000 | 35.305556 | 88 | 0.625096 | false | 4.04644 | false | false | false |
MoRgUiJu/morguiju.repo | plugin.video.pelisalacarta/channels/pelisadicto.py | 1 | 10054 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for cuevana
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import sys
import urlparse
from core import config
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
DEBUG = config.get_setting("debug")
def mainlist(item):
logger.info("[pelisadicto.py] mainlist")
itemlist = []
itemlist.append( Item(channel=item.channel, title="Últimas agregadas" , action="agregadas", url="http://pelisadicto.com", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel, title="Listado por género" , action="porGenero", url="http://pelisadicto.com"))
itemlist.append( Item(channel=item.channel, title="Buscar" , action="search", url="http://pelisadicto.com") )
return itemlist
def porGenero(item):
logger.info("[pelisadicto.py] porGenero")
itemlist = []
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Acción",url="http://pelisadicto.com/genero/Acción/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Adulto",url="http://pelisadicto.com/genero/Adulto/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Animación",url="http://pelisadicto.com/genero/Animación/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Aventura",url="http://pelisadicto.com/genero/Aventura/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Biográfico",url="http://pelisadicto.com/genero/Biográfico/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Ciencia Ficción",url="http://pelisadicto.com/genero/Ciencia Ficción/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Cine Negro",url="http://pelisadicto.com/genero/Cine Negro/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Comedia",url="http://pelisadicto.com/genero/Comedia/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Corto",url="http://pelisadicto.com/genero/Corto/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Crimen",url="http://pelisadicto.com/genero/Crimen/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Deporte",url="http://pelisadicto.com/genero/Deporte/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Documental",url="http://pelisadicto.com/genero/Documental/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Drama",url="http://pelisadicto.com/genero/Drama/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Familiar",url="http://pelisadicto.com/genero/Familiar/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Fantasía",url="http://pelisadicto.com/genero/Fantasía/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Guerra",url="http://pelisadicto.com/genero/Guerra/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Historia",url="http://pelisadicto.com/genero/Historia/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Misterio",url="http://pelisadicto.com/genero/Misterio/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Música",url="http://pelisadicto.com/genero/Música/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Musical",url="http://pelisadicto.com/genero/Musical/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Romance",url="http://pelisadicto.com/genero/Romance/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Terror",url="http://pelisadicto.com/genero/Terror/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Thriller",url="http://pelisadicto.com/genero/Thriller/1", viewmode="movie_with_plot"))
itemlist.append( Item(channel=item.channel , action="agregadas" , title="Western",url="http://pelisadicto.com/genero/Western/1", viewmode="movie_with_plot"))
return itemlist
def search(item,texto):
logger.info("[pelisadicto.py] search")
'''
texto_get = texto.replace(" ","%20")
texto_post = texto.replace(" ","+")
item.url = "http://pelisadicto.com/buscar/%s?search=%s" % (texto_get,texto_post)
'''
texto_post = texto.replace(" ","+")
item.url = "http://pelisadicto.com/buscar/%s" % texto
try:
return agregadas(item)
    # Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def agregadas(item):
logger.info("[pelisadicto.py] agregadas")
itemlist = []
'''
    # Download the page
if "?search=" in item.url:
url_search = item.url.split("?search=")
data = scrapertools.cache_page(url_search[0], url_search[1])
else:
data = scrapertools.cache_page(item.url)
logger.info("data="+data)
'''
data = scrapertools.cache_page(item.url)
logger.info("data="+data)
    # Extract the entries
fichas = re.sub(r"\n|\s{2}","",scrapertools.get_match(data,'<ul class="thumbnails">(.*?)</ul>'))
#<li class="col-xs-6 col-sm-2 CALDVD"><a href="/pelicula/101-dalmatas" title="Ver 101 dálmatas Online" class="thumbnail thumbnail-artist-grid"><img class="poster" style="width: 180px; height: 210px;" src="/img/peliculas/101-dalmatas.jpg" alt="101 dálmatas"/><div class="calidad">DVD</div><div class="idiomas"><img src="/img/1.png" height="20" width="30" /></div><div class="thumbnail-artist-grid-name-container-1"><div class="thumbnail-artist-grid-name-container-2"><span class="thumbnail-artist-grid-name">101 dálmatas</span></div></div></a></li>
patron = 'href="([^"]+)".*?' # url
patron+= 'src="([^"]+)" ' # thumbnail
patron+= 'alt="([^"]+)' # title
matches = re.compile(patron,re.DOTALL).findall(fichas)
for url,thumbnail,title in matches:
url=urlparse.urljoin(item.url,url)
thumbnail = urlparse.urljoin(url,thumbnail)
itemlist.append( Item(channel=item.channel, action="findvideos", title=title+" ", fulltitle=title , url=url , thumbnail=thumbnail , show=title) )
    # Pagination
try:
#<ul class="pagination"><li class="active"><span>1</span></li><li><span><a href="2">2</a></span></li><li><span><a href="3">3</a></span></li><li><span><a href="4">4</a></span></li><li><span><a href="5">5</a></span></li><li><span><a href="6">6</a></span></li></ul>
current_page_number = int(scrapertools.get_match(item.url,'/(\d+)$'))
item.url = re.sub(r"\d+$","%s",item.url)
next_page_number = current_page_number + 1
next_page = item.url % (next_page_number)
itemlist.append( Item(channel=item.channel, action="agregadas", title="Página siguiente >>" , url=next_page, viewmode="movie_with_plot") )
except: pass
return itemlist
def findvideos(item):
logger.info("[pelisadicto.py] findvideos")
itemlist = []
data = re.sub(r"\n|\s{2}","",scrapertools.cache_page(item.url))
#<!-- SINOPSIS --> <h2>Sinopsis de 101 dálmatas</h2> <p>Pongo y Perdita, los dálmatas protagonistas, son una feliz pareja canina que vive rodeada de sus cachorros y con sus amos Roger y Anita. Pero su felicidad está amenazada. Cruella de Ville, una pérfida mujer que vive en una gran mansión y adora los abrigos de pieles, se entera de que los protagonistas tienen quince cachorros dálmatas. Entonces, la idea de secuestrarlos para hacerse un exclusivo abrigo de pieles se convierte en una obsesión enfermiza. Para hacer realidad su sueño contrata a dos ladrones.</p>
patron = "<!-- SINOPSIS --> "
patron += "<h2>[^<]+</h2> "
patron += "<p>([^<]+)</p>"
matches = re.compile(patron,re.DOTALL).findall(data)
plot = matches[0]
    # Download the page
data = scrapertools.cache_page(item.url)
patron = '<tr>.*?'
patron += '<td><img src="(.*?)".*?<td>(.*?)</td>.*?<td>(.*?)</td>.*?<a href="(.*?)".*?</tr>'
matches = re.compile(patron,re.DOTALL).findall(data)
for scrapedidioma, scrapedcalidad, scrapedserver, scrapedurl in matches:
idioma =""
if "/img/1.png" in scrapedidioma: idioma="Castellano"
if "/img/2.png" in scrapedidioma: idioma="Latino"
if "/img/3.png" in scrapedidioma: idioma="Subtitulado"
title = item.title + " ["+scrapedcalidad+"][" + idioma + "][" + scrapedserver + "]"
itemlist.append( Item(channel=item.channel, action="play", title=title, fulltitle=title , url=scrapedurl , thumbnail="" , plot=plot , show = item.show) )
return itemlist
def play(item):
logger.info("[pelisadicto.py] play")
itemlist = servertools.find_video_items(data=item.url)
for videoitem in itemlist:
videoitem.title = item.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.channel = item.channel
return itemlist
| gpl-2.0 | 6,785,634,815,207,113,000 | 57.22093 | 579 | 0.67156 | false | 2.960095 | false | false | false |
Inspq/ansible | lib/ansible/plugins/action/fetch.py | 1 | 8462 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import base64
from ansible.constants import mk_boolean as boolean
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
from ansible.module_utils.six import string_types
from ansible.plugins.action import ActionBase
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
from ansible.utils.path import makedirs_safe
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
''' handler for fetch operations '''
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
if self._play_context.check_mode:
result['skipped'] = True
result['msg'] = 'check mode not (yet) supported for this module'
return result
source = self._task.args.get('src', None)
dest = self._task.args.get('dest', None)
flat = boolean(self._task.args.get('flat'))
fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5', True)))
if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
result['msg'] = "validate_checksum and validate_md5 cannot both be specified"
if source is None or dest is None:
result['msg'] = "src and dest are required"
if result.get('msg'):
result['failed'] = True
return result
source = self._connection._shell.join_path(source)
source = self._remote_expand_user(source)
remote_checksum = None
if not self._play_context.become:
# calculate checksum for the remote file, don't bother if using become as slurp will be used
# Force remote_checksum to follow symlinks because fetch always follows symlinks
remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True)
# use slurp if permissions are lacking or privilege escalation is needed
remote_data = None
if remote_checksum in ('1', '2', None):
slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
if slurpres.get('failed'):
if not fail_on_missing and (slurpres.get('msg').startswith('file not found') or remote_checksum == '1'):
result['msg'] = "the remote file does not exist, not transferring, ignored"
result['file'] = source
result['changed'] = False
else:
result.update(slurpres)
return result
else:
if slurpres['encoding'] == 'base64':
remote_data = base64.b64decode(slurpres['content'])
if remote_data is not None:
remote_checksum = checksum_s(remote_data)
# the source path may have been expanded on the
# target system, so we compare it here and use the
# expanded version if it's different
remote_source = slurpres.get('source')
if remote_source and remote_source != source:
source = remote_source
# calculate the destination name
if os.path.sep not in self._connection._shell.join_path('a', ''):
source = self._connection._shell._unquote(source)
source_local = source.replace('\\', '/')
else:
source_local = source
dest = os.path.expanduser(dest)
if flat:
if dest.endswith(os.sep):
# if the path ends with "/", we'll use the source filename as the
# destination filename
base = os.path.basename(source_local)
dest = os.path.join(dest, base)
if not dest.startswith("/"):
# if dest does not start with "/", we'll assume a relative path
dest = self._loader.path_dwim(dest)
else:
# files are saved in dest dir, with a subdir for each host, then the filename
if 'inventory_hostname' in task_vars:
target_name = task_vars['inventory_hostname']
else:
target_name = self._play_context.remote_addr
dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)
dest = dest.replace("//","/")
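        # _remote_checksum() reports errors through sentinel strings rather
        # than a digest: '0' checksum failure, '1' missing file, '2' no read
        # permission, '3' path is a directory, '4' no python on the remote.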
if remote_checksum in ('0', '1', '2', '3', '4'):
# these don't fail because you may want to transfer a log file that
# possibly MAY exist but keep going to fetch other log files
result['changed'] = False
result['file'] = source
if remote_checksum == '0':
result['msg'] = "unable to calculate the checksum of the remote file"
elif remote_checksum == '1':
if fail_on_missing:
result['failed'] = True
del result['changed']
result['msg'] = "the remote file does not exist"
else:
result['msg'] = "the remote file does not exist, not transferring, ignored"
elif remote_checksum == '2':
result['msg'] = "no read permission on remote file, not transferring, ignored"
elif remote_checksum == '3':
result['msg'] = "remote file is a directory, fetch cannot work on directories"
elif remote_checksum == '4':
result['msg'] = "python isn't present on the system. Unable to compute checksum"
return result
# calculate checksum for the local file
local_checksum = checksum(dest)
if remote_checksum != local_checksum:
# create the containing directories, if needed
makedirs_safe(os.path.dirname(dest))
# fetch the file and check for changes
if remote_data is None:
self._connection.fetch_file(source, dest)
else:
try:
f = open(to_bytes(dest, errors='surrogate_or_strict'), 'wb')
f.write(remote_data)
f.close()
except (IOError, OSError) as e:
raise AnsibleError("Failed to fetch the file: %s" % e)
new_checksum = secure_hash(dest)
# For backwards compatibility. We'll return None on FIPS enabled systems
try:
new_md5 = md5(dest)
except ValueError:
new_md5 = None
if validate_checksum and new_checksum != remote_checksum:
result.update(dict(failed=True, md5sum=new_md5,
msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None,
checksum=new_checksum, remote_checksum=remote_checksum))
else:
result.update(dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum))
else:
# For backwards compatibility. We'll return None on FIPS enabled systems
try:
local_md5 = md5(dest)
except ValueError:
local_md5 = None
result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum))
return result
| gpl-3.0 | -1,681,434,668,187,707,000 | 44.010638 | 152 | 0.589931 | false | 4.357364 | false | false | false |
polyaxon/polyaxon | sdks/python/http_client/v1/polyaxon_sdk/models/v1_compiled_operation.py | 1 | 21129 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Polyaxon SDKs and REST API specification.
Polyaxon SDKs and REST API specification. # noqa: E501
The version of the OpenAPI document: 1.10.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from polyaxon_sdk.configuration import Configuration
class V1CompiledOperation(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'version': 'float',
'kind': 'str',
'name': 'str',
'description': 'str',
'tags': 'list[str]',
'presets': 'list[str]',
'queue': 'str',
'cache': 'V1Cache',
'termination': 'V1Termination',
'plugins': 'V1Plugins',
'schedule': 'object',
'events': 'list[V1EventTrigger]',
'build': 'V1Build',
'hooks': 'list[V1Hook]',
'dependencies': 'list[str]',
'trigger': 'V1TriggerPolicy',
'conditions': 'str',
'skip_on_upstream_skip': 'bool',
'matrix': 'object',
'joins': 'dict(str, V1Join)',
'inputs': 'list[V1IO]',
'outputs': 'list[V1IO]',
'contexts': 'list[V1IO]',
'is_approved': 'bool',
'cost': 'float',
'run': 'object'
}
attribute_map = {
'version': 'version',
'kind': 'kind',
'name': 'name',
'description': 'description',
'tags': 'tags',
'presets': 'presets',
'queue': 'queue',
'cache': 'cache',
'termination': 'termination',
'plugins': 'plugins',
'schedule': 'schedule',
'events': 'events',
'build': 'build',
'hooks': 'hooks',
'dependencies': 'dependencies',
'trigger': 'trigger',
'conditions': 'conditions',
'skip_on_upstream_skip': 'skipOnUpstreamSkip',
'matrix': 'matrix',
'joins': 'joins',
'inputs': 'inputs',
'outputs': 'outputs',
'contexts': 'contexts',
'is_approved': 'isApproved',
'cost': 'cost',
'run': 'run'
}
def __init__(self, version=None, kind=None, name=None, description=None, tags=None, presets=None, queue=None, cache=None, termination=None, plugins=None, schedule=None, events=None, build=None, hooks=None, dependencies=None, trigger=None, conditions=None, skip_on_upstream_skip=None, matrix=None, joins=None, inputs=None, outputs=None, contexts=None, is_approved=None, cost=None, run=None, local_vars_configuration=None): # noqa: E501
"""V1CompiledOperation - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._version = None
self._kind = None
self._name = None
self._description = None
self._tags = None
self._presets = None
self._queue = None
self._cache = None
self._termination = None
self._plugins = None
self._schedule = None
self._events = None
self._build = None
self._hooks = None
self._dependencies = None
self._trigger = None
self._conditions = None
self._skip_on_upstream_skip = None
self._matrix = None
self._joins = None
self._inputs = None
self._outputs = None
self._contexts = None
self._is_approved = None
self._cost = None
self._run = None
self.discriminator = None
if version is not None:
self.version = version
if kind is not None:
self.kind = kind
if name is not None:
self.name = name
if description is not None:
self.description = description
if tags is not None:
self.tags = tags
if presets is not None:
self.presets = presets
if queue is not None:
self.queue = queue
if cache is not None:
self.cache = cache
if termination is not None:
self.termination = termination
if plugins is not None:
self.plugins = plugins
if schedule is not None:
self.schedule = schedule
if events is not None:
self.events = events
if build is not None:
self.build = build
if hooks is not None:
self.hooks = hooks
if dependencies is not None:
self.dependencies = dependencies
if trigger is not None:
self.trigger = trigger
if conditions is not None:
self.conditions = conditions
if skip_on_upstream_skip is not None:
self.skip_on_upstream_skip = skip_on_upstream_skip
if matrix is not None:
self.matrix = matrix
if joins is not None:
self.joins = joins
if inputs is not None:
self.inputs = inputs
if outputs is not None:
self.outputs = outputs
if contexts is not None:
self.contexts = contexts
if is_approved is not None:
self.is_approved = is_approved
if cost is not None:
self.cost = cost
if run is not None:
self.run = run
@property
def version(self):
"""Gets the version of this V1CompiledOperation. # noqa: E501
:return: The version of this V1CompiledOperation. # noqa: E501
:rtype: float
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this V1CompiledOperation.
:param version: The version of this V1CompiledOperation. # noqa: E501
:type: float
"""
self._version = version
@property
def kind(self):
"""Gets the kind of this V1CompiledOperation. # noqa: E501
:return: The kind of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1CompiledOperation.
:param kind: The kind of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._kind = kind
@property
def name(self):
"""Gets the name of this V1CompiledOperation. # noqa: E501
:return: The name of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1CompiledOperation.
:param name: The name of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this V1CompiledOperation. # noqa: E501
:return: The description of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this V1CompiledOperation.
:param description: The description of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._description = description
@property
def tags(self):
"""Gets the tags of this V1CompiledOperation. # noqa: E501
:return: The tags of this V1CompiledOperation. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this V1CompiledOperation.
:param tags: The tags of this V1CompiledOperation. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def presets(self):
"""Gets the presets of this V1CompiledOperation. # noqa: E501
:return: The presets of this V1CompiledOperation. # noqa: E501
:rtype: list[str]
"""
return self._presets
@presets.setter
def presets(self, presets):
"""Sets the presets of this V1CompiledOperation.
:param presets: The presets of this V1CompiledOperation. # noqa: E501
:type: list[str]
"""
self._presets = presets
@property
def queue(self):
"""Gets the queue of this V1CompiledOperation. # noqa: E501
:return: The queue of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._queue
@queue.setter
def queue(self, queue):
"""Sets the queue of this V1CompiledOperation.
:param queue: The queue of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._queue = queue
@property
def cache(self):
"""Gets the cache of this V1CompiledOperation. # noqa: E501
:return: The cache of this V1CompiledOperation. # noqa: E501
:rtype: V1Cache
"""
return self._cache
@cache.setter
def cache(self, cache):
"""Sets the cache of this V1CompiledOperation.
:param cache: The cache of this V1CompiledOperation. # noqa: E501
:type: V1Cache
"""
self._cache = cache
@property
def termination(self):
"""Gets the termination of this V1CompiledOperation. # noqa: E501
:return: The termination of this V1CompiledOperation. # noqa: E501
:rtype: V1Termination
"""
return self._termination
@termination.setter
def termination(self, termination):
"""Sets the termination of this V1CompiledOperation.
:param termination: The termination of this V1CompiledOperation. # noqa: E501
:type: V1Termination
"""
self._termination = termination
@property
def plugins(self):
"""Gets the plugins of this V1CompiledOperation. # noqa: E501
:return: The plugins of this V1CompiledOperation. # noqa: E501
:rtype: V1Plugins
"""
return self._plugins
@plugins.setter
def plugins(self, plugins):
"""Sets the plugins of this V1CompiledOperation.
:param plugins: The plugins of this V1CompiledOperation. # noqa: E501
:type: V1Plugins
"""
self._plugins = plugins
@property
def schedule(self):
"""Gets the schedule of this V1CompiledOperation. # noqa: E501
:return: The schedule of this V1CompiledOperation. # noqa: E501
:rtype: object
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this V1CompiledOperation.
:param schedule: The schedule of this V1CompiledOperation. # noqa: E501
:type: object
"""
self._schedule = schedule
@property
def events(self):
"""Gets the events of this V1CompiledOperation. # noqa: E501
:return: The events of this V1CompiledOperation. # noqa: E501
:rtype: list[V1EventTrigger]
"""
return self._events
@events.setter
def events(self, events):
"""Sets the events of this V1CompiledOperation.
:param events: The events of this V1CompiledOperation. # noqa: E501
:type: list[V1EventTrigger]
"""
self._events = events
@property
def build(self):
"""Gets the build of this V1CompiledOperation. # noqa: E501
:return: The build of this V1CompiledOperation. # noqa: E501
:rtype: V1Build
"""
return self._build
@build.setter
def build(self, build):
"""Sets the build of this V1CompiledOperation.
:param build: The build of this V1CompiledOperation. # noqa: E501
:type: V1Build
"""
self._build = build
@property
def hooks(self):
"""Gets the hooks of this V1CompiledOperation. # noqa: E501
:return: The hooks of this V1CompiledOperation. # noqa: E501
:rtype: list[V1Hook]
"""
return self._hooks
@hooks.setter
def hooks(self, hooks):
"""Sets the hooks of this V1CompiledOperation.
:param hooks: The hooks of this V1CompiledOperation. # noqa: E501
:type: list[V1Hook]
"""
self._hooks = hooks
@property
def dependencies(self):
"""Gets the dependencies of this V1CompiledOperation. # noqa: E501
:return: The dependencies of this V1CompiledOperation. # noqa: E501
:rtype: list[str]
"""
return self._dependencies
@dependencies.setter
def dependencies(self, dependencies):
"""Sets the dependencies of this V1CompiledOperation.
:param dependencies: The dependencies of this V1CompiledOperation. # noqa: E501
:type: list[str]
"""
self._dependencies = dependencies
@property
def trigger(self):
"""Gets the trigger of this V1CompiledOperation. # noqa: E501
:return: The trigger of this V1CompiledOperation. # noqa: E501
:rtype: V1TriggerPolicy
"""
return self._trigger
@trigger.setter
def trigger(self, trigger):
"""Sets the trigger of this V1CompiledOperation.
:param trigger: The trigger of this V1CompiledOperation. # noqa: E501
:type: V1TriggerPolicy
"""
self._trigger = trigger
@property
def conditions(self):
"""Gets the conditions of this V1CompiledOperation. # noqa: E501
:return: The conditions of this V1CompiledOperation. # noqa: E501
:rtype: str
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1CompiledOperation.
:param conditions: The conditions of this V1CompiledOperation. # noqa: E501
:type: str
"""
self._conditions = conditions
@property
def skip_on_upstream_skip(self):
"""Gets the skip_on_upstream_skip of this V1CompiledOperation. # noqa: E501
:return: The skip_on_upstream_skip of this V1CompiledOperation. # noqa: E501
:rtype: bool
"""
return self._skip_on_upstream_skip
@skip_on_upstream_skip.setter
def skip_on_upstream_skip(self, skip_on_upstream_skip):
"""Sets the skip_on_upstream_skip of this V1CompiledOperation.
:param skip_on_upstream_skip: The skip_on_upstream_skip of this V1CompiledOperation. # noqa: E501
:type: bool
"""
self._skip_on_upstream_skip = skip_on_upstream_skip
@property
def matrix(self):
"""Gets the matrix of this V1CompiledOperation. # noqa: E501
:return: The matrix of this V1CompiledOperation. # noqa: E501
:rtype: object
"""
return self._matrix
@matrix.setter
def matrix(self, matrix):
"""Sets the matrix of this V1CompiledOperation.
:param matrix: The matrix of this V1CompiledOperation. # noqa: E501
:type: object
"""
self._matrix = matrix
@property
def joins(self):
"""Gets the joins of this V1CompiledOperation. # noqa: E501
:return: The joins of this V1CompiledOperation. # noqa: E501
:rtype: dict(str, V1Join)
"""
return self._joins
@joins.setter
def joins(self, joins):
"""Sets the joins of this V1CompiledOperation.
:param joins: The joins of this V1CompiledOperation. # noqa: E501
:type: dict(str, V1Join)
"""
self._joins = joins
@property
def inputs(self):
"""Gets the inputs of this V1CompiledOperation. # noqa: E501
:return: The inputs of this V1CompiledOperation. # noqa: E501
:rtype: list[V1IO]
"""
return self._inputs
@inputs.setter
def inputs(self, inputs):
"""Sets the inputs of this V1CompiledOperation.
:param inputs: The inputs of this V1CompiledOperation. # noqa: E501
:type: list[V1IO]
"""
self._inputs = inputs
@property
def outputs(self):
"""Gets the outputs of this V1CompiledOperation. # noqa: E501
:return: The outputs of this V1CompiledOperation. # noqa: E501
:rtype: list[V1IO]
"""
return self._outputs
@outputs.setter
def outputs(self, outputs):
"""Sets the outputs of this V1CompiledOperation.
:param outputs: The outputs of this V1CompiledOperation. # noqa: E501
:type: list[V1IO]
"""
self._outputs = outputs
@property
def contexts(self):
"""Gets the contexts of this V1CompiledOperation. # noqa: E501
:return: The contexts of this V1CompiledOperation. # noqa: E501
:rtype: list[V1IO]
"""
return self._contexts
@contexts.setter
def contexts(self, contexts):
"""Sets the contexts of this V1CompiledOperation.
:param contexts: The contexts of this V1CompiledOperation. # noqa: E501
:type: list[V1IO]
"""
self._contexts = contexts
@property
def is_approved(self):
"""Gets the is_approved of this V1CompiledOperation. # noqa: E501
:return: The is_approved of this V1CompiledOperation. # noqa: E501
:rtype: bool
"""
return self._is_approved
@is_approved.setter
def is_approved(self, is_approved):
"""Sets the is_approved of this V1CompiledOperation.
:param is_approved: The is_approved of this V1CompiledOperation. # noqa: E501
:type: bool
"""
self._is_approved = is_approved
@property
def cost(self):
"""Gets the cost of this V1CompiledOperation. # noqa: E501
:return: The cost of this V1CompiledOperation. # noqa: E501
:rtype: float
"""
return self._cost
@cost.setter
def cost(self, cost):
"""Sets the cost of this V1CompiledOperation.
:param cost: The cost of this V1CompiledOperation. # noqa: E501
:type: float
"""
self._cost = cost
@property
def run(self):
"""Gets the run of this V1CompiledOperation. # noqa: E501
:return: The run of this V1CompiledOperation. # noqa: E501
:rtype: object
"""
return self._run
@run.setter
def run(self, run):
"""Sets the run of this V1CompiledOperation.
:param run: The run of this V1CompiledOperation. # noqa: E501
:type: object
"""
self._run = run
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1CompiledOperation):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1CompiledOperation):
return True
return self.to_dict() != other.to_dict()
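if __name__ == "__main__":
    # Hedged sketch: assumes the generated constructor (defined earlier in this
    # file) accepts the attributes above as keyword arguments, as is usual for
    # OpenAPI-generated models.
    op = V1CompiledOperation(cost=1.5, is_approved=True)
    assert op == V1CompiledOperation(cost=1.5, is_approved=True)  # __eq__ compares to_dict() output
    print(op)  # __repr__ delegates to to_str(), i.e. pprint of to_dict()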
| apache-2.0 | 1,406,717,811,419,982,800 | 25.847522 | 439 | 0.582328 | false | 4.141317 | false | false | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/troubleshooting_details.py | 1 | 1830 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TroubleshootingDetails(Model):
"""Information gained from troubleshooting of specified resource.
:param id: The id of the get troubleshoot operation.
:type id: str
:param reason_type: Reason type of failure.
:type reason_type: str
:param summary: A summary of troubleshooting.
:type summary: str
:param detail: Details on troubleshooting results.
:type detail: str
:param recommended_actions: List of recommended actions.
:type recommended_actions:
list[~azure.mgmt.network.v2017_08_01.models.TroubleshootingRecommendedActions]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'reason_type': {'key': 'reasonType', 'type': 'str'},
'summary': {'key': 'summary', 'type': 'str'},
'detail': {'key': 'detail', 'type': 'str'},
'recommended_actions': {'key': 'recommendedActions', 'type': '[TroubleshootingRecommendedActions]'},
}
def __init__(self, **kwargs):
super(TroubleshootingDetails, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.reason_type = kwargs.get('reason_type', None)
self.summary = kwargs.get('summary', None)
self.detail = kwargs.get('detail', None)
self.recommended_actions = kwargs.get('recommended_actions', None)
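if __name__ == "__main__":
    # Hedged sketch: keyword arguments mirror _attribute_map above; the values
    # are illustrative only.
    details = TroubleshootingDetails(
        id="troubleshoot-1",
        reason_type="PortBlocked",
        summary="Traffic is blocked",
        detail="An NSG rule denies inbound traffic")
    print(details.summary)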
| mit | 1,079,771,110,420,915,600 | 39.666667 | 108 | 0.610929 | false | 4.084821 | false | false | false |
Mathew/psychoanalysis | psychoanalysis/apps/pa/aggregation.py | 1 | 6791 | from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from itertools import chain
def make_query(query_dict):
query_type = 0
if 'result type' in query_dict:
if query_dict['result type'] == 'percent':
query_type = 1
data_set = 0
if 'data set' in query_dict:
data_set = 2
    # data_set selects the base table (0) or the category roll-up (2); adding
    # query_type picks the percentage variant (1 or 3) in get_data_set().
    the_list = get_data_set(data_set + query_type)
return the_list
def get_data_set(num=0):
if num == 0:
return get_data_set_0()
if num == 1:
return get_data_set_1()
if num == 2:
return get_data_set_2()
return get_data_set_3()
def get_data_set_0():
data_list = []
data_list.append(['Activity', 'Consultant Clinical Psychologist', 'Clinical Psychologist 1', 'Clinical Psychologist 2', 'Clinical Psychologist 3', 'CAAP', 'CAAP Trainee', 'Clinical Psychology Trainee'])
data_list.append(['Assessment', 60, 120, 0, 240, 0, 0, 120])
data_list.append(['Individual Follow up', 990, 1140, 180, 120, 315, 495, 330])
data_list.append(['Low Intensity', 0, 0, 0, 0, 0, 60, 0])
data_list.append(['High Intensity', 60, 0, 0, 0, 315, 435, 0])
data_list.append(['High Intensity - Specialist', 375, 660, 0, 0, 0, 0, 330])
data_list.append(['Highly Specialist', 555, 480, 180, 240, 0, 0, 0])
data_list.append(['Group Therapy', 0, 0, 270, 285, 90, 0, 0])
data_list.append(['Case review (with patient)', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other Treatment', 0, 0, 0, 0, 30, 0, 0])
data_list.append(['Clinical Administration', 750, 1230, 315, 660, 645, 990, 465])
data_list.append(['Telephone', 0, 30, 30, 0, 0, 0, 105])
data_list.append(['Clinical meeting', 195, 300, 0, 60, 75, 90, 15])
data_list.append(['Supervision - giving', 60, 360, 0, 120, 75, 0, 60])
data_list.append(['Supervision - receiving', 0, 90, 0, 0, 180, 60, 60])
data_list.append(['Other Supervision', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Admin tasks', 165, 255, 15, 75, 0, 225, 75])
data_list.append(['Dealing with emails', 525, 420, 0, 60, 90, 75, 105])
data_list.append(['Travel', 270, 525, 75, 180, 210, 120, 135])
data_list.append(['Meetings (non-clinical)', 1050, 330, 30, 135, 0, 0, 0])
data_list.append(['Research', 30, 75, 0, 45, 30, 0, 0])
data_list.append(['Training/ CPD (Delivering)', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Training/ CPD (Receiving)', 0, 15, 0, 0, 0, 450, 0])
data_list.append(['Annual Leave', 0, 0, 0, 0, 480, 540, 0])
data_list.append(['Sick Leave', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other leave', 0, 0, 0, 0, 240, 0, 540])
data_list.append(['Breaks', 195, 15, 45, 90, 45, 150, 90])
data_list.append(['Management', 735, 15, 0, 0, 30, 30, 0])
data_list.append(['Other Management', 0, 345, 0, 0, 0, 0, 30])
return data_list
def get_data_set_1():
the_list = get_data_set_0()
percent_list = ['Time Recorded', 5025, 5265, 960, 2070, 2535, 3225, 2130]
num = 0
ret_list = []
for item in the_list:
if num == 0:
ret_list.append(item)
else:
ret_list.append(percent_item(item, percent_list))
num += 1
return ret_list
def get_data_set_2():
main_list = get_category_data_set()
data_list = []
data_list.append(['Category', 'Consultant Clinical Psychologist', 'Clinical Psychologist 1', 'Clinical Psychologist 2', 'Clinical Psychologist 3', 'CAAP', 'CAAP Trainee', 'Clinical Psychology Trainee'])
direct_list = get_one_catergory_data('Direct', main_list)
indirect_list = get_one_catergory_data('Indirect', main_list)
other_list = get_one_catergory_data('Other', main_list)
data_list.append(direct_list)
data_list.append(indirect_list)
data_list.append(other_list)
return data_list
def get_data_set_3():
the_list = get_data_set_2()
percent_list = ['Time Recorded', 5025, 5265, 960, 2070, 2535, 3225, 2130]
num = 0
ret_list = []
for item in the_list:
if num == 0:
ret_list.append(item)
else:
ret_list.append(percent_item(item, percent_list))
num += 1
return ret_list
def get_category_data_set():
data_list = []
data_list.append(['Direct', 60, 120, 0, 240, 0, 0, 120])
data_list.append(['Direct', 990, 1140, 180, 120, 315, 495, 330])
data_list.append(['Direct', 0, 0, 0, 0, 0, 60, 0])
data_list.append(['Direct', 60, 0, 0, 0, 315, 435, 0])
data_list.append(['Direct', 375, 660, 0, 0, 0, 0, 330])
data_list.append(['Direct', 555, 480, 180, 240, 0, 0, 0])
data_list.append(['Direct', 0, 0, 270, 285, 90, 0, 0])
data_list.append(['Direct', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Direct', 0, 0, 0, 0, 30, 0, 0])
data_list.append(['Indirect', 750, 1230, 315, 660, 645, 990, 465])
data_list.append(['Indirect', 0, 30, 30, 0, 0, 0, 105])
data_list.append(['Indirect', 195, 300, 0, 60, 75, 90, 15])
data_list.append(['Indirect', 60, 360, 0, 120, 75, 0, 60])
data_list.append(['Indirect', 0, 90, 0, 0, 180, 60, 60])
data_list.append(['Indirect', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other', 165, 255, 15, 75, 0, 225, 75])
data_list.append(['Other', 525, 420, 0, 60, 90, 75, 105])
data_list.append(['Other', 270, 525, 75, 180, 210, 120, 135])
data_list.append(['Other', 1050, 330, 30, 135, 0, 0, 0])
data_list.append(['Other', 30, 75, 0, 45, 30, 0, 0])
data_list.append(['Other', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other', 0, 15, 0, 0, 0, 450, 0])
data_list.append(['Other', 0, 0, 0, 0, 480, 540, 0])
data_list.append(['Other', 0, 0, 0, 0, 0, 0, 0])
data_list.append(['Other', 0, 0, 0, 0, 240, 0, 540])
data_list.append(['Other', 195, 15, 45, 90, 45, 150, 90])
data_list.append(['Other', 735, 15, 0, 0, 30, 30, 0])
data_list.append(['Other', 0, 345, 0, 0, 0, 0, 30])
return data_list
def get_one_catergory_data(category, data_list):
the_len = len(data_list[0])
ret_list = []
ret_list.append(category)
for num in range(1, the_len):
ret_list.append(0)
for sub_list in data_list:
if sub_list[0] != category:
continue
for index in range(1, the_len):
tot = sub_list[index] + ret_list[index]
ret_list[index] = tot
return ret_list
def percent_item(item, percent_list):
num = 0
ret_list = []
for val in item:
if num == 0:
ret_list.append(val)
else:
result = val * 100.0 / percent_list[num]
ret_list.append(result)
num += 1
return ret_list
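if __name__ == "__main__":
    # Hedged sketch of the dispatch above: 'result type' == 'percent' selects a
    # percentage variant; the mere presence of a 'data set' key selects the
    # category roll-up.
    for row in make_query({'result type': 'percent', 'data set': 'categories'}):
        print(row)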
| mit | -728,023,969,507,841,200 | 38.254335 | 206 | 0.566485 | false | 2.643441 | false | false | false |
gregpuzzles1/Sandbox | Example Programs/Ch_07_Student_Files/pilimages.py | 1 | 4982 | """
images.py
This module, written by Kenneth Lambert, supports simple image processing.
The Image class represents either an image loaded from a GIF file or a
blank image.
To instantiate an image from a file, enter
image = Image(aGifFileName)
To instantiate a blank image, enter
image = Image(aWidth, aHeight)
Image methods:
draw() Displays the image in a window
getWidth() -> anInt The width in pixels
getHeight() -> anInt The height in pixels
getPixel(x, y) -> (r, g, b) The RGB values of pixel at x, y
setPixel(x, y, (r, g, b)) Resets pixel at x, y to (r, g, b)
save() Saves the image to the current file name
save(aFileName) Saves the image to fileName
LICENSE: This is open-source software released under the terms of the
GPL (http://www.gnu.org/licenses/gpl.html).
"""
import Tkinter
import os, os.path
tk = Tkinter
import PIL.Image
import PIL.ImageTk
Pimg = PIL.Image
Pimgtk = PIL.ImageTk
import exceptions
_root = None
class ImageView(tk.Canvas):
def __init__(self, image,
title = "New Image",
autoflush=False):
master = tk.Toplevel(_root)
master.protocol("WM_DELETE_WINDOW", self.close)
tk.Canvas.__init__(self, master,
width = image.getWidth(),
height = image.getHeight())
self.master.title(title)
self.pack()
master.resizable(0,0)
self.image = image
self.height = image.getHeight()
self.width = image.getWidth()
self.autoflush = autoflush
self.closed = False
def close(self):
"""Close the window"""
self.closed = True
self.master.destroy()
self.image.canvas = None
_root.quit()
def isClosed(self):
return self.closed
def getHeight(self):
"""Return the height of the window"""
return self.height
def getWidth(self):
"""Return the width of the window"""
return self.width
class Image:
def __init__(self, *args):
self.canvas = None
if len(args) == 1:
name = args[0]
if type(name) != str:
raise Exception, 'Must be a file name'
if not os.path.exists(args[0]):
raise Exception, 'File not in current directory'
self.image = Pimg.open(args[0])
self.filename = args[0]
box = self.image.getbbox()
self.width = box[2]
self.height = box[3]
else: # arguments are width and height
self.width, self.height = args
self.image = Pimg.new(mode = "RGB",
size = (self.width, self.height))
self.filename = ""
def getWidth(self):
"""Returns the width of the image in pixels"""
return self.width
def getHeight(self):
"""Returns the height of the image in pixels"""
return self.height
def getPixel(self, x, y):
"""Returns a tuple (r,g,b) with the RGB color values for pixel (x,y)
r,g,b are in range(256)
"""
return self.image.getpixel((x, y))
def setPixel(self, x, y, color):
"""Sets pixel (x,y) to the color given by RGB values r, g, and b.
r,g,b should be in range(256)
"""
self.image.putpixel((x, y), color)
def draw(self):
"""Creates and opens a window on an image.
The user must close the window to return control to
the caller."""
if not self.canvas:
self.canvas = ImageView(self,
self.filename)
self.photoImage = Pimgtk.PhotoImage(self.image)
self.canvas.create_image(self.width / 2,
self.height / 2,
image = self.photoImage)
_root.mainloop()
def save(self, filename = ""):
"""Saves the image to filename. If no file name
is provided, uses the image's file name if there
is one; otherwise, simply returns.
If the .gif extension is not present, it is added.
"""
if filename == "":
return
else:
self.filename = filename
#path, name = os.path.split(filename)
#ext = name.split(".")[-1]
#if ext != "gif":
#filename += ".gif"
self.filename = filename
self.image.save(self.filename)
def clone(self):
new = Image(self.width, self.height)
new.image = self.image.copy()
return new
def __str__(self):
rep = ""
if self.filename:
rep += ("File name: " + self.filename + "\n")
rep += ("Width: " + str(self.width) + \
"\nHeight: " + str(self.height))
return rep
_root = tk.Tk()
_root.withdraw()
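if __name__ == "__main__":
    # Hedged demo of the API documented above; 'smokey.gif' is a placeholder
    # file that must exist in the current directory.
    image = Image("smokey.gif")
    r, g, b = image.getPixel(0, 0)
    image.setPixel(0, 0, (255 - r, 255 - g, 255 - b))  # invert one pixel
    image.draw()  # the user must close the window to return control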
| gpl-3.0 | -1,244,779,245,137,367,800 | 27.965116 | 76 | 0.540145 | false | 3.925926 | false | false | false |
ernstblecha/gen.sh | waitforstring.py | 1 | 1698 | #!/usr/bin/python3 -u
# using -u here to make stdin unbuffered
# This file is part of gen.sh.
#
# gen.sh is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gen.sh is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gen.sh. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
VERSION = 2
if os.environ.get("GET_VERSION") == "1":
print(VERSION)
sys.exit(0)
w = 0
n = ""
s = ""
if len(sys.argv) > 1: # get the needle and its length
w = len(sys.argv[1])
n = sys.argv[1]
while w > 0: # "endless" loop if we have a needle
c = sys.stdin.read(1)
if len(c) == 0:
sys.exit(1) # stream ended, needle not found
s += c
    s = s[-w:]  # store the last w characters for comparison
if s == n:
sys.exit(0) # needle was found
# usage message if needle is missing
print(os.path.basename(sys.argv[0])+""" needle
blocks until the string passed in the first argument (\"needle\") is found on
stdin or the stream ends. Additional parameters are ignored.
returns 0 if string is found
returns 1 if string is not found
returns 2 if no string is given
This message is shown if no string is given
Version of """+os.path.basename(sys.argv[0])+": " + str(VERSION))
sys.exit(2) # errorcode for missing needle
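# Usage sketch (hedged), e.g. from a shell:
#     tail -f server.log | ./waitforstring.py "server started" && echo ready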
| gpl-3.0 | -159,925,529,795,135,940 | 29.321429 | 77 | 0.691402 | false | 3.34252 | false | false | false |
Charley-fan/metaArray | metaFunc.py | 1 | 10584 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from decimal import Decimal
# import numpy as np
from numpy import bincount
from numpy import linspace
from numpy import round
from numpy import zeros, ones
from numpy.fft import rfft as np_rfft
from scipy.signal import firwin
from core import metaArray
from misc import spline_resize
from misc import quantise
from misc import filtfilt
from misc import engUnit
####################
# Helper functions #
####################
def padding_calc(metAry, min_freq = 0, max_freq = 1e6, resolution = 2048, \
debug = False):
"""
For a given 1D metaArray, work out the overall length of array necessary
for the specified resolution between the frequency limits
Padding ratio is always >= len(metAry)
Example:
rfft(ary, n = padding_calc(ary))
"""
n = len(metAry)
t0 = metAry.get_range(0, 'begin')
t1 = metAry.get_range(0, 'end')
f = n / float(t1-t0) # Sampling freq
# f = abs(f) / 2 # Nyquist
N = n * abs(max_freq - min_freq) / abs(f) # Unpadded resolution
if N < resolution:
return int(round((resolution / N) * n)) # Scale up accordingly
else: # Already at or better resolution
return int(round(n))
def meta_fir_len(metAry, length = 0.005):
"""
    Simple helper function to work out the appropriate number of taps for a
    type I FIR filter for a given metaArray.
    Defaults to 0.5% of the input metAry duration, minimum 3.
    Input:
        metAry      Target metaArray
        length      Desired length/duration of the filter as a ratio of len(metAry)
    Output:
        length      Length of the desired FIR filter (int)
"""
length = int(round(len(metAry) * length)) # Round to nearest ratio
if length < 3: length = 3
# l must be odd for Type I filter
if length%2 == 0: length += 1
return length
def meta_lowpass(metAry, freq, length = 0.005, window='hann', copy = True):
"""
Perform a two pass Type I FIR filter of cut-off freq(uency) on the given
1D metaArray, once forward and once backward.
Inputs:
metAry Target metaArray
freq Cut-off frequency (float, in metAry unit)
length Length of the FIR filter (See notes below)
window Window function for the FIR filter
copy Whether to return a copy or modify inplace
Length
If given as float type, it will be interpreted as percentage length
(duration) of the input metaArray.
If given as int type, it will be interpreted as the desire number of
taps for FIR filter.
    The default FIR length is 0.5% of that of the input metaArray, minimum 3.
The exact number of taps is rounded to the next odd number, in order to
meet the type I conditions.
Scipy.signal.firwin support the following window options:
boxcar
triang
blackman
hamming
hann
bartlett
flattop
parzen
bohman
blackmanharris
nuttall
barthann
kaiser (needs beta)
gaussian (needs std)
general_gaussian (needs power, width)
slepian (needs width)
chebwin (needs attenuation)
"""
    assert metAry.ndim == 1, "Only 1D metaArray accepted, there are %i dimensions in the given data." % metAry.ndim
if copy: ary = metAry.copy()
else: ary = metAry
# Work out the Nyquist frequency
Nyquist = ary.get_smp_rate() / 2
# Normalise frequency
name_str = 'Low pass filtered at ' + engUnit(freq, unit = 'Hz', sigfig=3)
freq = float(freq) / Nyquist
# Number of taps
if type(length) is float:
length = meta_fir_len(ary, length = length)
elif type(length) is int:
pass
else:
raise ValueError('Unexpected variable type for length: ' + str(type(length)))
# a = [1.]
b = firwin(length, freq, window=window)
ary.data = filtfilt(b, [1.], ary.data)
if type(ary['name']) is str:
ary['name'] += ' (' + name_str + ')'
else:
ary['name'] = name_str
if copy: return ary
else: return
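# Usage sketch (hedged; 'trace' stands for any 1D metaArray):
#     smoothed = meta_lowpass(trace, 5e6)       # filtered copy, 5 MHz cut-off
#     meta_lowpass(trace, 5e6, copy=False)      # or filter in place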
def meta_highpass(metAry, freq, length = 0.005, window='hann', copy=True):
"""
Perform a two pass Type I FIR filter of cut-off freq(uency) on the given
1D metaArray, once forward and once backward.
meta_highpass(metAry) === metAry - meta_lowpass(metAry)
Inputs:
metAry Target metaArray
freq Cut-off frequency (float, in metAry unit)
length Length of the FIR filter (See notes below)
window Window function for the FIR filter
copy Whether to return a copy or modify inplace
See meta_lowpass for details
"""
loary = meta_lowpass(metAry, freq, length=length, window=window, copy=True)
name_str = 'High pass filtered at ' + engUnit(freq, unit = 'Hz', sigfig=3)
if copy: ary = metAry.copy()
else: ary = metAry
ary.data -= loary.data
if type(metAry['name']) is str:
ary['name'] = metAry['name'] + ' (' + name_str + ')'
else:
ary['name'] = name_str
if copy: return ary
else: return
def meta_resample(metAry, rate=False, l=0.005, window='hamming', order = 5):
"""
Resample 1D metaArray data into the given sampling rate, this is
implemented using misc.spline_resize()
    This function is distinct from scipy.signal.resample in that it uses
    splines for resampling instead of an FFT-based method. Periodicity of the
    metAry content is neither implied nor required.
Inputs:
metAry Input metaArray
rate Sampling rate (float, in metaArray unit)
        l           Length of the FIR filter, default to 0.5% of len(metAry), minimum 3
window Window method to generate the FIR filter
order Order of spline polynomial, default to 5
Output:
metaArray A resampled copy of the input metAry
If upsampling, quintic spline interpolation will be used.
If downsampling, two pass anti-aliasing FIR filter will be applied, once
forward and once reverse to null the group delay, then quintic spline
interpolation will be used.
If target sampling rate is not given, it will try to find the next highest
sampling rate by default. The resampled data will always align at time 0,
and never exceed the duration of the given data.
    The sampling rate will come in multiples of 1, 2, or 5 Hz; this function
    returns a resampled copy and leaves the input metaArray unchanged.
"""
    assert metAry.ndim == 1, "Only 1D metaArray accepted, there are %i dimensions in the given data." % metAry.ndim
ary = metAry.copy()
if rate is False:
# Target sampling rate is not specified
r = len(ary) / float(abs(ary.get_range(0, 'end') - ary.get_range(0, 'begin')))
# Find out the exponent of the current sampling rate
exponent = Decimal(str(r)).adjusted()
# Remove the exponent
scale = r * 10**(0 - exponent)
        # make the standard scale slightly larger (by 1e-5) so numerical
        # (rounding) error does not come into play and force it up
        # to the next sampling scale
if scale > 5.00005:
scale = 10
elif scale > 2.00002:
scale = 5
elif scale > 1.00001:
scale = 2
else:
            # This really shouldn't happen, but just in case the Decimal
            # function returns numbers like 0.123e+45 instead of 1.23e+45
            scale = 1
            print "Warning!! Unexpected values for scale evaluation! " + \
                'scale variable (' + str(scale) + ') should be greater than 1.'
# This is what the sampling rate should be
rate = scale * 10**exponent
# Target size of the ary
n = float(abs(ary.get_range(0, 'end') - ary.get_range(0, 'begin'))) * rate
if type(l) is float: l = meta_fir_len(ary, l)
# resize the data
ary.data = spline_resize(ary.data, n, l=l, window=window, order = order)
# Update the meta info
ary.update_range()
return ary
metaResample = meta_resample
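# Usage sketch (hedged):
#     resampled = meta_resample(trace)              # snap to the next 1/2/5 rate
#     resampled = meta_resample(trace, rate=100e6)  # or request 100 MS/s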
def meta_histogram(metAry, bins = False):
"""
Compute a histogram of the given 1D metaArray.
It will try to work out the maximum number of bins (i.e. minimum
quantisation from the data) by default.
    Will raise QuantisationError if unable to determine the number of bins.
"""
    assert metAry.ndim == 1, "Only 1D metaArray accepted, there are %i dimensions in the given data." % metAry.ndim
# Flatten the data to 1D array
data = metAry.data.ravel()
if bins is not False:
quanter = data.ptp() / bins
else:
# Try to quantise the array data
quanter = quantise(data)
# Quantise the data, and offset to the +ve side of value, bincount requires +ve
# int arrays
quantum = round(data / quanter).astype(int)
quantum -= quantum.min()
# Do the bincount for histogram
hist = bincount(quantum)
# Update the metaInfo
hist = metaArray(hist)
hist.set_range(0, 'begin', metAry.min())
hist.set_range(0, 'end', metAry.max())
hist.set_range(0, 'unit', metAry['unit'])
hist.set_range(0, 'label', metAry['label'])
hist['name'] = 'Histogram of ' + metAry['name']
hist['unit'] = ''
hist['label'] = 'Counts'
return hist
histogram = meta_histogram
| gpl-3.0 | -2,785,566,513,698,070,000 | 31.268293 | 115 | 0.606293 | false | 3.879765 | false | false | false |
alenickwork/python_training | fixture/orm.py | 1 | 4159 | from pony.orm import *
from datetime import datetime
from model.group import Group
from model.contact import Contact
from pymysql.converters import decoders
from pymysql.converters import encoders, decoders, convert_mysql_timestamp
conv = encoders
conv.update(decoders)
conv[datetime] = convert_mysql_timestamp
class ORMFixture:
db = Database()
class ORMGroup(db.Entity):
_table_ = 'group_list'
id = PrimaryKey(int, column = 'group_id')
name = Optional(str, column= 'group_name')
header = Optional(str, column = 'group_header')
footer = Optional(str, column = 'group_footer')
contacts = Set(lambda: ORMFixture.ORMContact, table = 'address_in_groups',
column = "id", reverse = "groups", lazy = True)
class ORMContact(db.Entity):
_table_ = 'addressbook'
id = PrimaryKey(int, column = 'id')
firstname = Optional(str, column= 'firstname')
lastname = Optional(str, column= 'lastname')
deprecated = Optional(datetime, column= 'deprecated')
groups = Set(lambda: ORMFixture.ORMGroup, table = 'address_in_groups',
column = "group_id", reverse = "contacts", lazy = True)
email_prior = Optional(str, column= 'email')
email_2 = Optional(str, column= 'email2')
email_3 = Optional(str, column= 'email3')
home_phone = Optional(str, column= 'home')
mobile_phone = Optional(str, column= 'mobile')
work_phone = Optional(str, column= 'work')
phone_secondary = Optional(str, column= 'phone2')
def __init__(self,host, name, user, password):
        self.db.bind('mysql', host=host, database=name, user=user, password=password, autocommit=True, conv=conv)
self.db.generate_mapping()
#sql_debug(True)
def convert_groups_to_model(self, groups):
def convert(group):
return Group(id = str(group.id),
name = group.name,
header = group.header,
footer = group.footer)
return list(map(convert, groups))
@db_session
def get_group_list(self):
return self.convert_groups_to_model(list(select(g for g in ORMFixture.ORMGroup)))
def convert_contacts_to_model(self, contacts, full = False):
def convert(cont):
return Contact(id = str(cont.id),
firstname = cont.firstname,
lastname = cont.lastname)
def convert_full(cont):
return Contact(id = str(cont.id),
firstname = cont.firstname,
lastname = cont.lastname,
email_prior = cont.email_prior,
email_2 = cont.email_2,
email_3 = cont.email_3,
home_phone = cont.home_phone,
mobile_phone = cont.mobile_phone,
work_phone = cont.work_phone,
phone_secondary = cont.phone_secondary
)
if not full:
return list(map(convert, contacts))
else:
return list(map(convert_full, contacts))
@db_session
def get_contact_list(self, full = False):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None), full)
@db_session
def get_contact_by_id(self, id, full = False):
return self.convert_contacts_to_model(select(c for c in ORMFixture.ORMContact if c.deprecated is None and c.id == id), full)[0]
@db_session
def get_contacts_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
return self.convert_contacts_to_model(orm_group.contacts)
@db_session
def get_contacts_not_in_group(self, group):
orm_group = list(select(g for g in ORMFixture.ORMGroup if g.id == group.id))[0]
conts = select(c for c in ORMFixture.ORMContact if c.deprecated is None and orm_group not in c.groups)
return self.convert_contacts_to_model(conts)
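if __name__ == "__main__":
    # Hedged sketch: the connection credentials below are placeholders.
    orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
    groups = orm.get_group_list()
    if groups:
        print(orm.get_contacts_in_group(groups[0]))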
| apache-2.0 | -884,994,360,897,504,300 | 39.378641 | 135 | 0.591248 | false | 3.94592 | false | false | false |
linted/Skip-Trace | common/STcommon.py | 1 | 1245 | try:
    import argparse
    import logging
from Crypto.PublicKey import RSA
except ImportError as e:
print("[-] {}, exiting".format(e))
exit(1)
def configDebugLog(logFileName):
log_file = logging.FileHandler(logFileName,mode='w')
log_file.setLevel(logging.DEBUG)
log_file.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
# ERROR level or higher should be output to console as well
log_console = logging.StreamHandler()
log_console.setLevel(logging.ERROR)
log_console.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logger = logging.getLogger('main_logger')
logger.addHandler(log_console)
logger.addHandler(log_file)
return logger
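# Usage sketch (hedged):
#     logger = configDebugLog('st.log')
#     logger.debug('written to st.log only')
#     logger.error('written to st.log and echoed to the console')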
def keyGen(path):
key = RSA.generate(2048)
with open(path +'/python.pem','wb') as privateKey:
privateKey.write(key.exportKey('PEM'))
with open(path+ '/python.pub', 'wb') as publicKey:
publicKey.write(key.publickey().exportKey('PEM'))
def parseArgs():
'''Parses args using the argparse lib'''
parser = argparse.ArgumentParser(description='Location logging server')
parser.add_argument('-g', '--generate-keys', metavar='PATH', type=str)
return parser.parse_args()
if __name__ == "__main__":
args = parseArgs()
if args.generate_keys:
        keyGen(args.generate_keys)
| mit | -4,271,112,123,629,025,000 | 28.666667 | 86 | 0.724498 | false | 3.259162 | false | false | false
MaxTyutyunnikov/lino | lino/projects/min2/settings.py | 1 | 2156 | # -*- coding: UTF-8 -*-
## Copyright 2012-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
import os
import lino
from lino.projects.std.settings import *
#~ from lino.modlib import cal
#~ class Site(Site,cal.SiteMixin):
class Site(Site):
title = "Lino/MinimalApp 2"
#~ help_url = "http://lino.saffre-rumma.net/az/index.html"
#~ migration_module = 'lino.projects.az.migrate'
#~ project_model = 'contacts.Person'
#~ project_model = 'contacts.Person'
project_model = 'projects.Project'
user_model = "users.User"
#~ languages = ('de', 'fr')
languages = 'en et'
#~ index_view_action = "dsbe.Home"
#~ remote_user_header = "REMOTE_USER"
#~ remote_user_header = None
#~ def setup_quicklinks(self,ui,user,tb):
#~ tb.add_action(self.modules.contacts.Persons.detail_action)
#~ tb.add_action(self.modules.contacts.Companies.detail_action)
def get_installed_apps(self):
for a in super(Site,self).get_installed_apps():
yield a
yield 'django.contrib.contenttypes'
yield 'lino.modlib.system'
yield 'lino.modlib.users'
yield 'lino.modlib.countries'
yield 'lino.modlib.contacts'
yield 'lino.modlib.projects'
yield 'lino.modlib.uploads'
yield 'lino.modlib.cal'
yield 'lino.modlib.outbox'
yield 'lino.modlib.pages'
#~ yield 'lino.projects.min2'
SITE = Site(globals())
| gpl-3.0 | 804,034,290,664,175,200 | 32.774194 | 71 | 0.641466 | false | 3.546053 | false | false | false |
vlegoff/tsunami | src/primaires/scripting/actions/equiper.py | 1 | 5397 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant l'action equiper."""
from primaires.scripting.action import Action
from primaires.scripting.instruction import ErreurExecution
class ClasseAction(Action):
"""Fait équiper un personnage."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.equiper_prototype, "Personnage", "str")
cls.ajouter_types(cls.equiper_objet, "Personnage", "Objet")
@staticmethod
def equiper_prototype(personnage, cle_prototype):
"""Fait équiper un objet à un personnage.
Paramètres à préciser :
* personnage : le personnage qui doit s'équiper
* cle_prototype : la clé du prototype d'objet à équiper
Exemple d'utilisation :
equiper personnage "sabre_bois"
Le personnage n'a pas besoin d'avoir l'objet indiqué dans
son inventaire : il sera dans tous les cas créé. En outre,
cette action ne vérifie pas que le joueur peut s'équiper
à cet emplacement (utilisez la fonction 'peut_equiper' pour
vérifier cela).
"""
if not cle_prototype in importeur.objet.prototypes:
raise ErreurExecution("prototype d'objet {} introuvable".format(
repr(cle_prototype)))
prototype = importeur.objet.prototypes[cle_prototype]
objet = importeur.objet.creer_objet(prototype)
for membre in personnage.equipement.membres:
if membre.peut_equiper(objet):
membre.equiper(objet)
return
raise ErreurExecution("le personnage {} ne peut équiper {}".format(
repr(personnage), repr(objet.cle)))
@staticmethod
def equiper_objet(personnage, objet):
"""Force un personnage à équiper l'objet précisé.
Cette syntaxe de l'action se rapproche davantage de la commande
**porter/wear**. Elle demande à un personnage d'équiper un
objet qu'il possède (dans ses mains, ou dans un sac qu'il équipe).
Paramètres à préciser :
* personnage : le personnage que l'on souhaite équiper
* objet : l'objet que l'on souhaite équiper.
Cette action est susceptible de faire des erreurs, par exemple,
si l'objet n'est pas possédé par le personnage ou si il ne
peut être équipé par le personnage. Il est de bonne politique
de tester avant d'équiper le personnage, sauf si on est dans
une situation extrêmement limitée en aléatoire.
Exemple d'utilisation :
# On cherche à faire équiper un sabre de bois au personnage
# Le personnage possède le sabre de bois dans son inventaire
sabre = possede(personnage, "sabre_bois")
si sabre:
# On vérifié qu'il n'a rien dans la main gauche
si !equipe(personnage, "*main gauche"):
equiper personnage sabre
finsi
finsi
"""
if not any(o for o in personnage.equipement.inventaire if o is objet):
raise ErreurExecution("{} ne possède visiblement pas {}".format(
personnage.nom_unique, objet.identifiant))
        # If 'objet' is already equipped, do nothing
if objet.contenu is personnage.equipement.equipes:
return
        # Try to equip the object on one of the character's body parts
for membre in personnage.equipement.membres:
if membre.peut_equiper(objet):
objet.contenu.retirer(objet)
membre.equiper(objet)
objet.script["porte"].executer(objet=objet,
personnage=personnage)
return
raise ErreurExecution("{} ne peut équiper {}, aucun emplacement " \
"libre".format(personnage.nom_unique, objet.identifiant))
| bsd-3-clause | 5,963,677,848,703,850,000 | 40.757813 | 79 | 0.674275 | false | 3.466278 | false | false | false |
fiduswriter/fiduswriter | fiduswriter/base/tests/test_prelogin.py | 1 | 2855 | from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from testing.testcases import LiveTornadoTestCase
from testing.selenium_helper import SeleniumHelper
class PreloginTest(LiveTornadoTestCase, SeleniumHelper):
fixtures = [
'initial_terms.json',
]
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.base_url = cls.live_server_url
driver_data = cls.get_drivers(1)
cls.driver = driver_data["drivers"][0]
cls.client = driver_data["clients"][0]
cls.driver.implicitly_wait(driver_data["wait_time"])
cls.wait_time = driver_data["wait_time"]
@classmethod
def tearDownClass(cls):
cls.driver.quit()
super().tearDownClass()
def test_flatpage(self):
self.driver.get(self.base_url + "/")
self.driver.find_element(
By.CSS_SELECTOR,
"a[href='/pages/terms/']"
).click()
h3 = WebDriverWait(self.driver, self.wait_time).until(
EC.presence_of_element_located((By.CSS_SELECTOR, 'h3'))
)
self.assertEqual(
h3.text,
'Your Account and Documents on the Website'
)
self.driver.find_element(
By.CSS_SELECTOR,
"a[href='/pages/privacy/']"
).click()
h3 = WebDriverWait(self.driver, self.wait_time).until(
EC.presence_of_element_located(
(By.CSS_SELECTOR, 'h3:nth-child(4)')
)
)
self.assertEqual(
h3.text,
'B. Collecting personal information'
)
def test_language_switch(self):
driver = self.driver
driver.get(self.base_url + "/")
self.driver.find_element(
By.ID,
"lang-selection"
).click()
self.driver.find_element(
By.CSS_SELECTOR,
"#lang-selection option[value=es]"
).click()
self.assertEqual(
self.driver.find_element(
By.CSS_SELECTOR,
"html[lang=es] h1.fw-login-title"
).text,
'INICIAR SESIÓN'
)
self.assertEqual(
self.driver.find_element(
By.ID,
"lang-selection"
).get_attribute('value'),
'es'
)
self.driver.find_element(
By.ID,
"lang-selection"
).click()
self.driver.find_element(
By.CSS_SELECTOR,
"#lang-selection option[value=en]"
).click()
self.assertEqual(
self.driver.find_element(
By.CSS_SELECTOR,
"html[lang=en] h1.fw-login-title"
).text,
'LOG IN'
)
| agpl-3.0 | -6,557,665,526,337,172,000 | 29.361702 | 67 | 0.539944 | false | 3.97493 | true | false | false |
TobleMiner/fahrschulcard | solver.py | 1 | 2470 | import sqlite3
from question import Question, Answer
conn = sqlite3.connect('license.db')
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS "answers" (\
`id` INTEGER PRIMARY KEY AUTOINCREMENT,\
`answer` TEXT,\
`question` INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS "questions" (\
`id` INTEGER PRIMARY KEY AUTOINCREMENT,\
`question` TEXT,\
`media` TEXT)')
conn.commit()
def find_answers(question):
if(question.media):
t = (question.question, question.media)
c.execute('SELECT answers.answer FROM questions, answers WHERE \
questions.question = ? AND questions.id = answers.question AND \
questions.media = ? ORDER BY answers.id ASC', t)
else:
t = (question.question, )
c.execute('SELECT answers.answer FROM questions, answers WHERE \
questions.question = ? AND questions.id = answers.question \
ORDER BY answers.id ASC', t)
answers = []
row = c.fetchone()
aid = 1
while(row):
if(question.type == Question.Type.multiple_choice):
for answer in question.answers:
if(answer.answer == row[0]):
answers.append(answer)
elif(question.type == Question.Type.text):
answer = Answer(aid)
answer.answer = row[0]
answers.append(answer)
aid += 1
row = c.fetchone()
return answers
def add_question(question):
if(question.media):
t = (question.question, question.media)
c.execute('SELECT * FROM questions WHERE question = ? AND media = ?', t)
else:
t = (question.question,)
c.execute('SELECT * FROM questions WHERE question = ?', t)
if(not c.fetchone()):
t = (question.question, question.media)
c.execute('INSERT INTO questions (question, media) VALUES (?, ?)', t)
        conn.commit()
def add_answer(question, answer):
if(question.media):
t = (question.question, question.media)
c.execute('SELECT id FROM questions WHERE question = ? AND media = ?', t)
else:
t = (question.question,)
c.execute('SELECT id FROM questions WHERE question = ?', t)
qid = c.fetchone()[0]
t = (answer.answer, qid)
c.execute('SELECT * FROM answers WHERE answer = ? AND question = ?', t)
if(not c.fetchone()):
t = (answer.answer, qid)
c.execute('INSERT INTO answers (answer, question) VALUES (?, ?)', t)
        conn.commit()
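# Usage sketch (hedged; Question's constructor lives in the sibling 'question'
# module, so the exact call below is assumed):
#     q = Question(...)                # hypothetical construction
#     q.question, q.media = u"Wann dürfen Sie überholen?", None
#     q.type = Question.Type.text
#     print([a.answer for a in find_answers(q)])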
| mit | 8,390,763,817,388,021,000 | 34.285714 | 81 | 0.604453 | false | 3.841369 | false | false | false |
r-martin-/Code_College | PythonProgramming/poker_hands_v2.py | 1 | 1983 | """Count poker hands
Sample program to count poker hands and thus estimate the probability of a given hand occurring .
The file contains 1 million records randomly distributed and is, therefore, statistically valid.
The data looks like this:
1,1,1,13,2,4,2,3,1,12,0
3,12,3,2,3,11,4,5,2,5,1
1,9,4,6,1,4,3,2,3,9,1
1,4,3,13,2,13,2,1,3,6,1
A hand in poker consists of five cards. Each pair of numbers represents a card giving its suit and value.
Suits are 1-spades, 2-hearts, 3-diamonds, 4-clubs
Values go from Ace (13) highest to 2 (shown as 1) lowest.
Ranks are 0-nothing, 1-pair, 2-two pair, 3-three of a kind, 4-straight, 5-flush, 6-full house, 7-four of a kind,
8-straight flush, 9-royal flush
In the example above the first line represents the hand: 2 of spades, ace of spades, 5 of hearts, 4 of hearts,
king of spades. The last column is the rank.
"""
# 1. Open file for reading
try:
poker_file = open("poker-hand-testing.data", 'r')
except IOError as e:
print(e)
quit()
# 2. Create and initialize variables to hold the counts
total_count = 0
rank_counts = {}
rank_list = ['nothing', 'pair', 'two pair', 'three of a kind', 'straight', 'flush', 'full house', 'four of a kind',
'straight flush', 'royal flush']
# 3. Loop through each line of the file
for line in poker_file:
# At each line increment the counter
total_count += 1
# Get hand rank: split on comma, get last item as int
try:
handRank = int(line.split(',')[-1])
except ValueError as e:
print(e)
continue
# If rank already in dictionary, increment it otherwise add it and set to 1
if handRank in rank_counts:
rank_counts[handRank] += 1
else:
rank_counts[handRank] = 1
# 4. Print the results
print("Total hands in file: {}".format(total_count))
print("Count and probability of hands:")
for i in range(10):
print(" {:18s}:{:10,d}{:10.4%}".format(rank_list[i], rank_counts[i], rank_counts[i] / total_count))
| mit | -5,603,430,153,374,674,000 | 30.47619 | 115 | 0.672718 | false | 3.004545 | false | false | false |
tvuillemin/nicetypes | setup.py | 1 | 2325 | import codecs
import os
import re
from setuptools import setup, find_packages
###################################################################
NAME = "nicetypes"
PACKAGES = find_packages(where="src")
META_PATH = os.path.join("src", "nicetypes", "__init__.py")
KEYWORDS = ["class", "attribute", "boilerplate"]
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = []
###################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
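# Usage sketch (hedged): if src/nicetypes/__init__.py contains a line such as
#     __version__ = "17.1.0"
# then find_meta("version") returns "17.1.0".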
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=find_meta("uri"),
version=find_meta("version"),
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=read("README.md"),
packages=PACKAGES,
package_dir={"": "src"},
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
| mit | -7,027,364,177,655,694,000 | 28.43038 | 77 | 0.569892 | false | 3.927365 | false | false | false |
martynovp/edx-platform | lms/djangoapps/instructor_task/tasks_helper.py | 1 | 62345 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
from collections import OrderedDict
from datetime import datetime
from django.conf import settings
from eventtracking import tracker
from itertools import chain
from time import time
import unicodecsv
import logging
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import transaction, reset_queries
from django.db.models import Q
import dogstats_wrapper as dog_stats_api
from pytz import UTC
from StringIO import StringIO
from edxmako.shortcuts import render_to_string
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from shoppingcart.models import (
PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from track.views import task_track
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xmodule.modulestore.django import modulestore
from xmodule.split_test_module import get_split_user_partitions
from django.utils.translation import ugettext as _
from certificates.models import (
CertificateWhitelist,
certificate_info_for_user,
CertificateStatuses
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import enrolled_students_features, list_may_enroll
from instructor_analytics.csvs import format_dictlist
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from verify_student.models import SoftwareSecurePhotoVerification
from util.query import use_read_replica_if_available
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']
# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'
# The setting name used for events when "settings" (account settings, preferences, profile information) change.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
class BaseInstructorTask(Task):
"""
Base task class for use with InstructorTask models.
    Permits updating information about the task in the corresponding InstructorTask for monitoring purposes.
Assumes that the entry_id of the InstructorTask model is the first argument to the task.
The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
updates the entry on success and failure of the task it wraps. It is setting the entry's value
for task_state based on what Celery would set it to once the task returns to Celery:
FAILURE if an exception is encountered, and SUCCESS if it returns normally.
Other arguments are pass-throughs to perform_module_state_update, and documented there.
"""
abstract = True
def on_success(self, task_progress, task_id, args, kwargs):
"""
Update InstructorTask object corresponding to this task with info about success.
Updates task_output and task_state. But it shouldn't actually do anything
if the task is only creating subtasks to actually do the work.
Assumes `task_progress` is a dict containing the task's result, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
"""
TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
# We should be able to find the InstructorTask object to update
# based on the task_id here, without having to dig into the
# original args to the task. On the other hand, the entry_id
# is the first value passed to all such args, so we'll use that.
# And we assume that it exists, else we would already have had a failure.
entry_id = args[0]
entry = InstructorTask.objects.get(pk=entry_id)
# Check to see if any subtasks had been defined as part of this task.
# If not, then we know that we're done. (If so, let the subtasks
# handle updating task_state themselves.)
if len(entry.subtasks) == 0:
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = SUCCESS
entry.save_now()
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
Update InstructorTask object corresponding to this task with info about failure.
Fetches and updates exception and traceback information on failure.
If an exception is raised internal to the task, it is caught by celery and provided here.
The information is recorded in the InstructorTask object as a JSON-serialized dict
stored in the task_output column. It contains the following keys:
'exception': type of exception object
'message': error message from exception object
'traceback': traceback information (truncated if necessary)
Note that there is no way to record progress made within the task (e.g. attempted,
succeeded, etc.) when such failures occur.
"""
TASK_LOG.debug(u'Task %s: failure returned', task_id)
entry_id = args[0]
try:
entry = InstructorTask.objects.get(pk=entry_id)
except InstructorTask.DoesNotExist:
# if the InstructorTask object does not exist, then there's no point
# trying to update it.
TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
else:
TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
entry.task_state = FAILURE
entry.save_now()
class UpdateProblemModuleStateError(Exception):
"""
Error signaling a fatal condition while updating problem modules.
Used when the current module cannot be processed and no more
modules should be attempted.
"""
pass
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
class TaskProgress(object):
"""
Encapsulates the current task's progress by keeping track of
'attempted', 'succeeded', 'skipped', 'failed', 'total',
'action_name', and 'duration_ms' values.
"""
def __init__(self, action_name, total, start_time):
self.action_name = action_name
self.total = total
self.start_time = start_time
self.attempted = 0
self.succeeded = 0
self.skipped = 0
self.failed = 0
def update_task_state(self, extra_meta=None):
"""
Update the current celery task's state to the progress state
specified by the current object. Returns the progress
dictionary for use by `run_main_task` and
`BaseInstructorTask.on_success`.
Arguments:
extra_meta (dict): Extra metadata to pass to `update_state`
Returns:
dict: The current task's progress dict
"""
progress_dict = {
'action_name': self.action_name,
'attempted': self.attempted,
'succeeded': self.succeeded,
'skipped': self.skipped,
'failed': self.failed,
'total': self.total,
'duration_ms': int((time() - self.start_time) * 1000),
}
if extra_meta is not None:
progress_dict.update(extra_meta)
_get_current_task().update_state(state=PROGRESS, meta=progress_dict)
return progress_dict
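# Usage sketch (hedged; mirrors how perform_module_state_update below drives it):
#     progress = TaskProgress('rescored', total=modules.count(), start_time=time())
#     progress.attempted += 1
#     progress.succeeded += 1
#     progress.update_task_state({'step': 'rescoring'})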
def run_main_task(entry_id, task_fcn, action_name):
"""
Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.
Arguments passed to `task_fcn` are:
`entry_id` : the primary key for the InstructorTask entry representing the task.
`course_id` : the id for the course.
`task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
`action_name` : past-tense verb to use for constructing status messages.
If no exceptions are raised, the `task_fcn` should return a dict containing
the task's result with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages.
Should be past-tense. Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
"""
# Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
# There's no point in catching it here.
entry = InstructorTask.objects.get(pk=entry_id)
entry.task_state = PROGRESS
entry.save_now()
# Get inputs to use in this task from the entry
task_id = entry.task_id
course_id = entry.course_id
task_input = json.loads(entry.task_input)
# Construct log message
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)
# Check that the task_id submitted in the InstructorTask matches the current task
# that is running.
request_task_id = _get_current_task().request.id
if task_id != request_task_id:
fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
TASK_LOG.error(message)
raise ValueError(message)
# Now do the work
with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
task_progress = task_fcn(entry_id, course_id, task_input, action_name)
# Release any queries that the connection has been hanging onto
reset_queries()
# Log and exit, returning task_progress info as task result
TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
return task_progress
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
It is passed three arguments: the module_descriptor for the module pointed to by the
module_state_key, the particular StudentModule to update, and the xmodule_instance_args being
passed through. If the value returned by the update function evaluates to a boolean True,
the update is successful; False indicates the update on the particular student module failed.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
The return value is a dict containing the task's results, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that succeeded
'skipped': number of attempts that were skipped
'failed': number of attempts that failed
'total': number of possible updates to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Because this runs inside a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
result object.
"""
start_time = time()
usage_keys = []
problem_url = task_input.get('problem_url')
entrance_exam_url = task_input.get('entrance_exam_url')
student_identifier = task_input.get('student')
problems = {}
# if problem_url is present make a usage key from it
if problem_url:
usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
usage_keys.append(usage_key)
# find the problem descriptor:
problem_descriptor = modulestore().get_item(usage_key)
problems[unicode(usage_key)] = problem_descriptor
# if entrance_exam_url is present, grab all problems in that section
if entrance_exam_url:
problems = get_problems_in_section(entrance_exam_url)
usage_keys = [UsageKey.from_string(location) for location in problems.keys()]
# find the modules in question
modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)
# give the option of updating an individual student. If not specified,
# then updates all students who have responded to a problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in student_identifier:
student = User.objects.get(email=student_identifier)
elif student_identifier is not None:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
if filter_fcn is not None:
modules_to_update = filter_fcn(modules_to_update)
task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
task_progress.update_task_state()
for module_to_update in modules_to_update:
task_progress.attempted += 1
module_descriptor = problems[unicode(module_to_update.module_state_key)]
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
update_status = update_fcn(module_descriptor, module_to_update)
if update_status == UPDATE_STATUS_SUCCEEDED:
# UPDATE_STATUS_SUCCEEDED means the update performed some kind of work.
# Logging of failures is left to the update_fcn itself.
task_progress.succeeded += 1
elif update_status == UPDATE_STATUS_FAILED:
task_progress.failed += 1
elif update_status == UPDATE_STATUS_SKIPPED:
task_progress.skipped += 1
else:
raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))
return task_progress.update_task_state()
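# Illustrative sketch (added for exposition): a concrete update function is
# typically bound with functools.partial before being handed to
# perform_module_state_update, so that the resulting callable takes exactly
# (module_descriptor, student_module), matching the call site above.
#
#   from functools import partial
#   update_fcn = partial(rescore_problem_module_state, xmodule_instance_args)
#   perform_module_state_update(update_fcn, None, entry_id, course_id,
#                               task_input, action_name)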
def _get_task_id_from_xmodule_args(xmodule_instance_args):
"""Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID
def _get_xqueue_callback_url_prefix(xmodule_instance_args):
"""Gets prefix to use when constructing xqueue_callback_url."""
return xmodule_instance_args.get('xqueue_callback_url_prefix', '') if xmodule_instance_args is not None else ''
def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
"""
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
"""
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {'student': student.username, 'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args)}
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page=source_page)
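# Illustrative usage (added for exposition); `student` is any User object,
# and the event name/payload mirror reset_attempts_module_state below:
#
#   track_fn = _get_track_function_for_task(student, xmodule_instance_args)
#   track_fn('problem_reset_attempts', {'old_attempts': 3, 'new_attempts': 0})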
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
grade_bucket_type=None, course=None):
"""
Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.
`xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
the need for a Request object when instantiating an xmodule instance.
"""
# reconstitute the problem's corresponding XModule:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
def make_track_function():
'''
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
'''
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')
xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
if xmodule_instance_args is not None else ''
return get_module_for_descriptor_internal(
user=student,
descriptor=module_descriptor,
field_data_cache=field_data_cache,
course_id=course_id,
track_function=make_track_function(),
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
grade_bucket_type=grade_bucket_type,
# This module isn't being used for front-end rendering
request_token=None,
# pass in a loaded course for override enabling
course=course
)
@transaction.autocommit
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs rescoring on the student's problem submission.
Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
or if the module doesn't support rescoring.
Returns True if problem was successfully rescored for the given student, and False
if problem encountered some kind of error in rescoring.
'''
# unpack the StudentModule:
course_id = student_module.course_id
student = student_module.student
usage_key = student_module.module_state_key
with modulestore().bulk_operations(course_id):
course = get_course_by_id(course_id)
# TODO: Here is a call site where we could pass in a loaded course. I
# think we certainly need it since grading is happening here, and field
# overrides would be important in handling that correctly
instance = _get_module_instance_for_task(
course_id,
student,
module_descriptor,
xmodule_instance_args,
grade_bucket_type='rescore',
course=course
)
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = "No module {loc} for student {student}--access denied?".format(
loc=usage_key,
student=student
)
TASK_LOG.debug(msg)
raise UpdateProblemModuleStateError(msg)
if not hasattr(instance, 'rescore_problem'):
# This should also not happen, since it should be already checked in the caller,
# but check here to be sure.
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
result = instance.rescore_problem()
instance.save()
if 'success' not in result:
# don't consider these fatal, but false means that the individual call didn't complete:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: unexpected response %(msg)s",
dict(
msg=result,
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
elif result['success'] not in ['correct', 'incorrect']:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
else:
TASK_LOG.debug(
u"successfully processed rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Resets problem attempts to zero for specified `student_module`.
Returns a status of UPDATE_STATUS_SUCCEEDED if a problem has non-zero attempts
that are being reset, and UPDATE_STATUS_SKIPPED otherwise.
"""
update_status = UPDATE_STATUS_SKIPPED
problem_state = json.loads(student_module.state) if student_module.state else {}
if 'attempts' in problem_state:
old_number_of_attempts = problem_state["attempts"]
if old_number_of_attempts > 0:
problem_state["attempts"] = 0
# convert back to json and save
student_module.state = json.dumps(problem_state)
student_module.save()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
track_function('problem_reset_attempts', event_info)
update_status = UPDATE_STATUS_SUCCEEDED
return update_status
@transaction.autocommit
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Delete the StudentModule entry.
Always returns UPDATE_STATUS_SUCCEEDED, indicating success, if it doesn't raise an exception due to database error.
"""
student_module.delete()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
track_function('problem_delete_state', {})
return UPDATE_STATUS_SUCCEEDED
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
"""
Upload data as a CSV using ReportStore.
Arguments:
rows: CSV data in the following format (first column may be a
header):
[
[row1_colum1, row1_colum2, ...],
...
]
csv_name: Name of the resulting CSV
course_id: ID of the course
"""
report_store = ReportStore.from_config(config_name)
report_store.store_rows(
course_id,
u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
course_prefix=course_filename_prefix_generator(course_id),
csv_name=csv_name,
timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
),
rows
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, })
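# Illustrative usage (added for exposition): `rows` is a list of lists whose
# first row may be a header. The values below are invented.
#
#   example_rows = [
#       ['id', 'username', 'grade'],
#       [42, 'alice', 0.93],
#   ]
#   upload_csv_to_report_store(example_rows, 'example_report', course_id,
#                              datetime.now(UTC))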
def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
"""
Upload an executive summary HTML file using ReportStore.
Arguments:
data_dict: dict containing the executive report data.
report_name: Name of the resulting HTML file.
course_id: ID of the course
"""
report_store = ReportStore.from_config(config_name)
# Use the data dict and html template to generate the output buffer
output_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))
report_store.store(
course_id,
u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
course_prefix=course_filename_prefix_generator(course_id),
report_name=report_name,
timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
),
output_buffer,
config={
'content_type': 'text/html',
'content_encoding': None,
}
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=too-many-statements
"""
For a given `course_id`, generate a grades CSV file for all students that
are enrolled, and store using a `ReportStore`. Once created, the files can
be accessed by instantiating another `ReportStore` (via
`ReportStore.from_config()`) and calling `link_for()` on it. Writes are
buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
that are visible in ReportStore will be complete ones.
As we start to add more CSV downloads, it will probably be worthwhile to
make a more general CSVDoc class instead of building out the rows like we
do here.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
course = get_course_by_id(course_id)
course_is_cohorted = is_course_cohorted(course.id)
cohorts_header = ['Cohort Name'] if course_is_cohorted else []
experiment_partitions = get_split_user_partitions(course.user_partitions)
group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]
certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
# Loop over all our students and build our CSV lists in memory
header = None
rows = []
err_rows = [["id", "username", "error_msg"]]
current_step = {'step': 'Calculating Grades'}
total_enrolled_students = enrolled_students.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
task_info_string,
action_name,
current_step,
total_enrolled_students
)
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Now add a log entry after each student is graded to get a sense
# of the task's progress
student_counter += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
if gradeset:
# We were able to successfully grade this student for this course.
task_progress.succeeded += 1
if not header:
header = [section['label'] for section in gradeset[u'section_breakdown']]
rows.append(
["id", "email", "username", "grade"] + header + cohorts_header +
group_configs_header + ['Enrollment Track', 'Verification Status'] + certificate_info_header
)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
cohorts_group_name = []
if course_is_cohorted:
group = get_cohort(student, course_id, assign=False)
cohorts_group_name.append(group.name if group else '')
group_configs_group_names = []
for partition in experiment_partitions:
group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
group_configs_group_names.append(group.name if group else '')
enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
student,
course_id,
enrollment_mode
)
certificate_info = certificate_info_for_user(
student,
course_id,
gradeset['grade'],
student.id in whitelisted_user_ids
)
# Not everybody has the same gradable items. If the item is not
# found in the user's gradeset, just assume it's a 0. The aggregated
# grades for their sections and overall course will be calculated
# without regard for the item they didn't have access to, so it's
# possible for a student to have a 0.0 show up in their row but
# still have 100% for the course.
row_percents = [percents.get(label, 0.0) for label in header]
rows.append(
[student.id, student.email, student.username, gradeset['percent']] +
row_percents + cohorts_group_name + group_configs_group_names +
[enrollment_mode] + [verification_status] + certificate_info
)
else:
# An empty gradeset means we failed to grade a student.
task_progress.failed += 1
err_rows.append([student.id, student.username, err_msg])
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)
# If there are any error rows (don't count the header), write them out as well
if len(err_rows) > 1:
upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def _order_problems(blocks):
"""
Sort the problems by the assignment type and assignment that it belongs to.
Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).
Returns:
an OrderedDict that maps a problem id to its headers in the final report.
"""
problems = OrderedDict()
assignments = dict()
grading_type = settings.GRADING_TYPE
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == grading_type:
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the block with type == grading_type
while blocks[current]['block_type'] != grading_type:
current = blocks[current]['parent']
current_format = blocks[current]['format']
assignments[current_format][current].append(block)
# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]
return problems
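# Illustrative sketch (added for exposition): for a course with one graded
# "Homework" assignment containing a single problem, the returned mapping
# would look roughly like this (block id and display names are invented):
#
#   OrderedDict([
#       ('problem-block-id',
#        [u'Homework 1: Week 1 - Problem 1 (Earned)',
#         u'Homework 1: Week 1 - Problem 1 (Possible)']),
#   ])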
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
Generate a CSV containing all students' problem grades within a given
`course_id`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
# This struct maps the django User field names (as keys) to the display names
# of the corresponding static items in the header row (as values). An
# OrderedDict keeps the keys and display names in step.
header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
try:
course_structure = CourseStructure.objects.get(course_id=course_id)
blocks = course_structure.ordered_blocks
problems = _order_problems(blocks)
except CourseStructure.DoesNotExist:
return task_progress.update_task_state(
extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
)
# Just generate the static fields for now.
rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
error_rows = [list(header_row.values()) + ['error_msg']]
current_step = {'step': 'Calculating Grades'}
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
student_fields = [getattr(student, field_name) for field_name in header_row]
task_progress.attempted += 1
if 'percent' not in gradeset or 'raw_scores' not in gradeset:
# There was an error grading this student.
# Generally there will be a non-empty err_msg, but that is not always the case.
if not err_msg:
err_msg = u"Unknown error"
error_rows.append(student_fields + [err_msg])
task_progress.failed += 1
continue
final_grade = gradeset['percent']
# Only consider graded problems
problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
earned_possible_values = list()
for problem_id in problems:
try:
problem_score = problem_scores[problem_id]
earned_possible_values.append([problem_score.earned, problem_score.possible])
except KeyError:
# The student has not been graded on this problem. For example,
# iterate_grades_for skips problems that students have never
# seen in order to speed up report generation. It could also be
# the case that the student does not have access to it (e.g. A/B
# test or cohorted courseware).
earned_possible_values.append(['N/A', 'N/A'])
rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))
task_progress.succeeded += 1
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload if any students have been successfully graded
if len(rows) > 1:
upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
# If there are any error rows, write them out as well
if len(error_rows) > 1:
upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing profile
information for all students that are enrolled, and store using a
`ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
current_step = {'step': 'Calculating Profile Info'}
task_progress.update_task_state(extra_meta=current_step)
# compute the student features table and format it
query_features = task_input.get('features')
student_data = enrolled_students_features(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'student_profile_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing profile
information for all students that are enrolled, and store using a
`ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
task_progress = TaskProgress(action_name, students_in_course.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
# Loop over all our students and build our CSV lists in memory
rows = []
header = None
current_step = {'step': 'Gathering Profile Information'}
enrollment_report_provider = PaidCourseEnrollmentReportProvider()
total_students = students_in_course.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
task_info_string,
action_name,
current_step,
total_students
)
for student in students_in_course:
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Add a log entry at regular intervals to show that the task is still making progress
student_counter += 1
if student_counter % 100 == 0:
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
user_data = enrollment_report_provider.get_user_profile(student.id)
course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
payment_data = enrollment_report_provider.get_payment_info(student, course_id)
# display name map for the column headers
enrollment_report_headers = {
'User ID': _('User ID'),
'Username': _('Username'),
'Full Name': _('Full Name'),
'First Name': _('First Name'),
'Last Name': _('Last Name'),
'Company Name': _('Company Name'),
'Title': _('Title'),
'Language': _('Language'),
'Year of Birth': _('Year of Birth'),
'Gender': _('Gender'),
'Level of Education': _('Level of Education'),
'Mailing Address': _('Mailing Address'),
'Goals': _('Goals'),
'City': _('City'),
'Country': _('Country'),
'Enrollment Date': _('Enrollment Date'),
'Currently Enrolled': _('Currently Enrolled'),
'Enrollment Source': _('Enrollment Source'),
'Enrollment Role': _('Enrollment Role'),
'List Price': _('List Price'),
'Payment Amount': _('Payment Amount'),
'Coupon Codes Used': _('Coupon Codes Used'),
'Registration Code Used': _('Registration Code Used'),
'Payment Status': _('Payment Status'),
'Transaction Reference Number': _('Transaction Reference Number')
}
if not header:
header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
display_headers = []
for header_element in header:
# translate header into a localizable display string
display_headers.append(enrollment_report_headers.get(header_element, header_element))
rows.append(display_headers)
rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
task_progress.succeeded += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
information about students who may enroll but have not done so
yet, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating info about students who may enroll'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
query_features = task_input.get('features')
student_data = list_may_enroll(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def get_executive_report(course_id):
"""
Returns dict containing information about the course executive summary.
"""
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total
all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
gross_pending_revenue = all_invoices_total - float(paid_invoices_total)
gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)
refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
course_id, status='refunded'
)
refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
course_id, status='refunded'
)
total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats
self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
course_id,
status='refunded'
)
bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds
top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)
bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)
unused_registration_codes = 0
for registration_code in bulk_purchased_codes:
if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
unused_registration_codes += 1
self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()
total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats
self_purchases_percentage = 0.0
bulk_purchases_percentage = 0.0
invoice_purchases_percentage = 0.0
avg_price_paid = 0.0
if total_seats != 0:
self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
avg_price_paid = gross_revenue / total_seats
course = get_course_by_id(course_id, depth=0)
currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
return {
'display_name': course.display_name,
'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
'total_seats': total_seats,
'currency': currency,
'gross_revenue': float(gross_revenue),
'gross_paid_revenue': float(gross_paid_revenue),
'gross_pending_revenue': gross_pending_revenue,
'total_seats_refunded': total_seats_refunded,
'total_amount_refunded': float(total_amount_refunded),
'average_paid_price': float(avg_price_paid),
'discount_codes_data': top_discounted_codes,
'total_seats_using_discount_codes': total_coupon_codes_purchases,
'total_self_purchase_seats': self_purchased_seat_count,
'total_bulk_purchase_seats': bulk_purchased_seat_count,
'total_invoiced_seats': total_invoiced_seats,
'unused_bulk_purchase_code_count': unused_registration_codes,
'self_purchases_percentage': self_purchases_percentage,
'bulk_purchases_percentage': bulk_purchases_percentage,
'invoice_purchases_percentage': invoice_purchases_percentage,
}
def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=too-many-statements
"""
For a given `course_id`, generate an HTML report that provides a
snapshot of how the course is doing.
"""
start_time = time()
report_generation_date = datetime.now(UTC)
status_interval = 100
enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
true_enrollment_count = 0
for user in enrolled_users:
if not user.is_staff and not CourseAccessRole.objects.filter(
user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
).exists():
true_enrollment_count += 1
task_progress = TaskProgress(action_name, true_enrollment_count, start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
current_step = {'step': 'Gathering executive summary report information'}
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating executive summary report',
task_info_string,
action_name,
current_step
)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# get the course executive summary report information.
data_dict = get_executive_report(course_id)
data_dict.update(
{
'total_enrollments': true_enrollment_count,
'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
}
)
# By this point, we've got the data that we need to generate html report.
current_step = {'step': 'Uploading executive summary report HTML file'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
task_progress.succeeded += 1
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def generate_students_certificates(
_xmodule_instance_args, _entry_id, course_id, task_input, action_name): # pylint: disable=unused-argument
"""
For a given `course_id`, generate certificates for all students
that are enrolled.
"""
start_time = time()
enrolled_students = use_read_replica_if_available(CourseEnrollment.objects.users_enrolled_in(course_id))
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
current_step = {'step': 'Calculating students already have certificates'}
task_progress.update_task_state(extra_meta=current_step)
students_require_certs = students_require_certificate(course_id, enrolled_students)
task_progress.skipped = task_progress.total - len(students_require_certs)
current_step = {'step': 'Generating Certificates'}
task_progress.update_task_state(extra_meta=current_step)
course = modulestore().get_course(course_id, depth=0)
# Generate certificate for each student
for student in students_require_certs:
task_progress.attempted += 1
status = generate_user_certificates(
student,
course_id,
course=course
)
if status in [CertificateStatuses.generating, CertificateStatuses.downloadable]:
task_progress.succeeded += 1
else:
task_progress.failed += 1
return task_progress.update_task_state(extra_meta=current_step)
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
Within a given course, cohort students in bulk, then upload the results
using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
# Iterate through rows to get total assignments for task progress
with DefaultStorage().open(task_input['file_name']) as f:
total_assignments = 0
for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
total_assignments += 1
task_progress = TaskProgress(action_name, total_assignments, start_time)
current_step = {'step': 'Cohorting Students'}
task_progress.update_task_state(extra_meta=current_step)
# cohorts_status is a mapping from cohort_name to metadata about
# that cohort. The metadata will include information about users
# successfully added to the cohort, users not found, and a cached
# reference to the corresponding cohort object to prevent
# redundant cohort queries.
cohorts_status = {}
with DefaultStorage().open(task_input['file_name']) as f:
for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
# Try to use the 'email' field to identify the user. If it's not present, use 'username'.
username_or_email = row.get('email') or row.get('username')
cohort_name = row.get('cohort') or ''
task_progress.attempted += 1
if not cohorts_status.get(cohort_name):
cohorts_status[cohort_name] = {
'Cohort Name': cohort_name,
'Students Added': 0,
'Students Not Found': set()
}
try:
cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
course_id=course_id,
group_type=CourseUserGroup.COHORT,
name=cohort_name
)
cohorts_status[cohort_name]["Exists"] = True
except CourseUserGroup.DoesNotExist:
cohorts_status[cohort_name]["Exists"] = False
if not cohorts_status[cohort_name]['Exists']:
task_progress.failed += 1
continue
try:
with transaction.commit_on_success():
add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
cohorts_status[cohort_name]['Students Added'] += 1
task_progress.succeeded += 1
except User.DoesNotExist:
cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
task_progress.failed += 1
except ValueError:
# Raised when the user is already in the given cohort
task_progress.skipped += 1
task_progress.update_task_state(extra_meta=current_step)
current_step['step'] = 'Uploading CSV'
task_progress.update_task_state(extra_meta=current_step)
# Filter the output of `add_users_to_cohorts` in order to upload the result.
output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
output_rows = [
[
','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
else status_dict[column_name]
for column_name in output_header
]
for _cohort_name, status_dict in cohorts_status.iteritems()
]
output_rows.insert(0, output_header)
upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def students_require_certificate(course_id, enrolled_students):
""" Returns list of students where certificates needs to be generated.
Removing those students who have their certificate already generated
from total enrolled students for given course.
:param course_id:
:param enrolled_students:
"""
# compute those students where certificates already generated
students_already_have_certs = use_read_replica_if_available(User.objects.filter(
~Q(generatedcertificate__status=CertificateStatuses.unavailable),
generatedcertificate__course_id=course_id))
return list(set(enrolled_students) - set(students_already_have_certs))
# === aubzen/sheltermanager :: src/waitinglist.py ===
#!/usr/bin/python
import additional
import al
import animal
import audit
import configuration
import db
import diary
import log
import media
import utils
from i18n import _, after, now, python2display, subtract_years, add_days, date_diff
def get_waitinglist_query():
"""
Returns the SELECT and JOIN commands necessary for selecting
waiting list rows with resolved lookups.
"""
return "SELECT * FROM v_animalwaitinglist a"
def get_waitinglist_by_id(dbo, wid):
"""
Returns a single waitinglist record for the ID given
"""
l = dbo.locale
sql = get_waitinglist_query() + " WHERE a.ID = %d" % int(wid)
rows = db.query(dbo, sql)
if len(rows) == 0: return None
r = rows[0]
ranks = get_waitinglist_ranks(dbo)
if ranks.has_key(r["WLID"]):
r["RANK"] = ranks[r["WLID"]]
else:
r["RANK"] = ""
r["TIMEONLIST"] = date_diff(l, r["DATEPUTONLIST"], now(dbo.timezone))
return r
def get_person_name(dbo, wid):
"""
Returns the contact name for the waitinglist with id
"""
return db.query_string(dbo, "SELECT o.OwnerName FROM animalwaitinglist a INNER JOIN owner o ON a.OwnerID = o.ID WHERE a.ID = %d" % int(wid))
def get_waitinglist_ranks(dbo):
"""
Returns a dictionary of waiting list IDs with their current ranks.
"""
byspecies = configuration.waiting_list_rank_by_species(dbo)
if not byspecies:
rows = db.query(dbo, "SELECT a.ID, a.SpeciesID FROM animalwaitinglist a " \
"INNER JOIN owner o ON a.OwnerID = o.ID " \
"WHERE a.DateRemovedFromList Is Null " \
"ORDER BY a.Urgency, a.DatePutOnList")
else:
rows = db.query(dbo, "SELECT a.ID, a.SpeciesID FROM animalwaitinglist a " \
"INNER JOIN owner o ON a.OwnerID = o.ID " \
"WHERE a.DateRemovedFromList Is Null " \
"ORDER BY a.SpeciesID, a.Urgency, a.DatePutOnList")
ranks = {}
lastspecies = 0
rank = 1
for r in rows:
if byspecies:
if not lastspecies == r["SPECIESID"]:
lastspecies = r["SPECIESID"]
rank = 1
ranks[r["ID"]] = rank
rank += 1
return ranks
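# Illustrative sketch (added for exposition): the returned mapping is
# {waiting_list_id: rank}. With rank-by-species enabled, numbering restarts
# at 1 for each species; the ids below are invented.
#
#   get_waitinglist_ranks(dbo)  # => {12: 1, 15: 2, 9: 1}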
def get_waitinglist(dbo, priorityfloor = 5, species = -1, addresscontains = "", includeremoved = 0, namecontains = "", descriptioncontains = ""):
"""
Retrieves the waiting list
priorityfloor: The lowest urgency to show (1 = urgent, 5 = lowest)
species: A species filter or -1 for all
addresscontains: A partial address
includeremoved: Whether or not to include removed entries
namecontains: A partial name
descriptioncontains: A partial description
"""
l = dbo.locale
ranks = get_waitinglist_ranks(dbo)
sql = get_waitinglist_query() + " WHERE a.Urgency <= " + str(priorityfloor)
if includeremoved == 0: sql += " AND a.DateRemovedFromList Is Null"
if species != -1: sql += " AND a.SpeciesID = " + str(species)
if addresscontains != "": sql += " AND UPPER(OwnerAddress) Like '%" + str(addresscontains).upper().replace("'", "`") + "%'"
if namecontains != "": sql += " AND UPPER(OwnerName) Like '%" + str(namecontains).upper().replace("'", "`") + "%'"
if descriptioncontains != "": sql += " AND UPPER(AnimalDescription) Like '%" + str(descriptioncontains).upper().replace("'", "`") + "%'"
sql += " ORDER BY a.Urgency, a.DatePutOnList"
rows = db.query(dbo, sql)
wlh = configuration.waiting_list_highlights(dbo).split(" ")
for r in rows:
r["HIGHLIGHT"] = ""
for hi in wlh:
if hi != "":
if hi.find("|") == -1:
wid = hi
h = "1"
else:
wid, h = hi.split("|")
if wid == str(r["WLID"]).strip():
r["HIGHLIGHT"] = h
break
if ranks.has_key(r["WLID"]):
r["RANK"] = ranks[r["WLID"]]
else:
r["RANK"] = ""
r["TIMEONLIST"] = date_diff(l, r["DATEPUTONLIST"], now(dbo.timezone))
return rows
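# Illustrative usage (added for exposition): current entries of species 1
# with urgency High or better whose owner name contains "smith".
#
#   get_waitinglist(dbo, priorityfloor=2, species=1, namecontains="smith")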
def get_waitinglist_find_simple(dbo, query = "", limit = 0, onlyindexed = False):
"""
Returns rows for simple waiting list searches.
query: The search criteria
"""
# If no query has been given, do a current waitinglist search
if query == "":
return get_waitinglist(dbo)
ors = []
add = lambda f: "LOWER(%s) LIKE '%%%s%%'" % (f, query.lower())
if utils.is_numeric(query):
ors.append("a.ID = " + str(utils.cint(query)))
ors.append(add("a.OwnerName"))
ors.append(add("a.AnimalDescription"))
if not onlyindexed:
ors.append(add("a.ReasonForWantingToPart"))
ors.append(add("a.ReasonForRemoval"))
sql = get_waitinglist_query() + " WHERE " + " OR ".join(ors)
if limit > 0: sql += " LIMIT " + str(limit)
return db.query(dbo, sql)
def get_satellite_counts(dbo, wlid):
"""
Returns a resultset containing the number of each type of satellite
record that a waitinglist entry has.
"""
sql = "SELECT a.ID, " \
"(SELECT COUNT(*) FROM media me WHERE me.LinkID = a.ID AND me.LinkTypeID = %d) AS media, " \
"(SELECT COUNT(*) FROM diary di WHERE di.LinkID = a.ID AND di.LinkType = %d) AS diary, " \
"(SELECT COUNT(*) FROM log WHERE log.LinkID = a.ID AND log.LinkType = %d) AS logs " \
"FROM animalwaitinglist a WHERE a.ID = %d" \
% (media.WAITINGLIST, diary.WAITINGLIST, log.WAITINGLIST, int(wlid))
return db.query(dbo, sql)
def delete_waitinglist(dbo, username, wid):
"""
Deletes a waiting list record
"""
audit.delete(dbo, username, "animalwaitinglist", str(db.query(dbo, "SELECT * FROM animalwaitinglist WHERE ID=%d" % wid)))
db.execute(dbo, "DELETE FROM animalwaitinglist WHERE ID = %d" % wid)
def update_waitinglist_remove(dbo, username, wid):
"""
Marks a waiting list record as removed
"""
db.execute(dbo, "UPDATE animalwaitinglist SET DateRemovedFromList = %s WHERE ID = %d" % ( db.dd(now(dbo.timezone)), int(wid) ))
audit.edit(dbo, username, "animalwaitinglist", "%s: DateRemovedFromList ==> %s" % ( str(wid), python2display(dbo.locale, now(dbo.timezone))))
def update_waitinglist_highlight(dbo, wlid, himode):
"""
Toggles a waiting list ID record as highlighted.
wlid: The waiting list id to toggle
himode: a highlight value from 1 to 5 for a colour
"""
hl = list(configuration.waiting_list_highlights(dbo).split(" "))
wlid = str(wlid).strip()
# Create a new highlight list that doesn't have our id in it
nl = []
removed = False
for hi in hl:
if hi != "":
if hi.find("|") != -1:
wid, h = hi.split("|")
else:
wid = hi
h = "1"
if wlid == wid:
removed = True
else:
nl.append(wid + "|" + h)
# If our id wasn't present in the list, add it (so we're
# effectively toggling the id on and off)
if not removed:
nl.append(wlid + "|" + himode)
configuration.waiting_list_highlights(dbo, " ".join(nl))
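# Illustrative sketch (added for exposition): highlights are persisted as a
# space-separated list of "<waitinglistid>|<colourmode>" tokens, e.g.
# "12|1 15|3". Calling this function toggles an id in that list:
#
#   update_waitinglist_highlight(dbo, 12, "2")  # adds "12|2", or removes 12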
def auto_remove_waitinglist(dbo):
"""
Finds waiting list entries whose last owner contact is more than
AutoRemovePolicy weeks ago and automatically marks them as removed.
"""
l = dbo.locale
rows = db.query(dbo, "SELECT a.ID, a.DateOfLastOwnerContact, " \
"a.AutoRemovePolicy " \
"FROM animalwaitinglist a WHERE a.DateRemovedFromList Is Null " \
"AND AutoRemovePolicy > 0 AND DateOfLastOwnerContact Is Not Null")
updates = []
for r in rows:
xdate = add_days(r["DATEOFLASTOWNERCONTACT"], 7 * r["AUTOREMOVEPOLICY"])
if after(now(dbo.timezone), xdate):
al.debug("auto removing waitinglist entry %d due to policy" % int(r["ID"]), "waitinglist.auto_remove_waitinglist", dbo)
updates.append((now(dbo.timezone), _("Auto removed due to lack of owner contact.", l), r["ID"]))
if len(updates) > 0:
db.execute_many(dbo, "UPDATE animalwaitinglist SET DateRemovedFromList = %s, " \
"ReasonForRemoval=%s WHERE ID=%s", updates)
def auto_update_urgencies(dbo):
"""
Finds all waiting list entries whose UrgencyUpdateDate has been reached
(is on or before today) and whose urgency is still above High (value > 2),
then raises their urgency by one step; automatic updates can therefore
promote an entry to High, but never to Urgent.
"""
update_period_days = configuration.waiting_list_urgency_update_period(dbo)
if update_period_days == 0:
al.debug("urgency update period is 0, not updating waiting list entries", "waitinglist.auto_update_urgencies", dbo)
return
rows = db.query(dbo, "SELECT a.* " \
"FROM animalwaitinglist a WHERE UrgencyUpdateDate <= %s " \
"AND Urgency > 2" % db.dd(now(dbo.timezone)))
updates = []
for r in rows:
al.debug("increasing urgency of waitinglist entry %d" % int(r["ID"]), "waitinglist.auto_update_urgencies", dbo)
updates.append((now(dbo.timezone), add_days(r["URGENCYUPDATEDATE"], update_period_days), r["URGENCY"] - 1, r["ID"]))
if len(updates) > 0:
db.execute_many(dbo, "UPDATE animalwaitinglist SET " \
"UrgencyLastUpdatedDate=%s, " \
"UrgencyUpdateDate=%s, " \
"Urgency=%s " \
"WHERE ID=%s ", updates)
def update_waitinglist_from_form(dbo, data, username):
"""
Updates a waiting list record from the screen
data: The webpy data object containing form parameters
"""
l = dbo.locale
wlid = utils.df_ki(data, "id")
if utils.df_ks(data, "description") == "":
raise utils.ASMValidationError(_("Description cannot be blank", l))
if utils.df_ki(data, "owner") == "0":
raise utils.ASMValidationError(_("Waiting list entries must have a contact", l))
if utils.df_ks(data, "dateputon") == "":
raise utils.ASMValidationError(_("Date put on cannot be blank", l))
preaudit = db.query(dbo, "SELECT * FROM animalwaitinglist WHERE ID = %d" % wlid)
db.execute(dbo, db.make_update_user_sql(dbo, "animalwaitinglist", username, "ID=%d" % wlid, (
( "SpeciesID", utils.df_s(data, "species")),
( "DatePutOnList", utils.df_d(data, "dateputon", l)),
( "OwnerID", utils.df_s(data, "owner")),
( "AnimalDescription", utils.df_t(data, "description")),
( "ReasonForWantingToPart", utils.df_t(data, "reasonforwantingtopart")),
( "CanAffordDonation", utils.df_c(data, "canafforddonation")),
( "Urgency", utils.df_s(data, "urgency")),
( "DateRemovedFromList", utils.df_d(data, "dateremoved", l)),
( "AutoRemovePolicy", utils.df_s(data, "autoremovepolicy")),
( "DateOfLastOwnerContact", utils.df_d(data, "dateoflastownercontact", l)),
( "ReasonForRemoval", utils.df_t(data, "reasonforremoval")),
( "Comments", utils.df_t(data, "comments"))
)))
additional.save_values_for_link(dbo, data, wlid, "waitinglist")
postaudit = db.query(dbo, "SELECT * FROM animalwaitinglist WHERE ID = %d" % wlid)
audit.edit(dbo, username, "animalwaitinglist", audit.map_diff(preaudit, postaudit))
def insert_waitinglist_from_form(dbo, data, username):
"""
Creates a waiting list record from the screen
data: The webpy data object containing form parameters
"""
l = dbo.locale
if utils.df_ks(data, "description") == "":
raise utils.ASMValidationError(_("Description cannot be blank", l))
if utils.df_ki(data, "owner") == "0":
raise utils.ASMValidationError(_("Waiting list entries must have a contact", l))
if utils.df_ks(data, "dateputon") == "":
raise utils.ASMValidationError(_("Date put on cannot be blank", l))
nwlid = db.get_id(dbo, "animalwaitinglist")
db.execute(dbo, db.make_insert_user_sql(dbo, "animalwaitinglist", username, (
( "ID", db.di(nwlid)),
( "SpeciesID", utils.df_s(data, "species")),
( "DatePutOnList", utils.df_d(data, "dateputon", l)),
( "OwnerID", utils.df_s(data, "owner")),
( "AnimalDescription", utils.df_t(data, "description")),
( "ReasonForWantingToPart", utils.df_t(data, "reasonforwantingtopart")),
( "CanAffordDonation", utils.df_c(data, "canafforddonation")),
( "Urgency", utils.df_s(data, "urgency")),
( "DateRemovedFromList", utils.df_d(data, "dateremoved", l)),
( "AutoRemovePolicy", utils.df_s(data, "autoremovepolicy")),
( "DateOfLastOwnerContact", db.dd(now(dbo.timezone))),
( "ReasonForRemoval", utils.df_t(data, "reasonforremoval")),
( "Comments", utils.df_t(data, "comments")),
( "UrgencyLastUpdatedDate", db.dd(now(dbo.timezone))),
( "UrgencyUpdateDate", db.dd(add_days(now(dbo.timezone), configuration.waiting_list_urgency_update_period(dbo))))
)))
audit.create(dbo, username, "animalwaitinglist", str(nwlid))
return nwlid
def create_animal(dbo, username, wlid):
"""
Creates an animal record from a waiting list entry with the id given
"""
a = db.query(dbo, "SELECT * FROM animalwaitinglist WHERE ID = %d" % wlid)[0]
l = dbo.locale
data = {
"animalname": _("Waiting List {0}", l).format(wlid),
"markings": str(a["ANIMALDESCRIPTION"]),
"reasonforentry": str(a["REASONFORWANTINGTOPART"]),
"species": str(a["SPECIESID"]),
"comments": str(a["COMMENTS"]),
"broughtinby": str(a["OWNERID"]),
"originalowner": str(a["OWNERID"]),
"animaltype": configuration.default_type(dbo),
"breed1": configuration.default_breed(dbo),
"breed2": configuration.default_breed(dbo),
"basecolour": configuration.default_colour(dbo),
"size": configuration.default_size(dbo),
"internallocation": configuration.default_location(dbo),
"dateofbirth": python2display(l, subtract_years(now(dbo.timezone))),
"estimateddob": "1"
}
# If we're creating shelter codes manually, we need to put something unique
# in there for now. Use the id
if configuration.manual_codes(dbo):
data["sheltercode"] = "WL" + str(wlid)
data["shortcode"] = "WL" + str(wlid)
nextid, code = animal.insert_animal_from_form(dbo, data, username)
# Now that we've created our animal, we should remove this entry from the waiting list
db.execute(dbo, "UPDATE animalwaitinglist SET DateRemovedFromList = %s, ReasonForRemoval = %s " \
"WHERE ID = %d" % (
db.dd(now(dbo.timezone)),
db.ds(_("Moved to animal record {0}", l).format(code)),
wlid))
return nextid
| gpl-3.0 | -4,002,365,165,038,267,000 | 42.967262 | 145 | 0.6057 | false | 3.401566 | true | false | false |
pwwang/bioprocs | bioprocs/utils/meme.py | 1 | 5335 | """
Read or write MEME motif file
"""
import re
import math
from collections import OrderedDict
class MemeRecord(object):
def __init__(self,
name,
matrix,
altname = '',
mtype = 'letter-probability',
alength = None,
w = None,
nsites = 20,
E = 0,
URL = None
):
self.name = name
self.matrix = matrix
self.altname = altname
self.mtype = mtype
self.alength = alength or len(matrix[0])
self.w = w or len(matrix)
self.nsites = nsites
self.E = E
self.URL = URL
def __str__(self):
return """
MOTIF {name}{altname}
{mtype} matrix: alength= {alength} w= {w} nsites= {nsites} E= {E}
{matrix}
{URL}
""".format(
name = self.name,
altname = " " + self.altname if self.altname else "",
mtype = self.mtype,
alength = self.alength,
w = self.w,
nsites = self.nsites,
E = self.E,
matrix = "\n".join(" ".join(str(r) for r in row) for row in self.matrix),
URL = "URL {}".format(self.URL) if self.URL else ""
)
def pwm2logodds(self):
assert self.mtype == 'letter-probability'
# logit: map letter probabilities in (0, 1) to log-odds
matrix = [
tuple(math.log(p / (1.0 - p)) for p in row)
for row in self.matrix
]
return MemeRecord(
name = self.name,
matrix = matrix,
altname = self.altname,
mtype = 'log-odds',
alength = self.alength,
w = self.w,
nsites = self.nsites,
E = self.E,
URL = self.URL
)
def pwm2prob(self):
assert self.mtype == 'log-odds'
# inverse logit (sigmoid): map log-odds back to letter probabilities
matrix = [
tuple(math.exp(p) / (1.0 + math.exp(p)) for p in row)
for row in self.matrix
]
return MemeRecord(
name = self.name,
matrix = matrix,
altname = self.altname,
mtype = 'letter-probability',
alength = self.alength,
w = self.w,
nsites = self.nsites,
E = self.E,
URL = self.URL
)
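# Illustrative check (added for exposition): pwm2logodds and pwm2prob are
# inverse transforms for probabilities strictly between 0 and 1. The record
# below is invented.
#
#   rec = MemeRecord('M1', [(0.25, 0.25, 0.25, 0.25)])
#   back = rec.pwm2logodds().pwm2prob()
#   assert abs(back.matrix[0][0] - 0.25) < 1e-9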
class MemeReader(object):
def __init__(self, memefile):
self.meta = {}
alphabet_flag = False
bgfreqs_flag = False
self.file = open(memefile)
self.tell = 0
while True:
self.tell = self.file.tell()
line = self.file.readline()
if not line:
raise ValueError('Not a valid MEME motif file.')
if line.startswith('MEME version'):
self.meta['Version'] = line[12:].strip()
elif line.startswith('ALPHABET='):
self.meta['Alphabet'] = line[9:].strip()
elif line.startswith('ALPHABET'):
self.meta['Alphabet'] = line[8:].strip()
alphabet_flag = True
elif line.startswith('END ALPHABET'):
alphabet_flag = False
elif alphabet_flag:
self.meta['Alphabet'] += '\n' + line.strip()
elif line.startswith('strands:'):
self.meta['Strands'] = line[8:].strip()
elif line.startswith('Background letter frequencies'):
bgfreqs_flag = True
source = line[30:].strip()
if source.startswith('(from '):
source = source[6:-2]
else:
source = ''
self.meta['bgfreqs'] = {'from': source, 'freqs': OrderedDict()}
elif bgfreqs_flag:
bgfreqs_flag = False
parts = line.strip().split()
self.meta['bgfreqs']['freqs'] = OrderedDict(tuple([parts[2*i], float(parts[2*i+1])] for i in range(int(len(parts)/2))))
elif line.startswith('MOTIF'):
self.file.seek(self.tell)
break
def next(self):
name = None
altname = ''
url = None
mtype = ''
matrix = []
attrs = {}
while True:
tell = self.file.tell()
line = self.file.readline()
if not line:
raise StopIteration()
if line.startswith('MOTIF'):
if name:
self.file.seek(tell)
break
parts = line[5:].strip().split()
name = parts.pop(0)
if parts:
altname = parts[0]
elif line.startswith('URL'):
url = line[3:].strip()
elif 'matrix:' in line:
matrix = [] # in case there are multiple matrices
mtype, attrs = line.strip().split('matrix:')
mtype = mtype.strip()
attrs = re.split(r'(?:\s*=\s*|\s+)', attrs.strip())
attrs = {attrs[2*i]:attrs[2*i+1] for i in range(int(len(attrs)/2))}
else:
line = line.strip()
if not line:
continue
matrix.append(tuple(float(v) for v in line.split()))
return MemeRecord(
name,
matrix,
altname = altname,
mtype = mtype,
URL = url,
**attrs
)
def __next__(self):
return self.next()
def rewind(self):
self.file.seek(self.tell)
def __iter__(self):
return self
def __del__(self):
self.close()
def close(self):
if self.file:
self.file.close()
class MemeWriter(object):
def __init__(self, outfile, meta = None):
self.meta = meta or {}
self.file = open(outfile, 'w')
def writeMeta(self):
self.file.write("MEME version {}\n\n".format(self.meta.get('Version', 4)))
alphabet = self.meta.get('Alphabet', 'ACGT')
if '\n' in alphabet:
self.file.write("ALPHABET {}\nEND ALPHABET\n\n".format(alphabet))
else:
self.file.write("ALPHABET= {}\n\n".format(alphabet))
strands = self.meta.get('Strands', '+ -')
self.file.write("strands: {}\n\n".format(strands))
bgfreqs = self.meta.get("bgfreqs", {})
if "from" in bgfreqs:
self.file.write("Background letter frequencies (from {}):\n".format(bgfreqs['from']))
if "freqs" in bgfreqs:
self.file.write(" ".join('{} {}'.format(k, v) for k, v in bgfreqs['freqs'].items()) + "\n\n")
def write(self, mrec):
self.file.write(str(mrec))
def __del__(self):
self.close()
def close(self):
if self.file:
self.file.close()
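# Minimal usage sketch (not part of the original module; the file names are
# assumptions -- any MEME-formatted motif file would do):
if __name__ == '__main__':
	reader = MemeReader('motifs.meme')
	writer = MemeWriter('motifs.logodds.meme', meta = reader.meta)
	writer.writeMeta()
	for mrec in reader:
		# convert probability matrices to log-odds; pass others through as-is
		writer.write(mrec.pwm2logodds() if mrec.mtype == 'letter-probability' else mrec)
	reader.close()
	writer.close()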
| mit | -8,825,826,625,067,057,000 | 23.813953 | 124 | 0.597938 | false | 2.754259 | false | false | false |
fin/froide | froide/publicbody/csv_import.py | 1 | 3665 | # -*- encoding: utf-8 -*-
import requests
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.utils.six import StringIO, BytesIO, PY3
from taggit.utils import parse_tags
if PY3:
import csv
else:
import unicodecsv as csv
from froide.publicbody.models import (PublicBody, PublicBodyTag, Jurisdiction, FoiLaw)
User = get_user_model()
class CSVImporter(object):
topic_cache = {}
default_topic = None
jur_cache = {}
def __init__(self):
self.user = User.objects.order_by('id')[0]
self.site = Site.objects.get_current()
def import_from_url(self, url):
response = requests.get(url)
# Force requests to evaluate as UTF-8
response.encoding = 'utf-8'
csv_file = BytesIO(response.content)
self.import_from_file(csv_file)
def import_from_file(self, csv_file):
"""
csv_file should be encoded in utf-8
"""
if PY3:
csv_file = StringIO(csv_file.read().decode('utf-8'))
reader = csv.DictReader(csv_file)
for row in reader:
self.import_row(row)
def import_row(self, row):
# generate slugs
row['name'] = row['name'].strip()
row['email'] = row['email'].lower()
if row['url'] and not row['url'].startswith(('http://', 'https://')):
row['url'] = 'http://' + row['url']
row['slug'] = slugify(row['name'])
row['classification_slug'] = slugify(row['classification'])
tags = parse_tags(row.pop('tags', ''))
# Backwards compatible handling of topic__slug
topic_slug = row.pop('topic__slug', None)
if topic_slug:
tags.append(self.get_topic(topic_slug))
# resolve foreign keys
row['jurisdiction'] = self.get_jurisdiction(row.pop('jurisdiction__slug'))
parent = row.pop('parent__name', None)
if parent:
row['parent'] = PublicBody.objects.get(slug=slugify(parent))
# get optional values
for n in ('description', 'other_names', 'request_note', 'website_dump'):
row[n] = row.get(n, '')
try:
if 'id' in row and row['id']:
pb = PublicBody.objects.get(id=row['id'])
else:
pb = PublicBody.objects.get(slug=row['slug'])
# If it exists, update it
row.pop('id', None) # Do not update id though
row['_updated_by'] = self.user
PublicBody.objects.filter(id=pb.id).update(**row)
pb.laws.clear()
pb.laws.add(*row['jurisdiction'].laws)
pb.tags.set(*tags)
return
except PublicBody.DoesNotExist:
pass
row.pop('id', None) # Remove id if present
public_body = PublicBody(**row)
public_body._created_by = self.user
public_body._updated_by = self.user
public_body.confirmed = True
public_body.site = self.site
public_body.save()
public_body.laws.add(*row['jurisdiction'].laws)
public_body.tags.set(*list(tags))
def get_jurisdiction(self, slug):
if slug not in self.jur_cache:
jur = Jurisdiction.objects.get(slug=slug)
laws = FoiLaw.objects.filter(jurisdiction=jur)
jur.laws = laws
self.jur_cache[slug] = jur
return self.jur_cache[slug]
def get_topic(self, slug):
if slug not in self.topic_cache:
self.topic_cache[slug] = PublicBodyTag.objects.get(slug=slug, is_topic=True)
return self.topic_cache[slug]
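# Minimal usage sketch (illustrative; the URL and file name are assumptions):
#
#   importer = CSVImporter()
#   importer.import_from_url('https://example.org/publicbodies.csv')
#
# or, from a local UTF-8 encoded file opened in binary mode:
#
#   with open('publicbodies.csv', 'rb') as f:
#       importer.import_from_file(f)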
| mit | 4,057,306,953,763,655,700 | 32.623853 | 88 | 0.583356 | false | 3.621542 | false | false | false |
OpenTransportDataProject/ckanext-customharvesters | ckanext/dcat/harvesters/createorg.py | 1 | 2400 | # Description: create harvested organizations from orgfile.txt
# Author: Shanshan Jiang, last modified 14.12.2016
import json
import urllib
import urllib2
import pprint
print "create organizations"
org_dict = {
'name': 'testagain',
'title': 'test again',
'image_url': ''
}
def create_org(dataset_dict):
data_string = urllib.quote(json.dumps(dataset_dict))
# replace with the correct url of CKAN server
request = urllib2.Request(
'http://127.0.0.1:5000/api/action/organization_create')
# replace with the correct APIkey
request.add_header('Authorization', '765e099f-6d07-48a8-82ba-5a79730a976f')
# Make the HTTP request.
response = urllib2.urlopen(request, data_string)
assert response.code == 200
# Use the json module to load CKAN's response into a dictionary.
response_dict = json.loads(response.read())
assert response_dict['success'] is True
# package_create returns the created package as its result.
created_package = response_dict['result']
pprint.pprint(created_package)
# check if organization exists in the catalogue
def check_org_exist(org_name):
found = False
for org in org_list:
print org
if org == org_name:
print "Found the organization : " + org_name
found = True
break
return found
# get the list of organizations from the catalogue
org_url='http://127.0.0.1:5000/api/3/action/organization_list'
orglist=urllib.urlopen(org_url).read()
doc = json.loads(orglist)
org_list = doc["result"]
print 'The list of organizations: '
print org_list
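# Expected orgfile.txt format, inferred from the parser below (the values are
# illustrative):
#
#   org_name:test-agency
#   url:http://example.org/logo.png
#   display_name:Test Agency
#
# An organization is created (or skipped if it already exists) once its
# display_name line has been read.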
with open('orgfile.txt') as f:
content = f.read().decode('utf8').splitlines()
print content
for line in content:
print line
if line.startswith('org_name:'):
org_name = line[9:]
print 'org_name: ' + org_name
org_dict.update({'name': org_name})
if line.startswith('url:'):
org_url = line[4:]
print 'image url: ' + org_url
org_dict.update({'image_url': org_url})
if line.startswith('display_name:'):
display_name = line[13:]
print 'display_name: ' + display_name
org_dict.update({'title': display_name})
print org_dict
if check_org_exist(org_name):
print 'The organization ' + org_name + ' already exists!'
else:
create_org(org_dict)
| agpl-3.0 | -3,692,371,861,684,130,000 | 26.586207 | 80 | 0.646667 | false | 3.550296 | false | false | false |
tusharmakkar08/AlphaPy | Syntax/syntaxhighlighter.py | 1 | 2865 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# syntaxhighlighter.py
#
# Copyright 2013 tusharmakkar08 <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
""" Importing Modules """
import pygments
from pygments import highlight
from pygments.lexers import *
from pygments.formatters import *
from pygments.styles import STYLE_MAP
from pygments.styles import get_all_styles
import os
import webbrowser
"""Main code"""
def guess_lex(code):
return guess_lexer(code)
def guess_lex_file(name,code):
return guess_lexer_for_filename(name,code)
def Highlight(name):
k="pygmentize %s"%(name)
os.system(k)
def pref_style():
styles = list(get_all_styles())
print "Choose from one of the styles"
count=1
for i in styles:
print count,":",i
count+=1
k=input()
return styles[k-1]
def html_out(name,k):
"""HTML printed"""
styles = list(get_all_styles())
m=styles[k-1]
print m
new=""
for i in name:
if i==".":
break
new+=i
stri="pygmentize -O full,style="+m+" -o "+new+".html "+name
print stri
os.system(stri)
def show_html(name):
new=""
for i in name:
if i==".":
break
new+=i
url=new+".html"
stri="libreoffice --writer -o %s"%(url)
os.system(stri)
def open_html(name):
newt=2 # open in a new tab, if possible
new=""
for i in name:
if i==".":
break
new+=i
url=new+".html"
webbrowser.open(url,new=newt)
def rtf_out(name,k):
"""Rich text format"""
styles = list(get_all_styles())
m=styles[k-1]
new=""
for i in name:
if i==".":
break
new+=i
stri="pygmentize -O full,style="+m+" -o "+new+".rtf "+name
os.system(stri)
def open_rtf(name):
new=""
for i in name:
if i==".":
break
new+=i
url=new+".rtf"
stri="libreoffice --writer -o %s"%(url)
os.system(stri)
def copy_clipboard(name,flag):
"""For directly cutting paste to different pahes like powerpoint etc"""
new=""
for i in name:
if i==".":
break
new+=i
if flag==1:
stri="xclip -in -selection c "+new+".html"
else:
stri="xclip -in -selection c "+new+".rtf"
os.system(stri)
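# Minimal usage sketch (not part of the original module; the file name and
# style index are assumptions):
#
#   name = "test.py"
#   html_out(name, 5)     # render with the 5th style from get_all_styles()
#   open_html(name)       # open the generated test.html in the browser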
"""Code Testing"""
#t=raw_input("Enter filename\n")
#rtf_out("test.py",5)
#copy_clipboard(t,1)
#open_rtf(t)
#print pref_style()
| mit | 668,118,477,405,931,000 | 20.222222 | 72 | 0.669459 | false | 2.757459 | false | false | false |
mjtamlyn/django-denorm | test_project/test_app/tests.py | 1 | 17380 | from djangosanetesting import cases
from django.contrib.auth.models import User,Permission
from django.contrib.contenttypes.models import ContentType
import denorm
from denorm import denorms
import models
class TestSkip(cases.DestructiveDatabaseTestCase):
"""
Tests for the skip feature.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
post = models.SkipPost(text='Here be ponies.')
post.save()
self.post = post
    # TODO: Enable and check!
    # Unsure how to test this behaviour. It results in an endless loop:
    # update -> trigger -> update -> trigger -> ...
    #
    #def test_without_skip(self):
    #    # This results in an infinite loop on SQLite.
    #    comment = SkipCommentWithoutSkip(post=self.post, text='Oh really?')
    #    comment.save()
    #
    #    denorm.flush()
    # TODO: Check if an infinite loop happens and stop it.
def test_with_skip(self):
# This should not result in an endless loop.
comment = models.SkipCommentWithSkip(post=self.post, text='Oh really?')
comment.save()
denorm.flush()
def test_meta_skip(self):
"""Test a model with the attribute listed under denorm_always_skip."""
comment = models.SkipCommentWithAttributeSkip(post=self.post, text='Yup, and they have wings!')
comment.save()
denorm.flush()
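# For reference, the skip feature exercised above relies on declarations in
# test_app.models. A sketch of the assumed pattern (illustrative, not
# verbatim; the field names are made up):
#
#   class SkipCommentWithSkip(models.Model):
#       post = models.ForeignKey(SkipPost)
#       ...  # denormalized field declared with skip=('updated_at',)
#
#   class SkipCommentWithAttributeSkip(models.Model):
#       denorm_always_skip = ('updated_at',)
#       ...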
class TestDenormalisation(cases.DestructiveDatabaseTestCase):
"""
Tests for the denormalisation fields.
"""
def setUp(self):
denorms.drop_triggers()
denorms.install_triggers()
self.testuser = User.objects.create_user("testuser","testuser","testuser")
self.testuser.is_staff = True
ctype = ContentType.objects.get_for_model(models.Member)
Permission.objects.filter(content_type=ctype).get(name='Can change member').user_set.add(self.testuser)
self.testuser.save()
def tearDown(self):
# delete all model instances
self.testuser.delete()
models.Attachment.objects.all().delete()
models.Post.objects.all().delete()
models.Forum.objects.all().delete()
def test_depends_related(self):
"""
Test the DependsOnRelated stuff.
"""
# Make a forum, check it's got no posts
f1 = models.Forum.objects.create(title="forumone")
self.assertEqual(f1.post_count, 0)
# Check its database copy too
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
# Add a post
p1 = models.Post.objects.create(forum=f1)
# Has the post count updated?
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
denorm.flush()
# Check its title, in p1 and the DB
self.assertEqual(p1.forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
# Update the forum title
f1.title = "forumtwo"
f1.save()
denorm.flush()
# Has the post's title changed?
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumtwo")
# Add and remove some posts and check the post count
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 3)
p1.delete()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
# Delete everything, check once more.
models.Post.objects.all().delete()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
# Make an orphaned post, see what its title is.
# Doesn't work yet - no support for null FKs
#p4 = Post.objects.create(forum=None)
#self.assertEqual(p4.forum_title, None)
def test_dependency_chains(self):
# create a forum, a member and a post
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1,author=m1)
denorm.flush()
# check the forums author list contains the member
self.assertEqual(models.Forum.objects.get(id=f1.id).author_names, "memberone")
# change the member's name
m1.name = "membertwo"
m1.save()
denorm.flush()
# check again
self.assertEqual(models.Forum.objects.get(id=f1.id).author_names, "membertwo")
def test_trees(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo",parent_forum=f1)
f3 = models.Forum.objects.create(title="forumthree",parent_forum=f2)
denorm.flush()
self.assertEqual(f1.path,'/forumone/')
self.assertEqual(f2.path,'/forumone/forumtwo/')
self.assertEqual(f3.path,'/forumone/forumtwo/forumthree/')
f1.title = 'someothertitle'
f1.save()
denorm.flush()
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
f3 = models.Forum.objects.get(id=f3.id)
self.assertEqual(f1.path,'/someothertitle/')
self.assertEqual(f2.path,'/someothertitle/forumtwo/')
self.assertEqual(f3.path,'/someothertitle/forumtwo/forumthree/')
def test_reverse_fk_null(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1,author=m1)
models.Attachment.objects.create()
denorm.flush()
def test_bulk_update(self):
"""
Test the DependsOnRelated stuff.
"""
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
p1 = models.Post.objects.create(forum=f1)
p2 = models.Post.objects.create(forum=f2)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "forumtwo")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 1)
models.Post.objects.update(forum=f1)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "forumone")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "forumone")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
models.Forum.objects.update(title="oneforall")
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).forum_title, "oneforall")
self.assertEqual(models.Post.objects.get(id=p2.id).forum_title, "oneforall")
def test_no_dependency(self):
m1 = models.Member.objects.create(first_name="first",name="last")
denorm.flush()
self.assertEqual(models.Member.objects.get(id=m1.id).full_name,"first last")
models.Member.objects.filter(id=m1.id).update(first_name="second")
denorm.flush()
self.assertEqual(models.Member.objects.get(id=m1.id).full_name,"second last")
def test_self_backward_relation(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(forum=f1,)
p2 = models.Post.objects.create(forum=f1,response_to=p1)
p3 = models.Post.objects.create(forum=f1,response_to=p1)
p4 = models.Post.objects.create(forum=f1,response_to=p2)
denorm.flush()
self.assertEqual(models.Post.objects.get(id=p1.id).response_count, 3)
self.assertEqual(models.Post.objects.get(id=p2.id).response_count, 1)
self.assertEqual(models.Post.objects.get(id=p3.id).response_count, 0)
self.assertEqual(models.Post.objects.get(id=p4.id).response_count, 0)
def test_m2m_relation(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(forum=f1,title="post1")
m1 = models.Member.objects.create(first_name="first1",name="last1")
denorm.flush()
m1.bookmarks.add(p1)
denorm.flush()
self.assertTrue('post1' in models.Member.objects.get(id=m1.id).bookmark_titles)
p1.title = "othertitle"
p1.save()
denorm.flush()
self.assertTrue('post1' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('othertitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
p2 = models.Post.objects.create(forum=f1,title="thirdtitle")
m1.bookmarks.add(p2)
denorm.flush()
self.assertTrue('post1' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('othertitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('thirdtitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
m1.bookmarks.remove(p1)
denorm.flush()
self.assertTrue('othertitle' not in models.Member.objects.get(id=m1.id).bookmark_titles)
self.assertTrue('thirdtitle' in models.Member.objects.get(id=m1.id).bookmark_titles)
def test_middleware(self):
# FIXME, this test currently does not work with a transactional
# database, so it's skipped for now.
return
# FIXME, set and de-set middleware values
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(first_name="first1",name="last1")
p1 = models.Post.objects.create(forum=f1,author=m1)
self.assertEqual(models.Post.objects.get(id=p1.id).author_name, "last1")
self.client.login(username="testuser",password="testuser")
self.client.post("/admin/denorm_testapp/member/%s/"%(m1.pk),
{'name':'last2','first_name':'first2'})
self.assertEqual(models.Post.objects.get(id=p1.id).author_name, "last2")
def test_countfield(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumone")
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 0)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
models.Post.objects.create(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
p2 = models.Post.objects.create(forum=f2)
p3 = models.Post.objects.create(forum=f2)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 1)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 2)
p2.forum = f1
p2.save()
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 2)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 1)
models.Post.objects.filter(pk=p3.pk).update(forum=f1)
self.assertEqual(models.Forum.objects.get(id=f1.id).post_count, 3)
self.assertEqual(models.Forum.objects.get(id=f2.id).post_count, 0)
def test_foreignkey(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
m1 = models.Member.objects.create(first_name="first1",name="last1")
p1 = models.Post.objects.create(forum=f1,author=m1)
a1 = models.Attachment.objects.create(post=p1)
self.assertEqual(models.Attachment.objects.get(id=a1.id).forum, f1)
a2 = models.Attachment.objects.create()
self.assertEqual(models.Attachment.objects.get(id=a2.id).forum, None)
# Change forum
p1.forum = f2
p1.save()
denorm.flush()
self.assertEqual(models.Attachment.objects.get(id=a1.id).forum, f2)
def test_m2m(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
models.Post.objects.create(forum=f1,author=m1)
denorm.flush()
# check the forums author list contains the member
self.assertTrue(m1 in models.Forum.objects.get(id=f1.id).authors.all())
m2 = models.Member.objects.create(name="membertwo")
p2 = models.Post.objects.create(forum=f1,author=m2)
denorm.flush()
self.assertTrue(m1 in models.Forum.objects.get(id=f1.id).authors.all())
self.assertTrue(m2 in models.Forum.objects.get(id=f1.id).authors.all())
p2.delete()
denorm.flush()
self.assertTrue(m2 not in models.Forum.objects.get(id=f1.id).authors.all())
def test_denorm_rebuild(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1,author=m1)
denorm.denorms.rebuildall()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.post_count, 1)
self.assertEqual(f1.authors.all()[0],m1)
def test_denorm_subclass(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(forum=f1,author=m1)
self.assertEqual(f1.tags_string, '')
self.assertEqual(p1.tags_string, '')
models.Tag.objects.create(name='tagone', content_object=f1)
models.Tag.objects.create(name='tagtwo', content_object=f1)
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagone, tagtwo')
self.assertEqual(p1.tags_string, '')
models.Tag.objects.create(name='tagthree', content_object=p1)
t4 = models.Tag.objects.create(name='tagfour', content_object=p1)
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagone, tagtwo')
self.assertEqual(p1.tags_string, 'tagfour, tagthree')
t4.content_object = f1
t4.save()
denorm.denorms.flush()
f1 = models.Forum.objects.get(id=f1.id)
m1 = models.Member.objects.get(id=m1.id)
p1 = models.Post.objects.get(id=p1.id)
self.assertEqual(f1.tags_string, 'tagfour, tagone, tagtwo')
self.assertEqual(p1.tags_string, 'tagthree')
def test_cache_key_field_backward(self):
f1 = models.Forum.objects.create(title="forumone")
f2 = models.Forum.objects.create(title="forumtwo")
ck1 = f1.cachekey
ck2 = f2.cachekey
p1 = models.Post.objects.create(forum=f1)
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
self.assertNotEqual(ck1,f1.cachekey)
self.assertEqual(ck2,f2.cachekey)
ck1 = f1.cachekey
ck2 = f2.cachekey
p1 = models.Post.objects.get(id=p1.id)
p1.forum = f2
p1.save()
f1 = models.Forum.objects.get(id=f1.id)
f2 = models.Forum.objects.get(id=f2.id)
self.assertNotEqual(ck1,f1.cachekey)
self.assertNotEqual(ck2,f2.cachekey)
def test_cache_key_field_forward(self):
f1 = models.Forum.objects.create(title="forumone")
p1 = models.Post.objects.create(title='initial_title',forum=f1)
a1 = models.Attachment.objects.create(post=p1)
a2 = models.Attachment.objects.create(post=p1)
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(a1.cachekey,a2.cachekey)
ck1 = a1.cachekey
ck2 = a2.cachekey
p1.title = 'new_title'
p1.save()
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(ck1,a1.cachekey)
self.assertNotEqual(ck2,a2.cachekey)
a1 = models.Attachment.objects.get(id=a1.id)
a2 = models.Attachment.objects.get(id=a2.id)
self.assertNotEqual(a1.cachekey,a2.cachekey)
def test_cache_key_field_m2m(self):
f1 = models.Forum.objects.create(title="forumone")
m1 = models.Member.objects.create(name="memberone")
p1 = models.Post.objects.create(title='initial_title',forum=f1)
m1 = models.Member.objects.get(id=m1.id)
ck1 = m1.cachekey
m1.bookmarks.add(p1)
m1 = models.Member.objects.get(id=m1.id)
self.assertNotEqual(ck1,m1.cachekey)
ck1 = m1.cachekey
p1 = models.Post.objects.get(id=p1.id)
p1.title = 'new_title'
p1.save()
m1 = models.Member.objects.get(id=m1.id)
self.assertNotEqual(ck1,m1.cachekey)
| bsd-3-clause | -3,809,027,810,471,516,000 | 37.030635 | 111 | 0.637112 | false | 3.248598 | true | false | false |
fgirault/smeuhsocial | apps/threadedcomments/tests/test_views.py | 1 | 26436 | from django.core.urlresolvers import reverse
from django.test import TestCase
from json import loads
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from threadedcomments.models import FreeThreadedComment, ThreadedComment
from threadedcomments.models import TestModel
__all__ = ("ViewsTestCase",)
class ViewsTestCase(TestCase):
def test_freecomment_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_free_comment', kwargs={
'content_type': content_type.id,
'object_id': topic.id
})
self.client.post(url, {
'comment': 'test1',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test1',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_preview(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_free_comment', kwargs={
'content_type': content_type.id,
'object_id': topic.id
})
response = self.client.post(url, {
'comment': 'test1',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/',
'preview': 'True'
})
self.assertEquals(len(response.content) > 0, True)
def test_freecomment_edit(self):
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_edit', kwargs={
'edit_id': comment.pk
})
self.client.post(url, {
'comment': 'test1_edited',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test1_edited',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_edit_with_preview(self):
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
website="http://oebfare.com/",
comment="My test free comment!",
ip_address='127.0.0.1',
)
url = reverse('tc_free_comment_edit', kwargs={
'edit_id': comment.pk
})
response = self.client.post(url, {
'comment': 'test1_edited',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/',
'preview': 'True'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://oebfare.com/',
'comment': u'My test free comment!',
'name': u'',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'',
'is_approved': False
})
self.assertEquals(len(response.content) > 0, True)
def test_freecomment_json_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_free_comment_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'ajax': 'json'
})
response = self.client.post(url, {
'comment': 'test2',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
        loads(response.content)  # just check the response body is valid JSON
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test2',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_json_edit(self):
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_edit_ajax', kwargs={
'edit_id': comment.pk,
'ajax': 'json'
})
response = self.client.post(url, {
'comment': 'test2_edited',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
        loads(response.content)  # just check the response body is valid JSON
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test2_edited',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_xml_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_free_comment_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'ajax': 'xml'
})
response = self.client.post(url, {
'comment': 'test3', 'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]', 'next': '/'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test3',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_xml_edit(self):
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_edit_ajax', kwargs={
'edit_id': comment.pk,
'ajax': 'xml'
})
self.client.post(url, {
'comment': 'test2_edited',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test2_edited',
'name': u'eric',
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_child_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_parent', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id
})
self.client.post(url, {
'comment': 'test4',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]',
'next': '/'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test4',
'name': u'eric',
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_child_json_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_parent_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id,
'ajax': 'json'
})
self.client.post(url, {
'comment': 'test5',
'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test5',
'name': u'eric',
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def test_freecomment_child_xml_create(self):
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = FreeThreadedComment.objects.create_for_object(
topic,
ip_address='127.0.0.1',
comment="My test free comment!",
)
url = reverse('tc_free_comment_parent_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id,
'ajax': 'xml'
})
self.client.post(url, {
'comment': 'test6', 'name': 'eric',
'website': 'http://www.eflorenzano.com/',
'email': '[email protected]'
})
o = FreeThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'website': u'http://www.eflorenzano.com/',
'comment': u'test6',
'name': u'eric',
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'is_public': True,
'ip_address': u'127.0.0.1',
'email': u'[email protected]',
'is_approved': False
})
def create_user_and_login(self):
user = User.objects.create_user(
'testuser',
'[email protected]',
'password',
)
user.is_active = True
user.save()
self.client.login(username='testuser', password='password')
return user
def test_comment_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_comment', kwargs={
'content_type': content_type.id,
'object_id': topic.id
})
self.client.post(url, {
'comment': 'test7',
'next': '/'
})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test7',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_preview(self):
self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_comment', kwargs={
'content_type': content_type.id,
'object_id': topic.id
})
response = self.client.post(url, {
'comment': 'test7',
'next': '/',
'preview': 'True'
})
self.assertEquals(len(response.content) > 0, True)
def test_comment_edit(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_edit', kwargs={
'edit_id': comment.pk,
})
self.client.post(url, {
'comment': 'test7_edited',
'next': '/',
})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test7_edited',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_edit_with_preview(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_edit', kwargs={
'edit_id': comment.pk,
})
response = self.client.post(url, {
'comment': 'test7_edited',
'next': '/',
'preview': 'True'
})
self.assertEquals(len(response.content) > 0, True)
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'My test comment!',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_json_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_comment_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'ajax': 'json'
})
self.client.post(url, {'comment': 'test8'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test8',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_json_edit(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_edit_ajax', kwargs={
'edit_id': comment.pk,
'ajax': 'json',
})
response = self.client.post(url, {
'comment': 'test8_edited'
})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test8_edited',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_xml_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
url = reverse('tc_comment_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'ajax': 'xml'
})
self.client.post(url, {'comment': 'test9'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test9',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_xml_edit(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_edit_ajax', kwargs={
'edit_id': comment.pk,
'ajax': 'xml',
})
self.client.post(url, {'comment': 'test8_edited'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test8_edited',
'is_approved': False,
'parent': None,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_child_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_parent', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id
})
self.client.post(url, {
'comment': 'test10',
'next': '/'
})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test10',
'is_approved': False,
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_child_json_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_parent_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id,
'ajax': 'json'
})
self.client.post(url, {'comment': 'test11'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test11',
'is_approved': False,
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_comment_child_xml_create(self):
user = self.create_user_and_login()
topic = TestModel.objects.create(name="Test2")
content_type = ContentType.objects.get_for_model(topic)
parent = ThreadedComment.objects.create_for_object(
topic,
user=user,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
url = reverse('tc_comment_parent_ajax', kwargs={
'content_type': content_type.id,
'object_id': topic.id,
'parent_id': parent.id,
'ajax': 'xml'
})
self.client.post(url, {'comment': 'test12'})
o = ThreadedComment.objects.latest(
'date_submitted').get_base_data(show_dates=False)
self.assertEquals(o, {
'comment': u'test12',
'is_approved': False,
'parent': parent,
'markup': u'Plain text',
'content_object': topic,
'user': user,
'is_public': True,
'ip_address': u'127.0.0.1',
})
def test_freecomment_delete(self):
user = User.objects.create_user(
'testuser',
'[email protected]',
'password',
)
user.is_active = True
user.save()
self.client.login(username='testuser', password='password')
topic = TestModel.objects.create(name="Test2")
comment = FreeThreadedComment.objects.create_for_object(
topic,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
deleted_id = comment.id
url = reverse('tc_free_comment_delete', kwargs={
'object_id': comment.id,
})
response = self.client.post(url, {'next': '/'})
o = response['Location'].split(
'?')[-1] == 'next=/comments/freecomment/%d/delete/' % deleted_id
self.assertEquals(o, True)
# become super user and try deleting comment
user.is_superuser = True
user.save()
response = self.client.post(url, {'next': '/'})
self.assertEquals(response['Location'], 'http://testserver/')
self.assertRaises(
FreeThreadedComment.DoesNotExist,
lambda: FreeThreadedComment.objects.get(id=deleted_id)
)
# re-create comment
comment.save()
response = self.client.get(url, {'next': '/'})
self.assertEquals(len(response.content) > 0, True)
o = FreeThreadedComment.objects.get(id=deleted_id) != None
self.assertEquals(o, True)
def test_comment_delete(self):
some_other_guy = User.objects.create_user(
'some_other_guy',
'[email protected]',
'password1',
)
user = User.objects.create_user(
'testuser',
'[email protected]',
'password',
)
user.is_active = True
user.save()
self.client.login(username='testuser', password='password')
topic = TestModel.objects.create(name="Test2")
comment = ThreadedComment.objects.create_for_object(
topic,
user=some_other_guy,
ip_address=u'127.0.0.1',
comment="My test comment!",
)
deleted_id = comment.id
url = reverse('tc_comment_delete', kwargs={
'object_id': comment.id,
})
response = self.client.post(url, {'next': '/'})
self.assertEquals(response['Location'].split(
'?')[-1], 'next=/comments/comment/%s/delete/' % deleted_id)
user.is_superuser = True
user.save()
response = self.client.post(url, {'next': '/'})
self.assertEquals(response['Location'], 'http://testserver/')
self.assertRaises(
ThreadedComment.DoesNotExist,
lambda: ThreadedComment.objects.get(id=deleted_id)
)
# re-create comment
comment.save()
response = self.client.get(url, {'next': '/'})
self.assertEquals(len(response.content) > 0, True)
o = ThreadedComment.objects.get(id=deleted_id) != None
self.assertEquals(o, True)
| mit | 5,914,407,054,020,151,000 | 30.174528 | 76 | 0.511235 | false | 3.786308 | true | false | false |
delfick/harpoon | harpoon/helpers.py | 1 | 2085 | from contextlib import contextmanager
from io import StringIO
import tempfile
import logging
import time
import os
log = logging.getLogger("harpoon.helpers")
@contextmanager
def a_temp_file():
"""Yield the name of a temporary file and ensure it's removed after use"""
filename = None
try:
tmpfile = tempfile.NamedTemporaryFile(delete=False)
filename = tmpfile.name
yield tmpfile
finally:
if filename and os.path.exists(filename):
os.remove(filename)
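# Usage sketch (illustrative; the consumer function is hypothetical):
#
#   with a_temp_file() as fle:
#       fle.write(b"some bytes")
#       fle.close()
#       consume(fle.name)
#   # the file is removed when the block exits, even on error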
def until(timeout=10, step=0.5, action=None, silent=False):
"""Yield until timeout"""
yield
started = time.time()
while True:
if action and not silent:
log.info(action)
if time.time() - started > timeout:
if action and not silent:
log.error("Timedout %s", action)
return
else:
time.sleep(step)
yield
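# Usage sketch (illustrative; ``ready`` is a hypothetical predicate):
#
#   for _ in until(timeout=5, step=0.1, action="waiting for the container"):
#       if ready():
#           break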
class memoized_property(object):
"""Decorator to make a descriptor that memoizes it's value"""
def __init__(self, func):
self.func = func
self.name = func.__name__
self.cache_name = "_{0}".format(self.name)
def __get__(self, instance=None, owner=None):
if not instance:
return self
if not getattr(instance, self.cache_name, None):
setattr(instance, self.cache_name, self.func(instance))
return getattr(instance, self.cache_name)
def __set__(self, instance, value):
setattr(instance, self.cache_name, value)
def __delete__(self, instance):
if hasattr(instance, self.cache_name):
delattr(instance, self.cache_name)
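# Usage sketch (illustrative class, not part of harpoon):
#
#   class Image(object):
#       @memoized_property
#       def manifest(self):
#           return expensive_fetch()  # computed once, cached on the instance
#
# Assigning to ``image.manifest`` overwrites the cached value, and
# ``del image.manifest`` clears it so the next access recomputes.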
def write_to(output, txt):
"""Write some text to some output"""
if isinstance(txt, bytes) and isinstance(output, StringIO):
output.write(txt.decode("utf-8", "replace"))
elif (
isinstance(txt, str)
and hasattr(output, "file")
and "b" in getattr(output.file, "mode", "w")
):
output.write(txt.encode("utf-8", "replace"))
else:
output.write(txt)
| mit | -888,619,971,368,138,900 | 26.077922 | 78 | 0.606235 | false | 4.025097 | false | false | false |
simbha/GAE-appswell | appspot/framework/lib/error_handling.py | 1 | 1373 | """
Appswell Error Handling Lib
functions for handling errors
USAGE
from lib import error_handling
error_details = error_handling.get_error_details()
error_page = error_handling.render_error_page(error_details)
"""
#
# IMPORTS
#
import sys, os, logging, inspect
from os.path import (abspath, dirname, join as pathjoin)
import traceback
VIEW_DIR = abspath(pathjoin( dirname(__file__), '../views' ))
LAYOUT_DIR = pathjoin( VIEW_DIR, 'layouts' )
VIEW_PATH = pathjoin( VIEW_DIR, 'error/default.mako' )
def get_error_details():
exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
detail = {
'error_type' : exceptionValue,
'tracelist' : traceback.extract_tb(exceptionTraceback),
'trace' : traceback.format_exc(),
'syspath' : sys.path
}
return detail
def render_error_page(detail):
from framework.vendor.mako.template import Template
from framework.vendor.mako.lookup import TemplateLookup
# create mako objects and render
mako_lookup = TemplateLookup( directories=[LAYOUT_DIR],
output_encoding='utf-8',
encoding_errors='replace' )
mako_template = Template(filename=VIEW_PATH, lookup=mako_lookup)
return mako_template.render_unicode(**detail).encode('utf-8', 'replace')
| mit | 4,580,432,256,818,936,000 | 29.511111 | 76 | 0.654042 | false | 3.911681 | false | false | false |
edgedb/edgedb | edb/schema/schema.py | 1 | 63607 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
import abc
import collections
import functools
import itertools
import immutables as immu
from edb import errors
from . import casts as s_casts
from . import functions as s_func
from . import migrations as s_migrations
from . import modules as s_mod
from . import name as sn
from . import objects as so
from . import operators as s_oper
from . import pseudo as s_pseudo
from . import types as s_types
if TYPE_CHECKING:
import uuid
from edb.common import parsing
Refs_T = immu.Map[
uuid.UUID,
immu.Map[
Tuple[Type[so.Object], str],
immu.Map[uuid.UUID, None],
],
]
STD_MODULES = (
sn.UnqualName('std'),
sn.UnqualName('schema'),
sn.UnqualName('math'),
sn.UnqualName('sys'),
sn.UnqualName('cfg'),
sn.UnqualName('cal'),
sn.UnqualName('stdgraphql'),
)
# Specifies the order of processing of files and directories in lib/
STD_SOURCES = (
sn.UnqualName('std'),
sn.UnqualName('schema'),
sn.UnqualName('math'),
sn.UnqualName('sys'),
sn.UnqualName('cfg'),
sn.UnqualName('cal'),
sn.UnqualName('ext'),
)
Schema_T = TypeVar('Schema_T', bound='Schema')
class Schema(abc.ABC):
@abc.abstractmethod
def add_raw(
self: Schema_T,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def add(
self: Schema_T,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def discard(self: Schema_T, obj: so.Object) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def delete(self: Schema_T, obj: so.Object) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def update_obj(
self: Schema_T,
obj: so.Object,
updates: Mapping[str, Any],
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def maybe_get_obj_data_raw(
self,
obj: so.Object,
) -> Optional[Tuple[Any, ...]]:
raise NotImplementedError
@abc.abstractmethod
def get_obj_data_raw(
self,
obj: so.Object,
) -> Tuple[Any, ...]:
raise NotImplementedError
@abc.abstractmethod
def set_obj_field(
self: Schema_T,
obj: so.Object,
field: str,
value: Any,
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def unset_obj_field(
self: Schema_T,
obj: so.Object,
field: str,
) -> Schema_T:
raise NotImplementedError
@abc.abstractmethod
def get_functions(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_func.Function, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_func.Function, ...]:
raise NotImplementedError
@abc.abstractmethod
def get_operators(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_oper.Operator, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_oper.Operator, ...]:
raise NotImplementedError
@abc.abstractmethod
def get_casts_to_type(
self,
to_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
raise NotImplementedError
@abc.abstractmethod
def get_casts_from_type(
self,
from_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
raise NotImplementedError
@overload
def get_referrers(
self,
scls: so.Object,
*,
scls_type: Type[so.Object_T],
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
...
@overload
def get_referrers( # NoQA: F811
self,
scls: so.Object,
*,
scls_type: None = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object]:
...
@abc.abstractmethod
def get_referrers( # NoQA: F811
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
raise NotImplementedError
@abc.abstractmethod
def get_referrers_ex(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
) -> Dict[
Tuple[Type[so.Object_T], str],
FrozenSet[so.Object_T],
]:
raise NotImplementedError
@overload
def get_by_id(
self,
obj_id: uuid.UUID,
default: Union[so.Object, so.NoDefaultT] = so.NoDefault,
*,
type: None = None,
) -> so.Object:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> so.Object_T:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: None = None,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
...
@abc.abstractmethod
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
raise NotImplementedError
@overload
def get_global(
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
) -> so.Object_T:
...
@overload
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: None = None,
) -> Optional[so.Object_T]:
...
@abc.abstractmethod
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
) -> Optional[so.Object_T]:
raise NotImplementedError
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> so.Object:
...
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: None,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> Optional[so.Object]:
...
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
type: Type[so.Object_T],
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> so.Object_T:
...
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: None,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
type: Type[so.Object_T],
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> Optional[so.Object_T]:
...
@overload
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
type: Optional[Type[so.Object_T]] = None,
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> Optional[so.Object]:
...
def get( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
type: Optional[Type[so.Object_T]] = None,
condition: Optional[Callable[[so.Object], bool]] = None,
label: Optional[str] = None,
sourcectx: Optional[parsing.ParserContext] = None,
) -> Optional[so.Object]:
return self.get_generic(
name,
default,
module_aliases=module_aliases,
type=type,
condition=condition,
label=label,
sourcectx=sourcectx,
)
@abc.abstractmethod
def get_generic( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None],
*,
module_aliases: Optional[Mapping[Optional[str], str]],
type: Optional[Type[so.Object_T]],
condition: Optional[Callable[[so.Object], bool]],
label: Optional[str],
sourcectx: Optional[parsing.ParserContext],
) -> Optional[so.Object]:
raise NotImplementedError
@abc.abstractmethod
def has_object(self, object_id: uuid.UUID) -> bool:
raise NotImplementedError
@abc.abstractmethod
def has_module(self, module: str) -> bool:
raise NotImplementedError
def get_children(
self,
scls: so.Object_T,
) -> FrozenSet[so.Object_T]:
# Ideally get_referrers needs to be made generic via
# an overload on scls_type, but mypy crashes on that.
return self.get_referrers(
scls,
scls_type=type(scls),
field_name='bases',
)
def get_descendants(
self,
scls: so.Object_T,
) -> FrozenSet[so.Object_T]:
return self.get_referrers(
scls, scls_type=type(scls), field_name='ancestors')
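    # Illustrative example: given the inheritance chain
    # Animal <- Dog <- Puppy, get_children(Animal) returns {Dog}
    # (direct subtypes, found via the 'bases' field), while
    # get_descendants(Animal) returns {Dog, Puppy} (transitive,
    # found via 'ancestors').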
@abc.abstractmethod
def get_objects(
self,
*,
exclude_stdlib: bool = False,
exclude_global: bool = False,
exclude_internal: bool = True,
included_modules: Optional[Iterable[sn.Name]] = None,
excluded_modules: Optional[Iterable[sn.Name]] = None,
included_items: Optional[Iterable[sn.Name]] = None,
excluded_items: Optional[Iterable[sn.Name]] = None,
type: Optional[Type[so.Object_T]] = None,
extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
) -> SchemaIterator[so.Object_T]:
raise NotImplementedError
@abc.abstractmethod
def get_modules(self) -> Tuple[s_mod.Module, ...]:
raise NotImplementedError
@abc.abstractmethod
def get_last_migration(self) -> Optional[s_migrations.Migration]:
raise NotImplementedError
class FlatSchema(Schema):
_id_to_data: immu.Map[uuid.UUID, Tuple[Any, ...]]
_id_to_type: immu.Map[uuid.UUID, str]
_name_to_id: immu.Map[sn.Name, uuid.UUID]
_shortname_to_id: immu.Map[
Tuple[Type[so.Object], sn.Name],
FrozenSet[uuid.UUID],
]
_globalname_to_id: immu.Map[
Tuple[Type[so.Object], sn.Name],
uuid.UUID,
]
_refs_to: Refs_T
_generation: int
def __init__(self) -> None:
self._id_to_data = immu.Map()
self._id_to_type = immu.Map()
self._shortname_to_id = immu.Map()
self._name_to_id = immu.Map()
self._globalname_to_id = immu.Map()
self._refs_to = immu.Map()
self._generation = 0
def _replace(
self,
*,
id_to_data: Optional[immu.Map[uuid.UUID, Tuple[Any, ...]]] = None,
id_to_type: Optional[immu.Map[uuid.UUID, str]] = None,
name_to_id: Optional[immu.Map[sn.Name, uuid.UUID]] = None,
        shortname_to_id: Optional[
            immu.Map[
                Tuple[Type[so.Object], sn.Name],
                FrozenSet[uuid.UUID]
            ]
        ] = None,
        globalname_to_id: Optional[
            immu.Map[Tuple[Type[so.Object], sn.Name], uuid.UUID]
        ] = None,
refs_to: Optional[Refs_T] = None,
) -> FlatSchema:
new = FlatSchema.__new__(FlatSchema)
if id_to_data is None:
new._id_to_data = self._id_to_data
else:
new._id_to_data = id_to_data
if id_to_type is None:
new._id_to_type = self._id_to_type
else:
new._id_to_type = id_to_type
if name_to_id is None:
new._name_to_id = self._name_to_id
else:
new._name_to_id = name_to_id
if shortname_to_id is None:
new._shortname_to_id = self._shortname_to_id
else:
new._shortname_to_id = shortname_to_id
if globalname_to_id is None:
new._globalname_to_id = self._globalname_to_id
else:
new._globalname_to_id = globalname_to_id
if refs_to is None:
new._refs_to = self._refs_to
else:
new._refs_to = refs_to
new._generation = self._generation + 1
return new # type: ignore
def _update_obj_name(
self,
obj_id: uuid.UUID,
sclass: Type[so.Object],
old_name: Optional[sn.Name],
new_name: Optional[sn.Name],
) -> Tuple[
immu.Map[sn.Name, uuid.UUID],
immu.Map[Tuple[Type[so.Object], sn.Name], FrozenSet[uuid.UUID]],
immu.Map[Tuple[Type[so.Object], sn.Name], uuid.UUID],
]:
name_to_id = self._name_to_id
shortname_to_id = self._shortname_to_id
globalname_to_id = self._globalname_to_id
is_global = not issubclass(sclass, so.QualifiedObject)
has_sn_cache = issubclass(sclass, (s_func.Function, s_oper.Operator))
if old_name is not None:
if is_global:
globalname_to_id = globalname_to_id.delete((sclass, old_name))
else:
name_to_id = name_to_id.delete(old_name)
if has_sn_cache:
old_shortname = sn.shortname_from_fullname(old_name)
sn_key = (sclass, old_shortname)
new_ids = shortname_to_id[sn_key] - {obj_id}
if new_ids:
shortname_to_id = shortname_to_id.set(sn_key, new_ids)
else:
shortname_to_id = shortname_to_id.delete(sn_key)
if new_name is not None:
if is_global:
key = (sclass, new_name)
if key in globalname_to_id:
vn = sclass.get_verbosename_static(new_name)
raise errors.SchemaError(
f'{vn} is already present in the schema')
globalname_to_id = globalname_to_id.set(key, obj_id)
else:
assert isinstance(new_name, sn.QualName)
if (
not self.has_module(new_name.module)
and new_name.module != '__derived__'
):
raise errors.UnknownModuleError(
f'module {new_name.module!r} is not in this schema')
if new_name in name_to_id:
raise errors.SchemaError(
f'name {new_name!r} is already in the schema')
name_to_id = name_to_id.set(new_name, obj_id)
if has_sn_cache:
new_shortname = sn.shortname_from_fullname(new_name)
sn_key = (sclass, new_shortname)
try:
ids = shortname_to_id[sn_key]
except KeyError:
ids = frozenset()
shortname_to_id = shortname_to_id.set(sn_key, ids | {obj_id})
return name_to_id, shortname_to_id, globalname_to_id
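    # Editor's sketch (not part of the original source): a rename touches up
    # to three indexes maintained by this method.  Assuming a qualified
    # object with a shortname cache (e.g. a Function), renaming it means:
    #
    #   name_to_id:       old fullname deleted, new fullname inserted
    #   shortname_to_id:  the id leaves the old shortname bucket (the bucket
    #                     is dropped if it empties) and joins the new one
    #   globalname_to_id: untouched, because the object is not global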
def update_obj(
self,
obj: so.Object,
updates: Mapping[str, Any],
) -> FlatSchema:
if not updates:
return self
obj_id = obj.id
sclass = type(obj)
all_fields = sclass.get_schema_fields()
object_ref_fields = sclass.get_object_reference_fields()
reducible_fields = sclass.get_reducible_fields()
try:
data = list(self._id_to_data[obj_id])
except KeyError:
data = [None] * len(all_fields)
name_to_id = None
shortname_to_id = None
globalname_to_id = None
orig_refs = {}
new_refs = {}
for fieldname, value in updates.items():
field = all_fields[fieldname]
findex = field.index
if fieldname == 'name':
name_to_id, shortname_to_id, globalname_to_id = (
self._update_obj_name(
obj_id,
sclass,
data[findex],
value
)
)
if value is None:
if field in reducible_fields and field in object_ref_fields:
orig_value = data[findex]
if orig_value is not None:
orig_refs[fieldname] = (
field.type.schema_refs_from_data(orig_value))
else:
if field in reducible_fields:
value = value.schema_reduce()
if field in object_ref_fields:
new_refs[fieldname] = (
field.type.schema_refs_from_data(value))
orig_value = data[findex]
if orig_value is not None:
orig_refs[fieldname] = (
field.type.schema_refs_from_data(orig_value))
data[findex] = value
id_to_data = self._id_to_data.set(obj_id, tuple(data))
refs_to = self._update_refs_to(obj_id, sclass, orig_refs, new_refs)
return self._replace(name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
id_to_data=id_to_data,
refs_to=refs_to)
def maybe_get_obj_data_raw(
self,
obj: so.Object,
) -> Optional[Tuple[Any, ...]]:
return self._id_to_data.get(obj.id)
def get_obj_data_raw(
self,
obj: so.Object,
) -> Tuple[Any, ...]:
try:
return self._id_to_data[obj.id]
except KeyError:
err = (f'cannot get item data: item {str(obj.id)!r} '
f'is not present in the schema {self!r}')
raise errors.SchemaError(err) from None
def set_obj_field(
self,
obj: so.Object,
fieldname: str,
value: Any,
) -> FlatSchema:
obj_id = obj.id
try:
data = self._id_to_data[obj_id]
except KeyError:
err = (f'cannot set {fieldname!r} value: item {str(obj_id)!r} '
f'is not present in the schema {self!r}')
raise errors.SchemaError(err) from None
sclass = so.ObjectMeta.get_schema_class(self._id_to_type[obj_id])
field = sclass.get_schema_field(fieldname)
findex = field.index
is_object_ref = field in sclass.get_object_reference_fields()
if field in sclass.get_reducible_fields():
value = value.schema_reduce()
name_to_id = None
shortname_to_id = None
globalname_to_id = None
if fieldname == 'name':
old_name = data[findex]
name_to_id, shortname_to_id, globalname_to_id = (
self._update_obj_name(obj_id, sclass, old_name, value)
)
data_list = list(data)
data_list[findex] = value
new_data = tuple(data_list)
id_to_data = self._id_to_data.set(obj_id, new_data)
if not is_object_ref:
refs_to = None
else:
orig_value = data[findex]
if orig_value is not None:
orig_refs = {
fieldname: field.type.schema_refs_from_data(orig_value),
}
else:
orig_refs = {}
new_refs = {fieldname: field.type.schema_refs_from_data(value)}
refs_to = self._update_refs_to(obj_id, sclass, orig_refs, new_refs)
return self._replace(
name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
id_to_data=id_to_data,
refs_to=refs_to,
)
def unset_obj_field(
self,
obj: so.Object,
fieldname: str,
) -> FlatSchema:
obj_id = obj.id
try:
data = self._id_to_data[obj.id]
except KeyError:
return self
sclass = so.ObjectMeta.get_schema_class(self._id_to_type[obj.id])
field = sclass.get_schema_field(fieldname)
findex = field.index
name_to_id = None
shortname_to_id = None
globalname_to_id = None
orig_value = data[findex]
if orig_value is None:
return self
if fieldname == 'name':
name_to_id, shortname_to_id, globalname_to_id = (
self._update_obj_name(
obj_id,
sclass,
orig_value,
None
)
)
data_list = list(data)
data_list[findex] = None
new_data = tuple(data_list)
id_to_data = self._id_to_data.set(obj_id, new_data)
is_object_ref = field in sclass.get_object_reference_fields()
if not is_object_ref:
refs_to = None
else:
orig_refs = {
fieldname: field.type.schema_refs_from_data(orig_value),
}
refs_to = self._update_refs_to(obj_id, sclass, orig_refs, None)
return self._replace(
name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
id_to_data=id_to_data,
refs_to=refs_to,
)
def _update_refs_to(
self,
object_id: uuid.UUID,
sclass: Type[so.Object],
orig_refs: Optional[Mapping[str, FrozenSet[uuid.UUID]]],
new_refs: Optional[Mapping[str, FrozenSet[uuid.UUID]]],
) -> Refs_T:
objfields = sclass.get_object_reference_fields()
if not objfields:
return self._refs_to
with self._refs_to.mutate() as mm:
for field in objfields:
if not new_refs:
ids = None
else:
ids = new_refs.get(field.name)
if not orig_refs:
orig_ids = None
else:
orig_ids = orig_refs.get(field.name)
if not ids and not orig_ids:
continue
old_ids: Optional[FrozenSet[uuid.UUID]]
new_ids: Optional[FrozenSet[uuid.UUID]]
key = (sclass, field.name)
if ids and orig_ids:
new_ids = ids - orig_ids
old_ids = orig_ids - ids
elif ids:
new_ids = ids
old_ids = None
else:
new_ids = None
old_ids = orig_ids
if new_ids:
for ref_id in new_ids:
try:
refs = mm[ref_id]
except KeyError:
mm[ref_id] = immu.Map((
(key, immu.Map(((object_id, None),))),
))
else:
try:
field_refs = refs[key]
except KeyError:
field_refs = immu.Map(((object_id, None),))
else:
field_refs = field_refs.set(object_id, None)
mm[ref_id] = refs.set(key, field_refs)
if old_ids:
for ref_id in old_ids:
refs = mm[ref_id]
field_refs = refs[key].delete(object_id)
if not field_refs:
mm[ref_id] = refs.delete(key)
else:
mm[ref_id] = refs.set(key, field_refs)
return mm.finish()
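    # Editor's sketch (not part of the original source): the Refs_T index
    # built above is a nested immutable map keyed first by the referenced
    # object's id, then by (referrer class, field name):
    #
    #   refs_to[target_id][(s_func.Function, 'params')] -> {referrer_id: None}
    #
    # The innermost map is effectively an immutable set of referrer ids.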
def add_raw(
self,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> FlatSchema:
name_field = sclass.get_schema_field('name')
name = data[name_field.index]
if name in self._name_to_id:
raise errors.SchemaError(
f'{sclass.__name__} {name!r} is already present '
f'in the schema {self!r}')
if id in self._id_to_data:
raise errors.SchemaError(
f'{sclass.__name__} ({str(id)!r}) is already present '
f'in the schema {self!r}')
object_ref_fields = sclass.get_object_reference_fields()
if not object_ref_fields:
refs_to = None
else:
new_refs = {}
for field in object_ref_fields:
ref = data[field.index]
if ref is not None:
ref = field.type.schema_refs_from_data(ref)
new_refs[field.name] = ref
refs_to = self._update_refs_to(id, sclass, None, new_refs)
name_to_id, shortname_to_id, globalname_to_id = self._update_obj_name(
id, sclass, None, name)
updates = dict(
id_to_data=self._id_to_data.set(id, data),
id_to_type=self._id_to_type.set(id, sclass.__name__),
name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
refs_to=refs_to,
)
if (
issubclass(sclass, so.QualifiedObject)
and not self.has_module(name.module)
and name.module != '__derived__'
):
raise errors.UnknownModuleError(
f'module {name.module!r} is not in this schema')
return self._replace(**updates) # type: ignore
def add(
self,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> FlatSchema:
reducible_fields = sclass.get_reducible_fields()
if reducible_fields:
data_list = list(data)
for field in reducible_fields:
val = data[field.index]
if val is not None:
data_list[field.index] = val.schema_reduce()
data = tuple(data_list)
return self.add_raw(id, sclass, data)
def _delete(self, obj: so.Object) -> FlatSchema:
data = self._id_to_data.get(obj.id)
if data is None:
raise errors.InvalidReferenceError(
f'cannot delete {obj!r}: not in this schema')
sclass = type(obj)
name_field = sclass.get_schema_field('name')
name = data[name_field.index]
updates = {}
name_to_id, shortname_to_id, globalname_to_id = self._update_obj_name(
obj.id, sclass, name, None)
object_ref_fields = sclass.get_object_reference_fields()
if not object_ref_fields:
refs_to = None
else:
values = self._id_to_data[obj.id]
orig_refs = {}
for field in object_ref_fields:
ref = values[field.index]
if ref is not None:
ref = field.type.schema_refs_from_data(ref)
orig_refs[field.name] = ref
refs_to = self._update_refs_to(obj.id, sclass, orig_refs, None)
updates.update(dict(
name_to_id=name_to_id,
shortname_to_id=shortname_to_id,
globalname_to_id=globalname_to_id,
id_to_data=self._id_to_data.delete(obj.id),
id_to_type=self._id_to_type.delete(obj.id),
refs_to=refs_to,
))
return self._replace(**updates) # type: ignore
def discard(self, obj: so.Object) -> FlatSchema:
if obj.id in self._id_to_data:
return self._delete(obj)
else:
return self
def delete(self, obj: so.Object) -> FlatSchema:
return self._delete(obj)
def _get(
self,
name: Union[str, sn.Name],
*,
getter: Callable[[FlatSchema, sn.Name], Any],
default: Any,
module_aliases: Optional[Mapping[Optional[str], str]],
) -> Any:
if isinstance(name, str):
name = sn.name_from_string(name)
shortname = name.name
module = name.module if isinstance(name, sn.QualName) else None
implicit_builtins = module is None
if module == '__std__':
fqname = sn.QualName('std', shortname)
result = getter(self, fqname)
if result is not None:
return result
else:
return default
if module_aliases is not None:
fq_module = module_aliases.get(module)
if fq_module is not None:
module = fq_module
if module is not None:
fqname = sn.QualName(module, shortname)
result = getter(self, fqname)
if result is not None:
return result
if implicit_builtins:
fqname = sn.QualName('std', shortname)
result = getter(self, fqname)
if result is not None:
return result
return default
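    # Editor's sketch (not part of the original source): resolution order in
    # _get() for a hypothetical unqualified name 'foo' with
    # module_aliases={None: 'mymod'}:
    #
    #   1. a '__std__::foo' spelling short-circuits to 'std::foo';
    #   2. otherwise the None-module alias applies and 'mymod::foo' is tried;
    #   3. since the name was unqualified, 'std::foo' is tried as a fallback.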
def get_functions(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_func.Function, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_func.Function, ...]:
if isinstance(name, str):
name = sn.name_from_string(name)
funcs = self._get(name,
getter=_get_functions,
module_aliases=module_aliases,
default=default)
if funcs is not so.NoDefault:
return cast(
Tuple[s_func.Function, ...],
funcs,
)
else:
return self._raise_bad_reference(
name=name,
module_aliases=module_aliases,
type=s_func.Function,
)
def get_operators(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_oper.Operator, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_oper.Operator, ...]:
funcs = self._get(name,
getter=_get_operators,
module_aliases=module_aliases,
default=default)
if funcs is not so.NoDefault:
return cast(
Tuple[s_oper.Operator, ...],
funcs,
)
else:
return self._raise_bad_reference(
name=name,
module_aliases=module_aliases,
type=s_oper.Operator,
)
@functools.lru_cache()
def _get_casts(
self,
stype: s_types.Type,
*,
disposition: str,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
all_casts = cast(
FrozenSet[s_casts.Cast],
self.get_referrers(
stype, scls_type=s_casts.Cast, field_name=disposition),
)
casts = []
for castobj in all_casts:
if implicit and not castobj.get_allow_implicit(self):
continue
if assignment and not castobj.get_allow_assignment(self):
continue
casts.append(castobj)
return frozenset(casts)
def get_casts_to_type(
self,
to_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return self._get_casts(to_type, disposition='to_type',
implicit=implicit, assignment=assignment)
def get_casts_from_type(
self,
from_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return self._get_casts(from_type, disposition='from_type',
implicit=implicit, assignment=assignment)
def get_referrers(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
return self._get_referrers(
scls, scls_type=scls_type, field_name=field_name)
@functools.lru_cache()
def _get_referrers(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
try:
refs = self._refs_to[scls.id]
except KeyError:
return frozenset()
else:
referrers: Set[so.Object] = set()
if scls_type is not None:
if field_name is not None:
for (st, fn), ids in refs.items():
if issubclass(st, scls_type) and fn == field_name:
referrers.update(
self.get_by_id(objid) for objid in ids)
else:
for (st, _), ids in refs.items():
if issubclass(st, scls_type):
referrers.update(
self.get_by_id(objid) for objid in ids)
elif field_name is not None:
for (_, fn), ids in refs.items():
if fn == field_name:
referrers.update(
self.get_by_id(objid) for objid in ids)
else:
refids = itertools.chain.from_iterable(refs.values())
referrers.update(self.get_by_id(objid) for objid in refids)
return frozenset(referrers) # type: ignore
@functools.lru_cache()
def get_referrers_ex(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
) -> Dict[
Tuple[Type[so.Object_T], str],
FrozenSet[so.Object_T],
]:
try:
refs = self._refs_to[scls.id]
except KeyError:
return {}
else:
result = {}
if scls_type is not None:
for (st, fn), ids in refs.items():
if issubclass(st, scls_type):
result[st, fn] = frozenset(
self.get_by_id(objid) for objid in ids)
else:
for (st, fn), ids in refs.items():
result[st, fn] = frozenset( # type: ignore
self.get_by_id(objid) for objid in ids)
return result # type: ignore
@overload
def get_by_id(
self,
obj_id: uuid.UUID,
default: Union[so.Object, so.NoDefaultT] = so.NoDefault,
*,
type: None = None,
) -> so.Object:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> so.Object_T:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: None = None,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
...
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
try:
sclass_name = self._id_to_type[obj_id]
except KeyError:
if default is so.NoDefault:
raise errors.InvalidReferenceError(
f'reference to a non-existent schema item {obj_id}'
f' in schema {self!r}'
) from None
else:
return default
else:
obj = so.Object.schema_restore((sclass_name, obj_id))
if type is not None and not isinstance(obj, type):
raise errors.InvalidReferenceError(
f'schema object {obj_id!r} exists, but is not '
f'{type.get_schema_class_displayname()}'
)
# Avoid the overhead of cast(Object_T) below
return obj # type: ignore
@overload
def get_global(
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
) -> so.Object_T:
...
@overload
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: None = None,
) -> Optional[so.Object_T]:
...
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
) -> Optional[so.Object_T]:
if isinstance(name, str):
name = sn.UnqualName(name)
obj_id = self._globalname_to_id.get((objtype, name))
if obj_id is not None:
return self.get_by_id(obj_id) # type: ignore
elif default is not so.NoDefault:
return default
else:
self._raise_bad_reference(name, type=objtype)
def get_generic(
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None],
*,
module_aliases: Optional[Mapping[Optional[str], str]],
type: Optional[Type[so.Object_T]],
condition: Optional[Callable[[so.Object], bool]],
label: Optional[str],
sourcectx: Optional[parsing.ParserContext],
) -> Optional[so.Object]:
def getter(schema: FlatSchema, name: sn.Name) -> Optional[so.Object]:
obj_id = schema._name_to_id.get(name)
if obj_id is None:
return None
obj = schema.get_by_id(obj_id, type=type, default=None)
if obj is not None and condition is not None:
if not condition(obj):
obj = None
return obj
obj = self._get(name,
getter=getter,
module_aliases=module_aliases,
default=default)
if obj is not so.NoDefault:
return obj # type: ignore
else:
self._raise_bad_reference(
name=name,
label=label,
module_aliases=module_aliases,
sourcectx=sourcectx,
type=type,
)
def _raise_bad_reference(
self,
name: Union[str, sn.Name],
*,
label: Optional[str] = None,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
sourcectx: Optional[parsing.ParserContext] = None,
type: Optional[Type[so.Object]] = None,
) -> NoReturn:
refname = str(name)
if label is None:
if type is not None:
label = type.get_schema_class_displayname()
else:
label = 'schema item'
if type is not None:
if issubclass(type, so.QualifiedObject):
if not sn.is_qualified(refname):
if module_aliases is not None:
default_module = module_aliases.get(None)
if default_module is not None:
refname = type.get_displayname_static(
sn.QualName(default_module, refname),
)
else:
refname = type.get_displayname_static(
sn.QualName.from_string(refname))
else:
refname = type.get_displayname_static(
sn.UnqualName.from_string(refname))
raise errors.InvalidReferenceError(
f'{label} {refname!r} does not exist',
context=sourcectx,
)
def has_object(self, object_id: uuid.UUID) -> bool:
return object_id in self._id_to_type
def has_module(self, module: str) -> bool:
return self.get_global(s_mod.Module, module, None) is not None
def get_objects(
self,
*,
exclude_stdlib: bool = False,
exclude_global: bool = False,
exclude_internal: bool = True,
included_modules: Optional[Iterable[sn.Name]] = None,
excluded_modules: Optional[Iterable[sn.Name]] = None,
included_items: Optional[Iterable[sn.Name]] = None,
excluded_items: Optional[Iterable[sn.Name]] = None,
type: Optional[Type[so.Object_T]] = None,
extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
) -> SchemaIterator[so.Object_T]:
return SchemaIterator[so.Object_T](
self,
self._id_to_type,
exclude_stdlib=exclude_stdlib,
exclude_global=exclude_global,
exclude_internal=exclude_internal,
included_modules=included_modules,
excluded_modules=excluded_modules,
included_items=included_items,
excluded_items=excluded_items,
type=type,
extra_filters=extra_filters,
)
def get_modules(self) -> Tuple[s_mod.Module, ...]:
modules = []
for (objtype, _), objid in self._globalname_to_id.items():
if objtype is s_mod.Module:
modules.append(self.get_by_id(objid, type=s_mod.Module))
return tuple(modules)
def get_last_migration(self) -> Optional[s_migrations.Migration]:
return _get_last_migration(self)
def __repr__(self) -> str:
return (
f'<{type(self).__name__} gen:{self._generation} at {id(self):#x}>')
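# Editor's note (not part of the original source): FlatSchema is persistent;
# every mutator above (add, delete, set_obj_field, ...) leaves `self` intact
# and returns a new schema that shares the unchanged maps, e.g.:
#
#   s2 = s1.set_obj_field(obj, 'name', new_name)   # hypothetical call
#   assert s2 is not s1 and s2._generation == s1._generation + 1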
class SchemaIterator(Generic[so.Object_T]):
def __init__(
self,
schema: Schema,
object_ids: Iterable[uuid.UUID],
*,
exclude_stdlib: bool = False,
exclude_global: bool = False,
exclude_internal: bool = True,
included_modules: Optional[Iterable[sn.Name]],
excluded_modules: Optional[Iterable[sn.Name]],
included_items: Optional[Iterable[sn.Name]] = None,
excluded_items: Optional[Iterable[sn.Name]] = None,
type: Optional[Type[so.Object_T]] = None,
extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
) -> None:
filters = []
if type is not None:
t = type
filters.append(lambda schema, obj: isinstance(obj, t))
if included_modules:
modules = frozenset(included_modules)
filters.append(
lambda schema, obj:
isinstance(obj, so.QualifiedObject) and
obj.get_name(schema).get_module_name() in modules)
if excluded_modules or exclude_stdlib:
excmod: Set[sn.Name] = set()
if excluded_modules:
excmod.update(excluded_modules)
if exclude_stdlib:
excmod.update(STD_MODULES)
filters.append(
lambda schema, obj: (
not isinstance(obj, so.QualifiedObject)
or obj.get_name(schema).get_module_name() not in excmod
)
)
if included_items:
objs = frozenset(included_items)
filters.append(
lambda schema, obj: obj.get_name(schema) in objs)
if excluded_items:
objs = frozenset(excluded_items)
filters.append(
lambda schema, obj: obj.get_name(schema) not in objs)
if exclude_stdlib:
filters.append(
lambda schema, obj: not isinstance(obj, s_pseudo.PseudoType)
)
if exclude_global:
filters.append(
lambda schema, obj: not isinstance(obj, so.GlobalObject)
)
if exclude_internal:
filters.append(
lambda schema, obj: not isinstance(obj, so.InternalObject)
)
# Extra filters are last, because they might depend on type.
filters.extend(extra_filters)
self._filters = filters
self._schema = schema
self._object_ids = object_ids
def __iter__(self) -> Iterator[so.Object_T]:
filters = self._filters
schema = self._schema
get_by_id = schema.get_by_id
for obj_id in self._object_ids:
obj = get_by_id(obj_id)
if all(f(self._schema, obj) for f in filters):
yield obj # type: ignore
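# Editor's sketch (not part of the original source): typical filtered
# iteration, assuming a populated schema and a user module named 'default'.
def _example_iterate_user_objects(schema: Schema) -> None:
    for obj in schema.get_objects(
        exclude_stdlib=True,
        included_modules=[sn.UnqualName('default')],
    ):
        print(obj.get_name(schema))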
class ChainedSchema(Schema):
__slots__ = ('_base_schema', '_top_schema', '_global_schema')
def __init__(
self,
base_schema: FlatSchema,
top_schema: FlatSchema,
global_schema: FlatSchema
) -> None:
self._base_schema = base_schema
self._top_schema = top_schema
self._global_schema = global_schema
def get_top_schema(self) -> FlatSchema:
return self._top_schema
def get_global_schema(self) -> FlatSchema:
return self._global_schema
def add_raw(
self,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> ChainedSchema:
if issubclass(sclass, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.add_raw(id, sclass, data),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.add_raw(id, sclass, data),
self._global_schema,
)
def add(
self,
id: uuid.UUID,
sclass: Type[so.Object],
data: Tuple[Any, ...],
) -> ChainedSchema:
if issubclass(sclass, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.add(id, sclass, data),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.add(id, sclass, data),
self._global_schema,
)
def discard(self, obj: so.Object) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.discard(obj),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.discard(obj),
self._global_schema,
)
def delete(self, obj: so.Object) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.delete(obj),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.delete(obj),
self._global_schema,
)
def update_obj(
self,
obj: so.Object,
updates: Mapping[str, Any],
) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.update_obj(obj, updates),
)
else:
obj_id = obj.id
base_obj = self._base_schema.get_by_id(obj_id, default=None)
if (
base_obj is not None
and not self._top_schema.has_object(obj_id)
):
top_schema = self._top_schema.add_raw(
obj_id,
type(base_obj),
self._base_schema._id_to_data[obj_id],
)
else:
top_schema = self._top_schema
return ChainedSchema(
self._base_schema,
top_schema.update_obj(obj, updates),
self._global_schema,
)
def maybe_get_obj_data_raw(
self,
obj: so.Object,
) -> Optional[Tuple[Any, ...]]:
if isinstance(obj, so.GlobalObject):
return self._global_schema.maybe_get_obj_data_raw(obj)
else:
top = self._top_schema.maybe_get_obj_data_raw(obj)
if top is not None:
return top
else:
return self._base_schema.maybe_get_obj_data_raw(obj)
def get_obj_data_raw(
self,
obj: so.Object,
) -> Tuple[Any, ...]:
if isinstance(obj, so.GlobalObject):
return self._global_schema.get_obj_data_raw(obj)
else:
top = self._top_schema.maybe_get_obj_data_raw(obj)
if top is not None:
return top
else:
return self._base_schema.get_obj_data_raw(obj)
def set_obj_field(
self,
obj: so.Object,
fieldname: str,
value: Any,
) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.set_obj_field(obj, fieldname, value),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.set_obj_field(obj, fieldname, value),
self._global_schema,
)
def unset_obj_field(
self,
obj: so.Object,
field: str,
) -> ChainedSchema:
if isinstance(obj, so.GlobalObject):
return ChainedSchema(
self._base_schema,
self._top_schema,
self._global_schema.unset_obj_field(obj, field),
)
else:
return ChainedSchema(
self._base_schema,
self._top_schema.unset_obj_field(obj, field),
self._global_schema,
)
def get_functions(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_func.Function, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_func.Function, ...]:
objs = self._top_schema.get_functions(
name, module_aliases=module_aliases, default=())
if not objs:
objs = self._base_schema.get_functions(
name, default=default, module_aliases=module_aliases)
return objs
def get_operators(
self,
name: Union[str, sn.Name],
default: Union[
Tuple[s_oper.Operator, ...], so.NoDefaultT
] = so.NoDefault,
*,
module_aliases: Optional[Mapping[Optional[str], str]] = None,
) -> Tuple[s_oper.Operator, ...]:
objs = self._top_schema.get_operators(
name, module_aliases=module_aliases, default=())
if not objs:
objs = self._base_schema.get_operators(
name, default=default, module_aliases=module_aliases)
return objs
def get_casts_to_type(
self,
to_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return (
self._base_schema.get_casts_to_type(
to_type,
implicit=implicit,
assignment=assignment,
)
| self._top_schema.get_casts_to_type(
to_type,
implicit=implicit,
assignment=assignment,
)
)
def get_casts_from_type(
self,
from_type: s_types.Type,
*,
implicit: bool = False,
assignment: bool = False,
) -> FrozenSet[s_casts.Cast]:
return (
self._base_schema.get_casts_from_type(
from_type,
implicit=implicit,
assignment=assignment,
)
| self._top_schema.get_casts_from_type(
from_type,
implicit=implicit,
assignment=assignment,
)
)
def get_referrers(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
field_name: Optional[str] = None,
) -> FrozenSet[so.Object_T]:
return (
self._base_schema.get_referrers(
scls,
scls_type=scls_type,
field_name=field_name,
)
| self._top_schema.get_referrers(
scls,
scls_type=scls_type,
field_name=field_name,
)
| self._global_schema.get_referrers(
scls,
scls_type=scls_type,
field_name=field_name,
)
)
def get_referrers_ex(
self,
scls: so.Object,
*,
scls_type: Optional[Type[so.Object_T]] = None,
) -> Dict[
Tuple[Type[so.Object_T], str],
FrozenSet[so.Object_T],
]:
base = self._base_schema.get_referrers_ex(scls, scls_type=scls_type)
top = self._top_schema.get_referrers_ex(scls, scls_type=scls_type)
gl = self._global_schema.get_referrers_ex(scls, scls_type=scls_type)
return {
k: (
base.get(k, frozenset())
| top.get(k, frozenset())
| gl.get(k, frozenset())
)
            for k in itertools.chain(base, top, gl)
}
@overload
def get_by_id(
self,
obj_id: uuid.UUID,
default: Union[so.Object, so.NoDefaultT] = so.NoDefault,
*,
type: None = None,
) -> so.Object:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> so.Object_T:
...
@overload
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: None = None,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
...
def get_by_id( # NoQA: F811
self,
obj_id: uuid.UUID,
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
*,
type: Optional[Type[so.Object_T]] = None,
) -> Optional[so.Object_T]:
obj = self._top_schema.get_by_id(obj_id, type=type, default=None)
if obj is None:
obj = self._base_schema.get_by_id(
obj_id, default=None, type=type)
if obj is None:
obj = self._global_schema.get_by_id(
obj_id, default=default, type=type)
return obj
@overload
def get_global(
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT] = so.NoDefault,
) -> so.Object_T:
...
@overload
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: None = None,
) -> Optional[so.Object_T]:
...
def get_global( # NoQA: F811
self,
objtype: Type[so.Object_T],
name: Union[str, sn.Name],
default: Union[so.Object_T, so.NoDefaultT, None] = so.NoDefault,
) -> Optional[so.Object_T]:
if issubclass(objtype, so.GlobalObject):
return self._global_schema.get_global( # type: ignore
objtype, name, default=default)
else:
obj = self._top_schema.get_global(objtype, name, default=None)
if obj is None:
obj = self._base_schema.get_global(
objtype, name, default=default)
return obj
def get_generic( # NoQA: F811
self,
name: Union[str, sn.Name],
default: Union[so.Object, so.NoDefaultT, None],
*,
module_aliases: Optional[Mapping[Optional[str], str]],
type: Optional[Type[so.Object_T]],
condition: Optional[Callable[[so.Object], bool]],
label: Optional[str],
sourcectx: Optional[parsing.ParserContext],
) -> Optional[so.Object]:
obj = self._top_schema.get(
name,
module_aliases=module_aliases,
type=type,
default=None,
condition=condition,
label=label,
sourcectx=sourcectx,
)
if obj is None:
return self._base_schema.get(
name,
default=default,
module_aliases=module_aliases,
type=type,
condition=condition,
label=label,
sourcectx=sourcectx,
)
else:
return obj
def has_object(self, object_id: uuid.UUID) -> bool:
return (
self._base_schema.has_object(object_id)
or self._top_schema.has_object(object_id)
or self._global_schema.has_object(object_id)
)
def has_module(self, module: str) -> bool:
return (
self._base_schema.has_module(module)
or self._top_schema.has_module(module)
)
def get_objects(
self,
*,
exclude_stdlib: bool = False,
exclude_global: bool = False,
exclude_internal: bool = True,
included_modules: Optional[Iterable[sn.Name]] = None,
excluded_modules: Optional[Iterable[sn.Name]] = None,
included_items: Optional[Iterable[sn.Name]] = None,
excluded_items: Optional[Iterable[sn.Name]] = None,
type: Optional[Type[so.Object_T]] = None,
extra_filters: Iterable[Callable[[Schema, so.Object], bool]] = (),
) -> SchemaIterator[so.Object_T]:
return SchemaIterator[so.Object_T](
self,
itertools.chain(
self._base_schema._id_to_type,
self._top_schema._id_to_type,
self._global_schema._id_to_type,
),
exclude_global=exclude_global,
exclude_stdlib=exclude_stdlib,
exclude_internal=exclude_internal,
included_modules=included_modules,
excluded_modules=excluded_modules,
included_items=included_items,
excluded_items=excluded_items,
type=type,
extra_filters=extra_filters,
)
def get_modules(self) -> Tuple[s_mod.Module, ...]:
return (
self._base_schema.get_modules()
+ self._top_schema.get_modules()
)
def get_last_migration(self) -> Optional[s_migrations.Migration]:
migration = self._top_schema.get_last_migration()
if migration is None:
migration = self._base_schema.get_last_migration()
return migration
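# Editor's note (not part of the original source): ChainedSchema layers three
# FlatSchemas.  Reads consult the top layer first, then the base, and -- for
# GlobalObjects -- the global layer; writes are routed by object kind, so the
# shared base (stdlib) layer is never mutated.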
@functools.lru_cache()
def _get_functions(
schema: FlatSchema,
name: sn.Name,
) -> Optional[Tuple[s_func.Function, ...]]:
objids = schema._shortname_to_id.get((s_func.Function, name))
if objids is None:
return None
return cast(
Tuple[s_func.Function, ...],
tuple(schema.get_by_id(oid) for oid in objids),
)
@functools.lru_cache()
def _get_operators(
schema: FlatSchema,
name: sn.Name,
) -> Optional[Tuple[s_oper.Operator, ...]]:
objids = schema._shortname_to_id.get((s_oper.Operator, name))
if objids is None:
        return None
return cast(
Tuple[s_oper.Operator, ...],
tuple(schema.get_by_id(oid) for oid in objids),
)
@functools.lru_cache()
def _get_last_migration(
schema: FlatSchema,
) -> Optional[s_migrations.Migration]:
migrations = cast(
List[s_migrations.Migration],
[
schema.get_by_id(mid)
for (t, _), mid in schema._globalname_to_id.items()
if t is s_migrations.Migration
],
)
if not migrations:
return None
migration_map = collections.defaultdict(list)
root = None
for m in migrations:
parents = m.get_parents(schema).objects(schema)
if not parents:
if root is not None:
raise errors.InternalServerError(
'multiple migration roots found')
root = m
for parent in parents:
migration_map[parent].append(m)
if root is None:
raise errors.InternalServerError('cannot find migration root')
latest = root
while children := migration_map[latest]:
if len(children) > 1:
raise errors.InternalServerError(
'nonlinear migration history detected')
latest = children[0]
return latest
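# Editor's note (not part of the original source): migrations form a chain
# through their 'parents' field; _get_last_migration() locates the unique
# root and walks root -> child -> ... to the tip, raising on forks or
# multiple roots.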
| apache-2.0 | -8,112,427,022,049,611,000 | 30.118885 | 79 | 0.517647 | false | 3.844485 | false | false | false |
yytang2012/novels-crawler | simpleSpiders/m-ifuwen.py | 1 | 1913 | import os
import requests
from parsel import Selector
from urllib.parse import urljoin
from libs.polish import polish_content, polish_title, polish_subtitle
def parse_content(url):
page = requests.get(url)
html = page.text
sel = Selector(text=html)
title = sel.xpath('//title/text()').extract()[0]
title = title.split('_')[0]
title = polish_title(title, 'm-ifuwen')
print(title)
file_path = os.path.join(os.getcwd(), '..')
file_path = os.path.join(file_path, 'userData')
file_path = os.path.join(file_path, 'downloads')
file_path = os.path.join(file_path, title + '.txt')
print(file_path)
if os.path.isfile(file_path):
return 0
next_page_url = sel.xpath('//div[@class="lb_mulu chapterList"]/ul/li/a/@href').extract()[0]
next_page_url = urljoin(page.url, next_page_url)
print(next_page_url)
article = ''
idx = 1
while True:
req = requests.get(next_page_url)
html = req.text
sel = Selector(text=html)
subtitle = sel.xpath('//h1/text()').extract()[0]
subtitle = polish_subtitle(subtitle)
article += subtitle
contents = sel.xpath('//div[@id="nr1"]/p/text()').extract()
cc = polish_content(contents)
article += cc
tmp = sel.xpath('//div[@class="nr_page"]/table/tr')
next_page_url = tmp.xpath('td[@class="next"]/a/@href').extract()[0]
        mulu = tmp.xpath('td[@class="mulu"]/a/@href').extract()[0]  # "mulu" = table-of-contents link
if next_page_url == mulu:
break
idx += 1
next_page_url = urljoin(page.url, next_page_url)
print(idx, next_page_url)
save_to_file(file_path, article)
def save_to_file(file_path, article):
print(article)
with open(file_path, 'w', encoding='utf-8') as f:
f.write(article)
if __name__ == '__main__':
url = 'https://m.ifuwen.com/novel/30264.html'
parse_content(url)
| mit | -1,006,423,881,069,310,800 | 29.365079 | 95 | 0.598014 | false | 3.136066 | false | false | false |
hortonworks/hortonworks-sandbox | apps/oozie/src/oozie/management/commands/oozie_setup.py | 1 | 3188 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Portions Copyright © 2013 Hortonworks, Inc.
import logging
import os
from django.contrib.auth.models import User
from django.core import management
from django.core.management.base import NoArgsCommand
from django.utils.translation import ugettext as _
from hadoop import cluster
from hadoop.fs.hadoopfs import Hdfs
from liboozie.conf import REMOTE_DEPLOYMENT_DIR
from oozie.conf import LOCAL_SAMPLE_DATA_DIR, LOCAL_SAMPLE_DIR, REMOTE_SAMPLE_DIR
LOG = logging.getLogger(__name__)
class Command(NoArgsCommand):
def handle_noargs(self, **options):
fs = cluster.get_hdfs()
remote_dir = create_directories(fs)
# Copy examples binaries
for name in os.listdir(LOCAL_SAMPLE_DIR.get()):
local_dir = fs.join(LOCAL_SAMPLE_DIR.get(), name)
remote_data_dir = fs.join(remote_dir, name)
LOG.info(_('Copying examples %(local_dir)s to %(remote_data_dir)s\n') % {
'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)
# Copy sample data
local_dir = LOCAL_SAMPLE_DATA_DIR.get()
remote_data_dir = fs.join(remote_dir, 'data')
LOG.info(_('Copying data %(local_dir)s to %(remote_data_dir)s\n') % {
'local_dir': local_dir, 'remote_data_dir': remote_data_dir})
fs.do_as_user(fs.DEFAULT_USER, fs.copyFromLocal, local_dir, remote_data_dir)
# Load jobs
sample, created = User.objects.get_or_create(username='sample')
management.call_command('loaddata', 'initial_oozie_examples.json', verbosity=2)
from oozie.models import Job
Job.objects.filter(owner__id=1100713).update(owner=sample) # 11OOZIE
def create_directories(fs):
# If needed, create the remote home, deployment and data directories
directories = (REMOTE_DEPLOYMENT_DIR.get(), REMOTE_SAMPLE_DIR.get())
for directory in directories:
if not fs.do_as_user("hdfs", fs.exists, directory):
remote_home_dir = Hdfs.join('/user', "hdfs")
if directory.startswith(remote_home_dir):
# Home is 755
fs.do_as_user("hdfs", fs.create_home_dir, remote_home_dir)
# Shared by all the users
fs.do_as_user("hdfs", fs.mkdir, directory, 511)
fs.do_as_user("hdfs", fs.chmod, directory, 511) # To remove after https://issues.apache.org/jira/browse/HDFS-3491
return REMOTE_SAMPLE_DIR.get()
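# Editor's note (not part of the original file): as a Django management
# command this module is typically run as `python manage.py oozie_setup`
# (or via the application's own manage wrapper), which invokes
# handle_noargs() above.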
| apache-2.0 | -7,638,046,287,902,017,000 | 38.8375 | 119 | 0.709131 | false | 3.452871 | false | false | false |
ddworken/cubeOfResistors | 2D.py | 1 | 2155 | DIFF_THRESHOLD = 1e-40
width = height = 10
class Fixed:
FREE = 0
A = 1
B = 2
class Node:
__slots__ = ["voltage", "fixed"]
def __init__(self, v=0.0, f=Fixed.FREE):
self.voltage = v
self.fixed = f
def set_boundary(mesh):
mesh[width / 2][height / 2] = Node(1.0, Fixed.A)
mesh[width / 2 + 2][height / 2 + 1] = Node(-1.0, Fixed.B)
def calc_difference(mesh, difference):
total = 0.0
for y in xrange(height):
for x in xrange(width):
totalVoltage = 0.0
numberConnections = 0
if y != 0:
totalVoltage += mesh[y-1][x].voltage
numberConnections += 1
if x != 0:
totalVoltage += mesh[y][x-1].voltage
numberConnections += 1
if y < height-1:
totalVoltage += mesh[y + 1][x].voltage
numberConnections += 1
if x < width - 1:
totalVoltage += mesh[y][x + 1].voltage
numberConnections += 1
totalVoltage = mesh[y][x].voltage - totalVoltage / numberConnections
difference[y][x].voltage = totalVoltage
if mesh[y][x].fixed == Fixed.FREE:
total += totalVoltage ** 2
return total
def iter(mesh):  # relaxation loop; note that this shadows the built-in iter()
difference = [[Node() for j in xrange(width)] for i in xrange(height)]
while True:
set_boundary(mesh)
if calc_difference(mesh, difference) < DIFF_THRESHOLD:
break
for i, di in enumerate(difference):
for j, dij in enumerate(di):
mesh[i][j].voltage -= dij.voltage
current = [0.0] * 3
for i, di in enumerate(difference):
for j, dij in enumerate(di):
current[mesh[i][j].fixed] += (dij.voltage *
(bool(i) + bool(j) + (i < height - 1) + (j < width - 1)))
print 2 / ((current[1] - current[2]) / 2.0)
return (current[Fixed.A] - current[Fixed.B]) / 2.0
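# Editor's note (not part of the original script): calc_difference() computes
# a Jacobi-style residual for every node (its voltage minus the mean of its
# neighbours); iter() repeatedly subtracts that residual until the summed
# squared residual over the free nodes falls below DIFF_THRESHOLD.  The net
# current injected at the two fixed nodes is then read off the residuals, and
# the effective grid resistance follows from R = V / I with V = 2 volts
# between the fixed points A (+1 V) and B (-1 V).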
def main():
mesh = [[Node() for j in xrange(width)] for i in xrange(height)]
print "R = " + str(2 / iter(mesh))
if __name__ == "__main__":
main()
| gpl-2.0 | 5,152,036,103,168,269,000 | 26.278481 | 99 | 0.508121 | false | 3.585691 | false | false | false |
mjvakili/ccppabc | ccppabc/code/archive/wp_covariance.py | 1 | 1717 | from halotools.empirical_models import Zheng07 , model_defaults
from halotools.mock_observables import wp
from halotools.mock_observables.clustering import tpcf
from halotools.empirical_models.mock_helpers import (three_dim_pos_bundle,
infer_mask_from_kwargs)
from halotools.mock_observables.clustering import wp
from halotools.sim_manager import supported_sims
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
import time
import numpy as np
model = Zheng07()
xir = []
for i in range(500):
model.populate_mock()
xir.append(model.mock.compute_galaxy_clustering()[1])
covar = np.cov(np.array(xir).T)
np.savetxt("clustering_covariance_Mr20.dat" , covar)
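# Editor's note (not part of the original script): the saved matrix is the
# bin-by-bin covariance of the clustering signal over 500 mock realisations;
# a typical consumer inverts it for a chi-squared statistic, e.g.
#   icov = np.linalg.inv(np.loadtxt("clustering_covariance_Mr20.dat"))
#   chi2 = residual.dot(icov).dot(residual)   # residual = model - data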
"""
a = time.time()
model.mock.compute_galaxy_clustering()
print time.time() - a
rbins = model_defaults.default_rbins
rbin_centers = (rbins[1:] + rbins[:-1])/2.
cat = supported_sims.HaloCatalog()
l = cat.Lbox
print l
p_bins = np.linspace(0,l/2,200)
mask = infer_mask_from_kwargs(model.mock.galaxy_table)
pos = three_dim_pos_bundle(table=model.mock.galaxy_table,
key1='x', key2='y', key3='z', mask=mask,
return_complement=False)
figure = plt.figure(figsize=(10,10))
cl = wp(pos , rbins, p_bins , period = l , estimator = 'Landy-Szalay')
for n_pbins in np.array([2,8,16]):
p_bins = np.linspace(0 , l/2 , n_pbins)
a = time.time()
clustering = wp(pos, rbins, p_bins , period = l , estimator = 'Landy-Szalay')
print time.time() - a
plt.plot(rbin_centers , (clustering)/cl , label = "$N\pi_{bin}$="+str(n_pbins) , lw = 2)
plt.xscale("Log")
plt.yscale("Log")
plt.legend()
plt.savefig("/home/mj/public_html/wpex.png")"""
| mit | 7,285,614,063,505,886,000 | 32.019231 | 90 | 0.664531 | false | 2.861667 | false | false | false |
vprime/puuuu | env/bin/pilfile.py | 1 | 2645 | #!/Users/Vincent/lm_svn/checkouts/personal/papertrail-django/env/bin/python
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import site
import getopt, glob, sys
from PIL import Image
if len(sys.argv) == 1:
print("PIL File 0.4/2003-09-30 -- identify image files")
print("Usage: pilfile [option] files...")
print("Options:")
print(" -f list supported file formats")
print(" -i show associated info and tile data")
print(" -v verify file headers")
print(" -q quiet, don't warn for unidentified/missing/broken files")
sys.exit(1)
try:
opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
print(v)
sys.exit(1)
verbose = quiet = verify = 0
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats:")
for i in id:
print(i, end=' ')
sys.exit(1)
elif o == "-i":
verbose = 1
elif o == "-q":
quiet = 1
elif o == "-v":
verify = 1
elif o == "-D":
Image.DEBUG = Image.DEBUG + 1
def globfix(files):
# expand wildcards where necessary
if sys.platform == "win32":
out = []
for file in files:
if glob.has_magic(file):
out.extend(glob.glob(file))
else:
out.append(file)
return out
return files
for file in globfix(args):
try:
im = Image.open(file)
print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
if verbose:
print(im.info, im.tile, end=' ')
print()
if verify:
try:
im.verify()
except:
if not quiet:
print("failed to verify image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
except IOError as v:
if not quiet:
print(file, "failed:", v)
except:
import traceback
if not quiet:
print(file, "failed:", "unexpected error")
traceback.print_exc(file=sys.stdout)
| mit | 2,741,384,306,119,726,000 | 26.842105 | 77 | 0.564461 | false | 3.494055 | false | false | false |
swegener/gruvi | src/build_http.py | 1 | 3155 | #
# This file is part of Gruvi. Gruvi is free software available under the
# terms of the MIT license. See the file "LICENSE" that was provided
# together with this source file for the licensing terms.
#
# Copyright (c) 2012-2017 the Gruvi authors. See the file "AUTHORS" for a
# complete list.
from __future__ import absolute_import, print_function
import os.path
from cffi import FFI
parent, _ = os.path.split(os.path.abspath(__file__))
topdir, _ = os.path.split(parent)
ffi = FFI()
ffi.set_source('http_ffi', """
#include <stdlib.h>
#include "src/http_parser.h"
#include "src/http_parser.c"
unsigned char http_message_type(http_parser *p) { return p->type; }
unsigned int http_status_code(http_parser *p) { return p->status_code; }
unsigned int http_method(http_parser *p) { return p->method; }
unsigned char http_errno(http_parser *p) { return p->http_errno; }
unsigned char http_is_upgrade(http_parser *p) { return p->upgrade; }
""", include_dirs=[topdir])
ffi.cdef("""
typedef struct http_parser http_parser;
typedef struct http_parser_settings http_parser_settings;
typedef int (*http_data_cb) (http_parser*, const char *at, size_t length);
typedef int (*http_cb) (http_parser*);
enum http_parser_type { HTTP_REQUEST, HTTP_RESPONSE, HTTP_BOTH, ... };
struct http_parser {
unsigned short http_major;
unsigned short http_minor;
void *data;
...;
};
struct http_parser_settings {
http_cb on_message_begin;
http_data_cb on_url;
http_data_cb on_status;
http_data_cb on_header_field;
http_data_cb on_header_value;
http_cb on_headers_complete;
http_data_cb on_body;
http_cb on_message_complete;
...;
};
enum http_parser_url_fields { UF_SCHEMA, UF_HOST, UF_PORT, UF_PATH,
UF_QUERY, UF_FRAGMENT, UF_USERINFO, UF_MAX };
struct http_parser_url {
uint16_t field_set;
uint16_t port;
struct {
uint16_t off;
uint16_t len;
} field_data[UF_MAX];
...;
};
void http_parser_init(http_parser *parser, enum http_parser_type type);
size_t http_parser_execute(http_parser *parser,
const http_parser_settings *settings,
const char *data,
size_t len);
int http_should_keep_alive(const http_parser *parser);
const char *http_method_str(enum http_method m);
const char *http_errno_name(enum http_errno err);
void http_parser_url_init(struct http_parser_url *u);
int http_parser_parse_url(const char *buf, size_t buflen,
int is_connect, struct http_parser_url *u);
/* Extra functions to extract bitfields not supported by cffi */
unsigned char http_message_type(http_parser *parser);
unsigned int http_status_code(http_parser *parser);
unsigned int http_method(http_parser *parser);
unsigned char http_errno(http_parser *parser);
unsigned char http_is_upgrade(http_parser *parser);
""")
if __name__ == '__main__':
ffi.compile()
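# Editor's sketch (not part of the original file): once compiled, the
# generated http_ffi module can drive the parser roughly like this
# (callback wiring elided; NULL callbacks act as no-ops):
#
#   from http_ffi import ffi, lib
#   parser = ffi.new('http_parser *')
#   lib.http_parser_init(parser, lib.HTTP_REQUEST)
#   settings = ffi.new('http_parser_settings *')
#   data = b'GET / HTTP/1.1\r\n\r\n'
#   nparsed = lib.http_parser_execute(parser, settings, data, len(data))
#   assert nparsed == len(data)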
| mit | 1,236,139,961,076,085,500 | 30.868687 | 78 | 0.626941 | false | 3.467033 | false | false | false |
aroberge/docpicture | examples/fake_turtle.py | 1 | 7015 | """
This is a fake turtle module (with no relevant executable code, other than
a local docpicture parser included for testing) obtained through
severely amputating the original turtle module, for the purpose of
demonstrating the docpicture concept.
We start by including a drawing made with a docpicture "parser"
that is not part of the normal docpicture distribution, but is
defined in this file. We *suggest* that such parser names start
with "self." to indicate to the reader that they are defined locally.
docpicture will handle any name - but will first look for names in
its normal set.
..docpicture:: self.red_turtle
turtle.down()
turtle.color("orange")
turtle(45).forward(200)
Note that we get an error message saying that this parser is not
recognized. This will be changed, once this parser is set to be
"trusted".
From the original:
====================
Turtle graphics is a popular way for introducing programming to
kids. It was part of the original Logo programming language developed
by Wally Feurzeig and Seymour Papert in 1966.
Imagine a robotic turtle starting at (0, 0) in the x-y plane. Give it
the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
the direction it is facing, drawing a line as it moves. Give it the
command turtle.left(25), and it rotates in-place 25 degrees counterclockwise.
By combining together these and similar commands, intricate shapes and
pictures can easily be drawn.
=====================
For docpictures, we modify slightly the notation so as to include
the angle at which the turtle is rotated. For example, we could have
..docpicture:: bw_turtle
turtle(20).forward(125)
We also have some other styles available, such as
..docpicture:: color_turtle
turtle.down()
turtle(20).forward(125)
and even
..docpicture:: turtle
turtle.down()
turtle.color("red")
turtle(20).forward(125)
Finally, we include a drawing with an unknown docpicture object - no
drawing will ever be made.
..docpicture:: unknown
turtle(20).forward(125)
"""
import parsers.turtle
import src.svg as svg
class RawPen:
def forward(self, distance):
""" Go forward distance steps.
Example:
>>> turtle.position()
[0.0, 0.0]
>>> turtle.forward(25)
>>> turtle.position()
[25.0, 0.0]
>>> turtle.forward(-75)
>>> turtle.position()
[-50.0, 0.0]
=====================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle(0).forward(75)
"""
pass
def left(self, angle):
""" Turn left angle units (units are by default degrees,
but can be set via the degrees() and radians() functions.)
When viewed from above, the turning happens in-place around
its front tip.
Example:
>>> turtle.heading()
22
>>> turtle.left(45)
>>> turtle.heading()
67.0
================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle(22).left(45)
"""
pass
def right(self, angle):
""" Turn right angle units (units are by default degrees,
but can be set via the degrees() and radians() functions.)
When viewed from above, the turning happens in-place around
its front tip.
Example:
>>> turtle.heading()
22
>>> turtle.right(45)
>>> turtle.heading()
337.0
================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle(22).right(45)
"""
pass
def up(self):
""" Pull the pen up -- no drawing when moving.
Example:
>>> turtle.up()
================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle.up()
turtle(10).forward(100)
"""
pass
def down(self):
""" Put the pen down -- draw when moving.
Example:
>>> turtle.down()
================
Let's add a picture
..docpicture:: turtle
turtle.down()
turtle(10).forward(100)
"""
pass
def color(self, *args):
""" Set the pen color.
In the original, three input formats are allowed; for docpicture,
only the named color is supported.
color(s)
s is a Tk specification string, such as "red" or "yellow"
Example:
>>> turtle.color('brown')
================
Using docpicture.view, you can see something like this in picture.
..docpicture:: turtle
turtle.down()
turtle.color("brown")
turtle(10).forward(100)
"""
pass
class RedTurtle(parsers.turtle.Turtle):
def __init__(self):
parsers.turtle.Turtle.__init__(self)
self.directive_name = 'self.red_turtle'
def get_svg_defs(self):
'''returns an object representing all the svg defs'''
defs = svg.SvgDefs()
defs.append(self.turtle_defs())
defs.append(self.plus_signs_defs())
return defs
def turtle_defs(self):
'''creates the svg:defs content for the turtle'''
t = svg.SvgElement("g", id="red_turtle")
# legs
t.append(svg.SvgElement("circle", cx=23, cy=16, r=8, fill="yellow"))
t.append(svg.SvgElement("circle", cx=23, cy=-15, r=8, fill="yellow"))
t.append(svg.SvgElement("circle", cx=-23, cy=16, r=8, fill="yellow"))
t.append(svg.SvgElement("circle", cx=-23, cy=-15, r=8, fill="yellow"))
# head and eyes
t.append(svg.SvgElement("circle", cx=32, cy=0, r=8, fill="yellow"))
t.append(svg.SvgElement("circle", cx=36, cy=4, r=2, fill="black"))
t.append(svg.SvgElement("circle", cx=36, cy=-4, r=2, fill="black"))
# body
t.append(svg.SvgElement("ellipse", cx=0, cy=0, rx=30, ry=25,
fill="red"))
return t
def first_turtle(self):
'''creation of first turtle '''
# same as Turtle, except no filter
t1 = svg.SvgElement("g", transform="translate(%d, %d)"%(self.x1, self.y1))
_t1 = svg.SvgElement("use", x=0, y=0, transform="rotate(%s 0 0)"%(-float(self.angle1)))
_t1.attributes["xlink:href"] = "#red_turtle"
t1.append(_t1)
return t1
def second_turtle(self):
'''creation of second turtle'''
# same as Turtle, except no filter
t2 = svg.SvgElement("g", transform="translate(%d, %d)"%(self.x2, self.y2))
_t2 = svg.SvgElement("use", x=0, y=0, transform="rotate(%s 0 0)"%(-float(self.angle2)))
_t2.attributes["xlink:href"] = "#red_turtle"
t2.append(_t2)
return t2
def register_docpicture_parser(register_parser):
register_parser(RedTurtle)
| bsd-3-clause | 6,207,094,637,867,796,000 | 28.351464 | 95 | 0.594726 | false | 3.814573 | false | false | false |
hmarkus/dynclasp | DflatDecomposition.py | 1 | 2536 | #!/bin/python
class DflatIdentifiable(object):
def __init__(self, keys):
self._keys = keys
def add(self, val):
self._keys.append(val)
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self._keys)
def id(self):
return DflatIdentifiable.idstr(self._keys)
def keys(self):
return self._keys
@staticmethod
def idstr(val):
val.sort()
return str(val)
def content(self):
		self.id()  # called for its side effect: idstr() sorts self._keys in place
return DflatIdentifiable.contentStr(self._keys, lambda _ : False) #DflatIdentifiable.SHOW_ALL)
@staticmethod
def contentItemStr(val):
if type(val) is list:
res = ""
i = 0
for j in val:
if i == 0:
res += j + "("
else: #if not exc(j):
if i > 1:
res += ", "
res += DflatIdentifiable.contentItemStr(j)
i += 1
res += ")"
return res
else:
return val
@staticmethod
def contentStr(val, exc):
res = "["
for j in val:
if not exc(j):
if len(res) > 1:
res += ", "
res += DflatIdentifiable.contentItemStr(j)
res += "]"
return res
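# Example (illustrative, not part of the original module): contentItemStr
# renders a nested list as a functor term, while contentStr joins items:
#   DflatIdentifiable.contentItemStr(['f', 'a', 'b'])          # -> 'f(a, b)'
#   DflatIdentifiable.contentStr(['x', 'y'], lambda _: False)  # -> '[x, y]'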
class DflatRowContainer(DflatIdentifiable):
def __init__(self, keys):
super(DflatRowContainer, self).__init__(keys)
self._next = []
self._prev = []
self._node = None
def setNode(self, n):
self._node = n
def node(self):
return self._node
def prev(self):
return self._prev
def next(self):
return self._next
def setPrev(self, child):
self._prev = child
def setNext(self, child):
self._next = child
def addPrev(self, child):
self._prev.append(child)
def addNext(self, child):
self._next.append(child)
def __str__(self):
#return super(DflatDecomposition, self).__str__(self._keys) + str(self._next)
return super(DflatRowContainer, self).__str__() + "@" #+ str(self.prev()) # + "->" + str(self._next)
class DflatDecomposition(DflatRowContainer):
def __init__(self, keys, fullintro = True):
super(DflatDecomposition, self).__init__(keys)
self._nr = 0
self._intro = []
self._posIntro = fullintro
self._introManaged = False
def setNr(self, nr):
self._nr = nr
def nr(self):
return self._nr
def addIntro(self, intro):
self._intro.append(intro)
def setIntroPositive(self, introP):
self._posIntro = introP
def setIntro(self, intro):
self._intro = intro
def intro(self):
if not self._posIntro and not self._introManaged:
self._intro = set(self._keys) - set(self._intro)
self._introManaged = True
return self._intro
def content(self):
return "n" + str(self._nr) + ": " + super(DflatDecomposition, self).content()
| gpl-3.0 | 3,731,327,717,661,745,700 | 18.658915 | 102 | 0.630521 | false | 2.80531 | false | false | false |
QueenMargaretsCompSci/PiWars2016 | source/wii_remote_test.py | 1 | 3219 | #!/usr/bin/python
# import our modules
import cwiid
import time
import RPi.GPIO as GPIO
import piconzero as pz
import sensorlibs as sl
from picamera import PiCamera
# setup our camera
cam = PiCamera()
# setup our constants
button_delay = 0.1
PIN_LED = sl.GPIOtoBoard(4)
GPIO.setup(PIN_LED, GPIO.OUT)
GPIO.output(PIN_LED, 0)
# prompt for Wii connection
print 'Press 1 + 2 on your Wii Remote now ...'
GPIO.output(PIN_LED, 1)
time.sleep(1)
# Connect to the Wii Remote. If it times out
# then quit.
try:
wii=cwiid.Wiimote()
GPIO.output(PIN_LED, 0)
except RuntimeError:
print "Error opening wiimote connection"
GPIO.output(PIN_LED, 0)
quit()
print 'Wii Remote connected...\n'
print 'Press some buttons!\n'
print 'Press PLUS and MINUS together to disconnect and quit.\n'
# connected so lets flash our LED
for x in range(0,3):
GPIO.output(PIN_LED, 1)
time.sleep(0.25)
GPIO.output(PIN_LED, 0)
time.sleep(0.25)
wii.rpt_mode = cwiid.RPT_BTN
# initialise piconzero
pz.init()
# start recording
ts = str(time.time())
cam.vflip = True
cam.hflip = True
cam.start_recording("/home/pi/qmpiwars/videos/remote-" + ts + ".h264")
while True:
buttons = wii.state['buttons']
# If Plus and Minus buttons pressed
# together then rumble and quit.
if (buttons - cwiid.BTN_PLUS - cwiid.BTN_MINUS == 0):
print '\nClosing connection ...'
wii.rumble = 1
time.sleep(1)
wii.rumble = 0
sl.neoPixelLight("off")
pz.cleanup()
cam.stop_recording()
exit(wii)
# Check if other buttons are pressed by
# doing a bitwise AND of the buttons number
# and the predefined constant for that button.
if (buttons & cwiid.BTN_LEFT):
print 'Left pressed'
pz.spinRight(100)
time.sleep(button_delay)
sl.neoPixelLight("left")
if(buttons & cwiid.BTN_RIGHT):
print 'Right pressed'
pz.spinLeft(100)
time.sleep(button_delay)
sl.neoPixelLight("right")
if (buttons & cwiid.BTN_UP):
print 'Up pressed'
pz.forward(80)
time.sleep(button_delay)
sl.neoPixelLight("forward")
if (buttons & cwiid.BTN_B):
print 'Turbo pressed'
pz.forward(100)
time.sleep(button_delay)
sl.neoPixelLight("forward")
if (buttons & cwiid.BTN_DOWN):
print 'Down pressed'
pz.reverse(80)
time.sleep(button_delay)
sl.neoPixelLight("backward")
if (buttons & cwiid.BTN_1):
print 'Button 1 pressed'
time.sleep(button_delay)
if (buttons & cwiid.BTN_2):
print 'Button 2 pressed'
time.sleep(button_delay)
if (buttons & cwiid.BTN_A):
print 'Button A pressed'
pz.stop()
sl.neoPixelLight("off")
time.sleep(button_delay)
##########################################
# Not using these buttons
#
# if (buttons & cwiid.BTN_B):
# print 'Button B pressed'
# time.sleep(button_delay)
#
# if (buttons & cwiid.BTN_HOME):
# print 'Home Button pressed'
# time.sleep(button_delay)
#
# if (buttons & cwiid.BTN_MINUS):
# print 'Minus Button pressed'
# time.sleep(button_delay)
#
# if (buttons & cwiid.BTN_PLUS):
# print 'Plus Button pressed'
# time.sleep(button_delay)
| gpl-3.0 | 1,293,673,930,725,031,700 | 22.326087 | 70 | 0.636533 | false | 2.977798 | false | false | false |
google/closure-templates | python/runtime.py | 1 | 22615 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runtime module for compiled soy templates.
This module provides utility functions required by soy templates compiled with
the Python compilers. These functions handle the runtime internals necessary to
match JS behavior in module and function loading, along with type behavior.
"""
from __future__ import unicode_literals
__author__ = '[email protected] (David Phillips)'
import importlib
import math
import os
import re
import sys
from . import environment
from . import sanitize
import six
try:
import scandir
except ImportError:
scandir = None
# To allow the rest of the file to assume Python 3 strings, we will assign str
# to unicode for Python 2. This will error in 3 and be ignored.
try:
str = unicode # pylint: disable=redefined-builtin, invalid-name
except NameError:
pass
# Map from registered delegate template key to the priority, function, and
# function name tuple.
_DELEGATE_REGISTRY = {}
# All number types for use during custom type functions.
_NUMBER_TYPES = six.integer_types + (float,)
# The mapping of css class names for get_css_name.
_css_name_mapping = None
# The xid map for get_xid_name.
_xid_name_mapping = None
def get_xid_name(xid):
"""Return the mapped xid name.
Args:
xid: The xid name to modify.
Returns:
The renamed xid.
"""
if _xid_name_mapping:
renamed = _xid_name_mapping.get(xid)
if renamed:
return renamed
return xid + '_'
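# Example (illustrative, not part of the original module): without a mapping,
# xids just gain a trailing underscore; with one, the mapped name wins.
#   get_xid_name('foo')                    # -> 'foo_'
#   set_xid_name_mapping({'foo': 'x123'})
#   get_xid_name('foo')                    # -> 'x123'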
def get_css_name(class_name, modifier=None):
"""Return the mapped css class name with modifier.
Following the pattern of goog.getCssName in closure, this function maps a css
class name to its proper name, and applies an optional modifier.
If no mapping is present, the class_name and modifier are joined with hyphens
and returned directly.
If a mapping is present, the resulting css name will be retrieved from the
mapping and returned.
If one argument is passed it will be processed, if two are passed only the
modifier will be processed, as it is assumed the first argument was generated
as a result of calling goog.getCssName.
Args:
class_name: The class name to look up.
modifier: An optional modifier to append to the class_name.
Returns:
A mapped class name with optional modifier.
"""
pieces = [class_name]
if modifier:
pieces.append(modifier)
if _css_name_mapping:
# Only map the last piece of the name.
pieces[-1] = _css_name_mapping.get(pieces[-1], pieces[-1])
return '-'.join(pieces)
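# Example (illustrative, not part of the original module): only the last piece
# of the name is looked up in the mapping.
#   set_css_name_mapping({'goog': 'a', 'disabled': 'b'})
#   get_css_name('goog')              # -> 'a'
#   get_css_name('goog', 'disabled')  # -> 'goog-b'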
def set_css_name_mapping(mapping):
"""Set the mapping of css names.
Args:
mapping: A dictionary of original class names to mapped class names.
"""
global _css_name_mapping
_css_name_mapping = mapping
def set_xid_name_mapping(mapping):
"""Sets the mapping of xids.
Args:
mapping: A dictionary of xid names.
"""
global _xid_name_mapping
_xid_name_mapping = mapping
def get_delegate_fn(template_id, variant, allow_empty_default):
"""Get the delegate function associated with the given template_id/variant.
Retrieves the (highest-priority) implementation that has been registered for
a given delegate template key (template_id and variant). If no implementation
has been registered for the key, then the fallback is the same template_id
with empty variant. If the fallback is also not registered,
and allow_empty_default is true, then returns an implementation that is
equivalent to an empty template (i.e. rendered output would be empty string).
Args:
template_id: The delegate template id.
variant: The delegate template variant (can be an empty string, or a number
when a global is used).
allow_empty_default: Whether to default to the empty template function if
there's no active implementation.
Returns:
The retrieved implementation function.
Raises:
RuntimeError: when no implementation of one delegate template is found.
"""
entry = _DELEGATE_REGISTRY.get(_gen_delegate_id(template_id, variant))
fn = entry[1] if entry else None
# variant may be another zero value besides the empty string and we want to
# detect that
# pylint: disable=g-explicit-bool-comparison
if not fn and variant != '':
# Fallback to empty variant.
entry = _DELEGATE_REGISTRY.get(_gen_delegate_id(template_id))
fn = entry[1] if entry else None
if fn:
return fn
elif allow_empty_default:
return _empty_template_function
else:
msg = ('Found no active impl for delegate call to "%s%s" '
'(and delcall does not set allowemptydefault="true").')
raise RuntimeError(msg % (template_id, ':' + variant if variant else ''))
def concat_attribute_values(l, r, delimiter):
"""Merge two attribute values with a delimiter or use one or the other.
Args:
l: The string which is prefixed in the return value
r: The string which is suffixed in the return value
delimiter: The delimiter between the two sides
Returns:
The combined string separated by the delimiter.
"""
if not l:
return r
if not r:
return l
return l + delimiter + r
def concat_css_values(l, r):
"""Merge two css values.
Args:
l: The css which is prefixed in the return value
r: The css which is suffixed in the return value
Returns:
The combined css separated by the delimiter.
"""
return sanitize.SanitizedCss(
concat_attribute_values(str(l), str(r), ';'),
sanitize.IActuallyUnderstandSoyTypeSafetyAndHaveSecurityApproval(
"""Internal framework code."""))
def merge_into_dict(original, secondary):
"""Merge two dictionaries into the first and return it.
  This is simply a convenience wrapper around the dictionary update method. In
  addition to the update, it returns the original dict to allow for chaining.
Args:
original: The dict which will be updated.
secondary: The dict which will be copied.
Returns:
The updated original dictionary.
"""
original.update(secondary)
return original
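# Example (illustrative): merge_into_dict mutates and returns its first
# argument, which is what allows chaining.
#   d = {'a': 1}
#   merge_into_dict(d, {'b': 2})  # -> {'a': 1, 'b': 2}; d is updated in place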
def namespaced_import(name, namespace=None, environment_path=None):
"""A function to import compiled soy modules using the Soy namespace.
This function attempts to first import the module directly. If it isn't found
in the matching package as the Soy Namespace, it will walk the sys.path
structure open any module with a matching name and test its SOY_NAMESPACE
attribute. If it matches it will load that instead.
Multiple files can share the same soy namespace. In that instance, all of
these files will be loaded, combined, and loaded as one module.
Note: If multiple files share the same namespace, they still require that the
module name begins with the last part of the namespace (e.g.
soy.examples.delegates will load delegates0.py, delegatesxyz.py, etc.).
TODO(dcphillips): See if there's any way we can avoid this limitation without
blowing up load times.
Args:
name: The name of the module to import.
namespace: The namespace of the module to import.
environment_path: A custom environment module path for interacting with the
runtime environment.
Returns:
The Module object.
"""
full_namespace = '%s.%s' % (namespace, name) if namespace else name
try:
# Try searching for the module directly
return importlib.import_module(full_namespace)
except ImportError:
# If the module isn't found, search without the namespace and check the
# namespaces.
if namespace:
namespace_key = "SOY_NAMESPACE: '%s'." % full_namespace
module = None
if environment_path:
file_loader = importlib.import_module(environment_path).file_loader
else:
file_loader = environment.file_loader
for sys_path, f_path, f_name in _find_modules(name):
# Verify the file namespace by comparing the 5th line.
with file_loader(f_path, f_name, 'r') as f:
for _ in range(4):
next(f)
if namespace_key != next(f).rstrip():
continue
# Strip the root path and the file extension.
module_path = six.ensure_str(os.path.relpath(f_path, sys_path)).replace(
'/', '.')
module_name = os.path.splitext(f_name)[0]
# Python 2 performs relative or absolute imports. Beginning with
# Python 3.3, only absolute imports are possible. Compare the
# docs for the default value of the `level` argument of `__import__`:
# https://docs.python.org/2/library/functions.html#__import__
# https://docs.python.org/3/library/functions.html#__import__
module = getattr(
__import__(module_path, globals(), locals(), [module_name]),
module_name)
break
if module:
# Add this to the global modules list for faster loading in the future.
_cache_module(full_namespace, module)
return module
raise
def manifest_import(namespace, manifest):
"""Imports a module using a namespace manifest to find the module."""
if not manifest:
raise ImportError('No manifest provided')
elif namespace not in manifest:
    raise ImportError('Manifest does not contain namespace: %s' % namespace)
return importlib.import_module(manifest[namespace])
def key_safe_data_access(data, key):
"""Safe key based data access.
Traditional bracket access in Python (foo['bar']) will throw a KeyError (or
IndexError if in a list) when encountering a non-existent key.
  foo.get(key, None) solves this problem for objects, but doesn't work with
lists. Thus this function serves to do safe access with a unified syntax for
both lists and dictionaries.
Args:
data: The data object to search for the key within.
key: The key to use for access.
Returns:
data[key] if key is present or None otherwise.
"""
try:
return data[key]
except (KeyError, IndexError):
return None
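# Example (illustrative): one call shape covers both dicts and lists, yielding
# None instead of raising on a missing key or index.
#   key_safe_data_access({'a': 1}, 'b')  # -> None
#   key_safe_data_access([1, 2, 3], 5)   # -> None
#   key_safe_data_access([1, 2, 3], 1)   # -> 2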
def register_delegate_fn(template_id, variant, priority, fn, fn_name):
"""Register a delegate function in the global registry.
Args:
template_id: The id for the given template.
variant: The variation key for the given template.
priority: The priority value of the given template.
fn: The template function.
fn_name: A unique name of the function generated at compile time.
Raises:
RuntimeError: If a delegate was attempted to be added with the same
priority an error will be raised.
"""
map_key = _gen_delegate_id(template_id, variant)
curr_priority, _, curr_fn_name = _DELEGATE_REGISTRY.get(
map_key, (None, None, None))
  # Ignore unless at an equal or higher priority.
if curr_priority is None or priority > curr_priority:
# Registering new or higher-priority function: replace registry entry.
_DELEGATE_REGISTRY[map_key] = (priority, fn, fn_name)
elif priority == curr_priority and fn_name != curr_fn_name:
# Registering same-priority function: error.
raise RuntimeError(
'Encountered two active delegates with the same priority (%s:%s:%s).' %
(template_id, variant, priority))
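# Example (illustrative, names are hypothetical): register an implementation,
# then resolve it through get_delegate_fn.
#   register_delegate_fn('my.tmpl', '', 0, lambda data, ij: 'out', 'fn_v0')
#   get_delegate_fn('my.tmpl', '', False)  # -> the lambda registered above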
def type_safe_add(*args):
"""A coercion function emulating JS style type conversion in the '+' operator.
This function is similar to the JavaScript behavior when using the '+'
  operator. Variables will use the default behavior of the '+' operator
until they encounter a type error at which point the more 'simple' type will
be coerced to the more 'complex' type.
Supported types are None (which is treated like a bool), bool, primitive
numbers (int, float, etc.), and strings. All other objects will be converted
to strings.
Example:
type_safe_add(True, True) = 2
type_safe_add(True, 3) = 4
type_safe_add(3, 'abc') = '3abc'
type_safe_add(True, 3, 'abc') = '4abc'
type_safe_add('abc', True, 3) = 'abcTrue3'
Args:
*args: List of parameters for addition/coercion.
Returns:
The result of the addition. The return type will be based on the most
'complex' type passed in. Typically an integer or a string.
"""
if not args:
return None
# JS operators can sometimes work as unary operators. So, we fall back to the
# initial value here in those cases to prevent ambiguous output.
if len(args) == 1:
return args[0]
is_string = isinstance(args[0], six.string_types)
result = args[0]
for arg in args[1:]:
try:
if is_string:
arg = _convert_to_js_string(arg)
result += arg
except TypeError:
# Special case for None which can be converted to bool but is not
# autocoerced. This can result in a conversion of result from a boolean to
# a number (which can affect later string conversion) and should be
# retained.
if arg is None:
result += False
else:
result = _convert_to_js_string(result) + _convert_to_js_string(arg)
is_string = True
return result
def list_contains(l, item):
return list_indexof(l, item) >= 0
def list_indexof(l, item):
"""Equivalent getting the index of `item in l` but using soy's equality algorithm."""
for i in range(len(l)):
if type_safe_eq(l[i], item):
return i
return -1
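# Example (illustrative): list_indexof uses Soy's coercing equality, so a
# string element can match a numeric probe.
#   list_indexof([1, '2', 3], 2)   # -> 1, since type_safe_eq('2', 2) is True
#   list_contains([1, '2', 3], 4)  # -> False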
def concat_maps(d1, d2):
"""Merges two maps together."""
d3 = dict(d1)
d3.update(d2)
return d3
def map_entries(m):
"""Return map entries."""
return [{'key': k, 'value': m[k]} for k in m]
def list_slice(l, start, stop):
"""Equivalent of JavaScript Array.prototype.slice."""
return l[slice(start, stop)]
def list_reverse(l):
"""Reverses a list. The original list passed is not modified."""
return l[::-1]
def number_list_sort(l):
"""Sorts in numerical order."""
# Lists of numbers are sorted numerically by default.
return sorted(l)
def string_list_sort(l):
"""Sorts in lexicographic order."""
# Lists of strings are sorted lexicographically by default.
return sorted(l)
def type_safe_eq(first, second):
"""An equality function that does type coercion for various scenarios.
  This function emulates JavaScript's equality behavior. In JS, Objects will be
converted to strings when compared to a string primitive.
Args:
first: The first value to compare.
second: The second value to compare.
Returns:
True/False depending on the result of the comparison.
"""
  # If either value is None or the types already match, no coercion is needed.
# TODO(dcphillips): Do a more basic type equality check if it's not slower
# (b/16661176).
if first is None or second is None or type(first) == type(second):
return first == second
try:
# TODO(dcphillips): This potentially loses precision for very large numbers.
# See b/16241488.
if isinstance(first, _NUMBER_TYPES) and not isinstance(first, bool):
return first == float(second)
if isinstance(second, _NUMBER_TYPES) and not isinstance(second, bool):
return float(first) == second
if isinstance(first, six.string_types):
return first == str(second)
if isinstance(second, six.string_types):
return str(first) == second
except ValueError:
    # Ignore type coercion failures
pass
return first == second
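# Example (illustrative) of the JS-like coercions performed above:
#   type_safe_eq(1, '1')       # -> True  ('1' is coerced to 1.0)
#   type_safe_eq(True, 1)      # -> True  (True is coerced to 1.0)
#   type_safe_eq(None, False)  # -> False (None short-circuits, no coercion)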
def check_not_null(val):
"""A helper to implement the Soy Function checkNotNull.
Args:
val: The value to test.
Returns:
val if it was not None.
Raises:
RuntimeError: If val is None.
"""
if val is None:
raise RuntimeError('Unexpected null value')
return val
def is_set(field, container):
"""A helper to implement the Soy Function isSet.
Args:
field (str): The field to test.
container (Dict[str, Any]): The container to test.
Returns:
True if the field is set in the container.
"""
return field in container
def parse_int(s):
"""A function that attempts to convert the input string into an int.
Returns None if the input is not a valid int.
Args:
s: String to convert.
Returns:
int if s is a valid int string, otherwise None.
"""
try:
return int(s)
except ValueError:
return None
def parse_float(s):
"""A function that attempts to convert the input string into a float.
Returns None if the input is not a valid float, or if the input is NaN.
Args:
s: String to convert.
Returns:
float if s is a valid float string that is not NaN, otherwise None.
"""
try:
f = float(s)
except ValueError:
return None
return None if math.isnan(f) else f
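# Example (illustrative): both parsers return None instead of raising on bad
# input, and parse_float additionally rejects NaN.
#   parse_int('42')     # -> 42
#   parse_int('4.2')    # -> None
#   parse_float('4.2')  # -> 4.2
#   parse_float('nan')  # -> None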
def sqrt(num):
"""Returns the square root of the given number."""
return math.sqrt(num)
def unsupported(msg):
raise Exception('unsupported feature: ' + msg)
def map_to_legacy_object_map(m):
"""Converts a Soy map to a Soy legacy_object_map.
legacy_object_maps must have string keys, but maps do not have this
restriction.
Args:
m: Map to convert.
Returns:
An equivalent legacy_object_map, with keys coerced to strings.
"""
return {str(key): m[key] for key in m}
def str_to_ascii_lower_case(s):
"""Converts the ASCII characters in the given string to lower case."""
return ''.join([c.lower() if 'A' <= c <= 'Z' else c for c in s])
def str_to_ascii_upper_case(s):
"""Converts the ASCII characters in the given string to upper case."""
return ''.join([c.upper() if 'a' <= c <= 'z' else c for c in s])
def str_starts_with(s, val):
"""Returns whether s starts with val."""
return s.startswith(val)
def str_ends_with(s, val):
"""Returns whether s ends with val."""
return s.endswith(val)
def str_replace_all(s, match, token):
"""Replaces all occurrences in s of match with token."""
return s.replace(match, token)
def str_trim(s):
"""Trims leading and trailing whitespace from s."""
return s.strip()
def str_split(s, sep):
"""Splits s into an array on sep."""
return s.split(sep) if sep else list(s)
def str_substring(s, start, end):
"""Implements the substring method according to the JavaScript spec."""
if start < 0:
start = 0
if end is not None:
if end < 0:
end = 0
if start > end:
# pylint: disable=arguments-out-of-order
return str_substring(s, end, start)
return s[start:end]
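# Example (illustrative) of the JavaScript-spec behavior implemented above:
#   str_substring('hello', 1, 3)   # -> 'el'
#   str_substring('hello', 3, 1)   # -> 'el'   (start/end are swapped)
#   str_substring('hello', -2, 3)  # -> 'hel'  (negative start clamps to 0)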
def soy_round(num, precision=0):
"""Implements the soy rounding logic for the round() function.
Python rounds ties away from 0 instead of towards infinity as JS and Java do.
So to make the behavior consistent, we add the smallest possible float amount
to break ties towards infinity.
Args:
num: the number to round
precision: the number of digits after the point to preserve
Returns:
a rounded number
"""
float_breakdown = math.frexp(num)
tweaked_number = ((float_breakdown[0] + sys.float_info.epsilon) *
2**float_breakdown[1])
rounded_number = round(tweaked_number, precision)
if not precision or precision < 0:
return int(rounded_number)
return rounded_number
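# Example (illustrative): ties break toward positive infinity to match JS and
# Java, unlike Python 3's banker's rounding.
#   round(2.5)       # -> 2 in Python 3
#   soy_round(2.5)   # -> 3
#   soy_round(-2.5)  # -> -2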
######################
# Utility functions. #
######################
# pylint: disable=unused-argument
def _empty_template_function(data=None, ij_data=None):
return ''
def _cache_module(namespace, module):
"""Cache a loaded module in sys.modules.
Besides the caching of the main module itself, any parent packages that don't
exist need to be cached as well.
Args:
namespace: The python namespace.
module: The module object to be cached.
"""
sys.modules[namespace] = module
while '.' in namespace:
namespace = namespace.rsplit('.', 1)[0]
if namespace in sys.modules:
return
# TODO(dcphillips): Determine if anything's gained by having real modules
# for the packages.
sys.modules[namespace] = {}
def _convert_to_js_string(value):
"""Convert a value to a string, with the JS string values for primitives.
Args:
value: The value to stringify.
Returns:
A string representation of value. For primitives, ensure that the result
matches the string value of their JS counterparts.
"""
if value is None:
return 'null'
elif isinstance(value, bool):
return str(value).lower()
else:
return str(value)
def _find_modules(name):
"""Walks the sys path and looks for modules that start with 'name'.
This function yields all results which match the pattern in the sys path.
It can be treated similar to os.walk(), but yields only files which match
the pattern. These are meant to be used for traditional import
syntax. Bad paths are ignored and skipped.
Args:
name: The name to match against the beginning of the module name.
Yields:
A tuple containing the path, the base system path, and the file name.
"""
# TODO(dcphillips): Allow for loading of compiled source once namespaces are
# limited to one file (b/16628735).
module_file_name = re.compile(r'^%s.*\.py$' % name)
# If scandir is available, it offers 5-20x improvement of walk performance.
walk = scandir.walk if scandir else os.walk
for path in sys.path:
try:
for root, _, files in walk(path):
for f in files:
if module_file_name.match(f):
yield path, root, f
except OSError:
# Ignore bad paths
pass
def _gen_delegate_id(template_id, variant=''):
return 'key_%s:%s' % (template_id, variant)
def create_template_type(template, name):
"""Returns a wrapper object for a given template function.
The wrapper object forwards calls to the underlying template, but overrides
the __str__ method.
Args:
template: The underlying template function.
name: The fully-qualified template name.
Returns:
A wrapper object that can be called like the underlying template.
"""
return _TemplateWrapper(template, name)
def bind_template_params(template, params):
"""Binds the given parameters to the given template."""
return lambda data, ij: template(dict(data, **params), ij)
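# Example (illustrative, hypothetical template): pre-bind a parameter while
# keeping the (data, ij) render contract unchanged.
#   greet = lambda data, ij: 'Hello %s' % data['name']
#   bound = bind_template_params(greet, {'name': 'soy'})
#   bound({}, None)  # -> 'Hello soy'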
class _TemplateWrapper:
"""A wrapper object that forwards to the underlying template."""
def __init__(self, template, name):
self.template = template
self.name = name
def __call__(self, *args):
return self.template(*args)
def __str__(self):
return '** FOR DEBUGGING ONLY: %s **' % self.name
| apache-2.0 | -4,886,007,463,098,833,000 | 28.446615 | 87 | 0.691576 | false | 3.894438 | false | false | false |
nodakai/watchman | tests/integration/test_sock_perms.py | 1 | 10512 | # vim:ts=4:sw=4:et:
# Copyright 2016-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
# no unicode literals
from __future__ import absolute_import, division, print_function
import os
import random
import stat
import string
import sys
import time
import pywatchman
import WatchmanInstance
import WatchmanTestCase
try:
import grp
except ImportError:
# Windows
pass
try:
import unittest2 as unittest
except ImportError:
import unittest
@unittest.skipIf(
os.name == "nt" or sys.platform == "darwin" or os.geteuid() == 0,
"win or root or bad ldap",
)
class TestSockPerms(unittest.TestCase):
def _new_instance(self, config, expect_success=True):
if expect_success:
start_timeout = 10
else:
# If the instance is going to fail anyway then there's no point
# waiting so long
start_timeout = 5
return WatchmanInstance.InstanceWithStateDir(
config=config, start_timeout=start_timeout
)
def _get_custom_gid(self):
# This is a bit hard to do: we need to find a group the user is a member
# of that's not the effective or real gid. If there are none then we
# must skip.
groups = os.getgroups()
for gid in groups:
if gid != os.getgid() and gid != os.getegid():
return gid
self.skipTest("no usable groups found")
def _get_non_member_group(self):
"""Get a group tuple that this user is not a member of."""
user_groups = set(os.getgroups())
for group in grp.getgrall():
if group.gr_gid not in user_groups:
return group
self.skipTest("no usable groups found")
def waitFor(self, cond, timeout=10):
deadline = time.time() + timeout
res = None
while time.time() < deadline:
try:
res = cond()
if res:
return [True, res]
except Exception:
pass
time.sleep(0.03)
return [False, res]
def assertWaitFor(self, cond, timeout=10, message=None, get_debug_output=None):
status, res = self.waitFor(cond, timeout)
if status:
return res
if message is None:
message = "%s was not met in %s seconds: %s" % (cond, timeout, res)
if get_debug_output is not None:
message += "\ndebug output:\n%s" % get_debug_output()
self.fail(message)
def test_too_open_user_dir(self):
instance = self._new_instance({}, expect_success=False)
os.makedirs(instance.user_dir)
os.chmod(instance.user_dir, 0o777)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
wanted = "the permissions on %s allow others to write to it" % (
instance.user_dir
)
self.assertWaitFor(
lambda: wanted in instance.getCLILogContents(),
get_debug_output=lambda: instance.getCLILogContents(),
)
def test_invalid_sock_group(self):
# create a random group name
while True:
group_name = "".join(
random.choice(string.ascii_lowercase) for _ in range(8)
)
try:
grp.getgrnam(group_name)
except KeyError:
break
instance = self._new_instance({"sock_group": group_name}, expect_success=False)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
# This is the error we expect to find
wanted = "group '%s' does not exist" % group_name
# But if the site uses LDAP or YP/NIS or other similar technology for
        # their password database then we might experience other infra flakiness,
# so we allow for the alternative error case to be present and consider
# it a pass.
we_love_ldap = "getting gid for '%s' failed:" % group_name
self.assertWaitFor(
lambda: (wanted in instance.getCLILogContents())
or (we_love_ldap in instance.getCLILogContents()),
get_debug_output=lambda: str(ctx.exception)
+ "\n"
+ instance.getCLILogContents(),
)
def test_user_not_in_sock_group(self):
group = self._get_non_member_group()
instance = self._new_instance(
{"sock_group": group.gr_name}, expect_success=False
)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
wanted = "setting up group '%s' failed" % group.gr_name
self.assertWaitFor(
lambda: wanted in instance.getCLILogContents(),
get_debug_output=lambda: instance.getCLILogContents(),
)
def test_default_sock_group(self):
# By default the socket group should be the effective gid of the process
gid = os.getegid()
instance = self._new_instance({})
instance.start()
instance.stop()
self.assertFileGID(instance.user_dir, gid)
self.assertFileGID(instance.sock_file, gid)
def test_custom_sock_group(self):
gid = self._get_custom_gid()
group = grp.getgrgid(gid)
instance = self._new_instance({"sock_group": group.gr_name})
instance.start()
instance.stop()
self.assertFileGID(instance.user_dir, gid)
self.assertFileGID(instance.sock_file, gid)
def test_user_previously_in_sock_group(self):
"""This tests the case where a user was previously in sock_group
(so Watchman created the directory with that group), but no longer is
(so the socket is created with a different group)."""
# Since it's hard to drop a group from a process without being
# superuser, fake it. Use a private testing-only config option to set
# up separate groups for the directory and the file.
gid = self._get_custom_gid()
group = grp.getgrgid(gid)
non_member_group = self._get_non_member_group()
# Need to wait for the server to come up here, can't use
# expect_success=False.
instance = self._new_instance(
{"sock_group": group.gr_name, "__sock_file_group": non_member_group.gr_name}
)
with self.assertRaises(pywatchman.SocketConnectError):
instance.start()
wanted = (
"for socket '%s', gid %d doesn't match expected gid %d "
"(group name %s)."
% (
instance.getSockPath().unix_domain,
gid,
non_member_group.gr_gid,
non_member_group.gr_name,
)
)
self.assertWaitFor(lambda: wanted in instance.getServerLogContents())
def test_invalid_sock_access(self):
instance = self._new_instance({"sock_access": "bogus"}, expect_success=False)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
wanted = "Expected config value sock_access to be an object"
self.assertWaitFor(
lambda: wanted in instance.getCLILogContents(),
get_debug_output=lambda: instance.getCLILogContents(),
)
instance = self._new_instance(
{"sock_access": {"group": "oui"}}, expect_success=False
)
with self.assertRaises(pywatchman.SocketConnectError) as ctx:
instance.start()
self.assertEqual(ctx.exception.sockpath, instance.getSockPath().unix_domain)
wanted = "Expected config value sock_access.group to be a boolean"
self.assertWaitFor(
lambda: wanted in instance.getCLILogContents(),
get_debug_output=lambda: instance.getCLILogContents(),
)
def test_default_sock_access(self):
instance = self._new_instance({})
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o700 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o600)
def test_custom_sock_access_group(self):
instance = self._new_instance({"sock_access": {"group": True}})
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o750 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o660)
def test_custom_sock_access_others(self):
instance = self._new_instance({"sock_access": {"group": True, "others": True}})
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o755 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o666)
def test_sock_access_upgrade(self):
instance = self._new_instance({"sock_access": {"group": True, "others": True}})
os.makedirs(instance.user_dir)
os.chmod(instance.user_dir, 0o700)
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o755 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o666)
def test_sock_access_downgrade(self):
instance = self._new_instance({"sock_access": {"group": True}})
os.makedirs(instance.user_dir)
os.chmod(instance.user_dir, 0o755 | stat.S_ISGID)
instance.start()
instance.stop()
self.assertFileMode(instance.user_dir, 0o750 | stat.S_ISGID)
self.assertFileMode(instance.sock_file, 0o660)
def test_sock_access_group_change(self):
gid = self._get_custom_gid()
group = grp.getgrgid(gid)
instance = self._new_instance({"sock_group": group.gr_name})
os.makedirs(instance.user_dir)
# ensure that a different group is set
os.chown(instance.user_dir, -1, os.getegid())
instance.start()
instance.stop()
self.assertFileGID(instance.user_dir, gid)
self.assertFileGID(instance.sock_file, gid)
def assertFileMode(self, f, mode):
st = os.lstat(f)
self.assertEqual(stat.S_IMODE(st.st_mode), mode)
def assertFileGID(self, f, gid):
st = os.lstat(f)
self.assertEqual(st.st_gid, gid)
| apache-2.0 | 3,227,173,042,821,205,500 | 35.884211 | 88 | 0.612348 | false | 3.880399 | true | false | false |
dgrtwo/gleam | examples/baseball.py | 1 | 2364 | import os
from collections import OrderedDict
from flask import Flask
from wtforms import fields
from ggplot import (aes, stat_smooth, geom_point, geom_text, ggtitle, ggplot,
xlab, ylab)
import numpy as np
import pandas as pd
from gleam import Page, panels
# setup
stats = ['At-Bats (AB)', 'Runs (R)', 'Hits (H)', 'Doubles (2B)',
'Triples (3B)', 'Home Runs (HR)', 'Runs Batted In (RBI)',
'Stolen Bases (SB)', 'Caught Stealing (CS)', 'Walks (BB)',
'Intentional Walk (IBB)', 'Salary', 'Attendance']
statchoices = [(s, s) for s in stats]
dir = os.path.split(__file__)[0]
players = pd.read_csv(os.path.join(dir, "baseball_data", "players.csv"))
teams = pd.read_csv(os.path.join(dir, "baseball_data", "teams.csv"))
class BaseballInput(panels.InputPanel):
xvar = fields.SelectField(label="X axis", choices=statchoices,
default="Hits (H)")
yvar = fields.SelectField(label="Y axis", choices=statchoices,
default="Runs (R)")
year = fields.IntegerField(label="Year", default=2013)
linear = fields.BooleanField(label="Linear Fit")
shownames = fields.BooleanField(label="Show Names")
class DataScatter(panels.PlotPanel):
height = 500
width = 700
def __init__(self, name, dat, ID_col):
self.name = name
self.dat = dat
self.ID_col = ID_col
panels.PlotPanel.__init__(self)
def plot(self, inputs):
"""Plot the given X and Y axes on a scatter plot"""
if inputs.year not in self.dat.Year.values:
return
if inputs.xvar not in self.dat or inputs.yvar not in self.dat:
return
subdat = self.dat[self.dat.Year == inputs.year]
p = ggplot(subdat, aes(x=inputs.xvar, y=inputs.yvar))
p = p + geom_point()
if inputs.shownames:
p = p + geom_text(aes(label=self.ID_col), vjust=1, hjust=1)
if inputs.linear:
p = p + stat_smooth(color="red", method="lm")
return p
class BaseballGleam(Page):
title = "Baseball Statistics"
input = BaseballInput()
output = panels.TabPanel([DataScatter("Teams", teams, "teamID"),
DataScatter("Players", players, "name")])
app = Flask("BaseballGleam")
BaseballGleam.add_flask(app)
app.debug = True
app.run()
| mit | 4,333,654,083,607,356,400 | 28.924051 | 77 | 0.60533 | false | 3.2879 | false | false | false |
gangadhar-kadam/mic-erpnext | stock/report/delivery_note_trends/delivery_note_trends.py | 1 | 1058 | # ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from controllers.trends import get_columns,get_data
def execute(filters=None):
if not filters: filters ={}
data = []
trans = "Delivery Note"
conditions = get_columns(filters, trans)
data = get_data(filters, conditions)
return conditions["columns"], data | agpl-3.0 | -2,810,063,489,910,439,000 | 36.821429 | 71 | 0.751418 | false | 3.833333 | false | false | false |
sevikkk/django-sql-explorer | explorer/forms.py | 1 | 3003 | from django import forms
from django.forms import ModelForm, Field, ValidationError
from explorer.models import Query, MSG_FAILED_BLACKLIST
from django.db import DatabaseError, connections
from crontab import CronTab
from explorer.utils import get_connections_list
_ = lambda x: x
class SqlField(Field):
def validate(self, value):
"""
Ensure that the SQL passes the blacklist and executes. Execution check is skipped if params are present.
:param value: The SQL for this Query model.
"""
query = Query(sql=value)
error = MSG_FAILED_BLACKLIST if not query.passes_blacklist() else None
#if not error and not query.available_params():
# try:
# query.try_execute()
# except DatabaseError as e:
# error = str(e)
if error:
raise ValidationError(
_(error),
code="InvalidSql"
)
class CrontabField(Field):
def validate(self, value):
"""
        Ensure that the field is a valid crontab entry
:param value: The schedule entry for this Query model.
"""
error = None
if not value:
return
if value.startswith('#'):
return
try:
cron = CronTab(value)
except ValueError, e:
error = str(e)
if error:
raise ValidationError(
_(error),
code="InvalidCrontabEntry"
)
class DatabaseField(forms.ChoiceField):
def __init__(self, *args, **kwargs):
super(DatabaseField, self).__init__(choices=get_connections_list(), *args, **kwargs)
def validate(self, value):
"""
        Ensure that the value is a configured database connection
        :param value: The database alias for this Query model.
"""
error = None
if not value:
return
if value not in connections._databases:
error = "Connection is not configured, known connections: %s" % (", ".join(connections._databases.keys()))
if error:
raise ValidationError(
_(error),
code="InvalidDatabase"
)
class QueryForm(ModelForm):
sql = SqlField()
schedule = CrontabField()
database = DatabaseField()
def clean(self):
if self.instance and self.data.get('created_by_user', None):
self.cleaned_data['created_by_user'] = self.instance.created_by_user
return super(QueryForm, self).clean()
@property
def created_by_user_email(self):
return self.instance.created_by_user.email if self.instance.created_by_user else '--'
@property
def created_by_user_id(self):
return self.instance.created_by_user.id if self.instance.created_by_user else '--'
class Meta:
model = Query
fields = ['title', 'sql', 'description', 'created_by_user', 'database', 'cache_table', 'schedule', 'groups'] | mit | 8,805,063,488,350,659,000 | 25.584071 | 118 | 0.586414 | false | 4.396779 | false | false | false |
ywang037/delta-ntu-slerp4 | Training/train_mobilenet_casia_1771.py | 1 | 7420 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 14:47:47 2017
@author: slerp4
Compared with _debug version, this version excludes RMSprop optimizer
"""
#import tensorflow as tf
from keras import backend as K
from keras.applications.mobilenet import MobileNet
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD, Adam
from keras.callbacks import LearningRateScheduler, CSVLogger
import os, importlib
from timeit import default_timer as timer
import datetime
import math
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import tensorflow as tf
# check and set tensorflow as backend
if K.backend() != 'tensorflow':
os.environ['KERAS_BACKEND'] = 'tensorflow'
importlib.reload(K)
assert K.backend() == 'tensorflow'
print('{} backend is sucessfully set'.format(K.backend()))
elif K.backend() == 'tensorflow':
print('{} backend has already been set'.format(K.backend()))
# force to use gpu:0 tesla k20c
# Creates a graph.
with tf.device('/device:GPU:0'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print(sess.run(c))
# training hyper parameters
train_data_dir = r'.\Datasets\casia-1771'
numclass = 1771
num_train_samples = 233505
batch_size = 64
#epochs = 100
alpha = 0.5 # choices=[0.25, 0.5, 0.75, 1.0]
inputsize = 224 # choices=[128, 160, 192, 224], >=32 is ok
'''
# define step decay function - used to visualize learning rate change
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.lr = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.lr.append(step_decay(len(self.losses)))
print('Current learning rate:', step_decay(len(self.losses)))
'''
# learning rate schedule
def step_decay(epoch):
# initial_lrate = 0.01
drop = 0.5
epochs_drop = 20.0
lrate = init_lr * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
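# Illustrative schedule (not part of the original script): with init_lr = 0.01,
# step_decay returns 0.01 for epochs 0-18, 0.005 for epochs 19-38, 0.0025 for
# epochs 39-58, and so on -- the rate halves every 20 epochs.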
# Setup the model
# using CASIA-WebFaces dataset for training, 10575 identities in total
model = MobileNet(alpha=alpha, depth_multiplier=1, dropout=1e-3,
include_top=True, weights=None, input_tensor=None, pooling=None, classes=numclass)
model.summary()
print('\nPrepare to train cnn model {}-MobileNet-224 with top layer included'.format(alpha))
#print('Total classes: {}'.format(numclass))
#print('Training samples: {}'.format(num_train_samples))
optimizer_chosen = input('Optimizer (A: SGD/B: Adam)? ')
while optimizer_chosen not in ['A', 'B']:
optimizer_chosen = input('Optimizer (A: SGD/B: Adam)? ')
epochs = int(input('Number of epochs? '))
while epochs < 0:
epochs = int(input('Use a positive integer as the number of epochs: '))
init_lr = float(input('Initial learning rate? '))
while init_lr < 0 or init_lr>0.2:
init_lr = float(input('Use a learning rate in [0, 0.2]: '))
# preparing training data
print('\nDataset path: '+ train_data_dir)
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# load training and testing data
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(224, 224),
batch_size=batch_size)
# define the format of names of several files
stamp = str(alpha)+'-mobilenet-'+str(inputsize)+'-c{}-'.format(numclass)+'b{}-'.format(batch_size)+'e{}-'.format(epochs)
if optimizer_chosen == 'A':
# using step-decaying sgd
method = 'SGD'
print('\nUsing step-decaying stochastic gradient descent')
print('learning rate folds every 20 epochs')
sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)
# compile the model
# loss = mse can be tried also
train_start = timer()
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
'''
# use following scripts to have learning rate displayed
# learning schedule callback
loss_history = LossHistory()
lrate = LearningRateScheduler(step_decay)
# training logger callback, log in csv file
record = stamp + method
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
callbacks_list = [loss_history, lrate, csv_logger]
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples//batch_size,
epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=2)
'''
# learning schedule callback
lrate = LearningRateScheduler(step_decay)
# training logger callback, log in csv file
record = stamp + method + '-lr{}'.format(init_lr)
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
callbacks_list = [lrate, csv_logger]
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples//batch_size,
epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=1)
elif optimizer_chosen == 'B':
# using adam update as adaptive learning rate method
method = 'Adam'
    print('\nUsing Adam update as the adaptive learning rate method')
adam = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) # original lr=0.001
# compile the model
# loss = mse can be tried also
train_start = timer()
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
# training logger callback, log in csv file
record = stamp + method + '-lr{}'.format(init_lr)
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples // batch_size,
epochs=epochs, validation_data=None, callbacks=[csv_logger], verbose=1)
train_end = timer()
mins, secs = divmod(train_end-train_start,60)
hour, mins = divmod(mins,60)
print('Training process took %d:%02d:%02d' % (hour,mins,secs))
# set a stamp of file name for saving the record and weights
now = datetime.datetime.now() #current date and time
save_name = record +'-'+now.strftime("%Y%m%d-%H%M")
#print(history.history)
print(history.history.keys())
# print plots of acc and loss in one pdf
pp = PdfPages(save_name +'.pdf')
# summarize history for accuracy
plt.plot(history.history['acc']) # plt.plot(history.history['val_acc'])
plt_title = str(alpha)+'-mobilenet-'+str(inputsize)+' trained on small dataset'
plt_legend = method + ', {} classes'.format(numclass)+', batch size ={}'.format(batch_size)
plt.title(plt_title)
plt.ylabel('Model accuracy')
plt.xlabel('Epoch')
plt.legend([plt_legend], loc='lower right')
pp.savefig()
plt.show()
# summarize history for loss
plt.plot(history.history['loss']) #plt.plot(history.history['val_loss'])
plt.title(plt_title)
plt.ylabel('Model loss')
plt.xlabel('Epoch')
plt.legend([plt_legend], loc='upper left') #plt.legend(['train', 'test'], loc='upper left')
pp.savefig()
plt.show()
pp.close()
# save trained weights
model.save_weights(save_name +'.h5')
| mit | -1,016,345,155,239,914,100 | 35.732673 | 120 | 0.684097 | false | 3.331837 | false | false | false |
gslab-econ/gslab_python | gslab_make/get_externals.py | 1 | 4632 | #! /usr/bin/env python
import os
import private.preliminaries as prelim
import private.metadata as metadata
import private.messages as messages
from private.getexternalsdirectives import SystemDirective
def get_externals(externals_file,
external_dir = '@DEFAULTVALUE@',
makelog = '@DEFAULTVALUE@',
quiet = False):
'''Fetch external files
Description:
This function interprets a formatted text document listing files
to be exported via SVN or a system copy command.
Syntax:
get_externals(externals_file [, externals_dir [, makelog [, quiet]]])
Usage:
The `externals_file` argument should be the path of a tab-delimited text
file containing information on the external files that the function call
    should retrieve. This file needs rows of numbers or characters, delimited
    by either tabs or 4 spaces, one for each file to be exported via svn.
The proper format is: rev dir file outdir outfile notes
### Column descriptions:
* rev
* Revision number of the file/directory in integer format.
If left blank along with directory column, get_externals.py will
read the last specified revision number. If copying from a shared
drive rather than the repository, list revision number as COPY.
* dir
* Directory of the file/directory requested. As described above,
%xxx% placemarkers are substituted in from predefined values in
metadata.py. If left blank along with revision column,
get_externals.py will read the last specified directory.
* file
* Name of the file requested. If entire directory is required, leave
column as a single *. If a file name wildcard is required place
single * within filename. get_externals.py will attempt to screen
out bad file names. Cannot be left blank.
* outdir
* Desired output directory of the exported file/directory.
Typically of the form ./subdir/. If left blank, will be
filled with the first level of the externals relative path.
* outfile
* Desired output name of the exported file/directory. If left as
double quotes, indicates that it should have the same name.
Adding a directory name that is different from the default """"
will place this subdirectory within the outdir. Additionally,
get_externals can assign a prefix tag to exported file collections,
either through a folder export, or a wildcard call; it does so
when the outfile column contains text of the pattern '[prefix]*',
where the prefix [prefix] will be attached to exported files.
* notes
* Optional column with notes on the export. get_externals.py ignores this,
but logs it.
Example of externals.txt:
```
rev dir file outdir outfile notes
2 %svn%/directory/ * ./destination_directory/ """"
COPY %svn%/other_directory/ my_file.txt . """"
```
The destination directory is specified by an optional second
parameter whose default value is "../external". The log file produced by
get_externals is automatically added to an optional third parameter
whose default value is '../output/make.log'.
The fourth argument, quiet, is by default False. Setting this argument to
True suppresses standard output and errors from SVN.
'''
try:
LOGFILE = prelim.start_logging(metadata.settings['externalslog_file'], 'get_externals.py')
makelog, externals, last_dir, last_rev = \
prelim.externals_preliminaries(makelog, externals_file, LOGFILE)
for line in externals:
try:
directive = SystemDirective(line, LOGFILE, last_dir, last_rev)
directive.error_check()
directive.clean(external_dir)
directive.issue_sys_command(quiet)
# Save rev/dir for next line
last_dir = directive.dir
last_rev = directive.rev
except:
prelim.print_error(LOGFILE)
prelim.end_logging(LOGFILE, makelog, 'get_externals.py')
except Exception as errmsg:
print "Error with get_external: \n", errmsg
| mit | -6,831,750,537,376,042,000 | 45.265306 | 106 | 0.618523 | false | 4.790072 | false | false | false |
anselmobd/fo2 | script/TussorBipaRolo.py | 1 | 1346 | import sys
import android
import os
import json
import urllib.request
print('\n'*10)
print('='*30)
print(' Tussor')
print(' Coletor de códigos de barras')
print('='*30)
droid = android.Android()
print('\nCelular: "{}"'.format(os.environ['QPY_USERNO']))
choice = ' '
while choice == ' ':
print('')
print('='*30)
print(' "Enter" para bipar um código')
print(' Para sair: qq tecla + "Enter"')
print('='*30)
c = input('')
if c != '':
sys.exit()
code = droid.scanBarcode()
if code.result is None:
print('Nenhum código bipado!')
else:
barcode = code.result['extras']['SCAN_RESULT']
print('Código de barras: "{}"\n'.format(barcode))
data = {}
url = 'http://intranet.tussor.com.br:88/insumo/rolo/{}/{}/'.format(
barcode, os.environ['QPY_USERNO'])
webURL = urllib.request.urlopen(url)
data = webURL.read()
encoding = webURL.info().get_content_charset('utf-8')
rolo = json.loads(data.decode(encoding))
if rolo == {}:
print('Rolo não encontrado!')
else:
print(' Rolo: {:09}'.format(rolo['ROLO']))
print('Referência: {}'.format(rolo['REF']))
print(' Cor: {}'.format(rolo['COR']))
print(' Tamanho: {}'.format(rolo['TAM']))
| mit | 2,331,994,624,095,525,400 | 25.27451 | 75 | 0.54403 | false | 3.198091 | false | false | false |
facelessuser/TabsExtra | tabs_extra.py | 1 | 32989 | """
TabsExtra.
Copyright (c) 2014 - 2016 Isaac Muse <[email protected]>
License: MIT
"""
import sublime_plugin
import sublime
import time
import sys
from TabsExtra import tab_menu
import os
import functools
from operator import itemgetter
import sublime_api
from urllib.parse import urljoin
from urllib.request import pathname2url
SETTINGS = "tabs_extra.sublime-settings"
PREFS = "Preferences.sublime-settings"
LEFT = 0
RIGHT = 1
LAST = 2
LAST_ACTIVE = None
OVERRIDE_CONFIRM = '''TabsExtra will overwrite the entire "Tab Context.sublime-menu" file in "Packages/Default" with a new one. ST3 keeps an unmodified copy in the archive.
You do this at your own risk. If something goes wrong, you may need to manually fix the menu.
Are you sure you want to continue?
''' # noqa
RESTORE_CONFIRM = '''In ST3 TabsExtra will simply delete the override "Tab Context.sublime-menu" from "Packages/Default" to allow the archived menu to take effect.
You do this at your own risk. If something goes wrong, you may need to manually fix the menu.
Are you sure you want to continue?
''' # noqa
###############################
# Helpers
###############################
def log(msg, status=False):
"""Log message."""
string = str(msg)
print("TabsExtra: %s" % string)
if status:
sublime.status_message(string)
def debug(s):
"""Debug message."""
if sublime.load_settings(SETTINGS).get("debug", False):
log(s)
def sublime_format_path(pth):
"""Format path for sublime."""
import re
m = re.match(r"^([A-Za-z]{1}):(?:/|\\)(.*)", pth)
if sublime.platform() == "windows" and m is not None:
pth = m.group(1) + "/" + m.group(2)
return pth.replace("\\", "/")
def is_persistent():
"""Check if sticky tabs should be persistent."""
return sublime.load_settings(SETTINGS).get("persistent_sticky", False)
def sort_on_load_save():
"""Sort on save."""
return (
sublime.load_settings(SETTINGS).get("sort_on_load_save", False) and
not sublime.load_settings(PREFS).get("preview_on_click")
)
def timestamp_view(window, sheet):
"""Timestamp view."""
global LAST_ACTIVE
view = window.active_view()
if view is None:
return
# Detect if this focus is due to the last active tab being moved
if (
LAST_ACTIVE is not None and
not LAST_ACTIVE.settings().get("tabs_extra_is_closed", False) and
LAST_ACTIVE.window() is None
):
# Flag last active tab as being moved
window = view.window()
active_group, active_index = window.get_sheet_index(sheet)
LAST_ACTIVE.settings().set("tabs_extra_moving", [window.id(), active_group])
# Skip if moving a tab
LAST_ACTIVE = None
allow = False
else:
allow = True
if allow:
window = view.window()
active_group, active_index = window.get_sheet_index(sheet)
# Add time stamp of last activation
view.settings().set('tabs_extra_last_activated', time.time())
# Track the tabs last position to help with focusing after a tab is moved
view.settings().set('tabs_extra_last_activated_sheet_index', active_index)
LAST_ACTIVE = view
debug("activated - %s" % view.file_name())
else:
debug("skipping - %s" % view.file_name())
def get_group_view(window, group, index):
"""Get the view at the given index in the given group."""
sheets = window.sheets_in_group(int(group))
sheet = sheets[index] if -1 < index < len(sheets) else None
view = sheet.view() if sheet is not None else None
return view
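# Illustrative usage (hypothetical values): fetch the view at group 0, tab
# index 2 of a window, or None when the index is out of range.
#   view = get_group_view(window, 0, 2)
#   if view is not None:
#       view.settings().set("tabs_extra_sticky", True)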
class Focus(object):
"""View focus handler."""
win = None
obj = None
@classmethod
def cancel(cls):
"""Cancel focus."""
cls.win = None
cls.obj = None
@classmethod
def defer(cls, win, obj):
"""Defer focus."""
if cls.win is None and cls.obj is None:
cls.win = win
cls.obj = obj
sublime.set_timeout(cls.on_focus, 100)
else:
cls.win = win
cls.obj = obj
@classmethod
def on_focus(cls):
"""On focus event."""
cls._focus()
@classmethod
def focus(cls, win, obj):
"""Set the win and obj before calling focus."""
cls.win = win
cls.obj = obj
cls._focus()
@classmethod
def _focus(cls):
"""Perform view focus."""
try:
if cls.win is not None and cls.obj is not None:
if isinstance(cls.obj, sublime.View):
cls.win.focus_view(cls.obj)
timestamp_view(cls.win, cls.obj)
elif isinstance(cls.obj, sublime.Sheet):
cls.win.focus_sheet(cls.obj)
timestamp_view(cls.win, cls.obj)
except Exception:
pass
cls.cancel()
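# Usage sketch (illustrative; `win` and `view` are assumed to be live
# sublime.Window / sublime.View objects):
#
#     Focus.defer(win, view)   # focus lands ~100ms later via set_timeout
#     Focus.focus(win, view)   # focus immediately, timestamping the view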
###############################
# Sticky Tabs
###############################
class TabsExtraClearAllStickyCommand(sublime_plugin.WindowCommand):
"""Clear all sticky tabs."""
def run(self, group=-1, force=False):
"""Clear all tab sticky states of current active group."""
if group == -1:
group = self.window.active_group()
if group >= 0:
persistent = is_persistent()
views = self.window.views_in_group(int(group))
if not persistent or force:
for v in views:
v.settings().erase("tabs_extra_sticky")
def is_visible(self, group=-1, force=False):
"""Show command if any tabs in active group are sticky."""
if group == -1:
group = self.window.active_group()
marked = False
views = self.window.views_in_group(int(group))
for v in views:
if v.settings().get("tabs_extra_sticky", False):
marked = True
break
return marked
class TabsExtraToggleStickyCommand(sublime_plugin.WindowCommand):
"""Toggle sticky state for tab."""
def run(self, group=-1, index=-1):
"""Toggle a tabs sticky state."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
if not view.settings().get("tabs_extra_sticky", False):
view.settings().set("tabs_extra_sticky", True)
else:
view.settings().erase("tabs_extra_sticky")
def is_checked(self, group=-1, index=-1):
"""Show in menu whether the tab is sticky."""
checked = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
checked = view.settings().get("tabs_extra_sticky", False)
return checked
class TabsExtraSetStickyCommand(sublime_plugin.TextCommand):
"""Set sticky value for the tab."""
def run(self, edit, value):
"""Set the sticky command to the specific value."""
if self.is_enabled(value):
self.view.settings().set("tabs_extra_sticky", bool(value))
def is_enabled(self, value):
"""Check if sticky value is already set to desired value."""
enabled = False
if self.view is not None:
current_value = self.view.settings().get("tabs_extra_sticky", False)
if current_value != value:
enabled = True
return enabled
###############################
# Close
###############################
class TabsExtraCloseMenuCommand(sublime_plugin.WindowCommand):
"""Close tabs via a quick panel menu."""
close_types = [
("Close", "single"),
("Close Other Tabs", "other"),
("Close Tabs to Right", "right"),
("Close Tabs to Left", "left"),
("Close All Tabs", "all")
]
def run(self, mode="normal", close_type=None):
"""Run command."""
self.mode = mode
self.group = -1
self.index = -1
sheet = self.window.active_sheet()
if sheet is not None:
self.group, self.index = self.window.get_sheet_index(sheet)
if self.group != -1 and self.index != -1:
value = None
if close_type is not None:
index = 0
for ct in self.close_types:
if ct[1] == close_type:
value = index
index += 1
if value is None:
self.window.show_quick_panel(
[x[0] for x in self.close_types],
self.check_selection
)
else:
self.check_selection(value)
def check_selection(self, value):
"""Check the user's selection."""
if value != -1:
close_unsaved = True
unsaved_prompt = True
if self.mode == "skip_unsaved":
close_unsaved = False
if self.mode == "dismiss_unsaved":
unsaved_prompt = False
close_type = self.close_types[value][1]
self.window.run_command(
"tabs_extra_close",
{
"group": int(self.group),
"index": int(self.index),
"close_type": close_type,
"unsaved_prompt": unsaved_prompt,
"close_unsaved": close_unsaved
}
)
def is_enabled(self, mode="normal"):
"""Check if command is enabled."""
group = -1
index = -1
sheet = self.window.active_sheet()
if sheet is not None:
group, index = self.window.get_sheet_index(sheet)
return group != -1 and index != -1 and mode in ["normal", "skip_unsaved", "dismiss_unsaved"]
class TabsExtraCloseAllCommand(sublime_plugin.WindowCommand):
"""Close all tabs in the whole window."""
def run(self):
"""Close all tabs in window; not just the tabs in the active group."""
for group in range(0, self.window.num_groups()):
sheet = self.window.active_sheet_in_group(group)
if sheet is not None:
index = self.window.get_sheet_index(sheet)[1]
self.window.run_command("tabs_extra_close", {"close_type": "all", "group": group, "index": index})
class TabsExtraCloseCommand(sublime_plugin.WindowCommand):
"""Close tab command."""
def init(self, close_type, group, index):
"""
Determine which views will be targeted by close command.
Also determine which tab states need to be cleaned up.
"""
self.persistent = is_persistent()
self.sheets = self.window.sheets_in_group(int(group))
assert(close_type in ["single", "left", "right", "other", "all"])
# Setup active index and group
active_sheet = self.window.active_sheet()
active_index = None
self.active_index = index
self.active_group = None
if active_sheet is not None:
active_group, active_index = self.window.get_sheet_index(active_sheet)
if group != active_group:
active_index = None
if active_index is not None:
self.active_index = active_index
# Determine targeted sheets to close and sheets to cleanup
if close_type == "single":
self.targets = [self.sheets[index]]
self.cleanup = bool(len(self.sheets[:index] + self.sheets[index + 1:]))
elif close_type == "left":
self.targets = self.sheets[:index]
self.cleanup = bool(len(self.sheets[index:]))
elif close_type == "right":
self.targets = self.sheets[index + 1:]
self.cleanup = bool(len(self.sheets[:index + 1]))
elif close_type == "other":
self.targets = self.sheets[:index] + self.sheets[index + 1:]
self.cleanup = True
elif close_type == "all":
self.targets = self.sheets[:]
self.cleanup = False
def can_close(self, is_sticky, is_single):
"""Prompt user in certain scenarios if okay to close."""
is_okay = True
if is_sticky:
if not is_single:
is_okay = False
return is_okay
def run(
self, group=-1, index=-1,
close_type="single", unsaved_prompt=True, close_unsaved=True
):
"""Close the specified tabs and cleanup sticky states."""
TabsExtraListener.extra_command_call = True
try:
if group >= 0 and index >= 0:
self.init(close_type, group, index)
if (
len(self.targets) and
not unsaved_prompt and
not all(not target.view().is_dirty() for target in self.targets) and
not sublime.ok_cancel_dialog(
"Are you sure you want to dismiss all targeted unsaved buffers?"
)
):
TabsExtraListener.extra_command_call = False
return
for s in self.targets:
v = s.view()
if v is not None:
if self.can_close(v.settings().get("tabs_extra_sticky", False), close_type == "single"):
if not self.persistent:
v.settings().erase("tabs_extra_sticky")
self.window.focus_view(v)
if not v.is_dirty() or close_unsaved:
if not unsaved_prompt:
v.set_scratch(True)
sublime_api.window_close_file(self.window.id(), v.id())
elif not self.persistent:
v.settings().erase("tabs_extra_sticky")
else:
self.window.focus_sheet(s)
self.window.run_command('close_file')
if not self.persistent and self.cleanup:
self.window.run_command("tabs_extra_clear_all_sticky", {"group": group})
except Exception:
pass
TabsExtraListener.extra_command_call = False
###############################
# Listener
###############################
class TabsExtraListener(sublime_plugin.EventListener):
"""Listener command to handle tab focus, closing, moving events."""
extra_command_call = False
def on_window_command(self, window, command_name, args):
"""Intercept and override specific close tab commands."""
extra_command_call = TabsExtraListener.extra_command_call
cmd = None
if args is None:
view = window.active_view()
if view is None:
return cmd
# Mark all actual file closes done from TabsExtra
# This helps us know when file close was called outside of TabsExtra commands
if extra_command_call and command_name == "close_file":
view.settings().set("tabs_extra_closing", True)
return cmd
group, index = window.get_view_index(view)
args = {"group": group, "index": index}
if command_name in ["close_by_index", "close"]:
command_name = "tabs_extra_close"
args["close_type"] = "single"
cmd = (command_name, args)
elif command_name == "close_all":
command_name = "tabs_extra_close_all"
args = {}
cmd = (command_name, args)
elif command_name == "close_others_by_index":
command_name = "tabs_extra_close"
args["close_type"] = "other"
cmd = (command_name, args)
elif command_name == "close_to_right_by_index":
command_name = "tabs_extra_close"
args["close_type"] = "right"
cmd = (command_name, args)
return cmd
def on_load(self, view):
"""Handle load focus or spawning."""
Focus.cancel()
if sort_on_load_save():
if not self.on_sort(view):
view.settings().set('tabsextra_to_sort', True)
else:
self.on_spawn(view)
def on_post_save(self, view):
"""On save sorting."""
if sort_on_load_save():
self.on_sort(view)
def on_sort(self, view):
"""Sort views."""
sorted_views = False
window = view.window()
if window and window.get_view_index(view)[1] != -1:
cmd = sublime.load_settings(SETTINGS).get("sort_on_load_save_command", {})
module = str(cmd.get("module", ""))
reverse = bool(cmd.get("reverse", False))
if module != "":
window.run_command(
"tabs_extra_sort",
{"sort_by": module, "reverse": reverse}
)
sorted_views = True
return sorted_views
def on_pre_close(self, view):
"""
If a view is closing without being marked, we know it was done outside of TabsExtra.
Attach view and window info so we can focus the right view after close.
"""
Focus.cancel()
view.settings().set("tabs_extra_is_closed", True)
if not view.settings().get("tabs_extra_closing", False):
TabsExtraListener.extra_command_call = True
window = view.window()
if window is not None:
view.settings().set("tabs_extra_view_info", view.window().get_view_index(view))
view.settings().set("tabs_extra_window_info", view.window().id())
else:
TabsExtraListener.extra_command_call = False
def on_close(self, view):
"""
Handle focusing the correct view in window group.
Close command was initiated outside of TabsExtra, so a focus is required.
"""
view_info = view.settings().get("tabs_extra_view_info", None)
window_info = view.settings().get("tabs_extra_window_info", None)
if view_info is not None and window_info is not None:
TabsExtraListener.extra_command_call = False
def on_activated(self, view):
"""
Timestamp each view when activated.
Detect if on_move event should be executed.
"""
if not TabsExtraListener.extra_command_call:
window = view.window()
if window is None:
return
s = window.active_sheet()
timestamp_view(window, s)
# Detect if tab was moved to a new group
# Run on_move event if it has.
moving = view.settings().get("tabs_extra_moving", None)
if moving is not None:
win_id, group_id = moving
window = view.window()
if window is None:
return
active_group = window.get_view_index(view)[0]
if window.id() != win_id or int(group_id) != int(active_group):
view.settings().erase("tabs_extra_moving")
elif sort_on_load_save() and view.settings().get('tabsextra_to_sort'):
view.settings().erase('tabsextra_to_sort')
self.on_sort(view)
###############################
# Wrappers
###############################
class TabsExtraViewWrapperCommand(sublime_plugin.WindowCommand):
"""Wrapper for for executing certain commands from the tab context menu."""
def run(self, command, group=-1, index=-1, args=None):
"""Wrap command in order to ensure view gets focused first."""
if args is None:
args = {}
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
self.window.focus_view(view)
self.window.run_command(command, args)
###############################
# File Management Commands
###############################
class TabsExtraDeleteCommand(sublime_plugin.WindowCommand):
"""Delete the file."""
def run(self, group=-1, index=-1):
"""Delete the tab's file."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
if sublime.ok_cancel_dialog("Delete %s?" % file_name, "Delete"):
if not view.close():
return
import Default.send2trash as send2trash # noqa: N813
send2trash.send2trash(file_name)
def is_visible(self, group=-1, index=-1):
"""Check if command should be visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraDuplicateCommand(sublime_plugin.WindowCommand):
"""Duplicate tab."""
def run(self, group=-1, index=-1):
"""Rename the given tab."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
v = self.window.show_input_panel(
"Duplicate:", file_name,
lambda x: self.on_done(file_name, x),
None, None
)
file_path_len = len(file_name)
file_name_len = len(os.path.basename(file_name))
v.sel().clear()
v.sel().add(
sublime.Region(
file_path_len - file_name_len,
file_path_len
)
)
def on_done(self, old, new):
"""Handle the tab duplication when the user is done with the input panel."""
new_path = os.path.dirname(new)
if os.path.exists(new_path) and os.path.isdir(new_path):
if not os.path.exists(new) or sublime.ok_cancel_dialog("Overwrite %s?" % new, "Replace"):
try:
with open(old, 'rb') as f:
text = f.read()
with open(new, 'wb') as f:
f.write(text)
self.window.open_file(new)
except Exception:
sublime.status_message("Unable to duplicate")
def is_visible(self, group=-1, index=-1):
"""Check if the command is visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraRenameCommand(sublime_plugin.WindowCommand):
"""Rename the tab's file."""
def run(self, group=-1, index=-1):
"""Rename the given tab."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
branch, leaf = os.path.split(file_name)
v = self.window.show_input_panel(
"New Name:", leaf,
functools.partial(self.on_done, file_name, branch),
None, None
)
name = os.path.splitext(leaf)[0]
v.sel().clear()
v.sel().add(sublime.Region(0, len(name)))
def on_done(self, old, branch, leaf):
"""Handle the renaming when user is done with the input panel."""
new = os.path.join(branch, leaf)
try:
os.rename(old, new)
v = self.window.find_open_file(old)
if v:
v.retarget(new)
except Exception:
sublime.status_message("Unable to rename")
def is_visible(self, group=-1, index=-1):
"""Check if the command is visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraMoveCommand(sublime_plugin.WindowCommand):
"""Move the tab's file."""
def run(self, group=-1, index=-1):
"""Move the file in the given tab."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
file_name = view.file_name()
if file_name is not None and os.path.exists(file_name):
v = self.window.show_input_panel(
"New Location:", file_name,
functools.partial(self.on_done, file_name),
None, None
)
file_path_len = len(file_name)
file_name_len = len(os.path.basename(file_name))
v.sel().clear()
v.sel().add(
sublime.Region(
file_path_len - file_name_len,
file_path_len
)
)
def on_done(self, old, new):
"""Handle the moving when user is done with the input panel."""
try:
directory = os.path.dirname(new)
if not os.path.exists(directory):
os.makedirs(directory)
os.rename(old, new)
v = self.window.find_open_file(old)
if v:
v.retarget(new)
except Exception:
sublime.status_message("Unable to move")
def is_visible(self, group=-1, index=-1):
"""Check if the command is visible."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None and os.path.exists(view.file_name()):
enabled = True
return enabled
class TabsExtraRevertCommand(TabsExtraViewWrapperCommand):
"""Revert changes in file."""
def is_visible(self, command, group=-1, index=-1, args=None):
"""Determine if command should be visible in menu."""
if args is None:
args = {}
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None and view.file_name() is not None:
enabled = view.is_dirty()
return enabled
class TabsExtraFileCommand(TabsExtraViewWrapperCommand):
"""Wrapper for file commands."""
def is_enabled(self, command, group=-1, index=-1, args=None):
"""Determine if command should be enabled."""
if args is None:
args = {}
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
enabled = view.file_name() is not None
return enabled
class TabsExtraFilePathCommand(sublime_plugin.WindowCommand):
"""Get file paths."""
def run(self, group=-1, index=-1, path_type='path'):
"""Run the command."""
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
self.window.focus_view(view)
view.run_command('copy_path')
pth = sublime.get_clipboard()
if path_type == 'name':
pth = os.path.basename(pth)
elif path_type == 'path_uri':
pth = urljoin('file:', pathname2url(pth))
sublime.set_clipboard(pth)
def is_enabled(self, group=-1, index=-1, path_type='path'):
"""Determine if command should be enabled."""
enabled = False
if group >= 0 and index >= 0:
view = get_group_view(self.window, group, index)
if view is not None:
enabled = view.file_name() is not None
return enabled
###############################
# Sort
###############################
class TabsExtraSortMenuCommand(sublime_plugin.WindowCommand):
"""Sort tabs."""
def run(self):
"""Using "sort_layout" setting, construct a quick panel sort menu."""
sort_layout = sublime.load_settings(SETTINGS).get("sort_layout", [])
if len(sort_layout):
self.sort_commands = []
sort_menu = []
for sort_entry in sort_layout:
caption = str(sort_entry.get("caption", ""))
module = str(sort_entry.get("module", ""))
reverse = bool(sort_entry.get("reverse", False))
if module != "":
self.sort_commands.append((module, reverse))
sort_menu.append(caption)
if len(sort_menu):
self.window.show_quick_panel(sort_menu, self.check_selection)
def check_selection(self, value):
"""Launch the selected sort command."""
if value != -1:
command = self.sort_commands[value]
self.window.run_command("tabs_extra_sort", {"sort_by": command[0], "reverse": command[1]})
class TabsExtraSortCommand(sublime_plugin.WindowCommand):
"""Sort tabs."""
def run(self, group=-1, sort_by=None, reverse=False):
"""Sort Tabs."""
if sort_by is not None:
if group == -1:
group = self.window.active_group()
self.group = group
self.reverse = reverse
views = self.window.views_in_group(int(group))
if len(views):
sort_module = self.get_sort_module(sort_by)
if sort_module is not None:
view_data = []
sort_module.run(views, view_data)
self.sort(view_data)
self.window.focus_view(self.window.active_view())
def sort(self, view_data):
"""Sort the views."""
indexes = tuple([x for x in range(0, len(view_data[0]) - 1)])
sorted_views = sorted(view_data, key=itemgetter(*indexes))
if self.reverse:
sorted_views = sorted_views[::-1]
if sorted_views != view_data:
for index in range(0, len(sorted_views)):
self.window.set_view_index(sorted_views[index][-1], self.group, index)
def get_sort_module(self, module_name):
"""Import the sort_by module."""
import imp
path_name = os.path.join("Packages", os.path.normpath(module_name.replace('.', '/')))
path_name += ".py"
module = imp.new_module(module_name)
sys.modules[module_name] = module
exec(
compile(
sublime.load_resource(sublime_format_path(path_name)),
module_name, 'exec'
),
sys.modules[module_name].__dict__
)
return module
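# Contract sketch for a sort module loaded above, inferred from how
# TabsExtraSortCommand uses it: the module must define run(views, view_data)
# and append one tuple per view whose trailing element is the view itself
# (the leading elements are the sort keys), e.g.:
#
#     def run(views, view_data):
#         for v in views:
#             view_data.append((v.file_name() or "", v))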
###############################
# Menu Installation
###############################
class TabsExtraInstallOverrideMenuCommand(sublime_plugin.ApplicationCommand):
"""Install TabsExtra menu overriding the default tab context menu."""
def run(self):
"""Install/upgrade the override tab menu."""
msg = OVERRIDE_CONFIRM
if sublime.ok_cancel_dialog(msg):
tab_menu.upgrade_override_menu()
class TabsExtraUninstallOverrideMenuCommand(sublime_plugin.ApplicationCommand):
"""Uninstall the TabsExtra override menu."""
def run(self):
"""Uninstall the override tab menu."""
msg = RESTORE_CONFIRM
if sublime.ok_cancel_dialog(msg):
tab_menu.uninstall_override_menu()
class TabsExtraInstallMenuCommand(sublime_plugin.ApplicationCommand):
"""Install the TabsExtra menu by appending it to the existing tab context menu."""
def run(self):
"""Install/upgrade the standard tab menu."""
tab_menu.upgrade_default_menu()
###############################
# Plugin Loading
###############################
def plugin_loaded():
"""Handle plugin setup."""
win = sublime.active_window()
if win is not None:
sheet = win.active_sheet()
if sheet is not None:
timestamp_view(win, sheet)
| mit | 4,686,407,101,717,471,000 | 32.593686 | 173 | 0.5363 | false | 4.14643 | false | false | false |
Justaphf/BitcoinUnlimited | qa/rpc-tests/mempool_push.py | 1 | 9835 | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
# This is a template to make creating new QA tests easy.
# You can also use this template to quickly start and connect a few regtest nodes.
import time
import sys
if sys.version_info[0] < 3:
raise "Use Python 3"
import logging
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
BCH_UNCONF_DEPTH = 25
BCH_UNCONF_SIZE_KB = 101
BCH_UNCONF_SIZE = BCH_UNCONF_SIZE_KB*1000
DELAY_TIME = 45
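# These constants mirror BCH's default unconfirmed-chain policy: at most
# BCH_UNCONF_DEPTH (25) chained unconfirmed transactions and BCH_UNCONF_SIZE
# (101 * 1000 = 101000 bytes) of ancestor size. Node 1 below is configured
# with double both limits so it can hold transactions that node 0 rejects
# and push them once they become acceptable.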
class MyTest (BitcoinTestFramework):
def setup_chain(self,bitcoinConfDict=None, wallets=None):
logging.info("Initializing test directory " + self.options.tmpdir)
initialize_chain(self.options.tmpdir, bitcoinConfDict, wallets)
def setup_network(self, split=False):
mempoolConf = [
["-blockprioritysize=2000000", "-limitdescendantcount=25", "-limitancestorcount=25",
"-limitancestorsize=101", "-limitdescendantsize=101"],
["-blockprioritysize=2000000",
"-maxmempool=8080",
"-limitancestorsize=%d" % (BCH_UNCONF_SIZE_KB*2),
"-limitdescendantsize=%d" % (BCH_UNCONF_SIZE_KB*2),
"-limitancestorcount=%d" % (BCH_UNCONF_DEPTH*2),
"-limitdescendantcount=%d" % (BCH_UNCONF_DEPTH*2),
"-net.unconfChainResendAction=2",
"-net.restrictInputs=0"],
["-blockprioritysize=2000000", "-limitdescendantcount=1000", "-limitancestorcount=1000",
"-limitancestorsize=1000", "-limitdescendantsize=1000", "-net.unconfChainResendAction=2",
"-net.restrictInputs=0"],
["-blockprioritysize=2000000", "-limitdescendantcount=25", "-limitancestorcount=25",
"-limitancestorsize=150","-limitdescendantsize=101", "-net.unconfChainResendAction=2"]
]
self.nodes = start_nodes(4, self.options.tmpdir, mempoolConf)
connect_nodes_full(self.nodes)
self.is_network_split=False
self.sync_blocks()
def run_test (self):
        # kick us out of IBD mode: the cached blocks have old timestamps, so our blockchain looks out of date
# if we are in IBD mode, we don't request incoming tx.
self.nodes[0].generate(1)
logging.info("ancestor count test")
bal = self.nodes[1].getbalance()
addr = self.nodes[1].getnewaddress()
txhex = []
for i in range(0,BCH_UNCONF_DEPTH*2):
try:
txhex.append(self.nodes[1].sendtoaddress(addr, bal-1)) # enough so that it uses all UTXO, but has fee left over
logging.info("tx depth %d" % i) # Keep travis from timing out
except JSONRPCException as e: # an exception you don't catch is a testing error
print(str(e))
raise
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH)
waitFor(DELAY_TIME, lambda: self.nodes[1].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH*2)
# Set small to commit just a few tx so we can see if the missing ones get pushed
self.nodes[0].set("mining.blockSize=6000")
blk = self.nodes[0].generate(1)[0]
blkhex = self.nodes[0].getblock(blk)
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH)
# generate the block somewhere else and see if the tx get pushed
self.nodes[2].set("mining.blockSize=4000")
blk2 = self.nodes[2].generate(1)[0]
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH)
waitFor(DELAY_TIME, lambda: self.nodes[1].getbestblockhash() == blk2) # make sure its settled so we can get a good leftover count for the next test.
unconfLeftOver = self.nodes[1].getmempoolinfo()["size"]
assert(unconfLeftOver >= BCH_UNCONF_DEPTH) # if someone bumps the BCH network unconfirmed depth, you need to build a bigger unconf chain
# Let's consume all BCH_UNCONF_DEPTH tx
self.nodes[0].set("mining.blockSize=8000000")
waitFor(DELAY_TIME, lambda: len(self.nodes[0].getblocktemplate()["transactions"])>=BCH_UNCONF_DEPTH)
blk3 = self.nodes[0].generate(1)[0]
blk3data = self.nodes[0].getblock(blk3)
# this would be ideal, but a particular block is not guaranteed to contain all tx in the mempool
# assert_equal(len(blk3data["tx"]), BCH_UNCONF_DEPTH + 1) # chain of BCH_UNCONF_DEPTH unconfirmed + coinbase
committedTxCount = len(blk3data["tx"])-1 # -1 to remove coinbase
waitFor(DELAY_TIME, lambda: self.nodes[1].getbestblockhash() == blk3)
waitFor(DELAY_TIME, lambda: self.nodes[1].getmempoolinfo()["size"] == unconfLeftOver - committedTxCount)
# make sure that everything that can be pushed is pushed
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == min(unconfLeftOver - committedTxCount, BCH_UNCONF_DEPTH))
# clean up: confirm all the left over tx from the prior test
self.nodes[1].generate(1)
logging.info("ancestor size test")
# Grab existing addresses on all the nodes to create destinations for sendmany
# Grabbing existing addrs is a lot faster than creating new ones
addrlist = []
for node in self.nodes:
tmpaddrs = node.listaddressgroupings()
for axx in tmpaddrs:
addrlist.append(axx[0][0])
amounts = {}
for a in addrlist:
amounts[a] = "0.00001"
bal = self.nodes[1].getbalance()
amounts[addr] = bal - Decimal("5.0")
# Wait for sync before issuing the tx chain so that no txes are rejected as nonfinal
self.sync_blocks()
logging.info("Block heights: %s" % str([x.getblockcount() for x in self.nodes]))
# Create an unconfirmed chain that exceeds what node 0 allows
cumulativeTxSize = 0
while cumulativeTxSize < BCH_UNCONF_SIZE:
txhash = self.nodes[1].sendmany("",amounts,0)
tx = self.nodes[1].getrawtransaction(txhash)
txinfo = self.nodes[1].gettransaction(txhash)
logging.info("fee: %s fee sat/byte: %s" % (str(txinfo["fee"]), str(txinfo["fee"]*100000000/Decimal(len(tx)/2)) ))
cumulativeTxSize += len(tx)/2 # /2 because tx is a hex representation of the tx
logging.info("total size: %d" % cumulativeTxSize)
txCommitted = self.nodes[1].getmempoolinfo()["size"]
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == txCommitted-1) # nodes[0] will eliminate 1 tx because ancestor size too big
waitFor(DELAY_TIME, lambda: self.nodes[2].getmempoolinfo()["size"] == txCommitted) # nodes[2] should have gotten everything because its ancestor size conf is large
self.nodes[0].generate(1)
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == 1) # node 1 should push the tx that's now acceptable to node 0
self.nodes[0].generate(1) # clean up
self.sync_blocks() # Wait for sync before issuing the tx chain so that no txes are rejected as nonfinal
logging.info("Block heights: %s" % str([x.getblockcount() for x in self.nodes]))
# Now let's run a more realistic test with 2 mining nodes of varying mempool depth, and one application node with a huge depth
logging.info("deep unconfirmed chain test")
# Because the TX push races the block, connect the network in a special way to avoid this race.
# This is undesirable for a test, but in the real network will likely result in a faster dispersal of the TX because the miners are interconnected
for n in self.nodes:
disconnect_all(n)
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 2)
bal = self.nodes[2].getbalance()
addr = self.nodes[2].getnewaddress()
txhex = []
for i in range(0,51):
try:
txhex.append(self.nodes[2].sendtoaddress(addr, bal-1)) # enough so that it uses all UTXO, but has fee left over
logging.info("send depth %d" % i) # Keep travis from timing out
except JSONRPCException as e: # an exception you don't catch is a testing error
print(str(e))
raise
count = 0
while self.nodes[2].getmempoolinfo()["size"] != 0:
# these checks aren't going to work at the end when I run out of tx so check for that
if self.nodes[2].getmempoolinfo()["size"] >= BCH_UNCONF_DEPTH*2:
waitFor(DELAY_TIME, lambda: self.nodes[0].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH)
waitFor(DELAY_TIME, lambda: self.nodes[1].getmempoolinfo()["size"] == BCH_UNCONF_DEPTH*2)
logging.info("%d: sizes %d, %d, %d" % (count,self.nodes[0].getmempoolinfo()["size"],self.nodes[1].getmempoolinfo()["size"],self.nodes[2].getmempoolinfo()["size"]))
blk = self.nodes[0].generate(1)[0]
waitFor(DELAY_TIME, lambda: self.nodes[2].getbestblockhash() == blk)
count+=1
if __name__ == '__main__':
t = MyTest()
t.main (None, { "blockprioritysize": 2000000, "keypool":5 })
# Create a convenient function for an interactive python debugging session
def Test():
t = MyTest()
t.drop_to_pdb = True
bitcoinConf = {
"debug": [ "net", "blk", "thin", "mempool", "req", "bench", "evict"],
"blockprioritysize": 2000000 # we don't want any transactions rejected due to insufficient fees...
}
flags = standardFlags()
t.main(flags, bitcoinConf, None)
| mit | -8,783,700,860,843,381,000 | 51.037037 | 175 | 0.642501 | false | 3.646644 | true | false | false |
tonioo/modoboa | modoboa/core/app_settings.py | 1 | 16059 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.conf import settings
from django.contrib.auth import password_validation
from django.utils.translation import ugettext as _, ugettext_lazy
from modoboa.core.password_hashers import get_dovecot_schemes
from modoboa.core.password_hashers.base import PasswordHasher
from modoboa.lib import fields as lib_fields
from modoboa.lib.form_utils import (
HorizontalRadioSelect, SeparatorField, YesNoField
)
from modoboa.parameters import forms as param_forms, tools as param_tools
from . import constants
def enabled_applications():
"""Return the list of installed extensions."""
from modoboa.core.extensions import exts_pool
result = [("user", _("User profile"))]
for extension in exts_pool.list_all():
if "topredirection_url" not in extension:
continue
result.append((extension["name"], extension["label"]))
return sorted(result, key=lambda e: e[0])
class GeneralParametersForm(param_forms.AdminParametersForm):
"""General parameters."""
app = "core"
sep1 = SeparatorField(label=ugettext_lazy("Authentication"))
authentication_type = forms.ChoiceField(
label=ugettext_lazy("Authentication type"),
choices=[("local", ugettext_lazy("Local")),
("ldap", "LDAP")],
initial="local",
help_text=ugettext_lazy("The backend used for authentication"),
widget=HorizontalRadioSelect()
)
password_scheme = forms.ChoiceField(
label=ugettext_lazy("Default password scheme"),
choices=[(hasher.name, ugettext_lazy(hasher.label))
for hasher in PasswordHasher.get_password_hashers()
if hasher().scheme in get_dovecot_schemes()],
initial="sha512crypt",
help_text=ugettext_lazy("Scheme used to crypt mailbox passwords"),
widget=forms.Select(attrs={"class": "form-control"})
)
rounds_number = forms.IntegerField(
label=ugettext_lazy("Rounds"),
initial=70000,
help_text=ugettext_lazy(
"Number of rounds to use (only used by sha256crypt and "
"sha512crypt). Must be between 1000 and 999999999, inclusive."
),
widget=forms.TextInput(attrs={"class": "form-control"})
)
update_scheme = YesNoField(
label=ugettext_lazy("Update password scheme at login"),
initial=True,
help_text=ugettext_lazy(
"Update user password at login to use the default password scheme"
)
)
default_password = forms.CharField(
label=ugettext_lazy("Default password"),
initial="password",
help_text=ugettext_lazy(
"Default password for automatically created accounts.")
)
random_password_length = forms.IntegerField(
label=ugettext_lazy("Random password length"),
min_value=8,
initial=8,
help_text=ugettext_lazy(
"Length of randomly generated passwords.")
)
# LDAP specific settings
ldap_sep = SeparatorField(label=ugettext_lazy("LDAP settings"))
ldap_server_address = forms.CharField(
label=ugettext_lazy("Server address"),
initial="localhost",
help_text=ugettext_lazy(
"The IP address or the DNS name of the LDAP server"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_server_port = forms.IntegerField(
label=ugettext_lazy("Server port"),
initial=389,
help_text=ugettext_lazy("The TCP port number used by the LDAP server"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_secured = forms.ChoiceField(
label=ugettext_lazy("Use a secured connection"),
choices=constants.LDAP_SECURE_MODES,
initial="none",
help_text=ugettext_lazy(
"Use an SSL/STARTTLS connection to access the LDAP server")
)
ldap_auth_method = forms.ChoiceField(
label=ugettext_lazy("Authentication method"),
choices=[("searchbind", ugettext_lazy("Search and bind")),
("directbind", ugettext_lazy("Direct bind"))],
initial="searchbind",
help_text=ugettext_lazy("Choose the authentication method to use"),
widget=forms.Select(attrs={"class": "form-control"})
)
ldap_bind_dn = forms.CharField(
label=ugettext_lazy("Bind DN"),
initial="",
help_text=ugettext_lazy(
"The distinguished name to use when binding to the LDAP server. "
"Leave empty for an anonymous bind"
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_bind_password = forms.CharField(
label=ugettext_lazy("Bind password"),
initial="",
help_text=ugettext_lazy(
"The password to use when binding to the LDAP server "
"(with 'Bind DN')"
),
widget=forms.PasswordInput(
attrs={"class": "form-control"}, render_value=True),
required=False
)
ldap_search_base = forms.CharField(
label=ugettext_lazy("Users search base"),
initial="",
help_text=ugettext_lazy(
"The distinguished name of the search base used to find users"
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_search_filter = forms.CharField(
label=ugettext_lazy("Search filter"),
initial="(mail=%(user)s)",
help_text=ugettext_lazy(
"An optional filter string (e.g. '(objectClass=person)'). "
"In order to be valid, it must be enclosed in parentheses."
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_user_dn_template = forms.CharField(
label=ugettext_lazy("User DN template"),
initial="",
help_text=ugettext_lazy(
"The template used to construct a user's DN. It should contain "
"one placeholder (ie. %(user)s)"
),
required=False,
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_password_attribute = forms.CharField(
label=ugettext_lazy("Password attribute"),
initial="userPassword",
help_text=ugettext_lazy("The attribute used to store user passwords"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
ldap_is_active_directory = YesNoField(
label=ugettext_lazy("Active Directory"),
initial=False,
help_text=ugettext_lazy(
"Tell if the LDAP server is an Active Directory one")
)
ldap_admin_groups = forms.CharField(
label=ugettext_lazy("Administrator groups"),
initial="",
help_text=ugettext_lazy(
"Members of those LDAP Posix groups will be created as domain "
"administrators. Use ';' characters to separate groups."
),
required=False
)
ldap_group_type = forms.ChoiceField(
label=ugettext_lazy("Group type"),
initial="posixgroup",
choices=constants.LDAP_GROUP_TYPES,
help_text=ugettext_lazy(
"The LDAP group type to use with your directory."
)
)
ldap_groups_search_base = forms.CharField(
label=ugettext_lazy("Groups search base"),
initial="",
help_text=ugettext_lazy(
"The distinguished name of the search base used to find groups"
),
required=False
)
dash_sep = SeparatorField(label=ugettext_lazy("Dashboard"))
rss_feed_url = forms.URLField(
label=ugettext_lazy("Custom RSS feed"),
required=False,
help_text=ugettext_lazy(
"Display custom RSS feed to resellers and domain administrators"
)
)
hide_features_widget = YesNoField(
label=ugettext_lazy("Hide features widget"),
initial=False,
help_text=ugettext_lazy(
"Hide features widget for resellers and domain administrators"
)
)
notif_sep = SeparatorField(label=ugettext_lazy("Notifications"))
sender_address = lib_fields.UTF8EmailField(
label=_("Sender address"),
initial="[email protected]",
help_text=_(
"Email address used to send notifications."
)
)
api_sep = SeparatorField(label=ugettext_lazy("Public API"))
enable_api_communication = YesNoField(
label=ugettext_lazy("Enable communication"),
initial=True,
help_text=ugettext_lazy(
"Enable communication with Modoboa public API")
)
check_new_versions = YesNoField(
label=ugettext_lazy("Check new versions"),
initial=True,
help_text=ugettext_lazy(
"Automatically checks if a newer version is available")
)
send_statistics = YesNoField(
label=ugettext_lazy("Send statistics"),
initial=True,
help_text=ugettext_lazy(
"Send statistics to Modoboa public API "
"(counters and used extensions)")
)
sep3 = SeparatorField(label=ugettext_lazy("Miscellaneous"))
inactive_account_threshold = forms.IntegerField(
label=_("Inactive account threshold"),
initial=30,
help_text=_(
"An account with a last login date greater than this threshold "
"(in days) will be considered as inactive"
),
widget=forms.TextInput(attrs={"class": "form-control"})
)
top_notifications_check_interval = forms.IntegerField(
label=_("Top notifications check interval"),
initial=30,
help_text=_(
"Interval between two top notification checks (in seconds)"
),
widget=forms.TextInput(attrs={"class": "form-control"})
)
log_maximum_age = forms.IntegerField(
label=ugettext_lazy("Maximum log record age"),
initial=365,
help_text=ugettext_lazy("The maximum age in days of a log record"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
items_per_page = forms.IntegerField(
label=ugettext_lazy("Items per page"),
initial=30,
help_text=ugettext_lazy("Number of displayed items per page"),
widget=forms.TextInput(attrs={"class": "form-control"})
)
default_top_redirection = forms.ChoiceField(
label=ugettext_lazy("Default top redirection"),
choices=[],
initial="user",
help_text=ugettext_lazy(
"The default redirection used when no application is specified"
),
widget=forms.Select(attrs={"class": "form-control"})
)
# Visibility rules
visibility_rules = {
"ldap_sep": "authentication_type=ldap",
"ldap_server_address": "authentication_type=ldap",
"ldap_server_port": "authentication_type=ldap",
"ldap_secured": "authentication_type=ldap",
"ldap_auth_method": "authentication_type=ldap",
"ldap_bind_dn": "ldap_auth_method=searchbind",
"ldap_bind_password": "ldap_auth_method=searchbind",
"ldap_search_base": "ldap_auth_method=searchbind",
"ldap_search_filter": "ldap_auth_method=searchbind",
"ldap_user_dn_template": "ldap_auth_method=directbind",
"ldap_password_attribute": "authentication_type=ldap",
"ldap_is_active_directory": "authentication_type=ldap",
"ldap_admin_groups": "authentication_type=ldap",
"ldap_group_type": "authentication_type=ldap",
"ldap_groups_search_base": "authentication_type=ldap",
"check_new_versions": "enable_api_communication=True",
"send_statistics": "enable_api_communication=True",
}
def __init__(self, *args, **kwargs):
super(GeneralParametersForm, self).__init__(*args, **kwargs)
self.fields["default_top_redirection"].choices = enabled_applications()
def clean_ldap_user_dn_template(self):
tpl = self.cleaned_data["ldap_user_dn_template"]
try:
tpl % {"user": "toto"}
except (KeyError, ValueError):
raise forms.ValidationError(_("Invalid syntax"))
return tpl
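    # Illustrative template that passes the validation above (placeholder
    # DN components; adjust to the real directory layout):
    #
    #     ldap_user_dn_template = "uid=%(user)s,ou=users,dc=example,dc=com"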
def clean_rounds_number(self):
value = self.cleaned_data["rounds_number"]
if value < 1000 or value > 999999999:
raise forms.ValidationError(_("Invalid rounds number"))
return value
def clean_default_password(self):
"""Check password complexity."""
value = self.cleaned_data["default_password"]
password_validation.validate_password(value)
return value
def clean(self):
"""Custom validation method
Depending on 'ldap_auth_method' value, we check for different
required parameters.
"""
super(GeneralParametersForm, self).clean()
cleaned_data = self.cleaned_data
if cleaned_data["authentication_type"] != "ldap":
return cleaned_data
if cleaned_data["ldap_auth_method"] == "searchbind":
required_fields = ["ldap_search_base", "ldap_search_filter"]
else:
required_fields = ["ldap_user_dn_template"]
for f in required_fields:
if f not in cleaned_data or cleaned_data[f] == u'':
self.add_error(f, _("This field is required"))
return cleaned_data
def to_django_settings(self):
"""Apply LDAP related parameters to Django settings.
Doing so, we can use the django_auth_ldap module.
"""
try:
import ldap
from django_auth_ldap.config import (
LDAPSearch, PosixGroupType, GroupOfNamesType)
ldap_available = True
except ImportError:
ldap_available = False
values = dict(param_tools.get_global_parameters("core"))
if not ldap_available or values["authentication_type"] != "ldap":
return
if not hasattr(settings, "AUTH_LDAP_USER_ATTR_MAP"):
setattr(settings, "AUTH_LDAP_USER_ATTR_MAP", {
"first_name": "givenName",
"email": "mail",
"last_name": "sn"
})
ldap_uri = "ldaps://" if values["ldap_secured"] == "ssl" else "ldap://"
ldap_uri += "%s:%s" % (
values["ldap_server_address"], values["ldap_server_port"])
setattr(settings, "AUTH_LDAP_SERVER_URI", ldap_uri)
if values["ldap_secured"] == "starttls":
setattr(settings, "AUTH_LDAP_START_TLS", True)
if values["ldap_group_type"] == "groupofnames":
setattr(settings, "AUTH_LDAP_GROUP_TYPE", GroupOfNamesType())
searchfilter = "(objectClass=groupOfNames)"
else:
setattr(settings, "AUTH_LDAP_GROUP_TYPE", PosixGroupType())
searchfilter = "(objectClass=posixGroup)"
setattr(settings, "AUTH_LDAP_GROUP_SEARCH", LDAPSearch(
values["ldap_groups_search_base"], ldap.SCOPE_SUBTREE,
searchfilter
))
if values["ldap_auth_method"] == "searchbind":
setattr(settings, "AUTH_LDAP_BIND_DN", values["ldap_bind_dn"])
setattr(
settings, "AUTH_LDAP_BIND_PASSWORD",
values["ldap_bind_password"]
)
search = LDAPSearch(
values["ldap_search_base"], ldap.SCOPE_SUBTREE,
values["ldap_search_filter"]
)
setattr(settings, "AUTH_LDAP_USER_SEARCH", search)
else:
setattr(
settings, "AUTH_LDAP_USER_DN_TEMPLATE",
values["ldap_user_dn_template"]
)
if values["ldap_is_active_directory"]:
if not hasattr(settings, "AUTH_LDAP_GLOBAL_OPTIONS"):
setattr(settings, "AUTH_LDAP_GLOBAL_OPTIONS", {
ldap.OPT_REFERRALS: False
})
else:
settings.AUTH_LDAP_GLOBAL_OPTIONS[ldap.OPT_REFERRALS] = False
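# Rough sketch of what the searchbind branch above yields, assuming an
# unsecured server at ldap.example.com and illustrative bind values:
#
#     AUTH_LDAP_SERVER_URI = "ldap://ldap.example.com:389"
#     AUTH_LDAP_BIND_DN = "cn=admin,dc=example,dc=com"
#     AUTH_LDAP_BIND_PASSWORD = "secret"
#     AUTH_LDAP_USER_SEARCH = LDAPSearch(
#         "ou=users,dc=example,dc=com", ldap.SCOPE_SUBTREE, "(mail=%(user)s)")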
| isc | -478,008,678,375,216,060 | 34.845982 | 79 | 0.609067 | false | 4.263074 | false | false | false |
manxueitp/cozmo-test | cozmo_sdk_examples/tutorials/02_cozmo_face/03_alarm_clock.py | 1 | 9691 | #!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Cozmo Alarm Clock
Use Cozmo's face to display the current time
Play an alarm (Cozmo tells you to wake up) at a set time
NOTE: This is an example program. Anki takes no responsibility
if Cozmo fails to wake you up on time!
'''
import datetime
import math
import sys
import time
try:
from PIL import Image, ImageDraw, ImageFont
except ImportError:
sys.exit("Cannot import from PIL. Do `pip3 install --user Pillow` to install")
import cozmo
#: bool: Set to True to display the clock as analog
#: (with a small digital readout below)
SHOW_ANALOG_CLOCK = False
def make_text_image(text_to_draw, x, y, font=None):
'''Make a PIL.Image with the given text printed on it
Args:
text_to_draw (string): the text to draw to the image
x (int): x pixel location
y (int): y pixel location
font (PIL.ImageFont): the font to use
Returns:
:class:(`PIL.Image.Image`): a PIL image with the text drawn on it
'''
# make a blank image for the text, initialized to opaque black
text_image = Image.new('RGBA', cozmo.oled_face.dimensions(), (0, 0, 0, 255))
# get a drawing context
dc = ImageDraw.Draw(text_image)
# draw the text
dc.text((x, y), text_to_draw, fill=(255, 255, 255, 255), font=font)
return text_image
# get a font - location depends on OS so try a couple of options
# failing that the default of None will just use a default font
_clock_font = None
try:
_clock_font = ImageFont.truetype("arial.ttf", 20)
except IOError:
try:
_clock_font = ImageFont.truetype("/Library/Fonts/Arial.ttf", 20)
except IOError:
pass
def draw_clock_hand(dc, cen_x, cen_y, circle_ratio, hand_length):
'''Draw a single clock hand (hours, minutes or seconds)
Args:
dc: (:class:`PIL.ImageDraw.ImageDraw`): drawing context to use
cen_x (float): x coordinate of center of hand
cen_y (float): y coordinate of center of hand
circle_ratio (float): ratio (from 0.0 to 1.0) that hand has travelled
hand_length (float): the length of the hand
'''
hand_angle = circle_ratio * math.pi * 2.0
vec_x = hand_length * math.sin(hand_angle)
vec_y = -hand_length * math.cos(hand_angle)
# x_scalar doubles the x size to compensate for the interlacing
# in y that would otherwise make the screen appear 2x tall
x_scalar = 2.0
# pointy end of hand
hand_end_x = int(cen_x + (x_scalar * vec_x))
hand_end_y = int(cen_y + vec_y)
# 2 points, perpendicular to the direction of the hand,
# to give a triangle with some width
hand_width_ratio = 0.1
hand_end_x2 = int(cen_x - ((x_scalar * vec_y) * hand_width_ratio))
hand_end_y2 = int(cen_y + (vec_x * hand_width_ratio))
hand_end_x3 = int(cen_x + ((x_scalar * vec_y) * hand_width_ratio))
hand_end_y3 = int(cen_y - (vec_x * hand_width_ratio))
dc.polygon([(hand_end_x, hand_end_y), (hand_end_x2, hand_end_y2),
(hand_end_x3, hand_end_y3)], fill=(255, 255, 255, 255))
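# Worked example: for the minute hand at a quarter past, circle_ratio = 0.25,
# so hand_angle = pi / 2, vec_x = hand_length and vec_y = 0: the hand points
# right. x_scalar then doubles the horizontal reach to cancel the y
# interlacing mentioned above.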
def make_clock_image(current_time):
'''Make a PIL.Image with the current time displayed on it
Args:
        current_time (:class:`datetime.time`): the time to display
    Returns:
        :class:`PIL.Image.Image`: a PIL image with the time displayed on it
'''
    time_text = current_time.strftime("%I:%M:%S %p")
if not SHOW_ANALOG_CLOCK:
return make_text_image(time_text, 8, 6, _clock_font)
# make a blank image for the text, initialized to opaque black
clock_image = Image.new('RGBA', cozmo.oled_face.dimensions(), (0, 0, 0, 255))
# get a drawing context
dc = ImageDraw.Draw(clock_image)
# calculate position of clock elements
text_height = 9
screen_width, screen_height = cozmo.oled_face.dimensions()
analog_width = screen_width
analog_height = screen_height - text_height
cen_x = analog_width * 0.5
cen_y = analog_height * 0.5
# calculate size of clock hands
sec_hand_length = (analog_width if (analog_width < analog_height) else analog_height) * 0.5
min_hand_length = 0.85 * sec_hand_length
hour_hand_length = 0.7 * sec_hand_length
# calculate rotation for each hand
sec_ratio = current_time.second / 60.0
min_ratio = (current_time.minute + sec_ratio) / 60.0
hour_ratio = (current_time.hour + min_ratio) / 12.0
# draw the clock hands
draw_clock_hand(dc, cen_x, cen_y, hour_ratio, hour_hand_length)
draw_clock_hand(dc, cen_x, cen_y, min_ratio, min_hand_length)
draw_clock_hand(dc, cen_x, cen_y, sec_ratio, sec_hand_length)
# draw the digital time_text at the bottom
x = 32
y = screen_height - text_height
dc.text((x, y), time_text, fill=(255, 255, 255, 255), font=None)
return clock_image
def convert_to_time_int(in_value, time_unit):
'''Convert in_value to an int and ensure it is in the valid range for that time unit
(e.g. 0..23 for hours)'''
max_for_time_unit = {'hours': 23, 'minutes': 59, 'seconds': 59}
max_val = max_for_time_unit[time_unit]
try:
int_val = int(in_value)
except ValueError:
raise ValueError("%s value '%s' is not an int" % (time_unit, in_value))
if int_val < 0:
raise ValueError("%s value %s is negative" % (time_unit, int_val))
if int_val > max_val:
raise ValueError("%s value %s exceeded %s" % (time_unit, int_val, max_val))
return int_val
def extract_time_from_args():
''' Extract a (24-hour-clock) user-specified time from the command-line
Supports colon and space separators - e.g. all 3 of "11 22 33", "11:22:33" and "11 22:33"
would map to the same time.
The seconds value is optional and defaults to 0 if not provided.'''
# split sys.argv further for any args that contain a ":"
split_time_args = []
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
split_args = arg.split(':')
for split_arg in split_args:
split_time_args.append(split_arg)
if len(split_time_args) >= 2:
try:
hours = convert_to_time_int(split_time_args[0], 'hours')
minutes = convert_to_time_int(split_time_args[1], 'minutes')
seconds = 0
if len(split_time_args) >= 3:
seconds = convert_to_time_int(split_time_args[2], 'seconds')
return datetime.time(hours, minutes, seconds)
except ValueError as e:
print("ValueError %s" % e)
# Default to no alarm
return None
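# Examples (illustrative): "alarm_clock.py 17:23" -> datetime.time(17, 23, 0);
# "alarm_clock.py 7 30 15" -> datetime.time(7, 30, 15). Out-of-range or
# non-integer values fall through to the default of None (no alarm).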
def get_in_position(robot: cozmo.robot.Robot):
'''If necessary, Move Cozmo's Head and Lift to make it easy to see Cozmo's face'''
if (robot.lift_height.distance_mm > 45) or (robot.head_angle.degrees < 40):
with robot.perform_off_charger():
robot.set_lift_height(0.0).wait_for_completed()
robot.set_head_angle(cozmo.robot.MAX_HEAD_ANGLE).wait_for_completed()
def alarm_clock(robot: cozmo.robot.Robot):
'''The core of the alarm_clock program'''
alarm_time = extract_time_from_args()
if alarm_time:
print("Alarm set for %s" % alarm_time)
else:
print("No Alarm time provided. Usage example: 'alarm_clock.py 17:23' to set alarm for 5:23 PM. (Input uses the 24-hour clock.)")
print("Press CTRL-C to quit")
get_in_position(robot)
was_before_alarm_time = False
last_displayed_time = None
while True:
# Check the current time, and see if it's time to play the alarm
current_time = datetime.datetime.now().time()
do_alarm = False
if alarm_time:
is_before_alarm_time = current_time < alarm_time
do_alarm = was_before_alarm_time and not is_before_alarm_time # did we just cross the alarm time
was_before_alarm_time = is_before_alarm_time
if do_alarm:
# Cancel the latest image display action so that the alarm actions can play
robot.abort_all_actions()
# Speak The Time (off the charger as it's an animation)
with robot.perform_off_charger():
short_time_string = str(current_time.hour) + ":" + str(current_time.minute)
robot.say_text("Wake up lazy human! it's " + short_time_string).wait_for_completed()
else:
# See if the displayed time needs updating
if (last_displayed_time is None) or (current_time.second != last_displayed_time.second):
# Create the updated image with this time
clock_image = make_clock_image(current_time)
oled_face_data = cozmo.oled_face.convert_image_to_screen_data(clock_image)
# display for 1 second
robot.display_oled_face_image(oled_face_data, 1000.0)
last_displayed_time = current_time
# only sleep for a fraction of a second to ensure we update the seconds as soon as they change
time.sleep(0.1)
cozmo.robot.Robot.drive_off_charger_on_connect = False # Cozmo can stay on his charger for this example
cozmo.run_program(alarm_clock)
| mit | -5,040,915,958,097,453,000 | 33.610714 | 136 | 0.640594 | false | 3.400351 | false | false | false |
JioCloud/tempest-lib | tempest_lib/services/identity/v3/token_client.py | 1 | 6711 | # Copyright 2015 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from tempest_lib.common import rest_client
from tempest_lib import exceptions
class V3TokenClient(rest_client.RestClient):
def __init__(self, auth_url, disable_ssl_certificate_validation=None,
ca_certs=None, trace_requests=None):
dscv = disable_ssl_certificate_validation
super(V3TokenClient, self).__init__(
None, None, None, disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs, trace_requests=trace_requests)
if 'auth/tokens' not in auth_url:
auth_url = auth_url.rstrip('/') + '/auth/tokens'
self.auth_url = auth_url
def auth(self, user_id=None, username=None, password=None, project_id=None,
project_name=None, user_domain_id=None, user_domain_name=None,
project_domain_id=None, project_domain_name=None, domain_id=None,
domain_name=None, token=None):
"""Obtains a token from the authentication service
:param user_id: user id
:param username: user name
:param user_domain_id: the user domain id
:param user_domain_name: the user domain name
:param project_domain_id: the project domain id
:param project_domain_name: the project domain name
:param domain_id: a domain id to scope to
:param domain_name: a domain name to scope to
:param project_id: a project id to scope to
:param project_name: a project name to scope to
:param token: a token to re-scope.
Accepts different combinations of credentials.
        Sample valid combinations:
- token
- token, project_name, project_domain_id
- user_id, password
- username, password, user_domain_id
- username, password, project_name, user_domain_id, project_domain_id
Validation is left to the server side.
"""
creds = {
'auth': {
'identity': {
'methods': [],
}
}
}
id_obj = creds['auth']['identity']
if token:
id_obj['methods'].append('token')
id_obj['token'] = {
'id': token
}
if (user_id or username) and password:
id_obj['methods'].append('password')
id_obj['password'] = {
'user': {
'password': password,
}
}
if user_id:
id_obj['password']['user']['id'] = user_id
else:
id_obj['password']['user']['name'] = username
_domain = None
if user_domain_id is not None:
_domain = dict(id=user_domain_id)
elif user_domain_name is not None:
_domain = dict(name=user_domain_name)
if _domain:
id_obj['password']['user']['domain'] = _domain
if (project_id or project_name):
_project = dict()
if project_id:
_project['id'] = project_id
elif project_name:
_project['name'] = project_name
if project_domain_id is not None:
_project['domain'] = {'id': project_domain_id}
elif project_domain_name is not None:
_project['domain'] = {'name': project_domain_name}
creds['auth']['scope'] = dict(project=_project)
elif domain_id:
creds['auth']['scope'] = dict(domain={'id': domain_id})
elif domain_name:
creds['auth']['scope'] = dict(domain={'name': domain_name})
body = json.dumps(creds)
resp, body = self.post(self.auth_url, body=body)
self.expected_success(201, resp.status)
return rest_client.ResponseBody(resp, body)
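    # Request-body sketch for the common username/password login scoped to a
    # project (illustrative values; the field layout mirrors the code above):
    #
    #     {"auth": {
    #         "identity": {
    #             "methods": ["password"],
    #             "password": {"user": {"name": "demo", "password": "secret",
    #                                   "domain": {"name": "Default"}}}},
    #         "scope": {"project": {"name": "demo",
    #                               "domain": {"name": "Default"}}}}}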
def request(self, method, url, extra_headers=False, headers=None,
body=None):
"""A simple HTTP request interface."""
if headers is None:
# Always accept 'json', for xml token client too.
# Because XML response is not easily
# converted to the corresponding JSON one
headers = self.get_headers(accept_type="json")
elif extra_headers:
try:
headers.update(self.get_headers(accept_type="json"))
except (ValueError, TypeError):
headers = self.get_headers(accept_type="json")
resp, resp_body = self.raw_request(url, method,
headers=headers, body=body)
self._log_request(method, url, resp)
if resp.status in [401, 403]:
resp_body = json.loads(resp_body)
raise exceptions.Unauthorized(resp_body['error']['message'])
elif resp.status not in [200, 201, 204]:
raise exceptions.IdentityError(
'Unexpected status code {0}'.format(resp.status))
return resp, json.loads(resp_body)
def get_token(self, **kwargs):
"""Returns (token id, token data) for supplied credentials"""
auth_data = kwargs.pop('auth_data', False)
if not (kwargs.get('user_domain_id') or
kwargs.get('user_domain_name')):
kwargs['user_domain_name'] = 'Default'
if not (kwargs.get('project_domain_id') or
kwargs.get('project_domain_name')):
kwargs['project_domain_name'] = 'Default'
body = self.auth(**kwargs)
token = body.response.get('x-subject-token')
if auth_data:
return token, body['token']
else:
return token
class V3TokenClientJSON(V3TokenClient):
LOG = logging.getLogger(__name__)
def _warn(self):
self.LOG.warning("%s class was deprecated and renamed to %s" %
(self.__class__.__name__, 'V3TokenClient'))
def __init__(self, *args, **kwargs):
self._warn()
super(V3TokenClientJSON, self).__init__(*args, **kwargs)
| apache-2.0 | 876,780,352,556,253,000 | 36.49162 | 79 | 0.56996 | false | 4.186525 | false | false | false |
cprogrammer1994/ModernGL | examples/old-examples/GLWindow/particle_system_2.py | 1 | 1794 |
import math
import random
import struct
import GLWindow
import ModernGL
# Window & Context
wnd = GLWindow.create_window()
ctx = ModernGL.create_context()
prog = ctx.program([
ctx.vertex_shader('''
#version 330
uniform vec2 Screen;
in vec2 vert;
void main() {
gl_Position = vec4((vert / Screen) * 2.0 - 1.0, 0.0, 1.0);
}
'''),
ctx.fragment_shader('''
#version 330
out vec4 color;
void main() {
color = vec4(0.30, 0.50, 1.00, 1.0);
}
''')
])
tvert = ctx.vertex_shader('''
#version 330
in vec2 in_pos;
in vec2 in_prev;
out vec2 out_pos;
out vec2 out_prev;
void main() {
out_pos = in_pos * 2.0 - in_prev;
out_prev = in_pos;
}
''')
transform = ctx.program(tvert, ['out_pos', 'out_prev'])
def particle():
a = random.uniform(0.0, math.pi * 2.0)
r = random.uniform(0.0, 1.0)
cx, cy = wnd.mouse[0], wnd.mouse[1]
return struct.pack('2f2f', cx, cy, cx + math.cos(a) * r, cy + math.sin(a) * r)
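# Added commentary (not in the original example): each particle packs as
# '2f2f' -> (x, y, prev_x, prev_y), and the transform shader above advances
# it by position Verlet integration, new_pos = 2*pos - prev. For instance
# pos=(5.0, 5.0) with prev=(4.9, 5.0) steps to (5.1, 5.0), preserving the
# particle's velocity without storing it explicitly.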
vbo1 = ctx.buffer(b''.join(particle() for i in range(1024)))
vbo2 = ctx.buffer(reserve=vbo1.size)
vao1 = ctx.simple_vertex_array(transform, vbo1, ['in_pos', 'in_prev'])
vao2 = ctx.simple_vertex_array(transform, vbo2, ['in_pos', 'in_prev'])
render_vao = ctx.vertex_array(prog, [
(vbo1, '2f8x', ['vert']),
])
idx = 0
ctx.point_size = 5.0
while wnd.update():
ctx.viewport = wnd.viewport
ctx.clear(0.9, 0.9, 0.9)
prog.uniforms['Screen'].value = wnd.size
for i in range(8):
vbo1.write(particle(), offset=idx * struct.calcsize('2f2f'))
idx = (idx + 1) % 1024
render_vao.render(ModernGL.POINTS, 1024)
vao1.transform(vbo2, ModernGL.POINTS, 1024)
ctx.copy_buffer(vbo1, vbo2)
| mit | 3,263,518,089,570,220,000 | 19.386364 | 82 | 0.574136 | false | 2.738931 | false | false | false |
rliskovenko/DataRobot | app/test.py | 1 | 3085 |
import random
import string
import app
import json
import unittest
from datetime import datetime
App = app.app
testSet = {
True : [
'''[{"date": "2015-05-12T14:36:00.451765",
"md5checksum": "e8c83e232b64ce94fdd0e4539ad0d44f",
"name": "John Doe",
"uid": "1"},
{"date": "2015-05-13T14:38:00.451765",
"md5checksum": "b419795d50db2a35e94c8364978d898f",
"name": "Jane Doe",
"uid": "2"}]''',
'''{"date": "2015-05-12T14:37:00.451765",
"md5checksum": "e8c83e232b64ce94fdd0e4539ad0d44f",
"name": "Carl Doe",
"uid": "3"}'''
],
False : [
'''[{"date": "2015-05-12T14:36:00.451765",
"md5checksum": "fffffff32b64ce94fdd0e4539ad0d44f",
"name": "John Doe",
"uid": "11"},
{"date": "2015-05-13T14:38:00.451765",
"md5checksum": "b419795d50db2a35e94c8364978d898f",
"name": "Jane Doe",
"uid": "12"}]''',
'''{"date": "2015-05-12T14:37:00.451765",
"md5checksum": "ffffff232b64ce94fdd0e4539ad0d44f",
"name": "Carl Doe",
"uid": "13"}''',
'''{"date": "2015-05-14T14:37:00.451765",
"md5checksum": "ffffff232b64ce94fdd0e4539ad0d44f",
"name": "Rozalie Doe",
"uid": "14"}'''
]
}
class DataRobotTestCase( unittest.TestCase ):
def __dbNameGen(self):
return 'test' + ''.join( random.SystemRandom().choice( string.ascii_uppercase + string.digits ) for _ in range( 8 ) )
def __test_add( self, data ):
return self.app.post( '/', data )
def __test_get( self, data ):
jsonData = json.loads( data )
__makeGetUrl = lambda ( x ): '/' + '/'.join( [ x['uid'], datetime.strptime( x['date'], "%Y-%m-%dT%H:%M:%S.%f" ).strftime( "%Y-%m-%d" ) ] )
if isinstance( jsonData, list ):
return [ self.app.get( __makeGetUrl( obj ) ) for obj in jsonData ]
else:
return self.app.get( __makeGetUrl( jsonData ) )
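    # Added illustration: for {"uid": "1", "date": "2015-05-12T14:36:00.451765"}
    # the __makeGetUrl helper above produces the GET URL '/1/2015-05-12'.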
def __run_test(self, data=testSet, sub=__test_add ):
for ( expected, tests ) in data.iteritems():
for test in tests:
res = sub( test )
if isinstance( res, list ):
for subRes in res:
assert expected == ( 'OK' in json.loads( subRes.data )['status'] )
else:
assert expected == ( 'OK' in json.loads( res.data )['status'] )
def setUp(self):
app.MONGODB_HOST = 'localhost'
app.MONGODB_PORT = 27017
app.MONGODB_DB = self.__dbNameGen()
app.TESTING = True
self.app = App.test_client()
def tearDown(self):
app.connection.drop_database( app.MONGODB_DB )
def test_add(self):
self.__run_test( testSet, self.__test_add )
def test_get(self):
self.__run_test( testSet, self.__test_get )
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | 1,129,024,472,684,088,200 | 33.662921 | 146 | 0.506969 | false | 3.196891 | true | false | false |
tanzer/ls-emacs | scripts/lse_compile_language.py | 1 | 3736 |
# -*- coding: utf-8 -*-
# Copyright (C) 1994-2017 Mag. Christian Tanzer. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. [email protected]
#
#++
# Name
# lse_compile_language
#
# Purpose
# Compile emacs language definition
#
# Revision Dates
# 14-Nov-2007 (MG) Creation (ported from bash script)
# 24-May-2011 (MG) Allow language filenames as parameter
# Extract `lse_base_dir` out of the filename of the
# script
# 6-Mar-2012 (MG) Use `subprocess` instead of `os.system` to call
# emacs binary
# 29-Aug-2017 (MG) Switch to python 3
# 29-Aug-2017 (MG) Remove dependency to external libraries
# ««revision-date»»···
#--
import glob
import os
import re
import subprocess
def compile_language (* languages, ** kw) :
pjoin = os.path.join
env = os.environ.get
lse_base_dir = os.path.abspath \
(os.path.join (os.path.dirname (__file__), ".."))
lse_dir = env ("EMACSLSESRC", pjoin (lse_base_dir, "lse"))
lsc_dir = env ("EMACSLSEDIR", pjoin (lse_base_dir, "lse"))
emacs_binary = kw.pop ("emacs_binary", "emacs")
emacs_cmd_file = os.path.abspath (kw.pop ("emacs_cmd_file", None))
if not lse_dir :
raise ValueError ("EMACS LSE Source dir not defined")
files = []
pattern = re.compile (".*lse-language-(.+)\.lse")
for lang_pattern in languages :
if os.path.isfile (lang_pattern) :
match = pattern.match (lang_pattern)
if match :
new_files = [(lang_pattern, match.group (1))]
else :
new_files = []
for lse_language in glob.glob \
(pjoin (lse_dir, "lse-language-%s.lse" % (lang_pattern, ))) :
match = pattern.match (lse_language)
if match :
new_files.append \
((lse_language.replace ("\\", "/"), match.group (1)))
files.extend (new_files)
if not new_files :
            print ("No languages found for pattern `%s`" % (lang_pattern, ))
if files :
correct_path = lambda s : s.replace (os.path.sep, "/")
print ("Compile languages %s" % (", ".join (n for f, n in files), ))
emacs_cmd = \
[ '(setq load-path\n'
' (append (list "%s" "%s") load-path)\n'
')' % (correct_path (lse_base_dir), correct_path (lse_dir))
, '(load "ls-emacs")'
, '(setq trim-versions-without-asking t)'
, '(setq delete-old-versions t)'
]
emacs_cmd.extend ('(lse-language:compile "%s")' % n for _, n in files)
open (emacs_cmd_file, "w").write (" ".join (emacs_cmd))
for k, v in ( ("EMACSLSESRC", lse_dir), ("EMACSLSEDIR", lsc_dir)) :
os.environ [k] = v
try :
subprocess.check_call \
([emacs_binary, "-batch", "-l", emacs_cmd_file])
except :
print ("Error compiling language")
if os.path.isfile (emacs_cmd_file) :
os.unlink (emacs_cmd_file)
# end def compile_language
if __name__ == "__main__" :
import argparse
parser = argparse.ArgumentParser ()
parser.add_argument ("language", type = str, nargs = "+")
parser.add_argument ("-b", "--emacs_binary", type = str, default="emacs")
parser.add_argument \
( "-c", "--emacs_cmd_file", type = str
, default="lse_compile_language_cmdfile"
)
cmd = parser.parse_args ()
compile_language \
( emacs_binary = cmd.emacs_binary
, emacs_cmd_file = cmd.emacs_cmd_file
, * cmd.language
)
### __END__ lse_compile_language
| gpl-2.0 | -165,061,289,321,706,660 | 36.666667 | 78 | 0.539287 | false | 3.279683 | false | false | false |
Cadair/solarbextrapolation | solarbextrapolation/analyticalmodels/base.py | 1 | 8605 |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 19:30:22 2015
@author: alex_
"""
# General Imports
import matplotlib as mpl
mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
import numpy as np
#import pickle
import time
from datetime import datetime
#from collections import namedtuple
import warnings
import inspect
#from sunpy.sun._constants import physical_constants as con
# SunPy imports
import sunpy.map
from sunpy.sun import constants, sun
from sunpy.time import parse_time, is_time
from astropy.table import Table
import astropy.units as u
from mayavi import mlab
# Internal imports
#from solarbextrapolation.utilities import si_this_map
from solarbextrapolation.map3dclasses import Map3D
class AnalyticalModel(object):
"""
Common class for the development of anylitical models of magnetic fields.
Use the models to evaluate the accuracy of an extrapolation routine with
the figures of merit.
"""
def __init__(self, **kwargs):
# Default grid shape and physical ranges for the volume the model covers.
self.shape = kwargs.get('shape', u.Quantity([5, 5, 5] * u.pixel)) # (x,y,z)
self.xrange = kwargs.get('xrange', u.Quantity([-10, 10] * u.Mm))
self.yrange = kwargs.get('yrange', u.Quantity([-10, 10] * u.Mm))
        self.zrange = kwargs.get('zrange', u.Quantity([0, 20] * u.Mm))
# Metadata
        self.meta = {'ZNAXIS': 3, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value, 'ZNAXIS3': self.shape[2].value}
self.meta['analytical_model_notes'] = kwargs.get('notes', '')
self.meta['BUNIT'] = kwargs.get('bunit', u.T)
# CRVALn, CDELTn and NAXIS (alreadu in meta) used for storing range in 2D fits files.
self.filepath = kwargs.get('filepath', None)
self.routine = kwargs.get('analytical_model_routine', type(self))
# Default 3D magnetic field
#X,Y,Z = np.zeros(self.shape.value), np.zeros(self.shape.value), np.zeros(self.shape.value)
npField = np.zeros([3]+list(np.array(self.shape.value, dtype=np.int)))
self.field = Map3D(npField, self.meta)
# Default magnetic field on boundary
magnetogram = np.zeros(np.array(self.shape[0:2].value, dtype=np.int))
magnetogram_header = {'ZNAXIS': 2, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value}
self.magnetogram = sunpy.map.Map((magnetogram, magnetogram_header))
def _generate_field(self, **kwargs):
"""
The method for running a model to generate the field.
This is the primary method to be edited in subclasses for specific
model implementations.
"""
# Model code goes here.
arr_4d = np.zeros([int(self.map_boundary_data.data.shape[0]), int(self.map_boundary_data.data.shape[1]), 1, 3])
# Turn the 4D array into a Map3D object.
map_output = Map3D( arr_4d, self.meta, xrange=self.xrange, yrange=self.yrange, zrange=self.zrange, xobsrange=self.xrange, yobsrange=self.yrange )
return map_output
def generate(self, **kwargs):
"""
Method to be called to calculate the vector field and return as a Map3D object.
Times and saves the extrapolation where applicable.
"""
# Record the time and duration of the extrapolation.
dt_start = datetime.now()
tim_start = time.time()
arr_output = self._generate_field(**kwargs)
tim_duration = time.time() - tim_start
# Add the duration and time to the meta/header data.
arr_output.meta['extrapolator_start_time'] = dt_start.isoformat()
arr_output.meta['extrapolator_duration'] = tim_duration
arr_output.meta['extrapolator_duration_unit'] = u.s
        # Save the Map3D if a filepath has been set (to avoid losing work).
if self.filepath:
arr_output.save(self.filepath)
# Add the output map to the object and return.
self.map = arr_output
return arr_output
def to_los_magnetogram(self, **kwargs):
"""
Calculate the LoS vector field as a SunPy map and return.
Generally this will require that you have run generate(self, ``**kwargs``)
first, so in the base class this is checked, but it is not always the
case as some models may allow this to be determined without calculating
the full field.
.. I'm not sure if this is a good default.
"""
return self.magnetogram
def to_vec_magnetogram(self, **kwargs):
"""
Calculate the vector field as a SunPy map and return.
Generally this will require that you have run ``generate(self, **kwargs)``
first, so in the base class this is checked, but it is not always the
case as some models may allow this to be determined without calculating
the full field. ######### I'm not sure if this is a good default.
"""
return self.magnetogram
if __name__ == '__main__':
# User-specified parameters
tup_shape = ( 20, 20, 20 )
x_range = ( -80.0, 80 ) * u.Mm
y_range = ( -80.0, 80 ) * u.Mm
z_range = ( 0.0, 120 ) * u.Mm
# Derived parameters (make SI where applicable)
x_0 = x_range[0].to(u.m).value
Dx = (( x_range[1] - x_range[0] ) / ( tup_shape[0] * 1.0 )).to(u.m).value
x_size = Dx * tup_shape[0]
y_0 = y_range[0].to(u.m).value
Dy = (( y_range[1] - y_range[0] ) / ( tup_shape[1] * 1.0 )).to(u.m).value
y_size = Dy * tup_shape[1]
z_0 = z_range[0].to(u.m).value
Dz = (( z_range[1] - z_range[0] ) / ( tup_shape[2] * 1.0 )).to(u.m).value
z_size = Dy * tup_shape[2]
# Define the extrapolator as a child of the Extrapolators class
class AnaOnes(AnalyticalModel):
def __init__(self, **kwargs):
super(AnaOnes, self).__init__(**kwargs)
def _generate_field(self, **kwargs):
# Adding in custom parameters to the metadata
self.meta['analytical_model_routine'] = 'Ones Model'
# Generate a trivial field and return (X,Y,Z,Vec)
outshape = list(np.array(self.shape.value, dtype=np.int)) + [3]
arr_4d = np.ones(outshape)
return Map3D(arr_4d, self.meta)
# Setup an anylitical model
xrange = u.Quantity([ 50, 300] * u.arcsec)
yrange = u.Quantity([-350, -100] * u.arcsec)
zrange = u.Quantity([ 0, 250] * u.arcsec)
aAnaMod = AnaOnes()
aMap3D = aAnaMod.generate()
# Visualise the 3D vector field
from solarbextrapolation.visualisation_functions import visualise
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
boundary_units=[1.0*u.arcsec, 1.0*u.arcsec],
show_volume_axes=True,
debug=False)
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
show_volume_axes=False,
debug=False)
mlab.show()
"""
# For B_I field only, to save re-creating this interpolator for every cell.
    A_I_r_perp_interpolator = interpolate_A_I_from_r_perp(flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, (x_size**2 + y_size**2 + z_size**2)**(0.5)*1.2, 10000)
field = np.zeros( ( tup_shape[0], tup_shape[1], tup_shape[2], 3 ) )
for i in range(0, tup_shape[0]):
for j in range(0, tup_shape[1]):
for k in range(0, tup_shape[2]):
# Position of this point in space
x_pos = x_0 + ( i + 0.5 ) * Dx
y_pos = y_0 + ( j + 0.5 ) * Dy
z_pos = z_0 + ( k + 0.5 ) * Dz
#field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0)
#field[i,j,k] = B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q)
#field[i,j,k] = B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0) + B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q) + B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
map_field = Map3D( field, {}, xrange=x_range, yrange=y_range, zrange=z_range )
np_boundary_data = field[:,:,0,2].T
dummyDataToMap(np_boundary_data, x_range, y_range)
#dic_boundary_data = { 'datavals': np_boundary_data.data.shape[0]**2, 'dsun_obs': 147065396219.34, }
visualise(map_field, scale=1.0*u.Mm, show_volume_axes=True, debug=True)
"""
| mit | -4,708,356,561,791,386,000 | 39.21028 | 264 | 0.604648 | false | 3.072117 | false | false | false |
RNAcentral/rnacentral-import-pipeline | rnacentral_pipeline/rnacentral/r2dt/parser.py | 1 | 3852 |
# -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import logging
import typing as ty
from pathlib import Path
from rnacentral_pipeline import psql
from rnacentral_pipeline.rnacentral.r2dt import data, ribovore
LOGGER = logging.getLogger(__name__)
def load_model_info(handle: ty.TextIO) -> ty.Dict[str, data.ModelDatabaseInfo]:
mapping = {}
for entry in psql.json_handler(handle):
info = data.ModelDatabaseInfo.build(entry)
mapping[entry["model_name"]] = info
if info.source is data.Source.gtrnadb:
mapping[entry["model_name"].replace("_", "-")] = info
mapping[entry["model_name"].replace("-", "_")] = info
if entry["model_name"] == "tRNA":
mapping["RF00005"] = info
return mapping
def load_hit_info(base: Path, allow_missing: bool):
source_directories = [
(base / "crw", data.Source.crw),
(base / "gtrnadb", data.Source.gtrnadb),
(base / "ribovision-lsu", data.Source.ribovision),
(base / "ribovision-ssu", data.Source.ribovision),
(base / "rfam", data.Source.rfam),
(base / "RF00005", data.Source.rfam),
(base / "rnasep", data.Source.rnase_p),
]
has_ribovision = {data.Source.crw, data.Source.ribovision, data.Source.rfam}
hit_info = {}
for (path, source) in source_directories:
if not path.exists():
continue
if source in has_ribovision and path.name != "RF00005":
update = ribovore.as_dict(path, allow_missing=allow_missing)
if update:
hit_info.update(update)
return hit_info
def parse(
info_path: ty.TextIO, base: Path, allow_missing=False
) -> ty.Iterator[data.R2DTResult]:
if not base.exists():
raise ValueError("Cannot parse missing directory: %s" % base)
hit_info = load_hit_info(base, allow_missing)
model_info = load_model_info(info_path)
result_base = base / "results"
metadata_path = result_base / "tsv" / "metadata.tsv"
seen = set()
seen_urs = set()
with metadata_path.open("r") as raw:
reader = csv.reader(raw, delimiter="\t")
for row in reader:
urs = row[0]
model_name = row[1]
source = data.Source.build(row[2])
if model_name not in model_info:
raise ValueError("No info for model %s", model_name)
minfo = model_info[model_name]
info = data.R2DTResultInfo(urs, minfo, source, result_base)
if info in seen:
                LOGGER.warn("Duplicate line in metadata for %s", info)
continue
seen.add(info)
if info.urs in seen_urs:
raise ValueError(f"Impossible state of >1 hit per URS for {info}")
seen_urs.add(info.urs)
try:
info.validate()
except Exception as e:
if allow_missing:
LOGGER.warn("Did not find all required files for %s", urs)
LOGGER.exception(e)
continue
else:
raise e
hit = None
if info.has_hit_info():
hit = hit_info[urs]
yield data.R2DTResult.from_info(info, hit_info=hit)
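# Hypothetical usage sketch (added; the paths are invented):
#
#   with open("models.json") as info:
#       for result in parse(info, Path("r2dt/output"), allow_missing=True):
#           print(result)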
| apache-2.0 | 3,470,230,791,381,858,300 | 34.666667 | 82 | 0.60514 | false | 3.668571 | false | false | false |
keras-team/autokeras | examples/automodel_with_cnn.py | 1 | 1358 |
# Library import
import numpy as np
import tensorflow as tf
import autokeras as ak
# Prepare example Data - Shape 1D
num_instances = 100
num_features = 5
x_train = np.random.rand(num_instances, num_features).astype(np.float32)
y_train = np.zeros(num_instances).astype(np.float32)
y_train[0 : int(num_instances / 2)] = 1
x_test = np.random.rand(num_instances, num_features).astype(np.float32)
y_test = np.zeros(num_instances).astype(np.float32)
y_test[0 : int(num_instances / 2)] = 1
x_train = np.expand_dims(
    x_train, axis=2
)  # This step is very important: a CNN will only accept this data shape
x_test = np.expand_dims(x_test, axis=2)  # added: the test set needs the same shape
print(x_train.shape)
print(y_train.shape)
# Prepare Automodel for search
input_node = ak.Input()
output_node = ak.ConvBlock()(input_node)
# output_node = ak.DenseBlock()(output_node) #optional
# output_node = ak.SpatialReduction()(output_node) #optional
output_node = ak.ClassificationHead(num_classes=2, multi_label=True)(output_node)
auto_model = ak.AutoModel(
inputs=input_node, outputs=output_node, overwrite=True, max_trials=1
)
# Search
auto_model.fit(x_train, y_train, epochs=1)
print(auto_model.evaluate(x_test, y_test))
# Export as a Keras Model
model = auto_model.export_model()
print(type(model.summary()))
# print model as image
tf.keras.utils.plot_model(
model, show_shapes=True, expand_nested=True, to_file="name.png"
)
| apache-2.0 | 1,371,042,242,022,571,300 | 27.291667 | 81 | 0.72975 | false | 2.877119 | false | false | false |
jeremy-miller/life-python | life/display.py | 1 | 1176 |
"""This module displays the Life 'grid'."""
import numpy
class DisplayClass(object): # pylint: disable=R0903
"""This class displays the Life 'grid'.
No OpenGL or Matplotlib UI is used since this program is being executed
in a Docker container. The 'curses' Python package is also not used
since it also has problems detecting the terminal when executed in a
Docker container.
"""
@staticmethod
def display(grid):
"""This function displays the Life 'grid' to the console.
Each iteration of the game will display a new grid in the console.
This function loops through each index in the grid, checking if
each cell is 'living' or 'dead', and adding the appropriate symbol
to the grid output.
Args:
grid (array): A Numpy two-dimensional array which is the 'grid' to be
displayed in the console.
"""
output = ''
for index, value in numpy.ndenumerate(grid): # example 'index' = (0,0), example 'value' = 1
if value:
output += ' O'
else:
output += ' .'
if index[1] == grid.shape[1] - 1: # check to see if we are at the end of a row
output += '\n'
print(output)
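# Added usage illustration (hypothetical grid):
#
#   grid = numpy.array([[1, 0], [0, 1]])
#   DisplayClass.display(grid)
#
# prints:
#    O .
#    . O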
| mit | 8,947,968,542,597,361,000 | 31.666667 | 96 | 0.654762 | false | 3.959596 | false | false | false |
jeffsilverm/presentation | SeaGL-2018/network_stats_result_2_csv.py | 1 | 3115 |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
import csv
import datetime
import sys
def str_to_time_delta(string) -> datetime.timedelta:
"""
:param string: Input in format 0:01:37.083557
:return: datetime.timedelta
"""
flds = string.split(":")
    hours = int(flds[0])
    minutes = int(flds[1])
    seconds = float(flds[2])  # timedelta requires numeric arguments, not strings
td = datetime.timedelta(hours=hours, minutes=minutes, seconds=seconds)
return td
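# Added worked example: str_to_time_delta("0:01:37.083557") returns
# datetime.timedelta(minutes=1, seconds=37.083557), i.e. about 97.08
# seconds in total.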
# From
# with open('eggs.csv', 'w', newline='') as csv_file:
# spamwriter = csv.writer(csv_file, delimiter=' ',
# quotechar='|', quoting=csv.QUOTE_MINIMAL)
#
with open(file=sys.argv[2], mode="w", newline="") as csv_file:
spamwriter = csv.writer(csv_file)
# git tag MONDAY
spamwriter.writerow(
['retries', 'elapsed', 'delay', 'loss', 'size', 'rate', 'proto', 'GTRs'])
with open(file=sys.argv[1], mode="r") as f:
for line in f:
# format of a line is:
# Retries: 0 Elapsed time: 0:01:16.489403 Delay: 10.3 loss percent: 20 size: 1000000 bytes data rate:
# 13073.706432249184 bytes/sec protocol: IPv6
# I'm not going to do any sanity checking. I might regret that later
# 0 "Retries:
# 1 retries as an string of an integer
# 2 "Elapsed"
# 3 "time:"
# 4 elapsed_time as a string of a datetime.timedelta
# 5 "Delay:"
# 6 delay_ms as a string of a float
# 7 "loss"
# 8 "percent:"
# 9 loss_percent as a float
# 10 "size:"
# 11 size a string as a integer
# 12 "bytes"
# 13 "data"
# 14 "rate:"
# 15 data_rate a string as a float
# 16 "bytes/sec"
# 17 "protocol:"
# 18 a string either IPv4 or IPv6
# After the November 5th, added Global TCP Retries (GTRs)
# 19: "Global"
# 20: "TCP"
# 21: "retries:"
# 22 GTRs a string as an int
fields = line.split()
# I'm converting the strings to data types and then
# back to strs again because I am doing some sanity checking
retries = int(fields[1])
# Pandas can handle an elapsed time, no need to convert
elapsed_time = fields[4]
delay_ms = float(fields[6])
loss_percent = float(fields[9])
size = int(fields[11])
data_rate = float(fields[15])
if fields[18] == "IPv4":
protocol = "IPv4"
elif fields[18] == "IPv6":
protocol = "IPv6"
else:
raise ValueError("fields[18] should be 'IPv4' or 'IPv6' but is "
f"{fields[18]}")
gtrs = int(fields[22])
row_str = [str(retries), str(elapsed_time), str(delay_ms),
str(loss_percent), str(size), str(data_rate), protocol, gtrs]
spamwriter.writerow(row_str)
| gpl-2.0 | 7,128,494,810,145,858,000 | 36.53012 | 113 | 0.50626 | false | 3.730539 | false | false | false |
bcgsc/ProbeGenerator | probe_generator/reference.py | 1 | 2779 |
"""Parse and extract base pair sequences from an Ensembl reference genome.
"""
from probe_generator import sequence
from probe_generator.exceptions import NonFatalError
def bases(sequence_range, genome):
"""Return the bases from a SequenceRange object.
"""
raw_bases = _raw_bases(
sequence_range.chromosome,
sequence_range.start,
sequence_range.end,
genome)
if sequence_range.reverse_complement:
return sequence.reverse_complement(raw_bases)
else:
return raw_bases
def reference_genome(genome):
"""Map chromosomes to base pair sequences.
`genome` is a handle to a reference genome in Ensembl FASTA format.
Returns a dictionary.
"""
genome_map = {}
chromosome = None
for line in genome:
if line.startswith('>'):
chromosome = line[1:].split()[0]
# In an Ensembl reference genome, the chromosome is the first
# string of characters after the '>' but before whitespace.
# E.g.:
            # >chr Homo sapiens some chromosome etc etc
# NNN...
genome_map[chromosome] = []
elif chromosome is None:
raise InvalidGenomeFile(
"could not parse input: {!r}".format(
line))
else:
genome_map[chromosome].append(line.strip())
if not genome_map:
raise InvalidGenomeFile("genome file empty!")
return {chromosome: ''.join(bases)
for (chromosome, bases)
in genome_map.items()}
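# Added worked example: given a FASTA handle whose contents are
#   >chr1 Homo sapiens chromosome 1
#   ACGT
#   ACGT
# reference_genome(...) returns {'chr1': 'ACGTACGT'}.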
def _raw_bases(chromosome, start, end, genome):
"""Return a string of the base pairs of chromosome from start to end.
The start and end attributes follow the Python convention for slices
(indexed from zero, start inclusive, end exclusive).
The genome is a dictionary relating chromosome names to base pair sequences
(which are strings).
"""
try:
base_pairs = genome[chromosome][start:end]
except KeyError:
raise MissingChromosome(
"no such chromosome: {!r}".format(
chromosome))
if end - start != len(base_pairs):
raise NonContainedRange(
"range [{0}:{1}] outside the "
"range of chromosome {2!r}".format(
start, end, chromosome))
return base_pairs
class NonContainedRange(Exception):
"""Raised when the range of base pairs which is to be sliced from a
chromosome includes base pairs outside the chromosome.
"""
class InvalidGenomeFile(Exception):
    """Raised when a genome file cannot be parsed.
"""
class MissingChromosome(NonFatalError):
"""Raised when a chromosome is not present in the reference genome.
"""
| gpl-3.0 | 8,711,707,107,422,563,000 | 28.252632 | 79 | 0.618928 | false | 4.32866 | false | false | false |
crackinglandia/pype32 | setup.py | 1 | 4235 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Nahuel Riva
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__revision__ = "$Id$"
__all__ = ['metadata', 'setup']
from distutils.core import setup
from distutils import version
from warnings import warn
import re
import os
import sys
import glob
# Distutils hack: in order to be able to build MSI installers with loose
# version numbers, we subclass StrictVersion to accept loose version numbers
# and convert them to the strict format. This works because Distutils will
# happily reinstall a package even if the version number matches exactly the
# one already installed on the system - so we can simply strip all extraneous
# characters and beta/postrelease version numbers will be treated just like
# the base version number.
if __name__ == '__main__':
StrictVersion = version.StrictVersion
class NotSoStrictVersion (StrictVersion):
def parse (self, vstring):
components = []
for token in vstring.split('.'):
token = token.strip()
match = re.search('^[0-9]+', token)
if match:
number = token[ match.start() : match.end() ]
components.append(number)
vstring = '.'.join(components)
return StrictVersion.parse(self, vstring)
version.StrictVersion = NotSoStrictVersion
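    # Added worked example: NotSoStrictVersion().parse('0.1-alpha5') keeps
    # only the leading digits of each dot-separated token, so the loose
    # version string is treated as the strict '0.1'.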
# Get the base directory
here = os.path.dirname(__file__)
if not here:
here = os.path.curdir
# Text describing the module (reStructured text)
try:
readme = os.path.join(here, 'README')
long_description = open(readme, 'r').read()
except Exception:
warn("README file not found or unreadable!")
long_description = """pype32 is python library to read and write PE/PE+ binary files."""
# Get the list of scripts in the "tools" folder
scripts = glob.glob(os.path.join(here, 'tools', '*.py'))
# Set the parameters for the setup script
metadata = {
# Setup instructions
'provides' : ['pype32'],
'packages' : ['pype32'],
'scripts' : scripts,
# Metadata
'name' : 'pype32',
'version' : '0.1-alpha5',
'description' : 'Yet another Python library to read and write PE/PE+ files.',
'long_description' : long_description,
'author' : 'Nahuel Riva',
'author_email' : 'crackinglandia'+chr(64)+'gmail'+chr(0x2e)+'com',
'url' : 'https://github.com/crackinglandia/pype32',
'keywords' : ['pecoff', 'x86', 'x64', '.net', 'parser'],
'download_url' : 'https://github.com/crackinglandia/pype32/tarball/v0.1-alpha5',
}
# Execute the setup script
if __name__ == '__main__':
setup(**metadata)
| bsd-3-clause | 1,081,775,200,471,822,000 | 39.721154 | 92 | 0.677922 | false | 4.076035 | false | false | false |
myvoice-nigeria/myvoice | myvoice/clinics/migrations/0013_auto__chg_field_visit_service.py | 1 | 13197 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Visit.service'
db.alter_column(u'clinics_visit', 'service_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['clinics.Service'], null=True))
def backwards(self, orm):
# Changing field 'Visit.service'
db.alter_column(u'clinics_visit', 'service_id', self.gf('django.db.models.fields.related.ForeignKey')(default=0, to=orm['clinics.Service']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstatistic': {
'Meta': {'unique_together': "[('clinic', 'statistic', 'month')]", 'object_name': 'ClinicStatistic'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'float_value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'int_value': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'month': ('django.db.models.fields.DateField', [], {}),
'rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'statistic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.Statistic']"}),
'text_value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.region': {
'Meta': {'unique_together': "(('external_id', 'type'),)", 'object_name': 'Region'},
'alternate_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'external_id': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'lga'", 'max_length': '16'})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'clinics.visitregistrationerror': {
'Meta': {'object_name': 'VisitRegistrationError'},
'error_count': ('django.db.models.fields.PositiveIntegerField', [], {}),
'error_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'clinics.visitregistrationerrorlog': {
'Meta': {'object_name': 'VisitRegistrationErrorLog'},
'error_type': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '160'}),
'message_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'statistics.statistic': {
'Meta': {'object_name': 'Statistic'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['statistics.StatisticGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'statistic_type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'statistics.statisticgroup': {
'Meta': {'object_name': 'StatisticGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
}
}
    complete_apps = ['clinics']
| bsd-2-clause | -2,682,326,157,635,498,500 | 74.850575 | 195 | 0.548079 | false | 3.568686 | false | false | false |
m00nlight/hackerrank | algorithm/contests/Counter-Code-2015/C.py | 1 | 1148 |
from __future__ import division
from sys import stdin
def solve(n, m):
if n % 2 == 0:
if m % n == 0:
return n // 2 + 1
elif m % n % 2 == 1:
return (m % n + 1) // 2
else:
return n + 1 - m % n // 2
else:
idx = m % (2 * n)
if idx == 0:
return (n + 1) // 2
else:
if idx <= (n + 1):
if idx == n:
return (n + 1) // 2
elif idx == n + 1:
return n
else:
if idx % 2 == 1:
return (idx + 1) // 2
else:
return (n + 1 - idx // 2)
else:
idx = idx - (n + 1)
if idx % 2 == 1:
return (idx + 1) // 2
else:
return (n - idx // 2)
if __name__ == '__main__':
t = int(stdin.readline())
for _ in range(t):
n, m = map(int, stdin.readline().strip().split())
ans = solve(n, m)
        print(str(ans) + ' ' + str(m // n - (1 if m % n == 0 else 0)))
| gpl-2.0 | -6,132,813,347,740,916,000 | 27.02439 | 70 | 0.313589 | false | 3.788779 | false | false | false |
linsalrob/PhageHosts | code/codon_distance.py | 1 | 2853 |
'''
Calculate the distance between two codon usages.
We have two files, the first with just the phages and the second
with their hosts. Then we need to calculate which of the hosts is
closest
'''
import os
import sys
sys.path.append('/home3/redwards/bioinformatics/Modules')
import numpy as np
import scipy
remove_ambiguous = True # do we want ambiguous bases or not
codons = set([
'AAA', 'AAC', 'AAG', 'AAT', 'ACA', 'ACC', 'ACG', 'ACT',
'AGA', 'AGC', 'AGG', 'AGT', 'ATA', 'ATC', 'ATG', 'ATT',
'CAA', 'CAC', 'CAG', 'CAT', 'CCA', 'CCC', 'CCG', 'CCT',
'CGA', 'CGC', 'CGG', 'CGT', 'CTA', 'CTC', 'CTG', 'CTT',
'GAA', 'GAC', 'GAG', 'GAT', 'GCA', 'GCC', 'GCG', 'GCT',
'GGA', 'GGC', 'GGG', 'GGT', 'GTA', 'GTC', 'GTG', 'GTT',
'TAA', 'TAC', 'TAG', 'TAT', 'TCA', 'TCC', 'TCG', 'TCT',
'TGA', 'TGC', 'TGG', 'TGT', 'TTA', 'TTC', 'TTG', 'TTT'
])
def distance(x, y):
'''
Calculate the Euclidean distance between codon usages. An alternate
solution would be to use either np.linalg.norm or
scipy.spatial but neither of these are working on my system'''
return np.sqrt(np.sum((x-y)**2))
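# Added worked example: distance(np.array([0.0, 0.0]), np.array([3.0, 4.0]))
# evaluates to 5.0, the ordinary Euclidean norm of the difference vector.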
def remove_ambiguous_bases(header, cds):
'''
Remove any codons that contain ambiguous bases.
'''
temp=[cds[0]]
for i in range(1,len(header)):
if header[i] in codons:
temp.append(cds[i])
return temp
try:
phageF = sys.argv[1]
bactF = sys.argv[2]
except:
sys.exit(sys.argv[0] + " <phage file> <hosts file>\n")
cds = {}
header = None
with open(bactF, 'r') as bf:
for line in bf:
if line.startswith('Locus'):
header = line.strip().split("\t")
for i in range(len(header)):
header[i] = header[i].strip()
continue
line = line.rstrip()
p = line.split("\t")
if remove_ambiguous:
p = remove_ambiguous_bases(header, p)
cds[p[0]] = np.array([float(x) for x in p[1:len(p)]])
header = None
with open(phageF, 'r') as ph:
for line in ph:
if line.startswith('Locus'):
header = line.strip().split("\t")
for i in range(len(header)):
header[i] = header[i].strip()
continue
line = line.rstrip()
p = line.split("\t")
lowestScore = 1000
bestHits = []
if remove_ambiguous:
p = remove_ambiguous_bases(header, p)
a1 = np.array([float(x) for x in p[1:len(p)]])
for c in cds:
#dist = scipy.spatial.distance.cdist(a1, cds[c])
#dist = np.linalg.norm(a1-cds[c])
dist = distance(a1, cds[c])
if dist < lowestScore:
lowestScore = dist
bestHits = [c]
elif dist == lowestScore:
bestHits.append(c)
print p[0]+ "\t" + "\t".join(bestHits)
| mit | -4,651,376,398,173,520,000 | 29.351064 | 72 | 0.539783 | false | 3.091008 | false | false | false |
MTgeophysics/mtpy | tests/analysis/test_pt.py | 1 | 7128 |
# -*- coding: utf-8 -*-
"""
TEST mtpy.core.mt.MT
@author: YG
"""
from unittest import TestCase
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 31 13:19:35 2017
@author: u64125
[email protected]
"""
import os
import numpy as np
from mtpy.core.mt import MT
from tests import TEST_MTPY_ROOT
import mtpy.analysis.geometry as mtg
class Test_PT(TestCase):
def test_pt(self):
self.mtobj = MT(os.path.normpath(os.path.join(TEST_MTPY_ROOT, "examples/data/edi_files/pb42c.edi")))
self.pt_expected = np.array([[[ 1.30644963e+00, -2.67740187e-02],
[ -1.33702443e-02, 1.28968939e+00]],
[[ 1.21678059e+00, -1.07765729e-02],
[ -8.20007589e-03, 1.23374034e+00]],
[[ 1.17164177e+00, 1.09018782e-03],
[ -6.68510048e-03, 1.18271654e+00]],
[[ 1.22540541e+00, 4.38999476e-03],
[ -4.20009647e-03, 1.24116127e+00]],
[[ 1.22262143e+00, -1.27947436e-02],
[ -4.73195876e-03, 1.25493677e+00]],
[[ 1.21501297e+00, -8.79427102e-03],
[ 1.03830156e-02, 1.22427493e+00]],
[[ 1.22785045e+00, 1.39792917e-02],
[ -7.08673035e-03, 1.23846962e+00]],
[[ 1.26661703e+00, -1.11292454e-02],
[ 1.82801360e-03, 1.26240177e+00]],
[[ 1.18539706e+00, 6.39442474e-03],
[ -1.01453767e-02, 1.25514910e+00]],
[[ 1.28549981e+00, -1.00606766e-01],
[ 3.97760695e-02, 1.32053655e+00]],
[[ 1.22555721e+00, -6.29531701e-02],
[ 3.36638894e-02, 1.24514491e+00]],
[[ 1.15217304e+00, 2.47597860e-02],
[ -4.69132792e-02, 1.28928907e+00]],
[[ 1.07175797e+00, -3.58092355e-03],
[ -3.12450311e-02, 1.19733081e+00]],
[[ 1.00918431e+00, -1.48723334e-02],
[ -1.04135860e-03, 1.06274597e+00]],
[[ 9.15517149e-01, -7.13677311e-03],
[ 4.49100302e-03, 9.67281170e-01]],
[[ 7.82696110e-01, 1.70157289e-02],
[ 1.87039067e-02, 8.29411722e-01]],
[[ 7.05442477e-01, 3.78377052e-02],
[ 2.11076586e-02, 7.39844699e-01]],
[[ 6.35185233e-01, 4.73463102e-02],
[ 3.31681155e-02, 6.45232848e-01]],
[[ 5.55546920e-01, 6.54610202e-02],
[ 6.89078895e-02, 5.23858436e-01]],
[[ 5.33096567e-01, 7.08103577e-02],
[ 6.49382268e-02, 4.46884668e-01]],
[[ 5.27354094e-01, 8.09968253e-02],
[ 1.96849609e-02, 3.71188472e-01]],
[[ 5.11384716e-01, 8.77380469e-02],
[ 1.36652476e-02, 2.64391007e-01]],
[[ 5.07676485e-01, 8.88590722e-02],
[ -2.89224644e-03, 2.26830209e-01]],
[[ 5.32226186e-01, 7.99515723e-02],
[ -8.08381040e-03, 1.72606458e-01]],
[[ 5.88599443e-01, 7.82062018e-02],
[ -8.45485953e-03, 1.64746123e-01]],
[[ 6.08649155e-01, 8.25165235e-02],
[ -2.18321304e-02, 1.89799568e-01]],
[[ 6.72877101e-01, 7.17000488e-02],
[ -8.23242896e-02, 2.38847621e-01]],
[[ 7.83704974e-01, 9.35718439e-02],
[ -1.08804893e-01, 2.69048188e-01]],
[[ 8.10341816e-01, 9.92141045e-02],
[ -1.26495824e-01, 2.81539705e-01]],
[[ 9.44396211e-01, 9.79869018e-02],
[ -1.86664281e-01, 3.53878350e-01]],
[[ 1.20372744e+00, 1.43106117e-01],
[ -1.82486049e-01, 4.45265471e-01]],
[[ 1.16782854e+00, 1.13799885e-01],
[ -1.75825646e-01, 4.46497807e-01]],
[[ 1.34754960e+00, 7.86821351e-02],
[ -1.52050649e-01, 5.27637774e-01]],
[[ 1.54766037e+00, 1.07732214e-01],
[ -1.24203091e-01, 6.35758473e-01]],
[[ 1.57964820e+00, 7.39413746e-02],
[ -1.02148722e-01, 6.66546887e-01]],
[[ 1.62101014e+00, 9.00546725e-02],
[ -5.05253680e-02, 7.14423033e-01]],
[[ 1.68957924e+00, 3.97165705e-02],
[ 4.57251401e-02, 7.76737215e-01]],
[[ 1.66003469e+00, 3.22243697e-02],
[ 9.00225059e-02, 8.14143062e-01]],
[[ 1.62779118e+00, 3.26316490e-03],
[ 1.68213765e-01, 7.85939990e-01]],
[[ 1.51783857e+00, -1.45050231e-02],
[ 2.23460898e-01, 7.96441583e-01]],
[[ 1.41377974e+00, -3.64217144e-02],
[ 2.56732302e-01, 8.12803360e-01]],
[[ 1.32448223e+00, -9.04193565e-02],
[ 2.46858147e-01, 8.54516882e-01]],
[[ 1.22981959e+00, -1.86648528e-01],
[ 3.20105326e-01, 8.15014902e-01]]])
assert(np.all(np.abs((self.pt_expected - self.mtobj.pt.pt)/self.pt_expected) < 1e-6))
alpha_expected = np.array([-33.66972565, -65.89384737, -76.59867325, 89.65473659,
-75.76307747, 85.13326608, 73.50684783, -32.810132 ,
-88.46092736, -59.97035554, -61.88664666, -85.4110878 ,
-82.24967714, -81.72640079, -88.53701804, 71.29889577,
60.1345369 , 48.55666153, 38.3651419 , 28.79048968,
16.40517236, 11.16030354, 8.50965433, 5.65066256,
4.67255493, 4.12192474, -0.70110747, -0.84768598,
-1.47667976, -4.27011302, -1.48608617, -2.45732916,
-2.55670157, -0.51738522, -0.88470366, 1.24832387,
2.67364329, 4.11167901, 5.75654718, 8.07694833,
10.06615916, 9.20560479, 8.91737594])
beta_expected = np.array([-0.14790673, -0.03012061, 0.09460956, 0.09976904, -0.09322928,
-0.22522043, 0.24468941, -0.14677427, 0.19414636, -1.54172397,
-1.11970814, 0.84076362, 0.3492499 , -0.19123344, -0.17692124,
-0.02999968, 0.33160131, 0.31720792, -0.09148111, 0.17165854,
1.95175741, 2.72709705, 3.56012648, 3.55975888, 3.28108606,
3.72287137, 4.79442926, 5.44077452, 5.8397381 , 6.18330647,
5.58466467, 5.08560032, 3.50735531, 3.03177428, 2.24126272,
1.7223648 , -0.06979335, -0.66910857, -1.95471268, -2.93540374,
-3.75023764, -4.39936596, -6.95935213])
azimuth_expected = alpha_expected-beta_expected
assert(np.all(np.abs((alpha_expected - self.mtobj.pt.alpha)/alpha_expected) < 1e-6))
assert(np.all(np.abs((beta_expected - self.mtobj.pt.beta)/beta_expected) < 1e-6))
assert(np.all(np.abs((azimuth_expected - self.mtobj.pt.azimuth)/azimuth_expected) < 1e-6))
# pi1 = 0.5*((self.pt_expected[:,0,0] - self.pt_expected[:,1,1])**2 +\
# (self.pt_expected[:,0,1] + self.pt_expected[:,1,0])**2)**0.5
# pi2 = 0.5*((self.pt_expected[:,0,0] + self.pt_expected[:,1,1])**2 +\
# (self.pt_expected[:,0,1] - self.pt_expected[:,1,0])**2)**0.5
# phimin_expected = np.degrees(pi2 - pi1)
# phimax_expected = np.degrees(pi2 + pi1)
# assert(np.all(np.abs(phimin_expected - self.mtobj.pt.phimin)/phimin_expected) < 1e-6)
        # assert(np.all(np.abs(phimax_expected - self.mtobj.pt.phimax)/phimax_expected) < 1e-6)
| gpl-3.0 | 4,011,371,443,513,376,300 | 34.824121 | 108 | 0.542789 | false | 2.19661 | true | false | false |
gnmiller/craig-bot | craig-bot/lib/python3.6/site-packages/discord/ext/commands/context.py | 1 | 10694 |
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2019 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import discord.abc
import discord.utils
class Context(discord.abc.Messageable):
r"""Represents the context in which a command is being invoked under.
This class contains a lot of meta data to help you understand more about
the invocation context. This class is not created manually and is instead
passed around to commands as the first parameter.
This class implements the :class:`abc.Messageable` ABC.
Attributes
-----------
message: :class:`.Message`
The message that triggered the command being executed.
bot: :class:`.Bot`
The bot that contains the command being executed.
args: :class:`list`
The list of transformed arguments that were passed into the command.
If this is accessed during the :func:`on_command_error` event
then this list could be incomplete.
kwargs: :class:`dict`
A dictionary of transformed arguments that were passed into the command.
Similar to :attr:`args`\, if this is accessed in the
:func:`on_command_error` event then this dict could be incomplete.
prefix: :class:`str`
The prefix that was used to invoke the command.
command
The command (i.e. :class:`.Command` or its subclasses) that is being
invoked currently.
invoked_with: :class:`str`
The command name that triggered this invocation. Useful for finding out
which alias called the command.
invoked_subcommand
The subcommand (i.e. :class:`.Command` or its subclasses) that was
invoked. If no valid subcommand was invoked then this is equal to
`None`.
subcommand_passed: Optional[:class:`str`]
The string that was attempted to call a subcommand. This does not have
to point to a valid registered subcommand and could just point to a
nonsense string. If nothing was passed to attempt a call to a
subcommand then this is set to `None`.
command_failed: :class:`bool`
A boolean that indicates if the command failed to be parsed, checked,
or invoked.
"""
def __init__(self, **attrs):
self.message = attrs.pop('message', None)
self.bot = attrs.pop('bot', None)
self.args = attrs.pop('args', [])
self.kwargs = attrs.pop('kwargs', {})
self.prefix = attrs.pop('prefix')
self.command = attrs.pop('command', None)
self.view = attrs.pop('view', None)
self.invoked_with = attrs.pop('invoked_with', None)
self.invoked_subcommand = attrs.pop('invoked_subcommand', None)
self.subcommand_passed = attrs.pop('subcommand_passed', None)
self.command_failed = attrs.pop('command_failed', False)
self._state = self.message._state
async def invoke(self, *args, **kwargs):
r"""|coro|
Calls a command with the arguments given.
This is useful if you want to just call the callback that a
:class:`.Command` holds internally.
.. note::
This does not handle converters, checks, cooldowns, pre-invoke,
            or after-invoke hooks in any manner. It calls the internal callback
            directly as if it were a regular function.
You must take care in passing the proper arguments when
using this function.
.. warning::
The first parameter passed **must** be the command being invoked.
Parameters
-----------
command: :class:`.Command`
A command or subclass of a command that is going to be called.
\*args
The arguments to to use.
\*\*kwargs
The keyword arguments to use.
"""
try:
command = args[0]
except IndexError:
raise TypeError('Missing command to invoke.') from None
arguments = []
if command.cog is not None:
arguments.append(command.cog)
arguments.append(self)
arguments.extend(args[1:])
ret = await command.callback(*arguments, **kwargs)
return ret
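    # Hypothetical usage (added; not part of discord.py) from inside a
    # command callback with context ``ctx``:
    #
    #   other = ctx.bot.get_command('stats')
    #   await ctx.invoke(other, member=some_member)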
async def reinvoke(self, *, call_hooks=False, restart=True):
"""|coro|
Calls the command again.
This is similar to :meth:`~.Context.invoke` except that it bypasses
checks, cooldowns, and error handlers.
.. note::
If you want to bypass :exc:`.UserInputError` derived exceptions,
it is recommended to use the regular :meth:`~.Context.invoke`
as it will work more naturally. After all, this will end up
using the old arguments the user has used and will thus just
fail again.
Parameters
------------
call_hooks: :class:`bool`
Whether to call the before and after invoke hooks.
restart: :class:`bool`
Whether to start the call chain from the very beginning
or where we left off (i.e. the command that caused the error).
The default is to start where we left off.
"""
cmd = self.command
view = self.view
if cmd is None:
raise ValueError('This context is not valid.')
# some state to revert to when we're done
index, previous = view.index, view.previous
invoked_with = self.invoked_with
invoked_subcommand = self.invoked_subcommand
subcommand_passed = self.subcommand_passed
if restart:
to_call = cmd.root_parent or cmd
view.index = len(self.prefix)
view.previous = 0
view.get_word() # advance to get the root command
else:
to_call = cmd
try:
await to_call.reinvoke(self, call_hooks=call_hooks)
finally:
self.command = cmd
view.index = index
view.previous = previous
self.invoked_with = invoked_with
self.invoked_subcommand = invoked_subcommand
self.subcommand_passed = subcommand_passed
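    # Illustrative usage (not part of the original file): a common pattern is
    # letting the bot owner bypass cooldowns by re-running the command from a
    # global error handler:
    #
    #     @bot.event
    #     async def on_command_error(ctx, error):
    #         if isinstance(error, commands.CommandOnCooldown) \
    #                 and await ctx.bot.is_owner(ctx.author):
    #             await ctx.reinvoke()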
@property
def valid(self):
"""Checks if the invocation context is valid to be invoked with."""
return self.prefix is not None and self.command is not None
async def _get_channel(self):
return self.channel
@property
def cog(self):
"""Returns the cog associated with this context's command. None if it does not exist."""
if self.command is None:
return None
return self.command.cog
@discord.utils.cached_property
def guild(self):
"""Returns the guild associated with this context's command. None if not available."""
return self.message.guild
@discord.utils.cached_property
def channel(self):
"""Returns the channel associated with this context's command. Shorthand for :attr:`.Message.channel`."""
return self.message.channel
@discord.utils.cached_property
def author(self):
"""Returns the author associated with this context's command. Shorthand for :attr:`.Message.author`"""
return self.message.author
@discord.utils.cached_property
def me(self):
"""Similar to :attr:`.Guild.me` except it may return the :class:`.ClientUser` in private message contexts."""
return self.guild.me if self.guild is not None else self.bot.user
@property
def voice_client(self):
r"""Optional[:class:`.VoiceClient`]: A shortcut to :attr:`.Guild.voice_client`\, if applicable."""
g = self.guild
return g.voice_client if g else None
async def send_help(self, *args):
"""send_help(entity=<bot>)
|coro|
Shows the help command for the specified entity if given.
The entity can be a command or a cog.
If no entity is given, then it'll show help for the
entire bot.
If the entity is a string, then it looks up whether it's a
:class:`Cog` or a :class:`Command`.
.. note::
Due to the way this function works, instead of returning
something similar to :meth:`~.commands.HelpCommand.command_not_found`
this returns :class:`None` on bad input or no help command.
Parameters
------------
entity: Optional[Union[:class:`Command`, :class:`Cog`, :class:`str`]]
The entity to show help for.
Returns
--------
Any
The result of the help command, if any.
"""
from .core import Group, Command
bot = self.bot
cmd = bot.help_command
if cmd is None:
return None
cmd = cmd.copy()
cmd.context = self
if len(args) == 0:
await cmd.prepare_help_command(self, None)
mapping = cmd.get_bot_mapping()
return await cmd.send_bot_help(mapping)
entity = args[0]
if entity is None:
return None
if isinstance(entity, str):
entity = bot.get_cog(entity) or bot.get_command(entity)
try:
qualified_name = entity.qualified_name
except AttributeError:
# if we're here then it's not a cog, group, or command.
return None
await cmd.prepare_help_command(self, entity.qualified_name)
if hasattr(entity, '__cog_commands__'):
return await cmd.send_cog_help(entity)
elif isinstance(entity, Group):
return await cmd.send_group_help(entity)
elif isinstance(entity, Command):
return await cmd.send_command_help(entity)
else:
return None
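    # Illustrative usage (not part of the original file): showing help for the
    # currently running command when the caller omits a required argument:
    #
    #     @bot.command()
    #     async def tag(ctx, name: str = None):
    #         if name is None:
    #             await ctx.send_help(ctx.command)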
| mit | -5,694,569,240,403,202,000 | 34.885906 | 117 | 0.628016 | false | 4.465136 | false | false | false |
seanbell/opensurfaces | server/bsdfs/experiments.py | 1 | 9415 | import json
from decimal import Decimal
from collections import Counter
from django.conf import settings
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.models import User
from accounts.models import UserProfile
from shapes.models import Shape, MaterialShape
from bsdfs.models import EnvironmentMap, ShapeBsdfLabel_wd, ShapeBsdfQuality
def configure_experiments():
""" This function is automatically called by
the command ./manage.py mtconfigure """
# must be imported locally to avoid a circular import
from mturk.utils import configure_experiment
# aliases
sandbox = settings.MTURK_SANDBOX
production = not sandbox
# set up envmaps
envmap = EnvironmentMap.objects.get_or_create(
user=User.objects.get_or_create(
username='admin')[0].get_profile(),
name='ennis')
for envmap in EnvironmentMap.objects.all():
configure_experiment(
slug='bsdf_wd',
template_dir='bsdfs/experiments',
module='bsdfs.experiments',
examples_group_attr='shape',
variant={'envmap_id': envmap.id},
version=1, # 2: intrinsic images, 1: original opensurfaces
reward=Decimal('0.10'),
num_outputs_max=1,
contents_per_hit=10,
max_active_hits=2000,
content_type_model=MaterialShape,
out_content_type_model=ShapeBsdfLabel_wd,
out_content_attr='shape',
content_filter={
#'synthetic': True,
#'synthetic_slug__in': ['teapot', 'teacup', 'spoon', 'coyote'],
'invalid': False,
'pixel_area__gt': Shape.MIN_PIXEL_AREA,
'num_vertices__gte': 10,
'correct': True,
'substance__isnull': False,
'substance__fail': False,
'photo__whitebalanced': True,
'photo__scene_category_correct': True,
},
title='Adjust a blob to match an image',
description='Looking at an image, your goal is to adjust the appearance '
'of a blob so that it matches a target photograph. A modern '
'browser is required.',
keywords='material,appearance,image,picture,classify,BRDF,microfacet,blob,appearance',
frame_height=1150,
requirements={},
#qualifications='{ "bsdf_match": 1 }',
auto_add_hits=False, # settings.MTURK_SANDBOX,
)
for attr in ('color', 'gloss'):
content_filter = {
'invalid': False,
'shape__invalid': False,
'give_up': False,
#'shape__pixel_area__gt': Shape.MIN_PIXEL_AREA,
#'shape__correct': True,
#'shape__substance__isnull': False,
#'shape__substance__fail': False,
#'shape__photo__whitebalanced': True,
#'shape__photo__scene_category_correct': True,
}
if production and attr == 'gloss':
content_filter['color_correct'] = True
configure_experiment(
slug='quality_bsdf_%s' % attr,
template_dir='bsdfs/experiments',
module='bsdfs.experiments',
examples_group_attr='shape',
variant={'bsdf_version': 'wd'},
version=1, # 2: intrinsic images, 1: original opensurfaces
reward=Decimal('0.04'),
num_outputs_max=5,
contents_per_hit=40,
content_type_model=ShapeBsdfLabel_wd,
out_content_type_model=ShapeBsdfQuality,
out_content_attr='shapebsdflabel_wd',
content_filter=content_filter,
title='Click on blobs that match an image (%s)' % attr,
description='This task involves clicking on images that match a blob next to the image.',
keywords='material,substance,shape,image,picture,classify,label,blob,match,appearance',
#frame_height=7500,
requirements={},
auto_add_hits=False, # settings.MTURK_SANDBOX,
)
def update_votes_cubam(show_progress=False):
""" This function is automatically called by
mturk.tasks.mturk_update_votes_cubam_task """
from mturk.cubam import update_votes_cubam
changed_objects = []
for bsdf_version in ('wd',):
bsdf_ct = ContentType.objects.get(
app_label="bsdfs", model="shapebsdflabel_%s" % bsdf_version)
bsdf_model = bsdf_ct.model_class()
# gloss
changed_objects += update_votes_cubam(
bsdf_model, ShapeBsdfQuality.objects.filter(
invalid=False, content_type=bsdf_ct,
gloss_correct__isnull=False),
'object_id', 'gloss_correct', 'gloss_correct',
score_threshold=0, min_votes=5,
show_progress=show_progress,
return_changed_objects=True,
experiment_filter={
'slug': 'quality_bsdf_gloss',
'variant': json.dumps({'bsdf_version': bsdf_version}),
}
)
# color
changed_objects += update_votes_cubam(
bsdf_model, ShapeBsdfQuality.objects.filter(
invalid=False, content_type=bsdf_ct,
color_correct__isnull=False),
'object_id', 'color_correct', 'color_correct',
score_threshold=0, min_votes=5,
show_progress=show_progress,
return_changed_objects=True,
experiment_filter={
'slug': 'quality_bsdf_color',
'variant': json.dumps({'bsdf_version': bsdf_version}),
}
)
return changed_objects
def update_changed_objects(changed_objects):
""" This function is automatically called by
mturk.tasks.mturk_update_votes_cubam_task
with all objects that were changed by new votes. """
pass
def external_task_extra_context(slug, context):
""" Add extra context for each task (called by
``mturk.views.external.external_task_GET``) """
if slug.startswith('bsdf'):
context['html_yes'] = 'blob matches'
context['html_no'] = 'blob does not match'
elif slug.startswith('quality_bsdf_color'):
context['html_yes'] = 'color matches'
context['html_no'] = 'color does not match'
elif slug.startswith('quality_bsdf_gloss'):
context['html_yes'] = 'gloss matches'
context['html_no'] = 'gloss does not match'
def configure_qualifications():
from mturk.models import MtQualification, MtQualificationAssignment
from mturk.utils import get_or_create_mturk_worker
#
# BSDF matching
bsdfmatch = MtQualification.objects.get_or_create(
slug="bsdf_match",
defaults={
'name': "Appearance Matching Master",
'keywords': "appearance,matching,blob,graphics,BRDF",
'description': "You are an expert at matching the appearance of a synthetic blob and a shape in an image."
}
)[0]
good_users = dict(Counter(
ShapeBsdfLabel_wd.objects
.filter(color_correct=True, gloss_correct=True)
.values_list('user__mturk_worker_id', flat=True)
).most_common())
bad_users = dict(Counter(
ShapeBsdfLabel_wd.objects
.filter(Q(color_correct=False) | Q(gloss_correct=False))
.values_list('user__mturk_worker_id', flat=True)
).most_common())
for (id, ngood) in good_users.iteritems():
nbad = bad_users[id] if id in bad_users else 0
if ngood + nbad > 0:
perc = float(ngood) / float(ngood + nbad)
if ngood >= 30:
worker = UserProfile.objects.get(mturk_worker_id=id)
if perc >= 0.75:
bsdfmatch_asst, created = bsdfmatch.assignments.get_or_create(
worker=worker)
print 'Granting bsdf_match to %s (%s good, %s bad)' % (id, ngood, nbad)
bsdfmatch_asst.set_value(1)
elif perc < 0.1 and not worker.always_approve:
# worker.block(reason=("For blob matching tasks, your accuracy is %s%%, which is too low. " +
#"Most workers have an accuracy above 75%%.") % int(perc * 100))
print 'WOULD block user %s (%s good, %s bad, %s%%)' % (
worker.mturk_worker_id, ngood, nbad, perc * 100)
elif perc < 0.5:
try:
bsdfmatch.assignments.get(worker=worker).set_value(0)
print 'Revoking bsdf_match from %s (%s good, %s bad)' % (id, ngood, nbad)
except MtQualificationAssignment.DoesNotExist:
pass
elif nbad >= 30 and perc < 0.1 and not worker.always_approve:
# worker.block(reason=("For blob matching tasks, your accuracy is %s%%, which is too low. " +
#"Most workers have an accuracy above 75%%.") % int(perc * 100))
print 'WOULD block user %s (%s good, %s bad, %s%%)' % (
worker.mturk_worker_id, ngood, nbad, perc * 100)
#
# Grant quals to admin
if settings.MTURK_ADMIN_WORKER_ID:
admin_user = get_or_create_mturk_worker(settings.MTURK_ADMIN_WORKER_ID)
bsdfmatch.assignments.get_or_create(worker=admin_user)[0].set_value(1)
| mit | 1,125,210,961,934,694,400 | 38.229167 | 118 | 0.581413 | false | 3.842857 | true | false | false |
N9dZ/LearnCodeTheHardWay | ex24.py | 1 | 1131 | print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
# there's variable for a real poem
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
# print content of the poem
print "----------------"
print poem
print "----------------"
five = 10 - 2 + 3 - 6
print "This should be five: %s" % five
# a function to make some calculations
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
# some calculations
start_point = 10000
beans, jars, crates = secret_formula(start_point)
# one variable for one value
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
# use the function's return to value the results directly
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point) | gpl-2.0 | 2,550,332,171,832,219,600 | 31.342857 | 83 | 0.697613 | false | 3.132964 | false | false | false |
tanghaibao/goatools | goatools/godag/relationship_str.py | 1 | 3956 | """Create strings representing relationships on GO Terms.
+------- has 'part_of' relationship(s)
| +-- pointed to by a GO ID with a 'part_of' relationship
| |
V V
GO:0008150 L00 D00 .... .rdu biological_process
GO:0050896 L01 D01 .... .rdu response to stimulus
GO:0042221 L02 D02 .... p... response to chemical
GO:0032501 L01 D01 .... .rdu multicellular organismal process
GO:0003008 L02 D02 .... .r.. system process
GO:0051606 L02 D02 .... .... detection of stimulus
GO:0050877 L03 D03 .... .rdu nervous system process
GO:0009593 L03 D03 P... .... detection of chemical stimulus
GO:0007600 L04 D04 .... pr.. sensory perception
GO:0050906 L03 D03 P... .... detection of stimulus involved in sensory perception
GO:0050890 L04 D04 .... .... cognition
GO:0050907 L04 D04 P... .... detection of chemical stimulus involved in sensory perception
GO:0007606 L05 D05 .... p... sensory perception of chemical stimulus
GO:0050893 L05 D05 P... .... sensory processing
GO:0050911 L05 D05 P... .... detection of chemical stimulus involved in sensory perception of smell
GO:0007608 L06 D06 .... p... sensory perception of smell
"""
__copyright__ = "Copyright (C) 2010-2019, DV Klopfenstein, H Tang, All rights reserved."
__author__ = "DV Klopfenstein"
from collections import OrderedDict
from goatools.godag.consts import RELATIONSHIP_LIST
from goatools.godag.consts import RELATIONSHIP_SET
# pylint: disable=too-few-public-methods,bad-whitespace
class RelationshipStr(object):
"""Create strings representing relationships on GO Terms."""
# go-basic.obo: fmt(1.2) rel(2019-02-20) 47,177 GO Terms; optional_attrs(relationship)
# relationship:
# 6,882 part_of
# 3,230 regulates
# 2,804 negatively_regulates
# 2,785 positively_regulates
rel2chr = OrderedDict([
('part_of', 'P'),
('regulates', 'R'),
('negatively_regulates', 'D'),
('positively_regulates', 'U')])
rev2chr = OrderedDict([
('part_of', 'p'),
('regulates', 'r'),
('negatively_regulates', 'd'),
('positively_regulates', 'u')])
def __init__(self, relationships=None):
assert set(self.rel2chr.keys()) == RELATIONSHIP_SET
# Ordered relationships
_rels = relationships if relationships else set()
self.rels = [r for r in RELATIONSHIP_LIST if r in _rels]
def str_relationships(self, goobj):
"""Get a string representing the presence of absence of relationships. Ex: P..."""
rel_cur = goobj.relationship
return "".join([self.rel2chr.get(r, '?') if r in rel_cur else '.' for r in self.rels])
def str_rel_short(self, goobj):
"""Get a string representing the presence of absence of relationships. Ex: P"""
if not goobj.relationship:
return ''
rel_cur = goobj.relationship
return "".join([self.rel2chr.get(r, '?') for r in self.rels if r in rel_cur])
def str_relationships_rev(self, goobj):
"""Get a string representing the presence of absence of relationships. Ex: pr.."""
rel_cur = goobj.relationship_rev
return "".join([self.rev2chr[r] if r in rel_cur else '.' for r in self.rels])
def prt_keys(self, prt, pre):
"""Print the alias for a relationship and its alias."""
prt.write('{PRE}Relationship to parent: {ABC}\n'.format(
PRE=pre, ABC=''.join(self.rel2chr.values())))
for rel, alias in self.rel2chr.items():
prt.write('{PRE} {A} {DESC}\n'.format(PRE=pre, A=alias, DESC=rel))
prt.write('\n{PRE}Relationship to child: {ABC}\n'.format(
PRE=pre, ABC=''.join(self.rev2chr.values())))
for rel, alias in self.rev2chr.items():
prt.write('{PRE} {A} {DESC}\n'.format(PRE=pre, A=alias, DESC=rel))
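# Illustrative usage (not part of the original module): given a GO term object
# whose DAG was loaded with the optional 'relationship' attribute, one might
# print the encoded relationship flags like so:
#
#     relstr = RelationshipStr(relationships={'part_of', 'regulates'})
#     print(goterm.id, relstr.str_relationships(goterm))  # e.g. "GO:0009593 P."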
# Copyright (C) 2010-2019, DV Klopfenstein, H Tang, All rights reserved.
| bsd-2-clause | 6,686,008,549,504,692,000 | 42.472527 | 99 | 0.63094 | false | 3.299416 | false | false | false |
jclgoodwin/bustimes.org.uk | vehicles/test_service_map_consumer.py | 1 | 3587 | import vcr
from freezegun import freeze_time
from channels.testing import WebsocketCommunicator
from django.test import TestCase, override_settings
from django.core.cache import cache
from django.utils import timezone
from busstops.models import Region, Service, ServiceCode, StopPoint, DataSource, SIRISource, Operator
from bustimes.models import Route, Calendar, Trip
from buses.routing import application
from .siri_one_shot import siri_one_shot
@override_settings(CACHES={'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'}})
class WebsocketConsumerTest(TestCase):
@classmethod
def setUpTestData(cls):
source = DataSource.objects.create(name='Icarus')
destination = StopPoint.objects.create(common_name='Plymouth Aerodrome', active=True)
region = Region.objects.create(id='SW', name='South West')
operator = Operator.objects.create(id='SDVN', region=region, name='Stagecoach Devonshire')
cls.service = Service.objects.create(service_code='swe_33-FLC-_-y10', date='2019-06-08')
cls.service.operator.add(operator)
route = Route.objects.create(service=cls.service, source=source)
calendar = Calendar.objects.create(start_date='2019-06-08', mon=True, tue=True, wed=True, thu=True, fri=True,
sat=True, sun=True)
Trip.objects.create(route=route, start='20:40', end='20:50', calendar=calendar, destination=destination)
cls.code_1 = ServiceCode.objects.create(service=cls.service, code='FLCN', scheme='Devon SIRI')
cls.code_2 = ServiceCode.objects.create(service=cls.service, code='FLC', scheme='Bucks SIRI')
cls.siri_source = SIRISource.objects.create(name='Devon', requestor_ref='torbaydevon_siri_traveline',
url='http://data.icarus.cloudamber.com/StopMonitoringRequest.ashx')
async def test_service_map_consumer(self):
with vcr.use_cassette('data/vcr/icarus.yaml'):
with freeze_time('2019-06-08'):
url = f"/ws/vehicle_positions/services/{self.service.id}"
communicator = WebsocketCommunicator(application, url)
connected, subprotocol = await communicator.connect()
self.assertTrue(connected)
message = await communicator.receive_json_from()
self.assertEqual(message, [])
def test_siri_one_shot(self):
# url = f'/vehicles.json?service={self.service.id}'
with vcr.use_cassette('data/vcr/icarus.yaml'):
with freeze_time('2019-06-08'):
now = timezone.now()
with self.assertNumQueries(2):
self.assertEqual('nothing scheduled', siri_one_shot(self.code_1, now, False))
with self.assertNumQueries(1):
self.assertEqual('cached (nothing scheduled)', siri_one_shot(self.code_1, now, False))
self.assertEqual('nothing scheduled', cache.get(f'{self.service.id}:Icarus'))
with freeze_time('2019-06-08 20:37+01:00'):
now = timezone.now()
with self.assertNumQueries(49):
self.assertIsNone(siri_one_shot(self.code_1, now, True))
with self.assertNumQueries(1):
self.assertEqual('cached (line name)', siri_one_shot(self.code_1, now, True))
key = 'http://data.icarus.cloudamber.com/StopMonitoringRequest.ashx:torbaydevon_siri_traveline:FLCN'
self.assertEqual('line name', cache.get(key))
| mpl-2.0 | -4,007,397,036,994,494,000 | 51.75 | 119 | 0.647338 | false | 3.717098 | true | false | false |
hugolm84/tomahawk-charts | scraper/tomahawk/spiders/rdiospider.py | 1 | 2964 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Hugo Lindström <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from scrapy.http.request import Request
import json
from tomahawkspider import TomahawkCrawlSpider, TomahawkSpiderHelper
from tomahawk.itemloaders import TomahawkItemLoader
class RdioSpider(TomahawkCrawlSpider):
name = "Rdio"
base_url = "http://api.rdio.com/1/"
oauth_key = 'gk8zmyzj5xztt8aj48csaart'
oauth_consumer_secret = 'yt35kakDyW'
# Regions, might change http://www.rdio.com/availability/
regions = [ "US"]#, "SE", "CA", "DE", "GB", "AU",
#"BE", "BR", "DK", "EE", "FI", "FR",
#"IS", "IE","IT", "LV", "LT", "NL",
#"NZ", "NO", "PT", "ES"]
default_region = "US"
default_type = "Track"
base_types = ["Artist", "Album", "Track"]
def __init__(self, name=None, **kwargs):
super(RdioSpider, self).__init__()
def start_requests(self):
for base_type in self.base_types:
for region in self.regions:
yield Request(url=self.base_url, method='POST', dont_filter=True,
meta={'oauth_method_args': {'method': 'getTopCharts','type': base_type,'_region': region}},
callback=self.__parse_as_chart__)
def do_create_chart(self, chart, response):
meta = response.meta['oauth_method_args']
name = "Top Overall"
type = meta['type']
region = meta['_region']
chart.add_value("name", name)
chart.add_value("id", name+type+region)
chart.add_value("type", type)
chart.add_value("geo", region)
chart.add_value("description", "%s %s's in %s" % (name, type, region))
return chart
def do_parse(self, chart, response):
response = json.loads(response.body)
item_type = self.do_get_type(chart)
for rank, items in enumerate(response['result']):
entry = TomahawkItemLoader()
entry.add_value(item_type, items.pop('name'))
if item_type != TomahawkSpiderHelper.ArtistType.lower():
entry.add_value("artist",items.pop("artist"))
entry.add_value("rank", rank)
chart.add_value("list", entry.load_item())
return self.do_process_item(chart)
| gpl-2.0 | -8,073,873,393,976,325,000 | 36.987179 | 121 | 0.619305 | false | 3.5358 | false | false | false |
kakunbsc/enigma2.2 | lib/python/Plugins/SystemPlugins/NFIFlash/flasher.py | 2 | 10609 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.Standby import TryQuitMainloop
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.Sources.Progress import Progress
from Components.Sources.Boolean import Boolean
from Components.Label import Label
from Components.FileList import FileList
from Components.Task import Task, Job, JobManager
from Tools.Directories import fileExists
from Tools.HardwareInfo import HardwareInfo
from os import system
from enigma import eConsoleAppContainer
import re
class writeNAND(Task):
def __init__(self,job,param,box):
Task.__init__(self,job, ("Writing image file to NAND Flash"))
self.setTool("/usr/lib/enigma2/python/Plugins/SystemPlugins/NFIFlash/mywritenand")
if box == "dm7025":
self.end = 256
elif box[:5] == "dm800":
self.end = 512
if box == "dm8000":
self.setTool("/usr/lib/enigma2/python/Plugins/SystemPlugins/NFIFlash/dm8000_writenand")
self.args += param
self.weighting = 1
def processOutput(self, data):
print "[writeNand] " + data
if data == "." or data.endswith(" ."):
self.progress += 1
elif data.find("*** done!") > 0:
print "data.found done"
self.setProgress(self.end)
else:
self.output_line = data
class NFISummary(Screen):
skin = """
<screen position="0,0" size="132,64">
<widget source="title" render="Label" position="2,0" size="120,14" valign="center" font="Regular;12" />
<widget source="content" render="Label" position="2,14" size="120,34" font="Regular;12" transparent="1" zPosition="1" />
<widget source="job_progresslabel" render="Label" position="66,50" size="60,14" font="Regular;12" transparent="1" halign="right" zPosition="0" />
<widget source="job_progressbar" render="Progress" position="2,50" size="66,14" borderWidth="1" />
</screen>"""
def __init__(self, session, parent):
Screen.__init__(self, session, parent)
self["title"] = StaticText(_("Image flash utility"))
self["content"] = StaticText(_("Please select .NFI flash image file from medium"))
self["job_progressbar"] = Progress()
self["job_progresslabel"] = StaticText("")
def setText(self, text):
self["content"].setText(text)
class NFIFlash(Screen):
skin = """
<screen name="NFIFlash" position="90,95" size="560,420" title="Image flash utility">
<ePixmap pixmap="750S/buttons/green.png" position="140,0" zPosition="0" size="140,40" transparent="1" alphatest="on" />
<ePixmap pixmap="750S/buttons/yellow.png" position="280,0" zPosition="0" size="140,40" transparent="1" alphatest="on" />
<ePixmap pixmap="750S/buttons/blue.png" position="420,0" zPosition="0" size="140,40" transparent="1" alphatest="on" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#18188b" transparent="1" />
<widget source="listlabel" render="Label" position="16,44" size="200,21" valign="center" font="Regular;18" />
<widget name="filelist" position="0,68" size="260,260" scrollbarMode="showOnDemand" />
<widget source="infolabel" render="Label" position="270,44" size="280,284" font="Regular;16" />
<widget source="job_progressbar" render="Progress" position="10,374" size="540,26" borderWidth="1" backgroundColor="#254f7497" />
<widget source="job_progresslabel" render="Label" position="180,378" zPosition="2" font="Regular;18" halign="center" transparent="1" size="200,22" foregroundColor="#000000" />
<widget source="statusbar" render="Label" position="10,404" size="540,16" font="Regular;16" foregroundColor="#cccccc" />
</screen>"""
def __init__(self, session, cancelable = True, close_on_finish = False):
self.skin = NFIFlash.skin
Screen.__init__(self, session)
self["job_progressbar"] = Progress()
self["job_progresslabel"] = StaticText("")
self["finished"] = Boolean()
self["infolabel"] = StaticText("")
self["statusbar"] = StaticText(_("Please select .NFI flash image file from medium"))
self["listlabel"] = StaticText(_("select .NFI flash file")+":")
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["actions"] = ActionMap(["OkCancelActions", "ColorActions", "DirectionActions"],
{
"green": self.ok,
"yellow": self.reboot,
"ok": self.ok,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down
}, -1)
currDir = "/media/usb/"
self.filelist = FileList(currDir, matchingPattern = "^.*\.(nfi|NFI)")
self["filelist"] = self.filelist
self.nfifile = ""
self.md5sum = ""
  self.job = None
  self.box = HardwareInfo().get_device_name()
  self.no_autostart = False  # referenced in closeCB() but never initialised in the original
def closeCB(self):
if ( self.job is None or self.job.status is not self.job.IN_PROGRESS ) and not self.no_autostart:
self.close()
#else:
#if self.cancelable:
#self.cancel()
def up(self):
self["filelist"].up()
self.check_for_NFO()
def down(self):
self["filelist"].down()
self.check_for_NFO()
def right(self):
self["filelist"].pageDown()
self.check_for_NFO()
def left(self):
self["filelist"].pageUp()
self.check_for_NFO()
def check_for_NFO(self):
self.session.summary.setText(self["filelist"].getFilename())
if self["filelist"].getFilename() is None:
return
if self["filelist"].getCurrentDirectory() is not None:
self.nfifile = self["filelist"].getCurrentDirectory()+self["filelist"].getFilename()
if self.nfifile.upper().endswith(".NFI"):
self["key_green"].text = _("Flash")
nfofilename = self.nfifile[0:-3]+"nfo"
if fileExists(nfofilename):
nfocontent = open(nfofilename, "r").read()
self["infolabel"].text = nfocontent
pos = nfocontent.find("MD5:")
if pos > 0:
self.md5sum = nfocontent[pos+5:pos+5+32] + " " + self.nfifile
else:
self.md5sum = ""
else:
self["infolabel"].text = _("No details for this image file") + ":\n" + self["filelist"].getFilename()
self.md5sum = ""
else:
self["infolabel"].text = ""
self["key_green"].text = ""
def ok(self):
if self.job is None or self.job.status is not self.job.IN_PROGRESS:
if self["filelist"].canDescent(): # isDir
self["filelist"].descent()
self.session.summary.setText(self["filelist"].getFilename())
self.check_for_NFO()
else:
self.queryFlash()
def queryFlash(self):
fd = open(self.nfifile, 'r')
print fd
sign = fd.read(11)
print sign
if sign.find("NFI1" + self.box + "\0") == 0:
if self.md5sum != "":
self["statusbar"].text = ("Please wait for md5 signature verification...")
self.session.summary.setText(("Please wait for md5 signature verification..."))
self.container = eConsoleAppContainer()
self.container.setCWD(self["filelist"].getCurrentDirectory())
self.container.appClosed.append(self.md5finished)
self.container.dataSent.append(self.md5ready)
self.container.execute("md5sum -cw -")
self.container.write(self.md5sum)
else:
self.session.openWithCallback(self.queryCB, MessageBox, _("This .NFI file does not have a md5sum signature and is not guaranteed to work. Do you really want to burn this image to flash memory?"), MessageBox.TYPE_YESNO)
else:
self.session.open(MessageBox, (_("This .NFI file does not contain a valid %s image!") % (self.box.upper())), MessageBox.TYPE_ERROR)
def md5ready(self, retval):
self.container.sendEOF()
def md5finished(self, retval):
if retval==0:
self.session.openWithCallback(self.queryCB, MessageBox, _("This .NFI file has a valid md5 signature. Continue programming this image to flash memory?"), MessageBox.TYPE_YESNO)
else:
self.session.openWithCallback(self.queryCB, MessageBox, _("The md5sum validation failed, the file may be corrupted! Are you sure that you want to burn this image to flash memory? You are doing this at your own risk!"), MessageBox.TYPE_YESNO)
def queryCB(self, answer):
if answer == True:
self.createJob()
else:
self["statusbar"].text = _("Please select .NFI flash image file from medium")
def createJob(self):
self.job = Job("Image flashing job")
param = [self.nfifile]
writeNAND(self.job,param,self.box)
#writeNAND2(self.job,param)
#writeNAND3(self.job,param)
self.job.state_changed.append(self.update_job)
self.job.end = 540
self.cwd = self["filelist"].getCurrentDirectory()
self["job_progressbar"].range = self.job.end
self.startJob()
def startJob(self):
self["key_blue"].text = ""
self["key_yellow"].text = ""
self["key_green"].text = ""
#self["progress0"].show()
#self["progress1"].show()
self.job.start(self.jobcb)
def update_job(self):
j = self.job
#print "[job state_changed]"
if j.status == j.IN_PROGRESS:
self.session.summary["job_progressbar"].value = j.progress
self.session.summary["job_progressbar"].range = j.end
self.session.summary["job_progresslabel"].text = "%.2f%%" % (100*j.progress/float(j.end))
self["job_progressbar"].range = j.end
self["job_progressbar"].value = j.progress
#print "[update_job] j.progress=%f, j.getProgress()=%f, j.end=%d, text=%f" % (j.progress, j.getProgress(), j.end, (100*j.progress/float(j.end)))
self["job_progresslabel"].text = "%.2f%%" % (100*j.progress/float(j.end))
self.session.summary.setText(j.tasks[j.current_task].name)
self["statusbar"].text = (j.tasks[j.current_task].name)
elif j.status == j.FINISHED:
self["statusbar"].text = _("Writing NFI image file to flash completed")
self.session.summary.setText(_("NFI image flashing completed. Press Yellow to Reboot!"))
self["key_yellow"].text = _("Reboot")
elif j.status == j.FAILED:
self["statusbar"].text = j.tasks[j.current_task].name + " " + _("failed")
self.session.open(MessageBox, (_("Flashing failed") + ":\n" + j.tasks[j.current_task].name + ":\n" + j.tasks[j.current_task].output_line), MessageBox.TYPE_ERROR)
def jobcb(self, jobref, fasel, blubber):
print "[jobcb] %s %s %s" % (jobref, fasel, blubber)
self["key_green"].text = _("Flash")
def reboot(self):
if self.job.status == self.job.FINISHED:
self["statusbar"].text = ("rebooting...")
TryQuitMainloop(self.session,2)
def createSummary(self):
return NFISummary
| gpl-2.0 | 8,285,512,115,970,571,000 | 39.96139 | 244 | 0.685456 | false | 3.062644 | false | false | false |
feilaoda/FlickBoard | project/lib/filter.py | 1 | 7291 | # -*- coding: utf-8 -*-
from datetime import datetime
import urllib2
import re
import urllib, hashlib
import string
from itertools import imap
def none2string(value):
if value is None:
return ''
return value
def video(value):
if value is None:
return None
videos = re.findall('(http://v.youku.com/v_show/id_[a-zA-Z0-9\=]+.html)\s?', value)
if (len(videos) > 0):
for video in videos:
video_id = re.findall('http://v.youku.com/v_show/id_([a-zA-Z0-9\=]+).html', video)
value = value.replace('http://v.youku.com/v_show/id_' + video_id[0] + '.html',
'<div class="mediaVideo"><embed src="http://player.youku.com/player.php/sid/' + video_id[0] + '/v.swf" allowFullScreen="true" quality="high" width="480" height="400" align="middle" allowScriptAccess="always" type="application/x-shockwave-flash"></embed></div>')
return value
else:
return urlink(value)
def download_urlize(value):
if value is None:
return None
links = re.findall('(\[dl\]http://[a-zA-Z0-9\:\/\?=\-\_\.\&]+\[\/dl\])\s?', value)
if (len(links) > 0):
for link in links:
url = re.findall('(http://[a-zA-Z0-9\/\?=\-\_\.\&]+)', link)
if len(url) > 0:
value = value.replace(link, '<a href="%s" target="_blank">Download</a>' % (url[0]))
return value
return None
def mentions(value):
if value is None:
return None
ms = re.findall('(@[\w\_]+\.?)\s?', value)
if (len(ms) > 0):
for m in ms:
m_id = re.findall('@([a-zA-Z0-9\_\x80-\xff]+\.?)', m)
if (len(m_id) > 0):
if (m_id[0].endswith('.') != True and len(m_id[0])<32):
value = value.replace('@' + m_id[0], '<a href="/member/info/' + m_id[0] + '" rel="external">@' + m_id[0] + '</a>')
return value
else:
return value
# gravatar filter
def gravatar(value,arg):
default = "http://v2ex.appspot.com/static/img/avatar_" + str(arg) + ".png"
if type(value).__name__ != 'Member':
return '<img src="' + default + '" border="0" align="absmiddle" />'
if arg == 'large':
number_size = 73
member_avatar_url = value.avatar_large_url
elif arg == 'normal':
number_size = 48
member_avatar_url = value.avatar_normal_url
elif arg == 'mini':
number_size = 24
member_avatar_url = value.avatar_mini_url
if member_avatar_url:
return '<img src="'+ member_avatar_url +'" border="0" alt="' + value.username + '" />'
else:
gravatar_url = "http://www.gravatar.com/avatar/" + hashlib.md5(value.email.lower()).hexdigest() + "?"
gravatar_url += urllib.urlencode({'s' : str(number_size), 'd' : default})
return '<img src="' + gravatar_url + '" border="0" alt="' + value.username + '" align="absmiddle" />'
# avatar filter
def avatar(value, arg):
default = "/static/img/avatar_" + str(arg) + ".png"
if type(value).__name__ not in ['Member', 'Node']:
return '<img src="' + default + '" border="0" />'
if arg == 'large':
number_size = 73
member_avatar_url = value.avatar_large_url
elif arg == 'normal':
number_size = 48
member_avatar_url = value.avatar_normal_url
elif arg == 'mini':
number_size = 24
member_avatar_url = value.avatar_mini_url
if value.avatar_mini_url:
return '<img src="'+ member_avatar_url +'" border="0" />'
else:
return '<img src="' + default + '" border="0" />'
# github gist script support
def gist(value):
return re.sub(r'(http://gist.github.com/[\d]+)', r'<script src="\1.js"></script>', value)
_base_js_escapes = (
('\\', r'\u005C'),
('\'', r'\u0027'),
('"', r'\u0022'),
('>', r'\u003E'),
('<', r'\u003C'),
('&', r'\u0026'),
('=', r'\u003D'),
('-', r'\u002D'),
(';', r'\u003B'),
(u'\u2028', r'\u2028'),
(u'\u2029', r'\u2029')
)
# Escape every ASCII character with a value less than 32.
_js_escapes = (_base_js_escapes +
tuple([('%c' % z, '\\u%04X' % z) for z in range(32)]))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
for bad, good in _js_escapes:
value = value.replace(bad, good)
return value
_word_split_re = re.compile(r'(\s+)')
_punctuation_re = re.compile(
'^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
'|'.join(imap(re.escape, ('(', '<', '<'))),
'|'.join(imap(re.escape, ('.', ',', ')', '>', '\n', '>')))
)
)
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_digits = '0123456789'
# special singleton representing missing values for the runtime
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()
# internal code
internal_code = set()
def urlink(text, trim_url_limit=None, nofollow=False, external=True):
    """Converts any URLs in text into clickable links. Works on http://,
    https:// and www. links. Links can have trailing punctuation (periods,
    commas, close-parens) and leading punctuation (opening parens) and
    it'll still do the right thing.
    If trim_url_limit is not None, the URLs in link text will be limited
    to trim_url_limit characters.
    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.
    If external is True, the URLs in link text will get a target="_blank"
    attribute.
    """
    if text is None:
        return None
trim_url = lambda x, limit=trim_url_limit: limit is not None \
and (x[:limit] + (len(x) >=limit and '...'
or '')) or x
words = _word_split_re.split(unicode(text))
nofollow_attr = nofollow and ' rel="nofollow" ' or ''
external_attr = external and ' target="_blank" ' or ''
for i, word in enumerate(words):
match = _punctuation_re.match(word)
if match:
lead, middle, trail = match.groups()
if middle.startswith('www.') or (
'@' not in middle and
not middle.startswith('http://') and
len(middle) > 0 and
middle[0] in _letters + _digits and (
middle.endswith('.org') or
middle.endswith('.net') or
middle.endswith('.com')
)):
middle = '<a href="http://%s"%s%s>%s</a>' % (middle,
nofollow_attr, external_attr, trim_url(middle))
if middle.startswith('http://') or \
middle.startswith('https://'):
middle = '<a href="%s"%s%s>%s</a>' % (middle,
nofollow_attr, external_attr, trim_url(middle))
if '@' in middle and not middle.startswith('www.') and \
not ':' in middle and _simple_email_re.match(middle):
middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
if lead + middle + trail != word:
words[i] = lead + middle + trail
return u''.join(words)
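# Illustrative example (not part of the original module): with the defaults
# (nofollow=False, external=True) a bare www. link is wrapped in an anchor:
#
#     urlink(u"docs at www.example.com here")
#     # -> u'docs at <a href="http://www.example.com" target="_blank" >www.example.com</a> here'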
| mit | -399,325,461,132,370,750 | 35.09901 | 295 | 0.535592 | false | 3.40224 | false | false | false |
lidaobing/itcc | itcc/molecule/utils.py | 1 | 5019 | # $Id$
import sys
import os.path
import math
from itcc.molecule import read, write
from itcc.molecule.tools import neighbours, is_pyramid
from itcc.molecule import relalist
try:
sorted
except:
from itcc.core.tools import sorted_ as sorted
def mirrormol():
if len(sys.argv) != 2:
sys.stderr.write('Usage: %s <xyzfname>\n' % os.path.basename(sys.argv[0]))
sys.exit(1)
mol = read.readxyz(file(sys.argv[1]))
mol.coords = -mol.coords
write.writexyz(mol)
def printbonds():
if len(sys.argv) != 2:
sys.stderr.write('Usage: %s <xyzfname>\n' % os.path.basename(sys.argv[0]))
sys.exit(1)
mol = read.readxyz(file(sys.argv[1]))
a = relalist.Relalist(mol)
print a
def detailcmp():
from optparse import OptionParser
usage = '%prog [options] <xyzfname1> <xyzfname2>'
parser = OptionParser(usage=usage)
parser.add_option('-a', "--atoms", dest="atoms",
help="only compare selected atoms, 1-based",
metavar="STRING")
parser.add_option('-A', "--atomsfile", dest="atomsfile",
help="read the selected atoms from file",
metavar="FILE")
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("incorrect number of arguments")
if options.atoms and options.atomsfile:
parser.error("options conflict")
if options.atomsfile:
options.atoms = file(options.atomsfile).read()
atoms = None
if options.atoms:
atoms = [int(x)-1 for x in options.atoms.split()]
mol1 = read.readxyz(file(args[0]))
mol2 = read.readxyz(file(args[1]))
r1 = relalist.Relalist(mol1)
bonds_data = []
for i,j in r1.bonds:
if atoms is not None and (i not in atoms or j not in atoms): continue
l1 = mol1.calclen(i,j)
l2 = mol2.calclen(i,j)
bonds_data.append((abs(l1-l2), (i+1,j+1), l1, l2))
angles_data = []
for i,j,k in r1.angles:
if atoms is not None \
and (i not in atoms \
or j not in atoms \
or k not in atoms):
continue
a1 = math.degrees(mol1.calcang(i,j,k))
a2 = math.degrees(mol2.calcang(i,j,k))
angles_data.append((abs(a1-a2), (i+1,j+1,k+1), a1, a2))
torsions_data = []
for i,j,k,l in r1.torsions:
if atoms is not None \
and (i not in atoms \
or j not in atoms \
or k not in atoms
or l not in atoms):
continue
        t1 = math.degrees(mol1.calctor(i,j,k,l))
        t2 = math.degrees(mol2.calctor(i,j,k,l))
        # periodic (wrap-around) difference: two torsion angles are never
        # more than 180 degrees apart
        torsions_data.append((180-abs(abs(t1-t2)-180), (i+1,j+1,k+1,l+1), t1, t2))
print 'bonds:'
for x in sorted(bonds_data):
print x
print
print 'angles:'
for x in sorted(angles_data):
print x
print
print 'torsions:'
for x in sorted(torsions_data):
print x[1][0], x[1][1], x[1][2], x[1][3], x[2], x[3], x[0]
def rg():
if len(sys.argv) < 2:
sys.stderr.write('Usage: %s XYZFNAME...\n' % os.path.basename(sys.argv[0]))
sys.exit(1)
from itcc.molecule import radius_of_gyration
for fname in sys.argv[1:]:
ifile = sys.stdin
if fname != '-':
ifile = file(fname)
mol = read.readxyz(ifile)
print ifile.name, radius_of_gyration(mol)
def sub_pyramid_check(fname, atoms):
mol = read.readxyz(file(fname))
if atoms is None:
atoms = range(len(mol))
res = []
for atom in atoms:
neis = neighbours(mol, atom)
if len(neis) != 4:
continue
if is_pyramid(mol.coords[atom],
mol.coords[neis[0]],
mol.coords[neis[1]],
mol.coords[neis[2]],
mol.coords[neis[3]]):
res.append(atom)
return res
def pyramid_check():
from optparse import OptionParser
usage = '%prog [options] <xyzfname>...'
parser = OptionParser(usage=usage)
parser.add_option('-a', "--atoms", dest="atoms",
help="only compare selected atoms, 1-based",
metavar="STRING")
parser.add_option('-A', "--atomsfile", dest="atomsfile",
help="read the selected atoms from file",
metavar="FILE")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.error("incorrect number of arguments")
if options.atoms and options.atomsfile:
parser.error("options conflict")
if options.atomsfile:
options.atoms = file(options.atomsfile).read()
atoms = None
if options.atoms:
atoms = [int(x)-1 for x in options.atoms.split()]
for fname in args:
res = sub_pyramid_check(fname, atoms)
if res:
print fname, ' '.join(str(x+1) for x in res)
| gpl-3.0 | 6,977,058,385,700,711,000 | 28.350877 | 83 | 0.546523 | false | 3.423602 | false | false | false |
wmfs/chimp | src/calc/TimestampColumn.py | 1 | 3205 | '''
Created on 4 Mar 2012
@author: Tim.Needham
'''
import cs
import chimpsql
import chimpspec
class TimestampColumn:
    '''
    A computed timestamp column: the output column is refreshed to now()
    whenever one of the triggering columns changes, implemented as a
    generated PostgreSQL trigger/function pair (see
    getComputedTimestampFunction and getComputedTimestampTrigger).
    '''
def __init__(self, timestampColumnTag):
self.type = "timestampColumn"
self.taskOrder = 2
self.outputColumn = cs.grabAttribute(timestampColumnTag,"outputColumn")
self.triggeringColumns=[]
triggeringColumnsTag = timestampColumnTag.getElementsByTagName("triggeringColumns")
if len(triggeringColumnsTag)>0:
for column in triggeringColumnsTag[0].getElementsByTagName("column"):
columnName = cs.grabAttribute(column, "name")
self.triggeringColumns.append(columnName)
def debug(self, appLogger):
appLogger.debug(" timestampColumn")
appLogger.debug(" outputColumn : {0}".format(self.outputColumn))
def getExtraSystemFields(self):
extraSystemFields = []
field = chimpspec.SpecificationRecordField(None, None, column=self.outputColumn, type="datetime", mandatory=True, default="now()")
extraSystemFields.append(field)
return(extraSystemFields)
def requiresFile(self):
return(False)
def getTriggeringColumns(self):
return(self.triggeringColumns)
def getComputedTimestampFunction(self, sourceName, schemaName):
self.name = "computed_{0}_{1}_timestamp_update".format(sourceName, self.outputColumn)
dml = ("CREATE OR REPLACE FUNCTION {0}.{1}()\n"
" RETURNS trigger AS\n"
"$BODY$\n"
" BEGIN\n"
" new.{2} = now();\n"
" RETURN new;\n"
" END;\n"
"$BODY$\n"
"LANGUAGE plpgsql;\n\n".format(schemaName, self.name, self.outputColumn))
return chimpsql.Function(self.name, schemaName, [], dml)
def getComputedTimestampTrigger(self, sourceName, schemaName, tableName, triggerFunction):
triggerName = "h_computed_{0}_{1}_timestamp_update".format(sourceName, self.outputColumn)
when = " OR ".join(map(lambda column: "old.{0} IS DISTINCT FROM new.{0}".format(column), self.triggeringColumns))
return chimpsql.Trigger(triggerName, tableName, triggerFunction.name, triggerFunction.schema,
("CREATE TRIGGER {0}\n"
"BEFORE UPDATE OF {1}\n"
"ON {2}.{3}\n"
"FOR EACH ROW\n"
"WHEN ({4})\n"
"EXECUTE PROCEDURE {5}.{6}();\n\n").format(triggerName, ", ".join(self.triggeringColumns), schemaName, tableName, when, schemaName, triggerFunction.name))
def getComputedTimestampIndex(self, sourceName, schemaName, storageTableName):
indexName = "{0}_{1}_{2}_timestamp".format(schemaName, sourceName, self.outputColumn)
return chimpsql.Index(indexName, storageTableName, schemaName,
"CREATE INDEX {0} ON {1}.{2} ({3});\n".format(indexName, schemaName, storageTableName, self.outputColumn))
| gpl-3.0 | 434,596,775,635,696,060 | 41.746667 | 178 | 0.5922 | false | 4.140827 | false | false | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/route_filter.py | 1 | 2621 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class RouteFilter(Resource):
"""Route Filter Resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param rules: Collection of RouteFilterRules contained within a route
filter.
:type rules: list[~azure.mgmt.network.v2017_08_01.models.RouteFilterRule]
:param peerings: A collection of references to express route circuit
peerings.
:type peerings:
list[~azure.mgmt.network.v2017_08_01.models.ExpressRouteCircuitPeering]
:ivar provisioning_state: The provisioning state of the resource. Possible
values are: 'Updating', 'Deleting', 'Succeeded' and 'Failed'.
:vartype provisioning_state: str
:ivar etag: Gets a unique read-only string that changes whenever the
resource is updated.
:vartype etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'rules': {'key': 'properties.rules', 'type': '[RouteFilterRule]'},
'peerings': {'key': 'properties.peerings', 'type': '[ExpressRouteCircuitPeering]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(RouteFilter, self).__init__(**kwargs)
self.rules = kwargs.get('rules', None)
self.peerings = kwargs.get('peerings', None)
self.provisioning_state = None
self.etag = None
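    # Illustrative construction (not part of the generated file); all keyword
    # arguments are optional here, and read-only fields such as
    # provisioning_state are populated by the server:
    #
    #     route_filter = RouteFilter(
    #         location='westus',
    #         tags={'env': 'test'},
    #         rules=[],
    #     )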
| mit | -4,254,058,854,959,345,700 | 36.442857 | 91 | 0.592522 | false | 4.095313 | false | false | false |
lukechurch/coda | csv_conversion/internal2csv.py | 1 | 5252 | '''
Copyright (c) 2017 Coda authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import argparse
import unicodecsv
import os
nonDecoOutput = ["id", "owner", "data", "timestamp"]
parser = argparse.ArgumentParser()
parser.add_argument("file", help="filepath of the dataset file to convert")
parser.add_argument("--senderIdCol", help="name of column header containing sender ID")
parser.add_argument("--dataCol", help="name of column header containing message text")
parser.add_argument("--messageIdCol", help="name of column header containing unique message ID")
parser.add_argument("--timestamp", help="name of column header containing message timestamps")
args = parser.parse_args()
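# Illustrative invocation (not part of the original script); the column flags
# are only needed when the defaults (sender/message/msgId/timestamp) don't
# match the desired output headers:
#
#   python internal2csv.py dataset.csv --senderIdCol sender --dataCol message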
with open(args.file, "rb") as raw_file:
hs = [h.strip() for h in raw_file.next().split(';')]
header = dict([(h.strip(), True) for h in hs])
missingHeaders = []
for h in nonDecoOutput:
if h not in header:
missingHeaders.append(h)
if len(missingHeaders) > 0:
print "ERROR: Wrong format, missing columns: " + ", ".join(missingHeaders)
else:
reader = unicodecsv.DictReader(raw_file, delimiter=";", fieldnames=hs)
headerStringsForNewFile = {}
schemeIds = {}
schemes = []
dir_path = os.path.dirname(os.path.realpath(args.file))
if args.senderIdCol:
headerStringsForNewFile["owner"] = args.senderIdCol
else:
headerStringsForNewFile["owner"] = "sender"
if args.dataCol:
headerStringsForNewFile["data"] = args.dataCol
else:
headerStringsForNewFile["data"] = "message"
if args.messageIdCol:
headerStringsForNewFile["id"] = args.messageIdCol
else:
headerStringsForNewFile["id"] = "msgId"
if args.timestamp:
headerStringsForNewFile["timestamp"] = args.timestamp
else:
headerStringsForNewFile["timestamp"] = "timestamp"
rowCount = 0
events = {}
eventOrder = []
try:
for row in reader:
if len(row["data"]) == 0 or len(row["id"]) == 0 or len(row["owner"]) == 0:
continue
if row["schemeId"] not in schemeIds:
schemes.append(row["schemeName"])
schemeIds[row["schemeId"]] = 1
if row["id"] not in events:
eventObj = {headerStringsForNewFile["id"]: row["id"],
headerStringsForNewFile["owner"]: row["owner"],
headerStringsForNewFile["timestamp"]: row["timestamp"],
headerStringsForNewFile["data"]: row["data"],
row["schemeName"]: row["deco_codeValue"]}
eventOrder.append(row["id"])
events[row["id"]] = eventObj
else:
events[row["id"]][row["schemeName"]] = row["deco_codeValue"]
rowCount += 1
except UnicodeDecodeError as dec:
print "Can't decode line #%d as unicode!" % rowCount
if len(events) == 0:
print "ERROR: No line read from file has been correctly filled in."
else:
fileName = os.path.splitext(args.file)[0]
with open(os.path.join(dir_path, fileName + "-converted.csv"), "wb") as out:
header = nonDecoOutput + schemes
dialect = unicodecsv.excel
dialect.delimiter = ";"
writer = unicodecsv.DictWriter(out, fieldnames=[headerStringsForNewFile[h] for h in nonDecoOutput] + schemes, dialect=dialect)
writer.writeheader()
for eventId in eventOrder:
writer.writerow(events[eventId])
with open(os.path.join(dir_path, fileName + "-converted.csv"), "r") as myFile:
lines = myFile.readlines()
with open(os.path.join(dir_path, fileName + "-converted.csv"), "w") as myFile:
lines[-1] = lines[-1].strip()
myFile.writelines([item for item in lines if len(item) > 0])
print "SUCCESS: Converted the CSV, stored at \"%s\"" % os.path.join(dir_path, fileName + "-converted.csv") | mit | 1,804,412,583,267,640,300 | 40.039063 | 142 | 0.614813 | false | 4.297872 | false | false | false |
hammerlab/isovar | isovar/variant_orf_helpers.py | 1 | 5017 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .logging import get_logger
from .variant_orf import VariantORF
logger = get_logger(__name__)
def match_variant_sequence_to_reference_context(
variant_sequence,
reference_context,
min_transcript_prefix_length,
max_transcript_mismatches,
count_mismatches_after_variant=False,
max_trimming_attempts=2):
"""
Iteratively trim low-coverage subsequences of a variant sequence
until it either matches the given reference context or there
are too few nucleotides left in the variant sequence.
Parameters
----------
variant_sequence : VariantSequence
Assembled sequence from RNA reads, will need to be to be reverse
complemented if matching against a reference transcript on the
negative strand.
reference_context : ReferenceContext
Sequence of reference transcript before the variant and associated
metadata.
min_transcript_prefix_length : int
Minimum number of nucleotides we try to match against a reference
transcript.
max_transcript_mismatches : int
Maximum number of nucleotide differences between reference transcript
sequence and the variant sequence.
count_mismatches_after_variant : bool
Set to true if the number of mismatches after the variant locus should
count toward the total max_transcript_mismatches, which by default
only counts mismatches before the variant locus.
max_trimming_attempts : int
How many times do we try trimming the VariantSequence to higher
levels of coverage before giving up?
Returns VariantORF or None
"""
# if we can't get the variant sequence to match this reference
# context then keep trimming it by coverage until either
for i in range(max_trimming_attempts + 1):
# check the reverse-complemented prefix if the reference context is
# on the negative strand since variant sequence is aligned to
# genomic DNA (positive strand)
variant_sequence_too_short = (
(reference_context.strand == "+" and
len(variant_sequence.prefix) < min_transcript_prefix_length) or
(reference_context.strand == "-" and
len(variant_sequence.suffix) < min_transcript_prefix_length)
)
if variant_sequence_too_short:
logger.info(
"Prefix of variant sequence %s shorter than min allowed %d (iter=%d)",
variant_sequence,
min_transcript_prefix_length,
i + 1)
return None
variant_orf = \
VariantORF.from_variant_sequence_and_reference_context(
variant_sequence=variant_sequence,
reference_context=reference_context)
if variant_orf is None:
return None
n_mismatch_before_variant = (
variant_orf.num_mismatches_before_variant)
n_mismatch_after_variant = (
variant_orf.num_mismatches_after_variant)
logger.info("Iter #%d/%d: %s (len=%d)" % (
i + 1,
max_trimming_attempts + 1,
variant_orf,
len(variant_orf.cdna_sequence)))
total_mismatches = n_mismatch_before_variant
if count_mismatches_after_variant:
total_mismatches += n_mismatch_after_variant
if total_mismatches <= max_transcript_mismatches:
# if we got a variant sequence + reading frame with sufficiently
# few mismatches then call it a day
return variant_orf
logger.info(
("Too many mismatches (%d) between variant sequence %s and "
"reference context %s (attempt=%d/%d)"),
n_mismatch_before_variant,
variant_sequence,
reference_context,
i + 1,
max_trimming_attempts + 1)
# if portions of the sequence are supported by only 1 read
# then try trimming to 2 to see if the better supported
# subsequence can be better matched against the reference
current_min_coverage = variant_sequence.min_coverage()
logger.info(
"Trimming to subsequence covered by at least %d reads",
current_min_coverage + 1)
variant_sequence = variant_sequence.trim_by_coverage(
current_min_coverage + 1)
return None
| apache-2.0 | -7,773,328,974,950,547,000 | 38.503937 | 86 | 0.651983 | false | 4.447695 | false | false | false |
vfine/webplatform | pmModules/users.py | 1 | 4422 | # users.py
# Display user info
# $Id: users.py 13454 2012-11-08 17:54:19Z fine $
#
import re, os
from datetime import datetime, timedelta
import pmConfig.pmConfig as config
import pmUtils.pmUtils as utils
from pmCore.pmModule import pmModule
from pmTaskBuffer.pmTaskBuffer import pmtaskbuffer as pmt
from operator import itemgetter, attrgetter
class users(pmModule):
#______________________________________________________________________________________
def __init__(self,name=None,parent=None,obj=None):
pmModule.__init__(self,name,parent,obj)
self.publishUI(self.doJson)
#______________________________________________________________________________________
def makeTop(self,users,top):
return sorted(users, key=itemgetter(top), reverse=True)
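    # For example (illustrative): makeTop([['a', 3], ['b', 7]], 1) returns
    # [['b', 7], ['a', 3]] -- rows sorted descending on column index 1.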
#______________________________________________________________________________________
def doJson(self,hours=None,days=180,tstart=None,tend=None,PRODUSERNAME=None,top='nJobsA', topsize=0):
""" Get the list of the users
<ul>
<li> hours = use the last hours
<li> days = use the last days
<li> topsize - the size of the top list
<br> = 0 - all users are shown
<li> top - select top list using 'top' column
<ul>
<li> 'nJobsA' - the number of the jobs
<li> 'CPUA1' - Personal the CPU used for the last 24 hours
<li> 'CPUA7' - Personal Cpu used for the last 7 days
<li> 'CPUP1' - Group Cpu for the last 24 hours
<li> 'CPUP7' - Group Cpu for the last 7 days
</ul>
</ul>
"""
        if days is None: days = 0
        if hours is None: hours = 0
main = {"buffer":{ "params" : {'hours' : days*24+hours }
, "method" : 'getUsers'
, "type" : False
}
}
columns="name,njobsa,latestjob,cpua1,cpua7,cpup1,cpup7"
if topsize==0 or topsize==None: columns+=",scriptcache"
q = pmt.getUsers(PRODUSERNAME,days*24+hours,columns=columns)
header = q['header']
users = q['rows']
if PRODUSERNAME == None:
if topsize > 0:
title = "Recent %(topsize)d Top Panda Analysis Users" % { 'topsize' : topsize }
else:
title = "Recent Panda Analysis Users"
else:
title = "PanDA jobs for %s" % PRODUSERNAME
main["buffer"]["params"]['user'] = PRODUSERNAME,
iNJobs = utils.name2Index(header,"njobsa")
iLatest = utils.name2Index(header,"latestjob")
jobpertime = {"anajobs" : 0, "n1000" : 0, "n10k" : 0 }
recent = { "d3" :0, "d7" :0 , "d30" : 0, "d90" : 0, "d180" :0 }
for u in users:
nxtp = u[iNJobs]
            if nxtp is None: continue
nxtp = int(nxtp)
if nxtp > 0 : jobpertime["anajobs"] += nxtp
if nxtp > 1000:
jobpertime["n1000"] += 1;
if nxtp > 10000: jobpertime["n10k"] += 1
nxtp = u[iLatest]
            if nxtp is not None:
diffdays = (datetime.utcnow() - nxtp).days;
if diffdays < 4: recent["d3"] += 1
if diffdays < 8: recent["d7"] += 1
if diffdays < 31: recent["d30"] += 1
if diffdays < 91: recent["d90"] += 1
if diffdays < 181: recent["d180"] += 1
if topsize > 0 and top != None:
iTop = utils.name2Index(header,top)
users = self.makeTop(users,iTop)[:topsize]
main["buffer"]["top"] = { 'top' : top, 'size' : topsize }
        # remove the group
main["buffer"]["data"] = {'header' : header,'rows' : users }
main["buffer"]["totaljobs"] = jobpertime
main["buffer"]["recent"] = recent
self.publishTitle(title)
self.publish(main)
self.publish( "%s/%s" % (utils.fileScriptURL(),"taskBuffer/%s.js" % "getUsers"),role="script")
return
    def leftMenu(self):
        """ Return html for inclusion in left menu """
        txt = "<a href='%s/users'>Users</a>" % self.server().script()
        return txt
    def topMenu(self):
        """ Return html for inclusion in top menu """
        txt = "<a href='%s/users'>Users</a>" % self.config().pandamon['url']
        return txt
| lgpl-3.0 | 3,956,176,911,021,721,000 | 39.2 | 104 | 0.504749 | false | 3.566129 | false | false | false |
vulogov/zap_proxy | etc/python/CacheDriver.py | 1 | 1608 | __version__ = 'v0.1.0'
import time
class CacheDriver:
def __init__(self, creator):
self.creator = creator
self.ready = False
self.name = ""
def set_cache_args(self, args):
self.args = args
def set(self, name, key, value):
if not self.ready or self.name != name:
self._open(name)
if not self.ready:
return False
self._set(key, time.time(), value)
def get(self, name, key):
if not self.ready or self.name != name:
self._open(name)
if not self.ready:
raise KeyError, key
return self._get(key)
def age(self, name, key):
if not self.ready or self.name != name:
self._open(name)
if not self.ready:
raise KeyError, key
return self._age(key)
def acquire(self, name, key):
self.set(name, "lock:%s"%key, 1)
def release(self, name, key):
self.set(name, "lock:%s"%key, 0)
    def lock(self, name, key):
        res = self.get(name, "lock:%s" % key)
        if res is None or res == 0:
            return False
        return True
def close(self):
if not self.ready:
return
self._close()
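# Illustrative only (not part of the original driver): a minimal in-memory
# subclass showing the _open/_set/_get/_age/_close hooks that CacheDriver
# relies on above; the hook signatures are inferred from the call sites.
# A real instance would normally be built through a CacheDriverCreator.
class DictCacheDriver(CacheDriver):
    def _open(self, name):
        self.name = name
        self.store = {}
        self.ready = True
    def _set(self, key, stamp, value):
        # CacheDriver.set() passes the write timestamp as the middle argument
        self.store[key] = (stamp, value)
    def _get(self, key):
        return self.store[key][1]
    def _age(self, key):
        return time.time() - self.store[key][0]
    def _close(self):
        self.ready = False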
class CacheDriverCreator:
def __init__(self, name, env, logger, cls, args, argv):
self.cls = cls
self.name = name
self.env = env
self.logger = logger
self.args = args
self.argv = argv
self.init_cache()
def init_cache(self):
pass
def driver(self):
return self.cls(self) | gpl-3.0 | -9,193,560,970,287,625,000 | 26.271186 | 59 | 0.521144 | false | 3.748252 | false | false | false |
blueshed/blueshed-micro | blueshed/micro/web/rpc_handler.py | 1 | 4761 | from pkg_resources import resource_filename # @UnresolvedImport
from tornado import web
from tornado.escape import json_decode
from tornado.web import asynchronous, RequestHandler
import tornado.concurrent
from blueshed.micro.utils.json_utils import dumps
from blueshed.micro.web.context_mixin import ContextMixin
from blueshed.micro.web.cors_mixin import CorsMixin, cors
import functools
import logging
acceptable_form_mime_types = [
"application/x-www-form-urlencoded; charset=UTF-8",
"application/x-www-form-urlencoded"
]
acceptable_json_mime_types = [
"application/json; charset=UTF-8",
"application/json;"
]
class RpcHandler(ContextMixin, CorsMixin, RequestHandler):
'''
Calls services in application.settings['services']
get:
returns the meta data about a service
or all services
suffix .js returns a client control
javascript object for websocket support
suffix <service name>.html returns
an html form to run the service
post:
form-encoded or json-encoded input
result is always json
'''
def initialize(self,
html_template=None,
js_template=None,
http_origins=None,
ws_url=None):
RequestHandler.initialize(self)
self.set_cors_methods("OPTIONS,GET,POST")
if http_origins:
self.set_cors_whitelist(http_origins)
self._html_template = html_template
self._js_template = js_template
self._ws_url = ws_url if ws_url else ''
def get_template_path(self):
''' overrides the template path to use this module '''
if self._html_template is None and self._js_template is None:
return resource_filename('blueshed.micro.web', "templates")
return RequestHandler.get_template_path(self)
def write_error(self, *args, **kwargs):
''' Must override base write error to stop uncaught HTTP errors from clearing CORS headers '''
self.write_cors_headers()
RequestHandler.write_error(self, *args, **kwargs)
def options(self, *args, **kwargs):
self.cors_options()
@cors
def get(self, path=None):
services = self.get_service(path)
if services is None:
services = self.settings['services']
if path is not None and path.endswith(".js"):
self.set_header('content-type', 'text/javascript')
self.render(self._js_template or "api-tmpl.js",
services=services.values(),
ws_url=self._ws_url)
return
elif path is not None and path.endswith(".html"):
self.render(self._html_template or "service.html",
service=services,
error=None,
result=None)
return
self.set_header('content-type', 'application/json; charset=UTF-8')
self.write(dumps(services, indent=4))
@asynchronous
@cors
def post(self, path):
content_type = self.request.headers['content-type']
if content_type in acceptable_json_mime_types:
kwargs = json_decode(self.request.body)
elif content_type in acceptable_form_mime_types:
kwargs = dict([(k, self.get_argument(k))
for k in self.request.body_arguments.keys()
if k[0] != "_"])
elif content_type and content_type.startswith("multipart/form-data"):
kwargs = dict([(k, self.get_argument(k))
for k in self.request.body_arguments.keys()
if k[0] != "_"])
else:
raise web.HTTPError(415, 'content type not supported {}'.format(
self.request.headers['content-type']))
service = self.get_service(path)
service.parse_http_kwargs(kwargs)
context = self.settings['micro_context'](
-1, -1, service.name, {"current_user": self.current_user},
self)
try:
logging.info("%s(%r)", service.name, kwargs)
result = service.perform(context, **kwargs)
if tornado.concurrent.is_future(result):
result.add_done_callback(
functools.partial(self.handle_future,
service,
context,
True))
else:
self.handle_result(service, context, result)
self.finish()
except Exception as ex:
self.write_err(context, ex)
self.finish()
| mit | -7,662,630,218,613,734,000 | 37.088 | 102 | 0.572989 | false | 4.392066 | false | false | false |
gh4w/some | web/diego/pronostix/views.py | 1 | 2629 | from datetime import datetime
from django.utils import timezone
from django.views import generic
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, HttpResponseRedirect
from django.urls import reverse
import pronostix.models as models
import pronostix.forms as forms
def index(request):
return render(request, 'pronostix/index.html')
class ProchainesRencontresView(generic.ListView):
template_name = 'pronostix/prochaines_rencontres.html'
context_object_name = 'rencontres'
def get_queryset(self):
maintenant = datetime.now()
return models.Rencontre.objects.filter(date__gt=maintenant).order_by('date')
class ResultatsView(generic.ListView):
template_name = 'pronostix/resultats.html'
context_object_name = 'resultats'
def get_queryset(self):
return models.Resultat.objects.all().order_by('rencontre__date')
@login_required
def lister_pronostics(request):
liste = []
now = timezone.now()
for rencontre in models.Rencontre.objects.filter(date__gt = now).order_by('date'):
prono = rencontre.pronostic_set.filter(utilisateur = request.user).first()
liste.append((rencontre, prono))
return render(request, 'pronostix/pronostics.html', { 'models': liste })
@login_required
def modifier_pronostic(request, rencontre_id):
prono = get_object_or_404(models.Pronostic, utilisateur = request.user, rencontre_id = rencontre_id)
if request.method == 'POST':
form = forms.PronosticForm(request.POST, instance = prono)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('pronostix:pronostics'))
else:
form = forms.PronosticForm(instance = prono)
return render(request, 'pronostix/modifier_pronostic.html', {'prono': form})
@login_required
def ajouter_pronostic(request, rencontre_id):
rencontre = get_object_or_404(models.Rencontre, pk = rencontre_id)
if request.method == 'POST':
form = forms.PronosticForm(request.POST)
if form.is_valid():
prono = form.save(commit = False)
prono.utilisateur = request.user
prono.rencontre = rencontre
prono.save()
return HttpResponseRedirect(reverse('pronostix:pronostics'))
else:
form = forms.PronosticForm()
return render(request, 'pronostix/ajouter_pronostic.html', { 'rencontre': rencontre, 'prono': form } )
class HelloView(generic.ListView):
template_name = 'pronostix/hello.html'
def get_queryset(self):
return None
| mit | 533,804,269,170,800,700 | 36.028169 | 106 | 0.692278 | false | 3.340534 | false | false | false |
schnitzlein/weatherstation | obsolete/Display.py | 1 | 1636 | import os, syslog
import pygame
import logging
class PyLcd :
    screen = None
colourBlack = (0, 0, 0)
def __init__(self):
"Ininitializes a new pygame screen using the framebuffer"
# Based on "Python GUI in Linux frame buffer"
# http://www.karoltomala.com/blog/?p=679
disp_no = os.getenv("DISPLAY")
if disp_no:
print("I'm running under X Server. With display = {0}\nexit now.".format(disp_no))
exit(0)
os.putenv('SDL_FBDEV', '/dev/fb1')
# Select frame buffer driver
# Make sure that SDL_VIDEODRIVER is set
driver = 'fbcon'
if not os.getenv('SDL_VIDEODRIVER'):
os.putenv('SDL_VIDEODRIVER', driver)
try:
pygame.display.init()
        except pygame.error:
            # more specific handler first; the generic Exception clause
            # below would otherwise swallow pygame errors
            print('Driver: {0} failed.'.format(driver))
            exit(0)
        except Exception as e:
            print("exception: {}".format(e))
size = (pygame.display.Info().current_w, pygame.display.Info().current_h)
self.screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
if self.screen:
logging.debug("screen Initialized h: {} w: {}".format(pygame.display.Info().current_h, pygame.display.Info().current_w))
# Clear the screen to start
self.screen.fill((0, 0, 0))
# Initialise font support
pygame.font.init()
# Render the screen
pygame.display.update()
def __del__(self):
logging.info("pygame screen destructor called -> QUIT now.")
pygame.display.quit()
#print("Destructor pygame display shuts down.")
| mit | 4,967,346,822,978,412,000 | 34.565217 | 132 | 0.586797 | false | 3.867612 | false | false | false |
dgilland/alchy | alchy/_compat.py | 1 | 3201 | # -*- coding: utf-8 -*-
# flake8: noqa
# pylint: skip-file
"""Python 2/3 compatibility
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
Borrowed from
https://github.com/mitsuhiko/flask/blob/master/flask/_compat.py
"""
import sys
PY3 = sys.version_info[0] == 3
def _identity(x): return x
if PY3:
text_type = str
string_types = (str,)
integer_types = (int,)
def iterkeys(d): return iter(d.keys())
def itervalues(d): return iter(d.values())
def iteritems(d): return iter(d.items())
from io import StringIO
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
implements_to_string = _identity
else:
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
def iterkeys(d): return d.iterkeys()
def itervalues(d): return d.itervalues()
def iteritems(d): return d.iteritems()
from cStringIO import StringIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
# dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
# for one level to something closer to type (that's why __call__ and
# __init__ comes back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
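# Example (illustrative): `Base` ends up being an instance of `Meta` on both
# Python 2 and 3, with no dummy class left in the MRO:
#
#   class Meta(type):
#       pass
#
#   class Base(with_metaclass(Meta, object)):
#       pass
#
#   assert type(Base) is Meta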
# Certain versions of pypy have a bug where clearing the exception stack
# breaks the __exit__ function in a very peculiar way. This is currently
# true for pypy 2.2.1 for instance. The second level of exception blocks
# is necessary because pypy seems to forget to check if an exception
# happend until the next bytecode instruction?
BROKEN_PYPY_CTXMGR_EXIT = False
if hasattr(sys, 'pypy_version_info'):
class _Mgr(object):
def __enter__(self):
return self
def __exit__(self, *args):
sys.exc_clear()
try:
try:
with _Mgr():
raise AssertionError()
except:
raise
except TypeError:
BROKEN_PYPY_CTXMGR_EXIT = True
except AssertionError:
pass
# Define classmethod_func(f) to retrieve the unbound function of classmethod f
if sys.version_info[:2] >= (2, 7):
def classmethod_func(f): return f.__func__
else:
def classmethod_func(f): return f.__get__(1).im_func
| mit | -7,518,559,914,268,568,000 | 27.327434 | 78 | 0.630116 | false | 3.824373 | false | false | false |
stephantul/somber | somber/plsom.py | 1 | 5148 | """The PLSOM."""
import logging
from typing import Callable, Dict, Optional, Tuple
import numpy as np
from tqdm import tqdm
from somber.som import BaseSom
from somber.components.initializers import range_initialization
from somber.components.utilities import Scaler
logger = logging.getLogger(__name__)
class PLSom(BaseSom):
# Static property names
param_names = {"map_dimensions", "weights", "data_dimensionality", "params"}
def __init__(
self,
map_dimensions: Tuple[int],
data_dimensionality: Optional[int] = None,
beta: Optional[float] = None,
initializer: Callable = range_initialization,
scaler: Optional[Scaler] = None,
) -> None:
"""
An implementation of the PLSom.
The ParameterLess Som is a SOM which does not rely on time-induced
plasticity adaptation. Instead, the plasticity of the SOM is adapted
in an online fashion by continuously monitoring the error of each presented
item.
In general, the PLSom is less prone to catastrophic interference, or
"forgetting" than the original SOM. Simultaneously, it is also more suited
to re-adapting to changes in distribution. This is because the SOM loses
its plasticity according to an exponentially decreasing learning rate and
neighborhood size.
:param map_dimensions: A tuple describing the map size. For example, (10, 10)
will create a 10 * 10 map with 100 neurons, while (10, 10, 10) creates a
10 * 10 * 10 map with 1000 neurons.
:param data_dimensionality: The dimensionality of the input data.
:param initializer: A function which takes in the input data and weight matrix
and returns an initialized weight matrix. The initializers are defined in
somber.components.initializers. Can be set to None.
:param scaler: An initialized instance of Scaler() which is used to scale the
data to have mean 0 and stdev 1.
"""
super().__init__(
map_dimensions,
data_dimensionality=data_dimensionality,
argfunc="argmin",
valfunc="min",
params={"r": {"value": 0, "factor": 1, "orig": 0}},
initializer=initializer,
scaler=scaler,
)
self.beta = beta if beta else 2
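    # Illustrative usage sketch (the exact `fit` signature comes from
    # somber's BaseSom and is an assumption here):
    #
    #   import numpy as np
    #   som = PLSom((10, 10), data_dimensionality=3)
    #   som.fit(np.random.rand(1000, 3))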
def _epoch(
self,
X: np.ndarray,
batch_size: int,
updates_epoch: int,
constants: Dict[str, float],
progressbar: tqdm,
) -> None:
"""
Run a single epoch.
This function shuffles the data internally,
as this improves performance.
:param X: The training data.
:param batch_size: The batch size
:param updates_epoch: The number of updates to perform per epoch
:param constants: A dictionary containing the constants with which to update the
parameters in self.parameters.
:param progressbar: The progressbar instance to show and update during training
"""
# Create batches
X_ = self._create_batches(X, batch_size)
X_len = np.prod(X.shape[:-1])
# Initialize the previous activation
prev = self._init_prev(X_)
prev = self.distance_function(X_[0], self.weights)[0]
influences = self._update_params(prev)
# Iterate over the training data
for idx, x in enumerate(X_):
# Our batches are padded, so we need to
# make sure we know when we hit the padding
# so we don't inadvertently learn zeroes.
diff = X_len - (idx * batch_size)
if diff and diff < batch_size:
x = x[:diff]
# Prev_activation may be None
if prev is not None:
prev = prev[:diff]
# if idx > 0 and idx % update_step == 0:
influences = self._update_params(prev)
prev = self._propagate(x, influences, prev_activation=prev)
if progressbar is not None:
progressbar.update(batch_size)
def _update_params(self, constants: np.ndarray) -> np.ndarray:
"""Update the params."""
constants = np.max(np.min(constants, 1))
self.params["r"]["value"] = max([self.params["r"]["value"], constants])
epsilon = constants / self.params["r"]["value"]
influence = self._calculate_influence(epsilon)
# Account for learning rate
return influence * epsilon
def _calculate_influence(self, epsilon: float) -> np.ndarray:
"""
Pre-calculate the influence for a given value of epsilon.
The neighborhood has size num_neurons * num_neurons, so for a
30 * 30 map, the neighborhood will be size (900, 900).
:param epsilon: The neighborhood value.
        :return: The influence from each neuron to each other neuron.
"""
n = (self.beta - 1) * np.log(1 + epsilon * (np.e - 1)) + 1
grid = np.exp((-self.distance_grid) / n ** 2)
return grid.reshape(self.num_neurons, self.num_neurons)
| mit | 6,037,134,522,918,761,000 | 37.41791 | 88 | 0.612277 | false | 4.148268 | false | false | false |
skggm/skggm | examples/trace_plot_example.py | 1 | 3138 | """
Visualize Regularization Path
=============================
Plot the edge level coefficients (inverse covariance entries)
as a function of the regularization parameter.
"""
import sys
import numpy as np
from sklearn.datasets import make_sparse_spd_matrix
sys.path.append("..")
from inverse_covariance import QuicGraphicalLasso
from inverse_covariance.plot_util import trace_plot
from inverse_covariance.profiling import LatticeGraph
def make_data(n_samples, n_features):
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(
n_features, alpha=.98, smallest_coef=.4, largest_coef=.7, random_state=prng
)
cov = np.linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
return X, cov, prec
def make_data_banded(n_samples, n_features):
alpha = 0.1
cov, prec, adj = LatticeGraph(
n_blocks=2, random_sign=True, chain_blocks=True, seed=1
).create(n_features, alpha)
prng = np.random.RandomState(2)
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
return X, cov, prec
def show_quic_coefficient_trace(X):
path = np.logspace(np.log10(0.01), np.log10(1.0), num=50, endpoint=True)[::-1]
estimator = QuicGraphicalLasso(lam=1.0, path=path, mode="path")
estimator.fit(X)
trace_plot(estimator.precision_, estimator.path_, n_edges=20)
def show_quic_coefficient_trace_truth(X, truth):
path = np.logspace(np.log10(0.01), np.log10(1.0), num=50, endpoint=True)[::-1]
estimator = QuicGraphicalLasso(lam=1.0, path=path, mode="path")
estimator.fit(X)
trace_plot(estimator.precision_, estimator.path_, n_edges=6, ground_truth=truth)
if __name__ == "__main__":
# example 1
n_samples = 10
n_features = 5
X, cov, prec = make_data(n_samples, n_features)
print("Showing basic Erdos-Renyi example with ")
print(" n_samples=10")
print(" n_features=5")
print(" n_edges=20")
show_quic_coefficient_trace(X)
# use ground truth for display
print("Showing basic Erdos-Renyi example with ")
print(" n_samples=100")
print(" n_features=5")
print(" n_edges=6")
print(" ground_truth (shows only false pos and negatives)")
show_quic_coefficient_trace_truth(X, prec)
# example 2
n_samples = 110
n_features = 100
X, cov, prec = make_data_banded(n_samples, n_features)
print("Showing basic Lattice example with ")
print(" n_samples=110")
print(" n_features=100")
print(" n_blocks=2")
print(" random_sign=True")
print(" n_edges=20")
show_quic_coefficient_trace(X)
# use ground truth for display
print("Showing basic Lattice example with ")
print(" n_samples=110")
print(" n_features=100")
print(" n_blocks=2")
print(" random_sign=True")
print(" n_edges=6")
print(" ground_truth (shows only false pos and negatives)")
show_quic_coefficient_trace_truth(X, prec)
| mit | -7,484,878,322,205,193,000 | 29.764706 | 84 | 0.646272 | false | 3.131737 | false | false | false |
takaakiaoki/PyFoam | unittests/Basics/TemplateFile.py | 1 | 6744 |
import unittest
from PyFoam.Basics.TemplateFile import TemplateFile,TemplateFileOldFormat,PyratempPreprocessor
from PyFoam.Error import FatalErrorPyFoamException
from tempfile import mktemp
from PyFoam.ThirdParty.six import PY3
import sys
theSuite=unittest.TestSuite()
template1="""$$ y = 3+x
This should be $x+y$"""
template2="""
$$ xxx=13
$$ xx=34+xxx
$2*x+xx-xxx$
"""
templateFor="""$$ y = 2*x
<!--(for i in range(y))--> @!i!@ <!--(end)-->#!
"""
templateMath="sqrt(x) = $sqrt(x)$"
templateList="""<!--(for e in theList)-->#!
<!--(if e.lower()=="joe")-->#!
Big @!e!@
<!--(else)-->#!
Little @!e!@
<!--(end)-->#!
<!--(end)-->#!
"""
templateMacro="""<!--(macro tabsquare)-->
@!x!@ \t = @!x*x!@
<!--(end)-->
<!--(for i in vals)-->@!tabsquare(x=i)!@<!--(end)-->#!
"""
templateBuiltIn="""
<!--(if True)-->TRUE<!--(end)-->
<!--(if not False)-->FALSE<!--(end)-->
@!min(2,3)!@ @!max(2,3)!@
@!chr(42)!@ @!ord(' ')!@
"""
class TemplateFileTest(unittest.TestCase):
def testTemplateFileString(self):
t=TemplateFile(content=template1,expressionDelimiter="$")
self.assertEqual(t.getString({"x":-1}),"This should be 1")
fName=mktemp()
t.writeToFile(fName,{"x":1+2.})
result=open(fName).read()
self.assertEqual(result,"This should be 9.0")
def testTemplateFileFile(self):
fName=mktemp()
open(fName,"w").write(template1)
t=TemplateFile(name=fName,expressionDelimiter="$")
self.assertEqual(t.getString({"x":-1}),"This should be 1")
def testTemplateFileLongVars(self):
t=TemplateFile(content=template2,expressionDelimiter="$")
self.assertEqual(int(t.getString({"x":1})),36)
def testTemplateFileForLoop(self):
t=TemplateFile(content=templateFor)
self.assertEqual(t.getString({"x":2})," 0 1 2 3 ")
def testTemplateFileMacro(self):
t=TemplateFile(content=templateMacro)
if PY3 and sys.version_info.minor>1:
self.assertEqual(t.getString({"vals":[2,3.3,-1]}),"2 \t = 4\n3.3 \t = 10.889999999999999\n-1 \t = 1\n")
else:
self.assertEqual(t.getString({"vals":[2,3.3,-1]}),"2 \t = 4\n3.3 \t = 10.89\n-1 \t = 1\n")
def testTemplateFileListLoop(self):
t=TemplateFile(content=templateList)
self.assertEqual(t.getString({"theList":["Henry","Joe","joe","Tom"]}),"Little Henry\nBig Joe\nBig joe\nLittle Tom\n")
def testTemplateFileLongMath(self):
t=TemplateFile(content=templateMath,expressionDelimiter="$")
self.assertEqual(t.getString({"x":4}),"sqrt(x) = 2.0")
def testTemplateFileMathRealDelim(self):
t=TemplateFile(content=templateMath.replace("$","|"))
self.assertEqual(t.getString({"x":4}),"sqrt(x) = 2.0")
def testTemplateFilePercentDelimiter(self):
t=TemplateFile(content="x=$!x!$")
self.assertEqual(t.getString({"x":4}),"x=4")
def testTemplateFileBuiltinStuff(self):
t=TemplateFile(content=templateBuiltIn)
self.assertEqual(t.getString({}),"\nTRUE\nFALSE\n2 3\n* 32\n")
theSuite.addTest(unittest.makeSuite(TemplateFileTest,"test"))
class TemplateFileOldFormatTest(unittest.TestCase):
def testTemplateFileString(self):
t=TemplateFileOldFormat(content=template1)
self.assertEqual(t.getString({"x":-1}),"This should be 1\n")
fName=mktemp()
t.writeToFile(fName,{"x":"1+sqrt(4)"})
result=open(fName).read()
self.assertEqual(result,"This should be 9.0\n")
def testTemplateFileFile(self):
fName=mktemp()
open(fName,"w").write(template1)
t=TemplateFileOldFormat(name=fName)
self.assertEqual(t.getString({"x":-1}),"This should be 1\n")
def testTemplateFileLongVars(self):
t=TemplateFileOldFormat(content=template2)
self.assertEqual(int(t.getString({"x":1})),36)
def testTemplateFileLongMath(self):
t=TemplateFileOldFormat(content=templateMath)
self.assertEqual(t.getString({"x":4}),"sqrt(x) = 2.0\n")
theSuite.addTest(unittest.makeSuite(TemplateFileOldFormatTest,"test"))
class PyratempPreprocessorTest(unittest.TestCase):
def testFullPreprocessing(self):
p=PyratempPreprocessor()
self.assertEqual(p("nix\nda"),"nix\nda")
self.assertEqual(p("nix\nda\n"),"nix\nda\n")
self.assertEqual(p(""),"")
self.assertEqual(p("\n"),"\n")
self.assertEqual(p("$$ a=2 "),'$!setvar("a", "2")!$#!')
self.assertEqual(p(" $$ a=2 ")," $$ a=2 ")
self.assertRaises(FatalErrorPyFoamException,p,"$$ a ")
# Does not work with old nose
# with self.assertRaises(FatalErrorPyFoamException):
# p("$$ a ")
self.assertEqual(p("$$ a=2\n"),'$!setvar("a", "2")!$#!\n')
self.assertEqual(p("$$ a=2\n$$ b=3"),'$!setvar("a", "2")!$#!\n$!setvar("b", "3")!$#!')
self.assertEqual(p(" $foo$ $bar$ ")," $!foo!$ $!bar!$ ")
self.assertEqual(p("$foo$ $bar$"),"$!foo!$ $!bar!$")
self.assertEqual(p("$foo$ $bar$\n"),"$!foo!$ $!bar!$\n")
def testNoVarLinePreprocessing(self):
p=PyratempPreprocessor(dovarline=False)
self.assertEqual(p("nix\nda"),"nix\nda")
self.assertEqual(p("nix\nda\n"),"nix\nda\n")
self.assertEqual(p(""),"")
self.assertEqual(p("\n"),"\n")
self.assertEqual(p("$$ a=2 "),'$$ a=2 ')
self.assertEqual(p(" $$ a=2 ")," $$ a=2 ")
self.assertEqual(p("$$ a "),"$$ a ")
self.assertEqual(p("$$ a=2\n"),'$$ a=2\n')
self.assertEqual(p("$$ a=2\n$$ b=3"),'$$ a=2\n$$ b=3')
self.assertEqual(p(" $foo$ $bar$ ")," $!foo!$ $!bar!$ ")
self.assertEqual(p("$foo$ $bar$"),"$!foo!$ $!bar!$")
self.assertEqual(p("$foo$ $bar$\n"),"$!foo!$ $!bar!$\n")
def testNoExprPreprocessing(self):
p=PyratempPreprocessor(doexpr=False)
self.assertEqual(p("nix\nda"),"nix\nda")
self.assertEqual(p("nix\nda\n"),"nix\nda\n")
self.assertEqual(p(""),"")
self.assertEqual(p("\n"),"\n")
self.assertEqual(p("$$ a=2 "),'$!setvar("a", "2")!$#!')
self.assertEqual(p(" $$ a=2 ")," $$ a=2 ")
self.assertRaises(FatalErrorPyFoamException,p,"$$ a ")
# Does not work with old nose
# with self.assertRaises(FatalErrorPyFoamException):
# p("$$ a ")
self.assertEqual(p("$$ a=2\n"),'$!setvar("a", "2")!$#!\n')
self.assertEqual(p("$$ a=2\n$$ b=3"),'$!setvar("a", "2")!$#!\n$!setvar("b", "3")!$#!')
self.assertEqual(p(" $foo$ $bar$ ")," $foo$ $bar$ ")
self.assertEqual(p("$foo$ $bar$"),"$foo$ $bar$")
self.assertEqual(p("$foo$ $bar$\n"),"$foo$ $bar$\n")
theSuite.addTest(unittest.makeSuite(PyratempPreprocessorTest,"test"))
| gpl-2.0 | -8,537,140,287,998,110,000 | 36.259669 | 125 | 0.586447 | false | 2.898152 | true | false | false |
danakj/chromium | mojo/public/tools/bindings/pylib/mojom/parse/translate.py | 3 | 9030 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Translates parse tree to Mojom IR."""
import re
from . import ast
def _DuplicateName(values):
"""Returns the 'name' of the first entry in |values| whose 'name' has already
been encountered. If there are no duplicates, returns None."""
names = set()
for value in values:
if value['name'] in names:
return value['name']
names.add(value['name'])
return None
def _MapTreeForType(func, tree, type_to_map, scope):
assert isinstance(type_to_map, type)
if not tree:
return []
result = [func(subtree)
for subtree in tree if isinstance(subtree, type_to_map)]
duplicate_name = _DuplicateName(result)
if duplicate_name:
raise Exception('Names in mojom must be unique within a scope. The name '
'"%s" is used more than once within the scope "%s".' %
(duplicate_name, scope))
return result
def _MapKind(kind):
map_to_kind = {'bool': 'b',
'int8': 'i8',
'int16': 'i16',
'int32': 'i32',
'int64': 'i64',
'uint8': 'u8',
'uint16': 'u16',
'uint32': 'u32',
'uint64': 'u64',
'float': 'f',
'double': 'd',
'string': 's',
'handle': 'h',
'handle<data_pipe_consumer>': 'h:d:c',
'handle<data_pipe_producer>': 'h:d:p',
'handle<message_pipe>': 'h:m',
'handle<shared_buffer>': 'h:s'}
if kind.endswith('?'):
base_kind = _MapKind(kind[0:-1])
# NOTE: This doesn't rule out enum types. Those will be detected later, when
# cross-reference is established.
reference_kinds = ('m', 's', 'h', 'a', 'r', 'x', 'asso')
if re.split('[^a-z]', base_kind, 1)[0] not in reference_kinds:
raise Exception(
'A type (spec "%s") cannot be made nullable' % base_kind)
return '?' + base_kind
if kind.endswith('}'):
lbracket = kind.rfind('{')
value = kind[0:lbracket]
return 'm[' + _MapKind(kind[lbracket+1:-1]) + '][' + _MapKind(value) + ']'
if kind.endswith(']'):
lbracket = kind.rfind('[')
typename = kind[0:lbracket]
return 'a' + kind[lbracket+1:-1] + ':' + _MapKind(typename)
if kind.endswith('&'):
return 'r:' + _MapKind(kind[0:-1])
if kind.startswith('asso<'):
assert kind.endswith('>')
return 'asso:' + _MapKind(kind[5:-1])
if kind in map_to_kind:
return map_to_kind[kind]
return 'x:' + kind
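# A few worked examples of _MapKind (for illustration):
#   _MapKind('int32')          -> 'i32'
#   _MapKind('string?')        -> '?s'
#   _MapKind('int8[]')         -> 'a:i8'
#   _MapKind('int8[4]')        -> 'a4:i8'
#   _MapKind('uint8{string}')  -> 'm[s][u8]'
#   _MapKind('SomeInterface&') -> 'r:x:SomeInterface'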
def _AddOptional(dictionary, key, value):
if value is not None:
    dictionary[key] = value
def _AttributeListToDict(attribute_list):
if attribute_list is None:
return None
assert isinstance(attribute_list, ast.AttributeList)
# TODO(vtl): Check for duplicate keys here.
return dict([(attribute.key, attribute.value)
for attribute in attribute_list])
def _EnumToDict(enum):
def EnumValueToDict(enum_value):
assert isinstance(enum_value, ast.EnumValue)
data = {'name': enum_value.name}
_AddOptional(data, 'value', enum_value.value)
_AddOptional(data, 'attributes',
_AttributeListToDict(enum_value.attribute_list))
return data
assert isinstance(enum, ast.Enum)
data = {'name': enum.name,
'native_only': enum.enum_value_list is None }
if not data['native_only']:
data.update({'fields': map(EnumValueToDict, enum.enum_value_list)})
_AddOptional(data, 'attributes', _AttributeListToDict(enum.attribute_list))
return data
def _ConstToDict(const):
assert isinstance(const, ast.Const)
return {'name': const.name,
'kind': _MapKind(const.typename),
'value': const.value}
class _MojomBuilder(object):
def __init__(self):
self.mojom = {}
def Build(self, tree, name):
def StructToDict(struct):
def StructFieldToDict(struct_field):
assert isinstance(struct_field, ast.StructField)
data = {'name': struct_field.name,
'kind': _MapKind(struct_field.typename)}
_AddOptional(data, 'ordinal',
struct_field.ordinal.value
if struct_field.ordinal else None)
_AddOptional(data, 'default', struct_field.default_value)
_AddOptional(data, 'attributes',
_AttributeListToDict(struct_field.attribute_list))
return data
assert isinstance(struct, ast.Struct)
data = {'name': struct.name,
'native_only': struct.body is None}
if not data['native_only']:
data.update({
'fields': _MapTreeForType(StructFieldToDict, struct.body,
ast.StructField, struct.name),
'enums': _MapTreeForType(_EnumToDict, struct.body, ast.Enum,
struct.name),
'constants': _MapTreeForType(_ConstToDict, struct.body,
ast.Const, struct.name)})
_AddOptional(data, 'attributes',
_AttributeListToDict(struct.attribute_list))
return data
def UnionToDict(union):
def UnionFieldToDict(union_field):
assert isinstance(union_field, ast.UnionField)
data = {'name': union_field.name,
'kind': _MapKind(union_field.typename)}
_AddOptional(data, 'ordinal',
union_field.ordinal.value
if union_field.ordinal else None)
_AddOptional(data, 'attributes',
_AttributeListToDict(union_field.attribute_list))
return data
assert isinstance(union, ast.Union)
data = {'name': union.name,
'fields': _MapTreeForType(UnionFieldToDict, union.body,
ast.UnionField, union.name)}
_AddOptional(data, 'attributes',
_AttributeListToDict(union.attribute_list))
return data
def InterfaceToDict(interface):
def MethodToDict(method):
def ParameterToDict(param):
assert isinstance(param, ast.Parameter)
data = {'name': param.name,
'kind': _MapKind(param.typename)}
_AddOptional(data, 'ordinal',
param.ordinal.value if param.ordinal else None)
_AddOptional(data, 'attributes',
_AttributeListToDict(param.attribute_list))
return data
assert isinstance(method, ast.Method)
data = {'name': method.name,
'parameters': map(ParameterToDict, method.parameter_list)}
if method.response_parameter_list is not None:
data['response_parameters'] = map(ParameterToDict,
method.response_parameter_list)
_AddOptional(data, 'ordinal',
method.ordinal.value if method.ordinal else None)
_AddOptional(data, 'attributes',
_AttributeListToDict(method.attribute_list))
return data
assert isinstance(interface, ast.Interface)
data = {'name': interface.name,
'methods': _MapTreeForType(MethodToDict, interface.body,
ast.Method, interface.name),
'enums': _MapTreeForType(_EnumToDict, interface.body, ast.Enum,
interface.name),
'constants': _MapTreeForType(_ConstToDict, interface.body,
ast.Const, interface.name)}
_AddOptional(data, 'attributes',
_AttributeListToDict(interface.attribute_list))
return data
assert isinstance(tree, ast.Mojom)
self.mojom['name'] = name
self.mojom['namespace'] = tree.module.name[1] if tree.module else ''
self.mojom['imports'] = \
[{'filename': imp.import_filename} for imp in tree.import_list]
self.mojom['structs'] = \
_MapTreeForType(StructToDict, tree.definition_list, ast.Struct, name)
self.mojom['unions'] = \
_MapTreeForType(UnionToDict, tree.definition_list, ast.Union, name)
self.mojom['interfaces'] = \
_MapTreeForType(InterfaceToDict, tree.definition_list, ast.Interface,
name)
self.mojom['enums'] = \
_MapTreeForType(_EnumToDict, tree.definition_list, ast.Enum, name)
self.mojom['constants'] = \
_MapTreeForType(_ConstToDict, tree.definition_list, ast.Const, name)
_AddOptional(self.mojom, 'attributes',
_AttributeListToDict(tree.module.attribute_list)
if tree.module else None)
return self.mojom
def Translate(tree, name):
"""Translate AST to Mojom IR.
Args:
tree: The AST as a mojom.parse.ast.Mojom object.
name: The filename as a str.
Returns:
The Mojom IR as a dict.
"""
return _MojomBuilder().Build(tree, name)
| bsd-3-clause | -7,084,876,562,291,309,000 | 37.262712 | 80 | 0.58206 | false | 3.878866 | false | false | false |
DavidAndreev/indico | indico/web/forms/jinja_helpers.py | 1 | 4705 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import json
from wtforms.fields import RadioField, BooleanField
from wtforms.widgets.core import Input, Select, TextArea, HiddenInput
from wtforms.validators import Length, NumberRange
from indico.util.struct.enum import TitledEnum
from indico.web.forms.fields import IndicoSelectMultipleCheckboxField, IndicoEnumRadioField
from indico.web.forms.validators import ConfirmPassword, HiddenUnless, IndicoRegexp
from indico.web.forms.widgets import SelectizeWidget
def is_single_line_field(field):
if isinstance(field.widget, SelectizeWidget):
return True
if isinstance(field.widget, Select):
return not field.widget.multiple
if isinstance(field.widget, Input):
return field.widget.input_type not in {'checkbox', 'radio', 'hidden'}
if isinstance(field.widget, TextArea):
return True
return getattr(field.widget, 'single_line', False)
def _attrs_for_validators(field, validators):
attrs = {}
for validator in validators:
if isinstance(validator, Length):
if validator.min >= 0:
attrs['minlength'] = validator.min
if validator.max >= 0:
attrs['maxlength'] = validator.max
elif isinstance(validator, IndicoRegexp) and validator.client_side:
attrs['pattern'] = validator.regex.pattern
elif isinstance(validator, NumberRange):
if validator.min is not None:
attrs['min'] = validator.min
if validator.max is not None:
attrs['max'] = validator.max
elif isinstance(validator, ConfirmPassword):
attrs['data-confirm-password'] = field.get_form()[validator.fieldname].name
elif isinstance(validator, HiddenUnless):
condition_field = field.get_form()[validator.field]
checked_only = isinstance(condition_field, (RadioField, BooleanField, IndicoEnumRadioField))
val = validator.value
attrs['data-hidden-unless'] = json.dumps({'field': condition_field.name,
'value': val if not isinstance(val, TitledEnum) else val.name,
'checked_only': checked_only})
return attrs
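# For example (illustrative), a field validated with Length(min=2, max=40)
# and NumberRange(max=10) yields:
#   {'minlength': 2, 'maxlength': 40, 'max': 10}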
def render_field(field, widget_attrs, disabled=None):
"""Renders a WTForms field, taking into account validators"""
if not widget_attrs.get('placeholder'):
widget_attrs = dict(widget_attrs)
widget_attrs.pop('placeholder', None)
args = _attrs_for_validators(field, field.validators)
args['required'] = (field.flags.required and not field.flags.conditional
and not isinstance(field, IndicoSelectMultipleCheckboxField))
args.update(widget_attrs)
if disabled is not None:
args['disabled'] = disabled
return field(**args)
def iter_form_fields(form, fields=None, skip=None, hidden_fields=False):
"""Iterates over the fields in a WTForm
:param fields: If specified only fields that are in this list are
yielded. This also overrides the field order.
:param skip: If specified, only fields NOT in this set/list are
yielded.
:param hidden_fields: How to handle hidden fields. Setting this to
``True`` or ``False`` will yield only hidden
or non-hidden fields. Setting it to ``None``
will yield all fields.
"""
if fields is not None:
field_iter = (form[field_name] for field_name in fields if field_name in form)
else:
field_iter = iter(form)
if skip:
skip = set(skip)
field_iter = (field for field in field_iter if field.short_name not in skip)
if hidden_fields is not None:
field_iter = (field for field in field_iter if isinstance(field.widget, HiddenInput) == hidden_fields)
for field in field_iter:
yield field
| gpl-3.0 | 9,104,690,325,091,572,000 | 43.386792 | 116 | 0.663549 | false | 4.28897 | false | false | false |
FluxIX/pyShellScript | src/pyshell/environment.py | 1 | 7462 | from .tee_output_file import TeeOutputFile
class Environment( object ):
class CloneOptions( object ):
InheritVariables = "inherit_vars"
InheritStreams = "inherit_streams"
MakeParentLink = "parent_link"
def __init__( self, starting_directory = None, parent = None, starting_variables = None, standard_output = None, error_output = None ):
if starting_directory is None:
import os
starting_directory = os.curdir
self.directory_stack = []
self.push_directory( starting_directory )
self.parent = parent
if starting_variables is None:
starting_variables = {}
self.variables = starting_variables
if standard_output is None:
standard_output = TeeOutputFile()
self.__standard_output = standard_output
if error_output is None:
error_output = TeeOutputFile()
self.__error_output = error_output
self._attached = False
def __del__( self ):
if self._detach():
def is_internal_stream( stream ):
import sys
return stream is sys.__stdout__ or stream is sys.__stderr__ or stream is sys.__stdin__
if not is_internal_stream( self.standard_output ):
del self.__standard_output
if not is_internal_stream( self.error_output ):
del self.__error_output
def get_directory_stack( self ):
return self.__directory_stack
def _set_directory_stack( self, value ):
if value is not None:
self.__directory_stack = value
else:
raise ValueError( "Directory stack cannot be None." )
directory_stack = property( get_directory_stack, _set_directory_stack, None, None )
def push_directory( self, directory, suppress_errors = False ):
if directory is not None:
import os
if not os.path.isabs( directory ):
d = os.path.abspath( directory )
else:
d = directory
d = os.path.normpath( d )
if os.path.isdir( d ):
self.directory_stack.append( d )
result = True
elif not suppress_errors:
raise ValueError( "Only directories can be pushed." )
else:
result = False
elif not suppress_errors:
raise ValueError( "Pushed directory cannot be None." )
else:
result = False
return result
def pop_directory( self ):
return self.directory_stack.pop()
@property
def current_directory( self ):
return self.directory_stack[ -1 ]
def get_parent( self ):
return self.__parent
def _set_parent( self, value ):
self.__parent = value
parent = property( get_parent, _set_parent, None, None )
@property
def has_parent( self ):
return self.parent is not None
def get_variables( self ):
return self.__variables
def set_variables( self, value ):
self.__variables = value
variables = property( get_variables, set_variables, None, None )
def clone( self, **kwargs ):
key = Environment.CloneOptions.InheritVariables
if key in kwargs:
inherit_vars = bool( kwargs[ key ] )
else:
inherit_vars = False
key = Environment.CloneOptions.MakeParentLink
if key in kwargs:
parent_link = bool( kwargs[ key ] )
else:
parent_link = False
if parent_link:
parent = self
else:
parent = None
variables = {}
if inherit_vars:
for key in self.variables:
variables[ key ] = self.variables[ key ]
key = Environment.CloneOptions.InheritStreams
if key in kwargs:
inherit_streams = bool( kwargs[ key ] )
else:
inherit_streams = False
if inherit_streams:
standard_output = self.standard_output.clone()
error_output = self.error_output.clone()
else:
standard_output = None
error_output = None
result = Environment( self.current_directory, parent, variables, standard_output, error_output )
return result
@property
def standard_output( self ):
return self.__standard_output
@property
def error_output( self ):
return self.__error_output
def _attach( self ):
result = not self._attached
if result:
import os
import sys
self._previous_working_directory = os.getcwd()
self._previous_standard_output = sys.stdout
self._previous_error_output = sys.stderr
self._previous_environment_variables = os.environ
os.chdir( self.current_directory )
sys.stdout = self.standard_output
sys.stderr = self.error_output
os.environ = self.variables
self._attached = True
return result
def _detach( self ):
result = self._attached
if result:
import os
import sys
os.chdir( self._previous_working_directory )
sys.stdout = self._previous_standard_output
sys.stderr = self._previous_error_output
os.environ = self._previous_environment_variables
self._attached = False
return result
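# Illustrative usage sketch: swap the process state over to an Environment
# and back again (EnvironmentBuilder is defined below):
#
#   env = EnvironmentBuilder().inherit_starting_variables().build()
#   env._attach()    # os.environ / sys.stdout / cwd now come from env
#   ...              # run code inside the environment
#   env._detach()    # previous process state restored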
class EnvironmentBuilder( object ):
def __init__( self ):
self.starting_directory = None
self.parent = None
self.starting_variables = None
self.standard_output = None
self.error_output = None
def get_starting_directory( self ):
return self.__starting_directory
def set_starting_directory( self, value ):
self.__starting_directory = value
return self
starting_directory = property( get_starting_directory, set_starting_directory, None, None )
def get_parent( self ):
return self.__parent
def set_parent( self, value ):
self.__parent = value
return self
parent = property( get_parent, set_parent, None, None )
def get_starting_variables( self ):
return self.__starting_variables
def set_starting_variables( self, value ):
self.__starting_variables = value
return self
starting_variables = property( get_starting_variables, set_starting_variables, None, None )
def get_standard_output( self ):
return self.__standard_output
def set_standard_output( self, value ):
self.__standard_output = value
return self
standard_output = property( get_standard_output, set_standard_output, None, None )
def get_error_output( self ):
return self.__error_output
def set_error_output( self, value ):
self.__error_output = value
return self
error_output = property( get_error_output, set_error_output, None, None )
def inherit_starting_variables( self ):
starting_variables = {}
import os
for key in os.environ:
starting_variables[ key ] = os.environ[ key ]
self.starting_variables = starting_variables
return self
def build( self ):
return Environment( self.starting_directory, self.parent, self.starting_variables, self.standard_output, self.error_output )
| lgpl-3.0 | -8,011,103,368,289,027,000 | 27.922481 | 139 | 0.585902 | false | 4.614719 | false | false | false |
Iepoev/vsc-mympirun | lib/vsc/mympirun/mpi/mpi.py | 1 | 41888 | #
# Copyright 2011-2016 Ghent University
#
# This file is part of vsc-mympirun,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# the Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/hpcugent/vsc-mympirun
#
# vsc-mympirun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# vsc-mympirun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vsc-mympirun. If not, see <http://www.gnu.org/licenses/>.
#
"""
Base MPI class, all actual classes should inherit from this one
@author: Stijn De Weirdt
@author: Jeroen De Clerck
"""
import os
import pkgutil
import random
import re
import resource
import shutil
import socket
import stat
import string
import subprocess
import time
from IPy import IP
from vsc.utils.fancylogger import getLogger
from vsc.utils.missing import get_subclasses, nub
from vsc.utils.run import run_simple, run_to_file, run_async_to_stdout
# part of the directory that contains the installed fakes
INSTALLATION_SUBDIRECTORY_NAME = '(VSC-tools|(?:vsc-)?mympirun)'
# the fake subdir to contain the fake mpirun symlink
# also hardcoded in setup.py !
FAKE_SUBDIRECTORY_NAME = 'fake'
LOGGER = getLogger()
def what_mpi(name):
"""
Return the path of the selected mpirun and its class.
@param name: The name of the executable used to run mympirun
@return: A triplet containing the following variables:
- The path to the executable used to run mympirun (should be the path to an mpirun implementation)
- The corresponding python class of the MPI variant
- The python classes of the supported MPI flavors (from the various .py files in mympirun/mpi)
"""
# import all modules in this dir: http://stackoverflow.com/a/16853487
for loader, modulename, _ in pkgutil.walk_packages([os.path.dirname(__file__)]):
loader.find_module(modulename).load_module(modulename)
supp_mpi_impl = get_subclasses(MPI) # supported MPI implementations
# remove fake mpirun from $PATH
stripfake()
# get the path of the mpirun executable
mpirun_path = which('mpirun')
if mpirun_path is None:
# no MPI implementation installed
LOGGER.warn("no mpirun command found")
return None, None, supp_mpi_impl
scriptname = os.path.basename(os.path.abspath(name))
# check if mympirun was called by a known mpirun alias (like
# ompirun for OpenMPI or mhmpirun for mpich)
for mpi in supp_mpi_impl:
if mpi._is_mpiscriptname_for(scriptname):
LOGGER.debug("%s was used to call mympirun", scriptname)
return scriptname, mpi, supp_mpi_impl
# mympirun was not called through a known alias, so find out which MPI
# implementation the user has installed
for mpi in supp_mpi_impl:
if mpi._is_mpirun_for(mpirun_path):
return scriptname, mpi, supp_mpi_impl
# no specific flavor found, default to mpirun_path
LOGGER.warn("The executable that called mympirun (%s) isn't supported"
", defaulting to %s", name, mpirun_path)
return mpirun_path, None, supp_mpi_impl
def stripfake():
"""
If the user loaded the vsc-mympirun module but called mpirun, some $PATH trickery catches the attempt.
This function removes the fake path trickery from $PATH (assumes (VSC-tools|mympirun)/1.0.0/bin/fake).
"""
LOGGER.debug("PATH before stripfake(): %s", os.environ['PATH'])
# compile a regex that matches the faked mpirun
reg_fakepath = re.compile(
r"" + os.sep.join(['.*?',
INSTALLATION_SUBDIRECTORY_NAME + '.*?',
'bin',
'%(fake_subdir)s(%(sep)s[^%(sep)s]*)?$' %
{
'fake_subdir': FAKE_SUBDIRECTORY_NAME,
'sep': os.sep
}
]))
oldpath = os.environ.get('PATH', '').split(os.pathsep)
# remove all $PATH elements that match the fakepath regex
os.environ['PATH'] = os.pathsep.join([x for x in oldpath if not reg_fakepath.match(x)])
LOGGER.debug("PATH after stripfake(): %s", os.environ['PATH'])
return
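# For example (illustrative), with FAKE_SUBDIRECTORY_NAME == 'fake' the regex
# above matches and removes a $PATH entry such as
#   /apps/gent/vsc-mympirun/3.4.2/bin/fake
# while ordinary bin directories are left untouched.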
def which(cmd):
"""
Return (first) path in $PATH for specified command, or None if command is not found.
taken from easybuild/tools/filetools.py, 6/7/2016
"""
paths = os.environ.get('PATH', '').split(os.pathsep)
for path in paths:
cmd_path = os.path.join(path, cmd)
# only accept path is command is there, and both readable and executable
if os.access(cmd_path, os.R_OK | os.X_OK):
LOGGER.info("Command %s found at %s", cmd, cmd_path)
return cmd_path
LOGGER.warning("Could not find command '%s' (with permissions to read/execute it) in $PATH (%s)", cmd, paths)
return None
class MPI(object):
"""
Base MPI class to generate the mpirun command line.
To add a new MPI class just create a new class that extends the MPI class, see http://stackoverflow.com/q/456672
"""
RUNTIMEOPTION = None
_mpirun_for = []
_mpiscriptname_for = []
_mpirun_version = None
MPIRUN_LOCALHOSTNAME = 'localhost'
DEFAULT_RSH = None
HYDRA = None
HYDRA_LAUNCHER_NAME = "launcher"
DEVICE_LOCATION_MAP = {'ib': '/dev/infiniband', 'det': '/dev/det', 'shm': '/dev/shm', 'socket': None}
DEVICE_ORDER = ['ib', 'det', 'shm', 'socket']
DEVICE_MPIDEVICE_MAP = {'ib': 'rdma', 'det': 'det', 'shm': 'shm', 'socket': 'socket'}
NETMASK_TYPE_MAP = {'ib': 'ib', 'det': 'eth', 'shm': 'eth', 'socket': 'eth'}
PINNING_OVERRIDE_METHOD = 'numactl'
PINNING_OVERRIDE_TYPE_DEFAULT = None
REMOTE_OPTION_TEMPLATE = "--rsh=%(rsh)s"
MPDBOOT_OPTIONS = []
MPDBOOT_SET_INTERFACE = True
MPIEXEC_TEMPLATE_GLOBAL_OPTION = "-genv %(name)s '%(value)s'"
OPTS_FROM_ENV_TEMPLATE = "-x '%(name)s'"
MPIEXEC_OPTIONS = []
MODULE_ENVIRONMENT_VARIABLES = ['MODULEPATH', 'LOADEDMODULES', 'MODULESHOME']
OPTS_FROM_ENV_BASE = ['LD_LIBRARY_PATH', 'PATH', 'PYTHONPATH', 'CLASSPATH', 'LD_PRELOAD', 'PYTHONUNBUFFERED']
OPTS_FROM_ENV_BASE_PREFIX = ['OMP', 'MKL', 'KMP', 'DAPL', 'PSM', 'IPATH', 'TMI', 'PSC', 'O64', 'VSMP']
OPTS_FROM_ENV_FLAVOR_PREFIX = [] # to be set per flavor
def __init__(self, options, cmdargs, **kwargs):
if not hasattr(self, 'log'):
self.log = getLogger(self.__class__.__name__)
self.options = options
self.cmdargs = cmdargs
self.device = None
self.hydra_info = None
self.has_hydra = self._has_hydra()
self.netmasktype = None
self.netmask = None
self.mympirunbasedir = None
self.mympirundir = None
self.mpdboot_node_filename = None
self.mpdboot_options = None
self.mpdboot_totalnum = None
self.mpdboot_localhost_interface = None
self.mpiexec_node_filename = None
self.mpiexec_options = None
self.mpiexec_global_options = {}
self.mpiexec_opts_from_env = [] # list of variables
self.mpirun_cmd = None
self.pinning_override_type = getattr(self.options, 'overridepin', self.PINNING_OVERRIDE_TYPE_DEFAULT)
super(MPI, self).__init__(**kwargs)
# sanity checks
if getattr(self, 'sched_id', None) is None:
self.log.raiseException("__init__: sched_id is None (should be set by one of the Sched classes)")
if not self.cmdargs:
self.log.raiseException("__init__: no executable or command provided")
# factory methods for MPI
@classmethod
def _is_mpirun_for(cls, mpirun_path):
"""
Check if this class provides support for the mpirun that was called.
@param cls: the class that calls this function
@param mpirun_path: the path to the mpirun aka `which mpirun`
@return: true if $mpirun_path is defined as an mpirun implementation of $cls
"""
# regex matches "cls._mpirun_for/version number"
reg = re.compile(r"(?:%s)%s(\d+(?:(?:\.|-)\d+(?:(?:\.|-)\d+\S+)?)?)" %
("|".join(cls._mpirun_for), os.sep))
reg_match = reg.search(mpirun_path)
LOGGER.debug("_is_mpisrun_for(), reg_match: %s", reg_match)
if reg_match:
if cls._mpirun_version is None:
return True
else:
# do version check (reg_match.group(1) is the version number)
return cls._mpirun_version(reg_match.group(1))
else:
return False
@classmethod
def _is_mpiscriptname_for(cls, scriptname):
"""
Check if this class provides support for scriptname.
@param cls: the class that calls this function
@param scriptname: the executable that called mympirun
@return: true if $scriptname is defined as an mpiscriptname of $cls
"""
return scriptname in cls._mpiscriptname_for
# other general functionality
def _has_hydra(self):
"""Has HYDRA or not"""
return self.HYDRA
### main ###
def main(self):
"""Main method"""
self.prepare()
self.make_mpdboot()
# prepare these separately
self.set_mpiexec_global_options()
self.set_mpiexec_opts_from_env()
self.set_mpiexec_options()
self.make_mpirun()
# actual execution
for runfunc, cmd in self.mpirun_prepare_execution():
self.log.debug("main: going to execute cmd %s", " ".join(cmd))
exitcode, _ = runfunc(cmd)
if exitcode > 0:
self.cleanup()
self.log.raiseException("main: exitcode %s > 0; cmd %s" % (exitcode, cmd))
break
self.cleanup()
### BEGIN prepare ###
def prepare(self):
"""Collect information to create the commands."""
self.check_usable_cpus()
self.check_limit()
self.set_omp_threads()
self.set_netmask()
self.make_node_file()
self.set_pinning()
def check_usable_cpus(self):
"""Check and log if non-standard cpus (eg due to cpusets)."""
        if self.foundppn != len(self.cpus):
self.log.info("check_usable_cpus: non-standard cpus found: requested ppn %s, found cpus %s, usable cpus %s",
self.ppn, self.foundppn, len(self.cpus))
def check_limit(self):
"""Check if the softlimit of the stack exceeds 1MB, if it doesn't, show an error."""
soft, _ = resource.getrlimit(resource.RLIMIT_STACK) # in bytes
if soft > -1 and soft < 1024 * 1024:
# non-fatal
self.log.error("Stack size %s%s too low? Increase with ulimit -s unlimited", soft, 'kB')
def set_omp_threads(self):
"""
        Set ompthreads to the number of threads every MPI process should use.

        For example, with ppn 16 and hybrid 2 each node runs 2 MPI processes
        and every MPI process gets 16 // 2 = 8 threads, each on a separate
        processor. Defaults to 1 thread per process if hybrid is disabled.
"""
if 'OMP_NUM_THREADS' in os.environ:
threads = os.environ['OMP_NUM_THREADS']
else:
if not self.options.hybrid:
threads = 1
else:
threads = max(self.ppn // self.options.hybrid, 1)
self.log.debug("Set OMP_NUM_THREADS to %s", threads)
os.environ['OMP_NUM_THREADS'] = str(threads)
setattr(self.options, 'ompthreads', threads)
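        # e.g. (illustrative): ppn=16 with --hybrid 2 gives 2 MPI processes
        # per node and OMP_NUM_THREADS=8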
def set_netmask(self):
"""
Set self.netmask to a list containing (ip address/netmask).
Based on the hosts IP address (from ip addr show) and the selected netmasktype from select_device.
"""
if self.netmasktype is None:
self.select_device()
device_ip_reg_map = {
'eth': r"ether.*?\n.*?inet\s+(\d+\.\d+.\d+.\d+/\d+)",
'ib': r"infiniband.*?\n.*?inet\s+(\d+\.\d+.\d+.\d+/\d+)",
}
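        # illustrative (made-up host): for "ip addr show" output containing
        #   link/ether 00:11:22:33:44:55 brd ff:ff:ff:ff:ff:ff
        #       inet 10.1.2.3/24 brd 10.1.2.255 scope global eth0
        # the 'eth' regex captures "10.1.2.3/24"; IP(..., make_net=True) below
        # then turns it into "10.1.2.0/255.255.255.0"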
if self.netmasktype not in device_ip_reg_map:
self.log.raiseException("set_netmask: can't get netmask for %s: unknown mode (device_ip_reg_map %s)" %
(self.netmasktype, device_ip_reg_map))
cmd = "/sbin/ip addr show"
exitcode, out = run_simple(cmd)
if exitcode > 0:
self.log.raiseException("set_netmask: failed to run cmd %s, ec: %s" % (cmd, exitcode))
reg = re.compile(device_ip_reg_map[self.netmasktype])
if not reg.search(out):
self.log.raiseException("set_netmask: can't get netmask for %s: no matches found (reg %s out %s)" %
(self.netmasktype, device_ip_reg_map[self.netmasktype], out))
res = []
for ipaddr_mask in reg.finditer(out):
ip_info = IP(ipaddr_mask.group(1), make_net=True)
network_netmask = "%s/%s" % (ip_info.net(), ip_info.netmask())
res.append(network_netmask)
self.log.debug("set_netmask: convert ipaddr_mask %s into network_netmask %s",
ipaddr_mask.group(1), network_netmask)
self.log.debug("set_netmask: return complete netmask %s", res)
if res:
self.netmask = os.pathsep.join(res)
def select_device(self, force=False):
"""
Select a device (such as infiniband), either with command line arguments or the best available.
See DEVICE_ORDER for order of preference.
"""
if self.device is not None and not force:
self.log.debug("select_device: device already set: %s", self.device)
return
founddev = None
if getattr(self.options, 'rdma', None):
founddev = 'ib'
self.set_device(founddev)
elif getattr(self.options, 'socket', None):
founddev = 'socket'
self.set_device(founddev)
else:
for dev in self.DEVICE_ORDER:
if dev in ('shm',):
# only use shm if a single node is used
if self.nruniquenodes > 1:
continue
path = self.DEVICE_LOCATION_MAP[dev]
if path is None or os.path.exists(path):
founddev = dev
self.device = self.DEVICE_MPIDEVICE_MAP[dev]
self.log.debug("select_device: found path %s for device %s", path, self.device)
break
if self.device is None:
self.log.raiseException("select_device: failed to set device.")
self.netmasktype = self.NETMASK_TYPE_MAP[founddev]
self.log.debug("select_device: set netmasktype %s for device %s (founddev %s)",
self.netmasktype, self.device, founddev)
def set_device(self, founddev):
"""Set self.device to founddev, but doublecheck if the path to this device actually exists """
self.device = self.DEVICE_MPIDEVICE_MAP[founddev]
path = self.DEVICE_LOCATION_MAP[founddev]
if path is None or not os.path.exists(path):
self.log.warning("Forcing device %s (founddevice %s), but path %s not found.",
self.device, founddev, path)
def make_node_file(self):
"""
Make a nodefile and mpdbootfile.
Parses the list of nodes that run an MPI process and writes this information to a nodefile.
Also parses the list of unique nodes and writes this information to a mpdbootfile
        (based on hydra and universe options).
"""
self.make_mympirundir()
if self.mpinodes is None:
self.make_node_list()
nodetxt = "\n".join(self.mpinodes + [''])
mpdboottxt = ""
for uniquenode in self.uniquenodes:
txt = uniquenode
if not self.has_hydra:
if self.options.universe is not None and self.options.universe > 0:
txt += ":%s" % self.get_universe_ncpus()
txt += " ifhn=%s" % uniquenode
mpdboottxt += "%s\n" % txt
        nodefn = os.path.join(self.mympirundir, 'nodes')
        mpdfn = os.path.join(self.mympirundir, 'mpdboot')
        try:
            with open(nodefn, 'w') as fh:
                fh.write(nodetxt)
            self.mpiexec_node_filename = nodefn
            self.log.debug("make_node_file: wrote nodefile %s:\n%s", nodefn, nodetxt)
            with open(mpdfn, 'w') as fh:
                fh.write(mpdboottxt)
            self.mpdboot_node_filename = mpdfn
            self.log.debug("make_node_file: wrote mpdbootfile %s:\n%s", mpdfn, mpdboottxt)
        except Exception:
            self.log.raiseException('make_node_file: failed to write nodefile %s or mpdboot nodefile %s' % (nodefn, mpdfn))
def get_universe_ncpus(self):
"""Return ppn for universe"""
return self.mpiprocesspernode
def make_mympirundir(self):
"""
Make a dir called .mympirun_id_timestamp in either the given basepath or $HOME.
Temporary files such as the nodefile will be written to this directory.
Allows for easy cleanup after finishing the script.
"""
basepath = getattr(self.options, 'basepath', None)
if basepath is None:
basepath = os.environ['HOME']
if not os.path.exists(basepath):
self.log.raiseException("make_mympirun_dir: basepath %s should exist." % basepath)
self.mympirunbasedir = os.path.join(basepath, '.mympirun')
destdir = os.path.join(self.mympirunbasedir, "%s_%s" % (self.sched_id, time.strftime("%Y%m%d_%H%M%S")))
if not os.path.exists(destdir):
try:
os.makedirs(destdir)
except os.error:
self.log.raiseException('make_mympirun_dir: failed to make job dir %s' % destdir)
self.log.debug("make_mympirun_dir: tmp mympirundir %s", destdir)
self.mympirundir = destdir
### BEGIN pinning ###
def set_pinning(self, mp=None):
if not hasattr(self.options, 'pinmpi'):
setattr(self.options, 'pinmpi', None)
mp = self._pin_flavour(mp)
if isinstance(mp, bool):
self.log.debug("set_pinning: setting pin_flavour %s", mp)
self.options.pinmpi = mp
if not isinstance(self.options.pinmpi, bool):
if self.options.hybrid is not None:
# always pin!
self.options.pinmpi = True
else:
# always pin!
self.options.pinmpi = True
if self.pinning_override_type is not None:
self.log.debug("set_pinning: previous pinning %s; will be overwritten, pinning_override_type set to %s",
self.options.pinmpi, self.pinning_override_type)
self.options.pinmpi = False
else:
self.log.debug("set_pinning: pinmpi %s", self.options.pinmpi)
def _pin_flavour(self, mp=None):
return mp
### BEGIN mpdboot ###
def make_mpdboot(self):
"""
Make the mpdboot configuration.
Read a password from ~/.mpd.conf (if this does not exist, create it).
"""
# check .mpd.conf existence
mpdconffn = os.path.expanduser('~/.mpd.conf')
if not os.path.exists(mpdconffn):
self.log.warning(("make_mpdboot: mpd.conf file not found at %s. Creating this file "
"(text file with minimal entry 'password=<somesecretpassword>')"), mpdconffn)
mpdconff = open(mpdconffn, 'w')
mpdconff.write("password=%s" % ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in range(10)))
mpdconff.close()
# set correct permissions on this file.
os.chmod(mpdconffn, stat.S_IREAD)
self.set_mpdboot_localhost_interface()
self.make_mpdboot_options()
self.log.debug("make_mpdboot set options %s", self.mpdboot_options)
def set_mpdboot_localhost_interface(self):
"""Sets mpdboot_localhost_interface to the first result of get_localhosts()."""
localhosts = self.get_localhosts()
if len(localhosts) > 1:
self.log.warning(("set_mpdboot_localhost_interface: more then one match for localhost from unique nodes "
" found %s, using 1st."), localhosts)
nodename, iface = localhosts[0] # take the first one
self.log.debug("set_mpdboot_localhost_interface: mpd localhost interface %s found for %s", iface, nodename)
self.mpdboot_localhost_interface = (nodename, iface)
def get_localhosts(self):
"""
Get the localhost interfaces, based on the hostnames from the nodes in self.uniquenodes.
Raises Exception if no localhost interface was found.
@return: the list of interfaces that correspond to the list of uniquenodes
"""
iface_prefix = ['eth', 'em', 'ib', 'wlan']
reg_iface = re.compile(r'((?:%s)\d+(?:\.\d+)?(?::\d+)?|lo)' % '|'.join(iface_prefix))
# iterate over uniquenodes and get their interfaces
# add the found interface to res if it matches reg_iface
res = []
for idx, nodename in enumerate(self.uniquenodes):
ip = socket.gethostbyname(nodename)
cmd = "/sbin/ip -4 -o addr show to %s/32" % ip
exitcode, out = run_simple(cmd)
if exitcode == 0:
regex = reg_iface.search(out)
if regex:
iface = regex.group(1)
self.log.debug("get_localhost idx %s: localhost interface %s found for %s (ip: %s)",
idx, iface, nodename, ip)
res.append((nodename, iface))
else:
self.log.debug("get_localhost idx %s: no interface match for prefixes %s out %s",
idx, iface_prefix, out)
else:
self.log.error("get_localhost idx %s: cmd %s failed with output %s", idx, cmd, out)
if not res:
self.log.raiseException("get_localhost: can't find localhost from uniq nodes %s" % self.uniquenodes)
return res
def make_mpdboot_options(self):
"""Add various options to mpdboot_options"""
self.mpdboot_options = self.MPDBOOT_OPTIONS[:]
# add the mpd nodefile to mpdboot options
self.mpdboot_options.append("--file=%s" % self.mpdboot_node_filename)
# add the interface to mpdboot options
if self.MPDBOOT_SET_INTERFACE:
if self.has_hydra:
iface = "-iface %s" % self.mpdboot_localhost_interface[1]
else:
iface = "--ifhn=%s" % self.mpdboot_localhost_interface[0]
self.log.debug('Set mpdboot interface option "%s"', iface)
self.mpdboot_options.append(iface)
else:
self.log.debug('No mpdboot interface option')
# add the number of mpi processes (aka mpi universe) to mpdboot options
if self.options.universe is not None and self.options.universe > 0:
self.mpdboot_options.append("--ncpus=%s" % self.get_universe_ncpus())
# add nr of unique nodes as totalnum if defined
if self.mpdboot_totalnum:
self.mpdboot_options.append("--totalnum=%s" % self.mpdboot_totalnum)
# set verbosity
if self.options.mpdbootverbose:
self.mpdboot_options.append("--verbose")
# mpdboot rsh command
if not self.has_hydra:
self.mpdboot_options.append(self.REMOTE_OPTION_TEMPLATE % {'rsh': self.get_rsh()})
### BEGIN mpiexec ###
def set_mpiexec_global_options(self):
"""
Set mpiexec_global_options.
Unless explicitly asked not to, will add all environment variables to mpiexec_global_options.
"""
self.mpiexec_global_options['MKL_NUM_THREADS'] = '1'
if not self.options.noenvmodules:
for env_var in self.MODULE_ENVIRONMENT_VARIABLES:
if env_var in os.environ and env_var not in self.mpiexec_global_options:
self.mpiexec_global_options[env_var] = os.environ[env_var]
def set_mpiexec_opts_from_env(self):
"""
Get relevant environment variables and append them to mpiexec_opts_from_env
Gets the union of OPTS_FROM_ENV_BASE and the environment variables that start with a given prefix.
These will then be parsed and passed to mpiexec as an option
"""
# get all unique variables that are both in os.environ and in OPTS_FROM_ENV_BASE
        vars_to_pass = nub([v for v in self.OPTS_FROM_ENV_BASE if v in os.environ])
for env_prefix in self.OPTS_FROM_ENV_FLAVOR_PREFIX + self.OPTS_FROM_ENV_BASE_PREFIX + self.options.variablesprefix:
for env_var in os.environ.keys():
# add all environment variable keys that are equal to <prefix> or start with <prefix>_
# to mpiexec_opts_from_env, but only if they aren't already in vars_to_pass
if (env_prefix == env_var or env_var.startswith("%s_" % env_prefix)) and env_var not in vars_to_pass:
self.mpiexec_opts_from_env.append(env_var)
def set_mpiexec_options(self):
"""Add various options to mpiexec_options."""
self.mpiexec_options = self.MPIEXEC_OPTIONS[:]
if self.has_hydra:
self.make_mpiexec_hydra_options()
else:
self.mpiexec_options.append("-machinefile %s" % self.mpiexec_node_filename)
# mpdboot global variables
self.mpiexec_options += self.get_mpiexec_global_options()
# number of procs to start
if self.options.universe is not None and self.options.universe > 0:
self.mpiexec_options.append("-np %s" % self.options.universe)
else:
self.mpiexec_options.append("-np %s" % (self.mpiprocesspernode * self.nruniquenodes))
# pass local env variables to mpiexec
self.mpiexec_options += self.get_mpiexec_opts_from_env()
def make_mpiexec_hydra_options(self):
"""Hydra specific mpiexec options."""
self.get_hydra_info()
self.mpiexec_options.append("--hostfile %s" % self.mpiexec_node_filename)
if self.options.branchcount is not None:
self.mpiexec_options.append("--branch-count %d" % self.options.branchcount)
# default launcher seems ssh
if getattr(self, 'HYDRA_RMK', None) is not None:
rmk = [x for x in self.HYDRA_RMK if x in self.hydra_info.get('rmk', [])]
if len(rmk) > 0:
self.log.debug("make_mpiexe_hydra_options: HYDRA: rmk %s, using first", rmk)
self.mpiexec_options.append("-rmk %s" % rmk[0])
else:
self.log.debug("make_mpiexe_hydra_options: no rmk from HYDRA_RMK %s and hydra_info %s",
self.HYDRA_RMK, self.hydra_info)
else:
launcher = None
if getattr(self, 'HYDRA_LAUNCHER', None) is not None:
launcher = [x for x in self.HYDRA_LAUNCHER if x in self.hydra_info.get('launcher', [])]
if launcher:
self.log.debug("make_mpiexec_hydra_options: HYDRA: launcher %s, using first one", launcher)
else:
self.log.debug("make_mpiexe_hydra_options: no launcher from HYDRA_LAUNCHER %s and hydra_info %s",
self.HYDRA_LAUNCHER, self.hydra_info)
launcher_exec = self.HYDRA_LAUNCHER_EXEC
if not launcher:
launcher_exec = self.get_rsh()
else:
self.mpiexec_options.append("-%s %s" % (self.HYDRA_LAUNCHER_NAME, launcher[0]))
if launcher_exec is not None:
self.log.debug("make_mpiexec_hydra_options: HYDRA using launcher exec %s", launcher_exec)
self.mpiexec_options.append("-%s-exec %s" % (self.HYDRA_LAUNCHER_NAME, launcher_exec))
def get_hydra_info(self):
"""Get a dict with hydra info."""
reg_hydra_info = re.compile(r"^\s+(?P<key>\S[^:\n]*)\s*:(?P<value>.*?)\s*$", re.M)
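        # illustrative "mpirun -info" output lines parsed by the regex above
        # (actual output differs per MPI flavour and version):
        #   Process Manager                : pmi
        #   Bootstrap servers available    : ssh rsh fork slurm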
cmd = "mpirun -info"
exitcode, out = run_simple(cmd)
if exitcode > 0:
self.log.raiseException("get_hydra_info: failed to run cmd %s: %s" % (cmd, out))
hydra_info = {}
for regex in reg_hydra_info.finditer(out):
key = regex.groupdict()['key']
if key is None:
self.log.raiseException("get_hydra_info: failed to get hydra info: missing key in %s (out: %s)" %
(regex.groupdict(), out))
key = key.strip().lower()
value = regex.groupdict()['value']
if value is None:
self.log.debug("get_hydra_info: failed to get hydra info: missing value in %s (out: %s)" %
(regex.groupdict(), out))
value = ''
values = [x.strip().strip('"').strip("'") for x in value.split() if x.strip()]
hydra_info[key] = values
self.log.debug("get_hydra_info: found info %s", hydra_info)
keymap = {
"rmk": r'^resource\s+management\s+kernel.*available',
"launcher": r'^%s.*available' % self.HYDRA_LAUNCHER_NAME,
"chkpt": r'^checkpointing.*available',
}
self.hydra_info = {}
for newkey, regtxt in keymap.items():
reg = re.compile(regtxt, re.I)
matches = [v for k, v in hydra_info.items() if reg.search(k)]
if len(matches) == 0:
continue
else:
if len(matches) > 1:
self.log.warning("get_hydra_info: more than one match %s found: newkey %s regtxt %s hydrainfo %s",
matches, newkey, regtxt, hydra_info)
self.hydra_info[newkey] = matches[0]
self.log.debug("get_hydra_info: filtered info %s", self.hydra_info)
def get_mpiexec_global_options(self):
"""
Create the global options to pass to mpiexec.
Iterates over mpiexec_global_options, and picks the options that aren't already in mpiexec_opts_from_env.
This way the options that are set with environment variables get a higher priority.
@return: the final list of options, including the correct command line argument for the mpi flavor
"""
global_options = []
for key, val in self.mpiexec_global_options.items():
if key in self.mpiexec_opts_from_env:
# environment variable is already set
self.log.debug("get_mpiexec_global_options: found global option %s in mpiexec_opts_from_env.", key)
else:
# insert the keyvalue pair into the correct command line argument
# the command for setting the environment variable depends on the mpi flavor
global_options.append(self.MPIEXEC_TEMPLATE_GLOBAL_OPTION % {'name': key, "value": val})
self.log.debug("get_mpiexec_global_options: template %s return options %s",
self.MPIEXEC_TEMPLATE_GLOBAL_OPTION, global_options)
return global_options
def get_mpiexec_opts_from_env(self):
"""
        Get the environment variables that should be passed to mpiexec as options.
        Parses mpiexec_opts_from_env so that the chosen mpi flavor can understand the
        variables when they are passed on the command line.
"""
self.log.debug("get_mpiexec_opts_from_env: variables (and current value) to pass: %s",
[[x, os.environ[x]] for x in self.mpiexec_opts_from_env])
if '%(commaseparated)s' in self.OPTS_FROM_ENV_TEMPLATE:
self.log.debug("get_mpiexec_opts_from_env: found commaseparated in template.")
environment_options = [self.OPTS_FROM_ENV_TEMPLATE %
{'commaseparated': ','.join(self.mpiexec_opts_from_env)}]
else:
environment_options = [self.OPTS_FROM_ENV_TEMPLATE %
{'name': x, 'value': os.environ[x]} for x in self.mpiexec_opts_from_env]
self.log.debug("get_mpiexec_opts_from_env: template %s return options %s",
self.OPTS_FROM_ENV_TEMPLATE, environment_options)
return environment_options
### BEGIN mpirun ###
def make_mpirun(self):
"""Make the mpirun command (or whatever). It typically consists of a mpdboot and a mpiexec part."""
self.mpirun_cmd = ['mpirun']
self._make_final_mpirun_cmd()
if self.options.mpirunoptions is not None:
self.mpirun_cmd.append(self.options.mpirunoptions)
self.log.debug("make_mpirun: added user provided options %s", self.options.mpirunoptions)
if self.pinning_override_type is not None:
p_o = self.pinning_override()
if p_o is None or not os.path.isfile(p_o):
self.log.raiseException("make_mpirun: no valid pinning_overrride %s (see previous errors)" % p_o)
else:
self.mpirun_cmd += [p_o]
# the executable
# use undocumented subprocess API call to quote whitespace (executed with Popen(shell=True))
# (see http://stackoverflow.com/questions/4748344/whats-the-reverse-of-shlex-split for alternatives if needed)
quoted_args_string = subprocess.list2cmdline(self.cmdargs)
self.log.debug("make_mpirun: adding cmdargs %s (quoted %s)", self.cmdargs, quoted_args_string)
self.mpirun_cmd.append(quoted_args_string)
def _make_final_mpirun_cmd(self):
"""
        Create the actual mpirun command.
Append the mpdboot and mpiexec options to the command.
"""
self.mpirun_cmd += self.mpdboot_options
self.mpirun_cmd += self.mpiexec_options
def pinning_override(self):
"""
        Create our own pinning
        - using taskset or numactl?
        - start the real executable with correct pinning
        There are self.mpiprocesspernode processes to start on (self.nruniquenodes * self.ppn) requested slots.
        Each node has to accept self.mpiprocesspernode/self.ppn processes over its self.ppn cpu slots.
        Do we assume heterogeneous nodes (i.e. same cpu layout as the current node)?
        - We should, but in reality we don't, because of different cpusets!
        What do we support?
        - packed/compact: all together, ranks close to each other
        - spread: as far away as possible from each other
        Option:
        - threaded (default yes): e.g. in hybrid mode, pin on all available cores or just one
        When in this mode, one needs to disable default/native pinning.
        There seems to be no clean way to simply prefix the variables before the real exe:
        - some mpirun are binary, others are bash
        - no clean way to pass the variable
        - a simple bash script also resolves the csh problem?
        Simple shell check: this is the login shell of the current user
        - not necessarily the current shell
        - but it is when multinode is used, I think (e.g. startup with ssh)
"""
variableexpression = self.get_pinning_override_variable()
if variableexpression is None:
self.log.raiseException("pinning_override: no variable name found/set.")
self.log.debug("pinning_override: using variable expression %s as local node rank.", variableexpression)
rankname = 'MYMPIRUN_LOCALRANK'
rankmapname = 'MYMPIRUN_LOCALRANK_MAP'
wrappertxt = "#!/bin/bash\n%s=%s\n" % (rankname, variableexpression)
# number of local processors
# - eg numactl -s grep physcpubind
        if self.ppn != self.foundppn:
self.log.raiseException(("pinning_override: number of found procs %s is different from "
"requested ppn %s. Not yet supported.") % (self.foundppn, self.ppn))
override_type = self.pinning_override_type
        multi = True
        if override_type.endswith('pin'):
            override_type = override_type[:-3]
            multi = False
        self.log.debug("pinning_override: type %s multithread %s", override_type, multi)
# The whole method is very primitive
# - assume cpu layout on OS has correct numbering
# What about pinned threads of threaded apps?
# - eg use likwid to pin those threads too
# cores per process
corespp = self.foundppn // self.mpiprocesspernode
corespp_rest = self.foundppn % self.mpiprocesspernode
        if (corespp < 1) or (self.mpiprocesspernode == self.foundppn):
            multi = False
            self.log.debug(("pinning_override: one or more mpi processes per core (mpi processes: %s, "
                            "procs: %s). Multithreading is disabled."), self.mpiprocesspernode, self.foundppn)
        if corespp_rest > 0:
            self.log.debug(("pinning_override: number of procs (%s) is not an exact multiple of the "
                            "number of mpi processes (%s). Ignoring the rest."), self.foundppn, self.mpiprocesspernode)
map_func = None
if override_type in ('packed', 'compact',):
if multi:
# consecutive domains
map_func = lambda x: "%s-%s" % (x * corespp, (x + 1) * corespp - 1)
else:
# consecutive cores
map_func = lambda x: x
elif override_type in ('cycle',):
# eg double with GAMESS
if multi:
self.log.raiseException(
"pinning_override: trying to set pin type to 'cycle' with multithreading enabled: not supported")
else:
map_func = lambda x: (x % self.foundppn)
elif override_type in ('spread',):
if multi:
# spread domains
map_func = lambda x: "%s-%s" % (x * corespp, (x + 1) * corespp - 1)
else:
# spread cores
map_func = lambda x: (x * corespp)
else:
self.log.raiseException("pinning_override: unsupported pinning_override_type %s" %
self.pinning_override_type)
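        # illustrative rankmaps (assuming foundppn=8 and mpiprocesspernode=4,
        # so corespp=2):
        #   packed/compact, multi    : ['0-1', '2-3', '4-5', '6-7']
        #   packed/compact, no multi : [0, 1, 2, 3]
        #   spread, no multi         : [0, 2, 4, 6]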
        rankmap = [str(map_func(x)) for x in range(self.mpiprocesspernode)]
        wrappertxt += "%s=(%s)\n" % (rankmapname, ' '.join(rankmap))
pinning_exe = which(self.PINNING_OVERRIDE_METHOD) # default numactl
if not pinning_exe:
self.log.raiseException("pinning_override: can't find executable %s" % self.PINNING_OVERRIDE_METHOD)
if self.PINNING_OVERRIDE_METHOD in ('numactl',):
pinning_exe += ' --physcpubind="${%s[$%s]}"' % (rankmapname, rankname)
wrappertxt += "%s $@" % pinning_exe
wrapperpath = os.path.join(self.jobdir, 'pinning_override_wrapper.sh')
try:
open(wrapperpath, 'w').write(wrappertxt)
os.chmod(wrapperpath, stat.S_IRWXU)
self.log.debug("pinning_override: wrote wrapper file %s:\n%s", wrapperpath, wrappertxt)
except IOError:
            self.log.raiseException('pinning_override: failed to write wrapper file %s' % wrapperpath)
self.log.debug("pinning_override: pinning_exe %s to wrapper %s", pinning_exe, wrapperpath)
return wrapperpath
def get_pinning_override_variable(self):
"""
Key element is that one needs to know the rank or something similar of each process
- preferably in environment
- eg QLogic PSC_MPI_NODE_RANK: this instance is the nth local rank.
- alternative is small c mpi program with bash wrapper
- see also likwid-mpirun for alternative example
- mentions similar OMPI_COMM_WORLD_RANK for OpenMPI and PMI_RANK for IntelMPI
            - local_rank is the remainder of myrank divided by the number of nodes?
This is a bash expression.
- eg $((x/y)) is also fine
"""
self.log.raiseException("get_pinning_override_variable: not implemented.")
def mpirun_prepare_execution(self):
"""
Make a function that runs mpirun with all arguments correctly set
@return: a tuple containing the final function and the final command
"""
def main_runfunc(cmd):
"""The function that will run mpirun"""
if self.options.output is not None:
return run_to_file(cmd, filename=self.options.output)
else:
return run_async_to_stdout(cmd)
return [(main_runfunc, self.mpirun_cmd)]
def cleanup(self):
"""Remove temporary directory (mympirundir)"""
try:
shutil.rmtree(self.mympirundir)
self.log.debug("cleanup: removed mympirundir %s", self.mympirundir)
except OSError:
self.log.raiseException("cleanup: cleaning up mympirundir %s failed" % self.mympirundir)
| gpl-2.0 | -5,106,039,025,082,344,000 | 40.228346 | 123 | 0.597999 | false | 3.732003 | false | false | false |
setsulla/owanimo | lib/puyo/util/log.py | 1 | 2199 | import os
import logging
import logging.config
import logging.handlers
import define
from system import FILE as f
from system import DIRECTORY as d
CONF_FILE = "logging.conf"
BASE_NAME = "Puyopuyo.Owanimo"
BASE_LEVEL = logging.DEBUG
BASE_FORMAT = '%(processName)s.%(name)s ( PID %(process)d ) : %(asctime)s - %(levelname)s - %(message)s'
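# illustrative record produced with BASE_FORMAT (values are made up):
#   MainProcess.Puyopuyo.Owanimo ( PID 1234 ) : 2015-01-01 12:00:00,000 - DEBUG - message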
class Log(object):
def __init__(self, name):
d.create(define.APP_LOG)
self.logger = logging.getLogger(name)
self.logger.setLevel(BASE_LEVEL)
self.__addHandler(self.consoleHandler())
self.__addHandler(self.fileHandler(
os.path.normpath(os.path.join(define.APP_LOG,"system.log"))))
def __addHandler(self, handler):
self.logger.addHandler(handler)
def consoleHandler(self):
f = logging.Formatter(BASE_FORMAT)
h = logging.StreamHandler()
h.setLevel(BASE_LEVEL)
h.setFormatter(f)
return h
def fileHandler(self, filename):
f.create(filename)
fo = logging.Formatter(BASE_FORMAT)
h = logging.FileHandler(filename, 'a+')
h.setLevel(BASE_LEVEL)
h.setFormatter(fo)
return h
def rotateFileHandler(self, filename):
if os.path.exists(filename):
f = logging.Formatter(BASE_FORMAT)
h = logging.handlers.TimedRotatingFileHandler(
filename = os.path.normpath(os.path.join(filename)),
when = 'D',
backupCount = 5
)
h.setLevel(BASE_LEVEL)
h.setFormatter(f)
            return h
else:
return None
def debug(self, message):
self.logger.debug(message)
def warning(self, message):
self.logger.warning(message)
def info(self, message):
self.logger.info(message)
def critical(self, message):
self.logger.critical(message)
def error(self, message):
self.logger.error(message)
def __del__(self):
del self
LOG = Log(BASE_NAME)
if __name__ == "__main__":
logger = LOG
logger.debug("debug")
logger.warning("warning")
logger.info("info")
logger.critical("critical")
logger.error("error")
| mit | -1,362,475,733,673,479,400 | 25.493976 | 104 | 0.600728 | false | 3.864675 | false | false | false |
CelineBoudier/rapid-router | game/tests/test_scoreboard.py | 1 | 14455 | # -*- coding: utf-8 -*-
# Code for Life
#
# Copyright (C) 2016, Ocado Innovation Limited
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ADDITIONAL TERMS – Section 7 GNU General Public Licence
#
# This licence does not grant any right, title or interest in any “Ocado” logos,
# trade names or the trademark “Ocado” or any other trademarks or domain names
# owned by Ocado Innovation Limited or the Ocado group of companies or any other
# distinctive brand features of “Ocado” as may be secured from time to time. You
# must not distribute any modification of this program using the trademark
# “Ocado” or claim any affiliation or association with Ocado or its employees.
#
# You are not authorised to use the name Ocado (or any of its trade names) or
# the names of any author or contributor in advertising or for publicity purposes
# pertaining to the distribution of this program, without the prior written
# authorisation of Ocado.
#
# Any propagation, distribution or conveyance of this program must include this
# copyright notice and these terms. You must not misrepresent the origins of this
# program; modified versions of the program must be marked as such and not
# identified as the original program.
from datetime import timedelta, datetime
from django.utils.timezone import utc
from django.test import TestCase
from hamcrest import *
from game.models import Level, Attempt
from game.views.scoreboard import StudentRow, scoreboard_data
from game.views.scoreboard_csv import scoreboard_csv_multiple_levels, scoreboard_csv_single_level
from portal.models import Class
from portal.tests.utils.classes import create_class_directly
from portal.tests.utils.student import create_school_student_directly
from portal.tests.utils.teacher import signup_teacher_directly
Headers = ['Class', 'Name', 'Total Score', 'Total Time', 'Started Levels %', 'Attempted levels %', 'Finished levels %']
class ScoreboardTestCase(TestCase):
def test_teacher_multiple_students_multiple_levels(self):
level_ids = ids_of_levels_named(["1", "2"])
level1 = Level.objects.get(name="1")
level2 = Level.objects.get(name="2")
clas, student, student2 = set_up_data()
create_attempt(student, level1, 10.5)
create_attempt(student2, level1, 2.3)
create_attempt(student2, level2, 16.7)
student_data, headers = scoreboard_data(Teacher(), level_ids, [clas.id])
assert_that(headers, equal_to(['Class', 'Name', 'Total Score', 'Total Time', 'Progress', u'Level 1', u'Level 2']))
assert_that(student_data, has_length(2))
assert_student_row(student_row=student_data[0],
class_name=clas.name,
student_name=student.user.user.first_name,
total_score=10.5,
total_time=timedelta(0),
progress=(0.0, 0.0, 50.0),
scores=[10.5, ''])
assert_student_row(student_row=student_data[1],
class_name=clas.name,
student_name=student2.user.user.first_name,
total_score=19.0,
total_time=timedelta(0),
progress=(0.0, 50.0, 50.0),
scores=[2.3, 16.7])
def test_teacher_multiple_students_single_level(self):
level_ids = ids_of_levels_named(["1"])
level1 = Level.objects.get(name="1")
clas, student, student2 = set_up_data()
create_attempt(student, level1, 10.5)
create_attempt(student2, level1, 2.3)
student_data, headers = scoreboard_data(Teacher(), level_ids, [clas.id])
assert_that(headers, equal_to(['Class', 'Name', 'Score', 'Total Time', 'Start Time', 'Finish Time']))
assert_that(student_data, has_length(2))
assert_student_row_single_level(student_row=student_data[0],
class_name=clas.name,
student_name=student.user.user.first_name,
total_score=10.5,
total_time=timedelta(0))
assert_student_row_single_level(student_row=student_data[1],
class_name=clas.name,
student_name=student2.user.user.first_name,
total_score=2.3,
total_time=timedelta(0))
def test_student_multiple_students_multiple_levels(self):
level_ids = ids_of_levels_named(["1", "2"])
level1 = Level.objects.get(name="1")
level2 = Level.objects.get(name="2")
clas, student, student2 = set_up_data(True)
create_attempt(student, level1, 10.5)
create_attempt(student2, level1, 2.3)
create_attempt(student2, level2, 16.7)
student_data, headers = scoreboard_data(Student(student), level_ids, [clas.id])
assert_that(headers, equal_to(['Class', 'Name', 'Total Score', 'Total Time', 'Progress', u'Level 1', u'Level 2']))
assert_that(student_data, has_length(2))
assert_student_row(student_row=student_data[0],
class_name=clas.name,
student_name=student.user.user.first_name,
total_score=10.5,
total_time=timedelta(0),
progress=(0.0, 0.0, 50.0),
scores=[10.5, ''])
assert_student_row(student_row=student_data[1],
class_name=clas.name,
student_name=student2.user.user.first_name,
total_score=19.0,
total_time=timedelta(0),
progress=(0.0, 50.0, 50.0),
scores=[2.3, 16.7])
def test_student_multiple_students_single_level(self):
level_ids = ids_of_levels_named(["2"])
level2 = Level.objects.get(name="2")
clas, student, student2 = set_up_data(True)
create_attempt(student, level2, 10.5)
create_attempt(student2, level2, 16.7)
student_data, headers = scoreboard_data(Student(student), level_ids, [clas.id])
assert_that(headers, equal_to(['Class', 'Name', 'Score', 'Total Time', 'Start Time', 'Finish Time']))
assert_that(student_data, has_length(2))
assert_student_row_single_level(student_row=student_data[0],
class_name=clas.name,
student_name=student.user.user.first_name,
total_score=10.5,
total_time=timedelta(0))
assert_student_row_single_level(student_row=student_data[1],
class_name=clas.name,
student_name=student2.user.user.first_name,
total_score=16.7,
total_time=timedelta(0))
def test_student_multiple_students_multiple_levels_cannot_see_classmates(self):
level_ids = ids_of_levels_named(["1", "2"])
level1 = Level.objects.get(name="1")
level2 = Level.objects.get(name="2")
clas, student, student2 = set_up_data()
create_attempt(student, level1, 10.5)
create_attempt(student2, level1, 2.3)
create_attempt(student2, level2, 16.7)
student_data, headers = scoreboard_data(Student(student), level_ids, [clas.id])
assert_that(headers, equal_to(['Class', 'Name', 'Total Score', 'Total Time', 'Progress', u'Level 1', u'Level 2']))
assert_that(student_data, has_length(1))
assert_student_row(student_row=student_data[0],
class_name=clas.name,
student_name=student.user.user.first_name,
total_score=10.5,
total_time=timedelta(0),
progress=(0.0, 0.0, 50.0),
scores=[10.5, ''])
class ScoreboardCsvTestCase(TestCase):
def test_multiple_levels(self):
levels = Level.objects.sorted_levels()
student_rows = [(self.student_row()), (self.student_row())]
response = scoreboard_csv_multiple_levels(student_rows, levels)
actual_header, actual_rows = self.actual_data(response.content)
expected_header = self.expected_header(levels)
expected_rows = self.expected_rows_multiple_levels(student_rows)
assert_that(actual_header, equal_to(expected_header))
assert_that(actual_rows, equal_to(expected_rows))
def test_single_level(self):
student_rows = [(self.student_row()), (self.student_row())]
response = scoreboard_csv_single_level(student_rows)
actual_header, actual_rows = self.actual_data(response.content)
expected_header = 'Class,Name,Score,Total Time,Start Time,Finish Time'
expected_rows = self.expected_rows_single_level(student_rows)
assert_that(actual_header, equal_to(expected_header))
assert_that(actual_rows, equal_to(expected_rows))
def expected_rows_single_level(self, student_rows):
return map(self.expected_row_single_level, student_rows) + [""]
def expected_rows_multiple_levels(self, student_rows):
return map(self.expected_row_multiple_levels, student_rows) + [""]
def student_row(self):
email, password = signup_teacher_directly()
_, class_name, access_code = create_class_directly(email)
_, _, student = create_school_student_directly(access_code)
total_time = timedelta(0, 30)
scores = [x for x in range(20)]
total_score = sum(scores)
progress = (0, 0, 0)
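        # 20 real scores padded with 89 blanks: the fixture assumes one score
        # column per level, 109 levels in total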
all_scores = scores + [""] * 89
row = StudentRow(student=student,
total_time=total_time,
total_score=total_score,
start_time=datetime.fromtimestamp(1435305072, tz=utc),
finish_time=datetime.fromtimestamp(1438305072, tz=utc),
progress=progress,
scores=all_scores,
class_field=Class(name="MyClass"))
return row
def expected_row_multiple_levels(self, student_row):
beginning = "%s,%s,190,0:00:30,0,0,0,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19," \
% (student_row.class_field.name, student_row.name)
padding = ','.join([""] * 89)
return beginning + padding
def expected_row_single_level(self, student_row):
return "%s,%s,190,0:00:30,2015-06-26 07:51:12+00:00,2015-07-31 01:11:12+00:00" % (
student_row.class_field.name, student_row.name)
def expected_header(self, levels):
level_strings = map(str, levels)
all_header_strings = Headers + level_strings
joined = ','.join(all_header_strings)
return joined
def actual_data(self, content):
split = content.split("\r\n")
header = split[0]
rows = split[1:]
return header, rows
class Student:
def __init__(self, student):
self.student = student
def is_student(self): return True
def is_teacher(self): return False
def is_independent_student(self): return False
class Teacher:
def is_student(self): return False
def is_teacher(self): return True
def is_independent_student(self): return False
def assert_student_row(student_row, class_name, student_name, total_score, total_time, progress, scores):
assert_that(student_row.class_field.name, equal_to(class_name))
assert_that(student_row.name, equal_to(student_name))
assert_that(student_row.total_score, equal_to(total_score))
assert_that(student_row.total_time, equal_to(total_time))
assert_that(student_row.progress, equal_to(progress))
assert_that(student_row.scores, equal_to(scores))
def assert_student_row_single_level(student_row, class_name, student_name, total_score, total_time):
assert_that(student_row.class_field.name, equal_to(class_name))
assert_that(student_row.name, equal_to(student_name))
assert_that(student_row.total_score, equal_to(total_score))
assert_that(student_row.total_time, equal_to(total_time))
def create_attempt(student, level, score):
attempt = Attempt.objects.create(finish_time=datetime.fromtimestamp(1435305072),
level=level,
student=student,
score=score,
is_best_attempt=True)
attempt.start_time=datetime.fromtimestamp(1435305072)
attempt.save()
def ids_of_levels_named(names):
levels = Level.objects.filter(name__in=names)
assert_that(len(levels), equal_to(len(names)))
level_ids = map(lambda x: x.id, levels)
return level_ids
def set_up_data(classmates_data_viewable=False):
email, password = signup_teacher_directly()
clas, class_name, access_code = create_class_directly(email)
if classmates_data_viewable:
clas.classmates_data_viewable = True
clas.save()
_, _, student = create_school_student_directly(access_code)
_, _, student2 = create_school_student_directly(access_code)
create_random_school_data()
return clas, student, student2
def create_random_school_data():
email, password = signup_teacher_directly()
clas, class_name, access_code = create_class_directly(email)
create_school_student_directly(access_code)
create_school_student_directly(access_code)
| agpl-3.0 | 6,419,585,294,995,314,000 | 41.090379 | 122 | 0.605805 | false | 3.750844 | true | false | false |
openhumanoids/oh-distro | software/models/common_components/multisense_sl/mit_modifications/multisense_sl.py | 1 | 1937 | import os
drc_base_path = os.getenv("DRC_BASE")
import sys
sys.path.append(os.path.join(drc_base_path, "software", "models",
"model_transformation"))
import mitUrdfUtils as mit
from jointNameMap import jointNameMap
from lxml import etree
import tempfile
from glob import glob
os.chdir(os.path.dirname(os.path.realpath(__file__)))
meshesDirectory = '../meshes'
original_urdf_path = "../multisense_sl_original.urdf"
urdf_path = "../multisense_sl.urdf"
no_joint_urdf_path = "../multisense_sl_no_joint.urdf"
convex_hull_urdf_path = "../multisense_sl_convex_hull.urdf"
no_collision_urdf_path = "../multisense_sl_no_collision.urdf"
# Convert meshes
for inFile in glob(os.path.join(meshesDirectory, "*.dae")):
mit.convertMeshTo(inFile, ".obj")
mit.convertMeshTo(inFile, ".wrl")
for inFile in glob(os.path.join(meshesDirectory, "*.obj")):
if "chull" not in inFile:
mit.createConvexHullMesh(inFile)
for inFile in glob(os.path.join(meshesDirectory, "*.wrl")):
if "chull" not in inFile:
mit.createConvexHullMesh(inFile)
# Expand all includes to allow us to appropriately change mesh filenames
tmp = tempfile.NamedTemporaryFile()
mit.xacro(original_urdf_path, tmp.name, includes_only=True,
recursive_includes=True)
# Load urdf
urdf = etree.parse(tmp.name)
# Replace package:// syntax
#mit.replacePackageWithPathInMeshPaths(urdf, "../common_components")
# Use MITDRC joint names and .obj meshes
mit.useObjMeshes(urdf)
mit.renameJoints(urdf, jointNameMap)
urdf.write(urdf_path, pretty_print=True)
# Generate no-joint urdf
mit.weldAllJoints(urdf)
urdf.write(no_joint_urdf_path, pretty_print=True)
# Generate no-joint, no-collision urdf
mit.removeAllCollisions(urdf)
urdf.write(no_collision_urdf_path, pretty_print=True)
# Generate convex hull urdf
mit.addCollisionsFromVisuals(urdf)
mit.useConvexHullMeshes(urdf)
urdf.write(convex_hull_urdf_path, pretty_print=True)
| bsd-3-clause | -8,525,078,679,496,667,000 | 29.265625 | 72 | 0.737223 | false | 2.861152 | false | false | false |