text
stringlengths 4
1.02M
| meta
dict |
---|---|
"""
sphinx.util
~~~~~~~~~~~
Utility functions for Sphinx.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import re
import sys
import shutil
import fnmatch
import tempfile
import posixpath
import traceback
import unicodedata
from os import path
from codecs import open, BOM_UTF8
from collections import deque
import docutils
from docutils.utils import relative_path
import jinja2
import sphinx
from sphinx.errors import PycodeError
from sphinx.util.pycompat import bytes
# import other utilities; partly for backwards compatibility, so don't
# prune unused ones indiscriminately
from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, walk, \
mtimes_of_files, movefile, copyfile, copytimes, make_filename, ustrftime
from sphinx.util.nodes import nested_parse_with_titles, split_explicit_title, \
explicit_title_re, caption_ref_re
from sphinx.util.matching import patfilter
# Generally useful regular expressions.
ws_re = re.compile(r'\s+')
url_re = re.compile(r'(?P<schema>.+)://.*')
# High-level utility functions.
def docname_join(basedocname, docname):
return posixpath.normpath(
posixpath.join('/' + basedocname, '..', docname))[1:]
def path_stabilize(filepath):
"normalize path separater and unicode string"
newpath = filepath.replace(os.path.sep, SEP)
if isinstance(newpath, unicode):
newpath = unicodedata.normalize('NFC', newpath)
return newpath
def get_matching_files(dirname, exclude_matchers=()):
"""Get all file names in a directory, recursively.
Exclude files and dirs matching some matcher in *exclude_matchers*.
"""
# dirname is a normalized absolute path.
dirname = path.normpath(path.abspath(dirname))
dirlen = len(dirname) + 1 # exclude final os.path.sep
for root, dirs, files in walk(dirname, followlinks=True):
relativeroot = root[dirlen:]
qdirs = enumerate(path_stabilize(path.join(relativeroot, dn))
for dn in dirs)
qfiles = enumerate(path_stabilize(path.join(relativeroot, fn))
for fn in files)
for matcher in exclude_matchers:
qdirs = [entry for entry in qdirs if not matcher(entry[1])]
qfiles = [entry for entry in qfiles if not matcher(entry[1])]
dirs[:] = sorted(dirs[i] for (i, _) in qdirs)
for i, filename in sorted(qfiles):
yield filename
def get_matching_docs(dirname, suffix, exclude_matchers=()):
"""Get all file names (without suffix) matching a suffix in a directory,
recursively.
Exclude files and dirs matching a pattern in *exclude_patterns*.
"""
suffixpattern = '*' + suffix
for filename in get_matching_files(dirname, exclude_matchers):
if not fnmatch.fnmatch(filename, suffixpattern):
continue
yield filename[:-len(suffix)]
class FilenameUniqDict(dict):
"""
A dictionary that automatically generates unique names for its keys,
interpreted as filenames, and keeps track of a set of docnames they
appear in. Used for images and downloadable files in the environment.
"""
def __init__(self):
self._existing = set()
def add_file(self, docname, newfile):
if newfile in self:
self[newfile][0].add(docname)
return self[newfile][1]
uniquename = path.basename(newfile)
base, ext = path.splitext(uniquename)
i = 0
while uniquename in self._existing:
i += 1
uniquename = '%s%s%s' % (base, i, ext)
self[newfile] = (set([docname]), uniquename)
self._existing.add(uniquename)
return uniquename
def purge_doc(self, docname):
for filename, (docs, unique) in self.items():
docs.discard(docname)
if not docs:
del self[filename]
self._existing.discard(unique)
def __getstate__(self):
return self._existing
def __setstate__(self, state):
self._existing = state
def copy_static_entry(source, targetdir, builder, context={},
exclude_matchers=(), level=0):
"""Copy a HTML builder static_path entry from source to targetdir.
Handles all possible cases of files, directories and subdirectories.
"""
if exclude_matchers:
relpath = relative_path(builder.srcdir, source)
for matcher in exclude_matchers:
if matcher(relpath):
return
if path.isfile(source):
target = path.join(targetdir, path.basename(source))
if source.lower().endswith('_t') and builder.templates:
# templated!
fsrc = open(source, 'r', encoding='utf-8')
fdst = open(target[:-2], 'w', encoding='utf-8')
fdst.write(builder.templates.render_string(fsrc.read(), context))
fsrc.close()
fdst.close()
else:
copyfile(source, target)
elif path.isdir(source):
if not path.isdir(targetdir):
os.mkdir(targetdir)
for entry in os.listdir(source):
if entry.startswith('.'):
continue
newtarget = targetdir
if path.isdir(path.join(source, entry)):
newtarget = path.join(targetdir, entry)
copy_static_entry(path.join(source, entry), newtarget,
builder, context, level=level+1,
exclude_matchers=exclude_matchers)
_DEBUG_HEADER = '''\
# Sphinx version: %s
# Python version: %s
# Docutils version: %s %s
# Jinja2 version: %s
# Loaded extensions:
'''
def save_traceback(app):
"""Save the current exception's traceback in a temporary file."""
import platform
exc = traceback.format_exc()
fd, path = tempfile.mkstemp('.log', 'sphinx-err-')
os.write(fd, (_DEBUG_HEADER %
(sphinx.__version__,
platform.python_version(),
docutils.__version__, docutils.__version_details__,
jinja2.__version__)).encode('utf-8'))
if app is not None:
for extname, extmod in app._extensions.iteritems():
os.write(fd, ('# %s from %s\n' % (
extname, getattr(extmod, '__file__', 'unknown'))
).encode('utf-8'))
os.write(fd, exc.encode('utf-8'))
os.close(fd)
return path
def get_module_source(modname):
"""Try to find the source code for a module.
Can return ('file', 'filename') in which case the source is in the given
file, or ('string', 'source') which which case the source is the string.
"""
if modname not in sys.modules:
try:
__import__(modname)
except Exception, err:
raise PycodeError('error importing %r' % modname, err)
mod = sys.modules[modname]
filename = getattr(mod, '__file__', None)
loader = getattr(mod, '__loader__', None)
if loader and getattr(loader, 'get_filename', None):
try:
filename = loader.get_filename(modname)
except Exception, err:
raise PycodeError('error getting filename for %r' % filename, err)
if filename is None and loader:
try:
return 'string', loader.get_source(modname)
except Exception, err:
raise PycodeError('error getting source for %r' % modname, err)
if filename is None:
raise PycodeError('no source found for module %r' % modname)
filename = path.normpath(path.abspath(filename))
lfilename = filename.lower()
if lfilename.endswith('.pyo') or lfilename.endswith('.pyc'):
filename = filename[:-1]
if not path.isfile(filename) and path.isfile(filename + 'w'):
filename += 'w'
elif not (lfilename.endswith('.py') or lfilename.endswith('.pyw')):
raise PycodeError('source is not a .py file: %r' % filename)
if not path.isfile(filename):
raise PycodeError('source file is not present: %r' % filename)
return 'file', filename
# a regex to recognize coding cookies
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def detect_encoding(readline):
"""Like tokenize.detect_encoding() from Py3k, but a bit simplified."""
def read_or_stop():
try:
return readline()
except StopIteration:
return None
def get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace('_', '-')
if enc == 'utf-8' or enc.startswith('utf-8-'):
return 'utf-8'
if enc in ('latin-1', 'iso-8859-1', 'iso-latin-1') or \
enc.startswith(('latin-1-', 'iso-8859-1-', 'iso-latin-1-')):
return 'iso-8859-1'
return orig_enc
def find_cookie(line):
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = _coding_re.findall(line_string)
if not matches:
return None
return get_normal_name(matches[0])
default = sys.getdefaultencoding()
first = read_or_stop()
if first and first.startswith(BOM_UTF8):
first = first[3:]
default = 'utf-8-sig'
if not first:
return default
encoding = find_cookie(first)
if encoding:
return encoding
second = read_or_stop()
if not second:
return default
encoding = find_cookie(second)
if encoding:
return encoding
return default
# Low-level utility functions and classes.
class Tee(object):
"""
File-like object writing to two streams.
"""
def __init__(self, stream1, stream2):
self.stream1 = stream1
self.stream2 = stream2
def write(self, text):
self.stream1.write(text)
self.stream2.write(text)
def flush(self):
if hasattr(self.stream1, 'flush'):
self.stream1.flush()
if hasattr(self.stream2, 'flush'):
self.stream2.flush()
def parselinenos(spec, total):
"""Parse a line number spec (such as "1,2,4-6") and return a list of
wanted line numbers.
"""
items = list()
parts = spec.split(',')
for part in parts:
try:
begend = part.strip().split('-')
if len(begend) > 2:
raise ValueError
if len(begend) == 1:
items.append(int(begend[0])-1)
else:
start = (begend[0] == '') and 0 or int(begend[0])-1
end = (begend[1] == '') and total or int(begend[1])
items.extend(xrange(start, end))
except Exception:
raise ValueError('invalid line number spec: %r' % spec)
return items
def force_decode(string, encoding):
"""Forcibly get a unicode string out of a bytestring."""
if isinstance(string, bytes):
try:
if encoding:
string = string.decode(encoding)
else:
# try decoding with utf-8, should only work for real UTF-8
string = string.decode('utf-8')
except UnicodeError:
# last resort -- can't fail
string = string.decode('latin1')
return string
class attrdict(dict):
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, val):
self[key] = val
def __delattr__(self, key):
del self[key]
def rpartition(s, t):
"""Similar to str.rpartition from 2.5, but doesn't return the separator."""
i = s.rfind(t)
if i != -1:
return s[:i], s[i+len(t):]
return '', s
def split_into(n, type, value):
"""Split an index entry into a given number of parts at semicolons."""
parts = map(lambda x: x.strip(), value.split(';', n-1))
if sum(1 for part in parts if part) < n:
raise ValueError('invalid %s index entry %r' % (type, value))
return parts
def split_index_msg(type, value):
# new entry types must be listed in directives/other.py!
result = []
try:
if type == 'single':
try:
result = split_into(2, 'single', value)
except ValueError:
result = split_into(1, 'single', value)
elif type == 'pair':
result = split_into(2, 'pair', value)
elif type == 'triple':
result = split_into(3, 'triple', value)
elif type == 'see':
result = split_into(2, 'see', value)
elif type == 'seealso':
result = split_into(2, 'see', value)
except ValueError:
pass
return result
def format_exception_cut_frames(x=1):
"""Format an exception with traceback, but only the last x frames."""
typ, val, tb = sys.exc_info()
#res = ['Traceback (most recent call last):\n']
res = []
tbres = traceback.format_tb(tb)
res += tbres[-x:]
res += traceback.format_exception_only(typ, val)
return ''.join(res)
class PeekableIterator(object):
"""
An iterator which wraps any iterable and makes it possible to peek to see
what's the next item.
"""
def __init__(self, iterable):
self.remaining = deque()
self._iterator = iter(iterable)
def __iter__(self):
return self
def next(self):
"""Return the next item from the iterator."""
if self.remaining:
return self.remaining.popleft()
return self._iterator.next()
def push(self, item):
"""Push the `item` on the internal stack, it will be returned on the
next :meth:`next` call.
"""
self.remaining.append(item)
def peek(self):
"""Return the next item without changing the state of the iterator."""
item = self.next()
self.push(item)
return item
| {
"content_hash": "c157fe61c7af96e486e50d916e266701",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 79,
"avg_line_length": 31.799086757990867,
"alnum_prop": 0.5955628948879954,
"repo_name": "SurfasJones/icecream-info",
"id": "a20fc19be8c0684dc5fe5bd54e0e64b2d1f4521b",
"size": "13952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icecream/lib/python2.7/site-packages/sphinx/util/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "288937"
},
{
"name": "JavaScript",
"bytes": "589933"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "18137514"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "Shell",
"bytes": "10274"
},
{
"name": "TeX",
"bytes": "56626"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem298.py
#
# Selective Amnesia
# =================
# Published on Friday, 25th June 2010, 09:00 pm
#
# table.p298, table.p298 th, table.p298 td { border-width: 1px 1px 1px 1px;
# border-style: solid solid solid solid; border-color: black black black
# black; text-align:center; -moz-border-radius: 0px 0px 0px 0px; }
# table.p298 { border-spacing: 1px; border-collapse: separate;
# background-color: rgb(255,255,255); } table.p298 th, table.p298 td {
# padding: 1px 6px 1px 6px; } table.p298 th { background-color:
# rgb(200,220,250); } table.p298 td { background-color: rgb(255,255,255); }
# Larry and Robin play a memory game involving of a sequence of random numbers
# between 1 and 10, inclusive, that are called out one at a time. Each player
# can remember up to 5 previous numbers. When the called number is in a
# player's memory, that player is awarded a point. If it's not, the player adds
# the called number to his memory, removing another number if his memory is
# full. Both players start with empty memories. Both players always add new
# missed numbers to their memory but use a different strategy in deciding which
# number to remove: Larry's strategy is to remove the number that hasn't been
# called in the longest time. Robin's strategy is to remove the number that's
# been in the memory the longest time. Example game: Turn Callednumber
# Larry'smemory Larry'sscore Robin'smemory Robin'sscore 1 1 1 0 1 0 2 2 1,2
# 0 1,2 0 3 4 1,2,4 0 1,2,4 0 4 6 1,2,4,6 0 1,2,4,6 0 5 1 1,2,4,6 1
# 1,2,4,6 1 6 8 1,2,4,6,8 1 1,2,4,6,8 1 7 10 1,4,6,8,10 1 2,4,6,8,10 1 8
# 2 1,2,6,8,10 1 2,4,6,8,10 2 9 4 1,2,4,8,10 1 2,4,6,8,10 3 10 1 1,2,4,8,10
# 2 1,4,6,8,10 3 Denoting Larry's score by L and Robin's score by R, what is
# the expected value of |L-R| after 50 turns? Give your answer rounded to eight
# decimal places using the format x.xxxxxxxx .
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
| {
"content_hash": "8fb84088e4b0d3e97509f652924e61a7",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 52.53846153846154,
"alnum_prop": 0.6837481698389458,
"repo_name": "olduvaihand/ProjectEuler",
"id": "a6f7d70a34b42d708cc2d749e0df978189abbd97",
"size": "2051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/problem298.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "0"
},
{
"name": "Python",
"bytes": "422751"
}
],
"symlink_target": ""
} |
"""
Defines tasks for uwsgi to run at regular intervals
"""
import datetime
import os
import uwsgidecorators
from flask import Flask
import model
from database import db_session
EXECUTOR_TIMEOUT_MINS = 3
@uwsgidecorators.timer(15, target="spooler")
def reset_overdue_runs(signum):
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv(
"CODE_COURT_DB_URI"
) or "sqlite:////tmp/code_court.db"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# reset overdue runs
overdue_runs = model.Run.query.filter(
model.Run.finished_execing_time == None
).filter(
model.Run.started_execing_time != None
).filter(
(datetime.datetime.utcnow() - datetime.timedelta(minutes=EXECUTOR_TIMEOUT_MINS))
> model.Run.started_execing_time
).all()
for run in overdue_runs:
run.started_execing_time = None
db_session.commit()
| {
"content_hash": "4c67ffabb435b6ec5ac0ba86c2cbc13e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 88,
"avg_line_length": 24.263157894736842,
"alnum_prop": 0.6767895878524945,
"repo_name": "BenDoan/code_court",
"id": "69e20d3d5679dbb31d9e01cec8b38eb10f6ff3dd",
"size": "922",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code_court/courthouse/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "119040"
},
{
"name": "Dockerfile",
"bytes": "1551"
},
{
"name": "HTML",
"bytes": "524161"
},
{
"name": "JavaScript",
"bytes": "1914634"
},
{
"name": "Python",
"bytes": "189157"
},
{
"name": "Shell",
"bytes": "2628"
},
{
"name": "Vue",
"bytes": "25738"
}
],
"symlink_target": ""
} |
import logging
logging.basicConfig(level=logging.ERROR)
try:
import cmt.ee_authenticate
except:
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
import cmt.ee_authenticate
import matplotlib
#matplotlib.use('tkagg')
import sys
import os
import ee
import cmt.domain
import cmt.mapclient_qt
import cmt.util.gui_util
'''
GUI related utilities too small for their own file
'''
def visualizeDomain(domain, show=True):
'''Draw all the sensors and ground truth from a domain'''
cmt.mapclient_qt.centerMap(domain.center[0], domain.center[1], 11)
for s in domain.sensor_list:
apply(cmt.mapclient_qt.addToMap, s.visualize(show=show))
if domain.ground_truth != None:
cmt.mapclient_qt.addToMap(domain.ground_truth.mask(domain.ground_truth), {}, 'Ground Truth', False) | {
"content_hash": "85c10f07b31b5a2c414eca9630757eae",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 107,
"avg_line_length": 26.363636363636363,
"alnum_prop": 0.7183908045977011,
"repo_name": "nasa/CrisisMappingToolkit",
"id": "4d65c3bf094c9208e5e384172c37a8da837407de",
"size": "1781",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cmt/util/gui_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2150"
},
{
"name": "JavaScript",
"bytes": "4257"
},
{
"name": "Jupyter Notebook",
"bytes": "18106"
},
{
"name": "Python",
"bytes": "849303"
}
],
"symlink_target": ""
} |
import sys
from QueryPublic import QueryPublicBitfinex
from Trade import Trade, TradeListBitfinex
from DBConnections import BitfinexDBConnection
from time import sleep
from datetime import datetime, timedelta
query = QueryPublicBitfinex()
with BitfinexDBConnection() as dbconn:
try:
cursor = dbconn.cursor()
cursor.execute("SELECT timestamp FROM tradesbtcusd ORDER BY tid DESC LIMIT 1")
lasttimestamp = cursor.fetchone()[0]
sinceTime = query.unixtime2datetime(lasttimestamp)
except:
sinceTime = datetime(1990, 1, 1, 1, 0, 0, 0)
lasttimestamp = query.datetime2unixtime(sinceTime)
finally:
sinceTime = datetime(1990, 1, 1, 1, 0, 0, 0)
lasttimestamp = query.datetime2unixtime(sinceTime)
cursor.close()
while sinceTime < datetime.today():
try:
print(sinceTime)
#trades = query.tradesOneThousand(pair = 'ethusd', since_timestamp = lasttimestamp)
trades = query.trades(pair = 'ethusd', parameters = {'timestamp': lasttimestamp, 'limit_trades': 10})
tradeList = TradeListBitfinex(trades)
timestamp = tradeList[-1].timestamp
#sinceTime = query.unixtime2datetime(timestamp)
sleep(10)
tradeList.saveInDB()
lasttimestamp = int(timestamp) + 1
sinceTime = query.unixtime2datetime(lasttimestamp)
except (KeyboardInterrupt, SystemExit):
print("KeyboardInterrrupt, Bye!")
sys.exit()
except Exception as ex:
print(type(ex))
print(ex)
#sys.exit()
| {
"content_hash": "2a70aaf7f1b127fae6c60f2b09b4a332",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 109,
"avg_line_length": 33.73913043478261,
"alnum_prop": 0.6746134020618557,
"repo_name": "sebschre/cryptoAssetManager",
"id": "388969ec7e2c7a15d88e0f350d420e47052a5f5a",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fillbitfinexDB.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25204"
}
],
"symlink_target": ""
} |
"""Parses .swood files."""
# See https://github.com/milkey-mouse/swood/issues/1
from os.path import normpath, abspath, dirname, join
from collections import defaultdict
from enum import Enum
from PIL.Image import BILINEAR
from .sample import Sample
from .instruments import *
from . import complain
import zipfile
import string
# user-friendly repr for zip-loaded samples
zipfile.ZipExtFile.__repr__ = (
lambda self: "<zipped WAV file '{}'>".format(self.name))
class SoundFontSyntaxError(complain.ComplainToUser, SyntaxError):
"""Tells the user when something is wrong with the config file."""
def __init__(self, line, line_text, error_desc):
self.line = line
self.line_text = line_text
self.error_desc = error_desc
def __str__(self):
return "Syntax error on line {}:\n".format(self.line + 1) + \
self.line_text + "\n" + self.error_desc
class Instrument:
"""Holds information about a MIDI instrument or track."""
def __init__(self, fullclip=False, noscale=False, sample=None, volume=0.9, pan=0.5, pitch=None):
self.fullclip = fullclip
self.noscale = noscale
self.sample = sample
self.volume = volume
self.pitch = pitch
self.pan = pan
def __hash__(self):
if isinstance(self.sample, Sample):
return hash((self.noscale, self.sample.filename, self.volume, self.pan))
else:
return hash((self.noscale, None, self.volume, self.pan))
def __eq__(self, other):
return hash(self) == hash(other)
def __repr__(self):
return "Instrument(noscale={}, sample={}, volume={}, pan={})".format(self.noscale, self.sample, self.volume, self.pan)
class SoundFont:
"""Parses and holds information about .swood files."""
def __init__(self, filename, arguments, binsize=8192, pbar=True):
self.arguments = arguments
self._binsize = binsize
self.pbar = pbar
self.load_instruments()
self.samples = set()
self.channels = {}
if isinstance(filename, str):
self.file = open(filename)
elif filename is not None:
self.file = filename
if filename is not None:
if zipfile.is_zipfile(self.file):
self.file = zipfile.ZipFile(self.file)
self.load_zip()
self.load_samples_from_zip()
else:
self.load_ini()
self.load_samples_from_txt()
def load_instruments(self):
self.instruments = defaultdict(list)
self.percussion = defaultdict(list)
for names in instruments:
new_instrument = Instrument()
for name in names:
if isinstance(name, str):
name = name.lower()
self.instruments[name].append(new_instrument)
self.instruments["non-percussion"].append(new_instrument)
self.instruments["all"].append(new_instrument)
# percussion is a bit weird as it doesn't actually use MIDI instruments;
# any event on channel 10 is percussion, and the actual instrument is
# denoted by the note number (with valid #s ranging 35-81).
for idx, *names in percussion:
new_instrument = Instrument(fullclip=True, noscale=True)
self.percussion[idx].append(new_instrument)
for name in names:
if isinstance(name, str):
name = name.lower()
self.percussion[name].append(new_instrument)
self.percussion["percussion"].append(new_instrument)
self.instruments["all"].append(new_instrument)
def load_ini(self):
self.file.seek(0)
if "b" in self.file.mode:
self.parse(self.file.read().decode("utf-8"))
else:
self.parse(self.file.read())
def load_zip(self):
"""Parses a ZIP of a .swood INI file and its samples without extracting."""
try:
valid_extensions = {"swood", "ini", "txt"}
ini_path = next(fn for fn in self.file.namelist()
if fn.split(".")[-1] in valid_extensions)
except StopIteration:
raise complain.ComplainToUser(
"Couldn't find config file in ZIP. Be sure it ends in .ini, .swood, or .txt.'")
config_txt = self.file.read(ini_path)
self.parse(config_txt.decode("utf-8"))
def strip_comments(self, line):
hash_index = line.find("#")
if hash_index == -1:
return line.strip(string.whitespace + "\n")
else:
return line[:hash_index].strip(string.whitespace + "\n")
def parse(self, config):
affected_instruments = []
parse_arguments = None
for linenum, raw_text in enumerate(config.replace("\r\n", "\n").split("\n")):
text = self.strip_comments(raw_text)
if text == "":
continue
elif text.startswith("[") and text.endswith("]"):
header_name = text[1:-1].lower()
if header_name in ("arguments", "args", "options"):
affected_instruments = []
parse_arguments = True
elif header_name in ("default", "all"):
affected_instruments = self.instruments["all"]
parse_arguments = False
elif header_name in self.instruments:
affected_instruments = self.instruments[header_name]
parse_arguments = False
elif header_name in self.percussion:
affected_instruments = self.percussion[header_name]
parse_arguments = False
elif header_name in ("non percussion", "nonpercussion"):
affected_instruments = self.percussion["non-percussion"]
parse_arguments = False
elif len(header_name) == 3 and header_name.startswith("p"):
try:
affected_instruments = \
self.percussion[int(header_name[1:])]
parse_arguments = False
except (ValueError, KeyError):
raise SoundFontSyntaxError(
linenum, raw_text, "Header not recognized.")
else:
raise SoundFontSyntaxError(
linenum, raw_text, "Header not recognized.")
elif "=" in text:
parts = text.split("=")
name = parts[0].strip()
value = parts[1]
if parse_arguments is None:
raise SoundFontSyntaxError(
linenum, raw_text,
"No header specified. For defaults, specify '[default]' on the line before."
)
elif parse_arguments:
possible_args = {
"transpose": int,
"speed": float,
"cachesize": float,
"binsize": int,
}
if name in possible_args:
try:
self.arguments[name] = possible_args[name](value)
except ValueError:
raise SoundFontSyntaxError(
linenum, raw_text, "'{}' is not a valid value for '{}'".format(value, name))
elif name in ("file", "sample"):
for instrument in affected_instruments:
if value.lower() in ("", "none", "null"):
instrument.sample = None
else:
instrument.sample = value
self.samples.add(value)
elif name in ("volume", "vol"):
for instrument in affected_instruments:
try:
instrument.volume = int(value) / 100
if instrument.volume > 0.95:
print(
"Warning: Volumes higher than 95 may cause clipping or other glitches", file=sys.stderr)
except ValueError:
raise SoundFontSyntaxError(
linenum, raw_text, "'{}' is not a valid number".format(value))
elif name == "pan":
for instrument in affected_instruments:
try:
pan = float(value)
if pan < 0 or pan > 1:
raise SoundFontSyntaxError(
linenum, raw_text, "'{}' is outside of the allowed 0.0-1.0 range for pan".format(value))
else:
instrument.pan = pan
except ValueError:
raise SoundFontSyntaxError(
linenum, raw_text, "'{}' is not a valid number".format(value))
elif name == "pitch":
for instrument in affected_instruments:
try:
pitch = float(value)
if pan < 0:
raise SoundFontSyntaxError(
linenum, raw_text, "'{}' is below 0".format(value))
else:
instrument.pitch = pitch
except ValueError:
raise SoundFontSyntaxError(
linenum, raw_text, "'{}' is not a valid number".format(value))
elif name == "fullclip":
for instrument in affected_instruments:
if value.lower() in ("true", "1"):
instrument.fullclip = True
elif value.lower() in ("false", "0"):
instrument.fullclip = False
else:
raise SoundFontSyntaxError(linenum, raw_text,
"fullclip must be 'True' or 'False'; '{}' is invalid".format(value))
elif name == "noscale":
for instrument in affected_instruments:
if value.lower() in ("true", "1"):
instrument.noscale = True
elif value.lower() in ("false", "0"):
instrument.noscale = False
else:
raise SoundFontSyntaxError(linenum, raw_text,
"noscale must be 'True' or 'False'; '{}' is invalid".format(value))
else:
raise SoundFontSyntaxError(
linenum, raw_text, "'{}' is not a valid property".format(name))
def wavpath(self, relpath):
# only works on non-zip files
return normpath(join(dirname(abspath(self.file.name)), relpath))
def load_samples_from_txt(self):
loaded_samples = {}
for fn in self.samples:
loaded_samples[fn] = Sample(
self.wavpath(fn),
self._binsize,
pbar=self.pbar
)
self.add_samples(loaded_samples)
def load_samples_from_zip(self):
loaded_samples = {}
for fn in self.samples:
try:
with self.file.open(fn) as zipped_wav:
loaded_samples[fn] = Sample(
zipped_wav, self._binsize, pbar=self.pbar)
except KeyError: # file not found in zip
raise complain.ComplainToUser(
"Sample '{}' not found in config ZIP")
self.add_samples(loaded_samples)
def add_samples(self, loaded_samples):
for instruments in self.instruments.values():
for instrument in instruments:
if isinstance(instrument.sample, str):
real_instrument = loaded_samples[instrument.sample]
real_instrument.fundamental_freq = instrument.pitch
instrument.sample = real_instrument
self.framerate = max(s.framerate for s in loaded_samples.values())
self.channels = max(s.channels for s in loaded_samples.values())
self.length = max(len(s) for s in loaded_samples.values())
for samp in loaded_samples.values():
multiplier = self.framerate / samp.framerate
samp._img = samp.img.resize(
(int(round(samp.img.size[0] * multiplier)), samp.channels),
resample=BILINEAR)
samp.framerate = self.framerate
for instruments in self.instruments.values():
for instrument in instruments:
if isinstance(instrument.sample, str):
instrument.sample = loaded_samples[instrument.sample]
if self.channels != 2:
warned_pan = False
for instruments in self.instruments.values():
for instrument in instruments:
if instrument.pan != 0.5:
instrument.pan = 0.5
if not warned_pan:
print(
"Warning: Audio has >2 channels; pan ignored", file=sys.stderr)
warned_pan = True
def __len__(self):
return self.length
def DefaultFont(samp):
sf = SoundFont(None, None, pbar=samp.pbar)
sf.framerate = samp.framerate
sf.channels = samp.channels
sf.length = samp.length
for instruments in sf.instruments.values():
for instrument in instruments:
instrument.sample = samp
return sf
| {
"content_hash": "5cb94d9046b315fa56da9967650b2b7d",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 126,
"avg_line_length": 43.64596273291925,
"alnum_prop": 0.5062615625444713,
"repo_name": "milkey-mouse/swood",
"id": "830c27064a98b307b9c9f341da7c1b6ad637040e",
"size": "14054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swood/soundfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "NSIS",
"bytes": "5748"
},
{
"name": "Python",
"bytes": "115815"
}
],
"symlink_target": ""
} |
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Named tuple representation
'DecimalTuple',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
'FloatOperation',
# Exceptional conditions that trigger InvalidOperation
'DivisionImpossible', 'InvalidContext', 'ConversionSyntax', 'DivisionUndefined',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext',
# Limits for the C version for compatibility
'MAX_PREC', 'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',
# C version: compile time choice that enables the thread local context
'HAVE_THREADS'
]
__xname__ = __name__ # sys.modules lookup (--without-threads)
__name__ = 'decimal' # For pickling
__version__ = '1.70' # Highest version of the spec this complies with
# See http://speleotrove.com/decimal/
__libmpdec_version__ = "2.4.2" # compatible libmpdec version
import math as _math
import numbers as _numbers
import sys
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
MAX_PREC = 999999999999999999
MAX_EMAX = 999999999999999999
MIN_EMIN = -999999999999999999
else:
MAX_PREC = 425000000
MAX_EMAX = 425000000
MIN_EMIN = -425000000
MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
If an exception derives from another exception besides this (such as
Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
called if the others are present. This isn't actually used for
anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if a string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
or operation (or sequence of operations) yielded a subnormal result.
"""
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
class FloatOperation(DecimalException, TypeError):
"""Enable stricter semantics for mixing floats and Decimals.
If the signal is not trapped (default), mixing floats and Decimals is
permitted in the Decimal() constructor, context.create_decimal() and
all comparison operators. Both conversion and comparisons are exact.
Any occurrence of a mixed operation is silently recorded by setting
FloatOperation in the context flags. Explicit conversions with
Decimal.from_float() or context.create_decimal_from_float() do not
set the flag.
Otherwise (the signal is trapped), only equality comparisons and explicit
conversions are silent. All other mixed operations raise FloatOperation.
"""
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal, FloatOperation]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################
# The getcontext() and setcontext() function manage access to a thread-local
# current context.
import contextvars
_current_context_var = contextvars.ContextVar('decimal_context')
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _current_context_var.get()
except LookupError:
context = Context()
_current_context_var.set(context)
return context
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_current_context_var.set(context)
del contextvars # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print(getcontext().prec)
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print(ctx.prec)
...
30
>>> with localcontext(ExtendedContext):
... print(getcontext().prec)
...
9
>>> print(getcontext().prec)
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, str):
m = _parser(value.strip().replace("_", ""))
if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
if m.group('sign') == "-":
self._sign = 1
else:
self._sign = 0
intpart = m.group('int')
if intpart is not None:
# finite number
fracpart = m.group('frac') or ''
exp = int(m.group('exp') or '0')
self._int = str(int(intpart+fracpart))
self._exp = exp - len(fracpart)
self._is_special = False
else:
diag = m.group('diag')
if diag is not None:
# NaN
self._int = str(int(diag or '0')).lstrip('0')
if m.group('signal'):
self._exp = 'N'
else:
self._exp = 'n'
else:
# infinity
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
# From an integer
if isinstance(value, int):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], int) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, int) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], int):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
if context is None:
context = getcontext()
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are "
"enabled")
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
@classmethod
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
sign = 0 if f >= 0 else 1
k = 0
coeff = str(abs(f))
elif isinstance(f, float):
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
coeff = str(n*5**k)
else:
raise TypeError("argument must be int or float.")
result = _dec_from_triple(sign, coeff, -k)
if cls is Decimal:
return result
else:
return cls(result)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
self, other = _convert_for_comparison(self, other, equality_op=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __lt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
self, other = _convert_for_comparison(self, other)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compare self to other. Return a decimal value:
a or b is a NaN ==> Decimal('NaN')
a < b ==> Decimal('-1')
a == b ==> Decimal('0')
a > b ==> Decimal('1')
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# In order to make sure that the hash of a Decimal instance
# agrees with the hash of a numerically equal integer, float
# or Fraction, we follow the rules for numeric hashes outlined
# in the documentation. (See library docs, 'Built-in Types').
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
return _PyHASH_NAN
else:
if self._sign:
return -_PyHASH_INF
else:
return _PyHASH_INF
if self._exp >= 0:
exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
else:
exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
ans = hash_ if self >= 0 else -hash_
return -2 if ans == -1 else ans
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def as_integer_ratio(self):
"""Express a finite Decimal instance in the form n / d.
Returns a pair (n, d) of integers. When called on an infinity
or NaN, raises OverflowError or ValueError respectively.
>>> Decimal('3.14').as_integer_ratio()
(157, 50)
>>> Decimal('-123e5').as_integer_ratio()
(-12300000, 1)
>>> Decimal('0.00').as_integer_ratio()
(0, 1)
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot convert NaN to integer ratio")
else:
raise OverflowError("cannot convert Infinity to integer ratio")
if not self:
return 0, 1
# Find n, d in lowest terms such that abs(self) == n / d;
# we'll deal with the sign later.
n = int(self._int)
if self._exp >= 0:
# self is an integer.
n, d = n * 10**self._exp, 1
else:
# Find d2, d5 such that abs(self) = n / (2**d2 * 5**d5).
d5 = -self._exp
while d5 > 0 and n % 5 == 0:
n //= 5
d5 -= 1
# (n & -n).bit_length() - 1 counts trailing zeros in binary
# representation of n (provided n is nonzero).
d2 = -self._exp
shift2 = min((n & -n).bit_length() - 1, d2)
if shift2:
n >>= shift2
d2 -= shift2
d = 5**d5 << d2
if self._sign:
n = -n
return n, d
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
def to_eng_string(self, context=None):
"""Convert to a string, using engineering notation if an exponent is needed.
Engineering notation has an exponent which is a multiple of 3. This
can leave up to 3 digits to the left of the decimal place and may
require the addition of either one or two trailing zeros.
"""
return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
Rounds the number (if more than precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) cause InvalidOperation errors.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative, in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
Remainder nearest to 0- abs(remainder-near) <= other/2
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
@property
def real(self):
return self
@property
def imag(self):
return Decimal(0)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if clamp==0, and between Etiny and Etop if clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context.clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if clamp == 1 and self has too few digits
if context.clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
def __round__(self, n=None):
"""Round self to the nearest integer, or to a given precision.
If only one argument is supplied, round a finite Decimal
instance self to the nearest integer. If self is infinite or
a NaN then a Python exception is raised. If self is finite
and lies exactly halfway between two integers then it is
rounded to the integer with even last digit.
>>> round(Decimal('123.456'))
123
>>> round(Decimal('-456.789'))
-457
>>> round(Decimal('-3.0'))
-3
>>> round(Decimal('2.5'))
2
>>> round(Decimal('3.5'))
4
>>> round(Decimal('Inf'))
Traceback (most recent call last):
...
OverflowError: cannot round an infinity
>>> round(Decimal('NaN'))
Traceback (most recent call last):
...
ValueError: cannot round a NaN
If a second argument n is supplied, self is rounded to n
decimal places using the rounding mode for the current
context.
For an integer n, round(self, -n) is exactly equivalent to
self.quantize(Decimal('1En')).
>>> round(Decimal('123.456'), 0)
Decimal('123')
>>> round(Decimal('123.456'), 2)
Decimal('123.46')
>>> round(Decimal('123.456'), -2)
Decimal('1E+2')
>>> round(Decimal('-Infinity'), 37)
Decimal('NaN')
>>> round(Decimal('sNaN123'), 0)
Decimal('NaN123')
"""
if n is not None:
# two-argument form: use the equivalent quantize call
if not isinstance(n, int):
raise TypeError('Second argument to round should be integral')
exp = _dec_from_triple(0, '1', -n)
return self.quantize(exp)
# one-argument form
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
third = _convert_other(third, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
return product.__add__(third, context)
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
other = _convert_other(other)
if other is NotImplemented:
return other
modulo = _convert_other(modulo)
if modulo is NotImplemented:
return modulo
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in range(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
# 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
# if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1 << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context.clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
def quantize(self, exp, rounding=None, context=None):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
def same_quantum(self, other, context=None):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other, context=None):
"""Compares self to other using the abstract representations.
This is not like the standard compare, which use their numerical
value. Note that a total ordering is defined for all possible abstract
representations.
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
def compare_total_mag(self, other, context=None):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other, context=None):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
In other words, compute r such that self.ln() >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
In other words, find r such that self.log10() >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
For being logical, it must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits must all be
either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is number, then the
# number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
# PEP 3101 support. the _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
if spec['type'] == '%':
body += '%'
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None, traps=None,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self.clamp = clamp if clamp is not None else dc.clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
self.traps = dict((s, int(s in traps)) for s in _signals + traps)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals + flags)
else:
self.flags = flags
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
def _set_signal_dict(self, name, d):
if not isinstance(d, dict):
raise TypeError("%s must be a signal dict" % d)
for key in d:
if not key in _signals:
raise KeyError("%s is not a valid signal dict" % d)
for key in _signals:
if not key in d:
raise KeyError("%s is not a valid signal dict" % d)
return object.__setattr__(self, name, d)
def __setattr__(self, name, value):
if name == 'prec':
return self._set_integer_check(name, value, 1, 'inf')
elif name == 'Emin':
return self._set_integer_check(name, value, '-inf', 0)
elif name == 'Emax':
return self._set_integer_check(name, value, 0, 'inf')
elif name == 'capitals':
return self._set_integer_check(name, value, 0, 1)
elif name == 'clamp':
return self._set_integer_check(name, value, 0, 1)
elif name == 'rounding':
if not value in _rounding_modes:
# raise TypeError even for strings to have consistency
# among various implementations.
raise TypeError("%s: invalid rounding mode" % value)
return object.__setattr__(self, name, value)
elif name == 'flags' or name == 'traps':
return self._set_signal_dict(name, value)
elif name == '_ignored_flags':
return object.__setattr__(self, name, value)
else:
raise AttributeError(
"'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, self.flags, self.traps,
self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp,
self.flags.copy(), self.traps.copy(),
self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it sets the flag, then, if the corresponding
trap_enabler is set, it reraises the exception. Otherwise, it returns
the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, str) and (num != num.strip() or '_' in num):
return self._raise_error(ConversionSyntax,
"trailing or leading whitespace and "
"underscores are not permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self.clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
decimal.Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical()
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
This is not like the standard compare, which use their numerical
value. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
a =_convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
if not isinstance(a, Decimal):
raise TypeError("is_canonical requires a Decimal as an argument.")
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
-sNaN
-NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
def scaleb (self, a, b):
"""Returns the first operand after adding the second value its exp.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Convert to a string, using engineering notation if an exponent is needed.
Engineering notation has an exponent which is a multiple of 3. This
can leave up to 3 digits to the left of the decimal place and may
require the addition of either one or two trailing zeros.
The operation is not affected by the context.
>>> ExtendedContext.to_eng_string(Decimal('123E+1'))
'1.23E+3'
>>> ExtendedContext.to_eng_string(Decimal('123E+3'))
'123E+3'
>>> ExtendedContext.to_eng_string(Decimal('123E-10'))
'12.3E-9'
>>> ExtendedContext.to_eng_string(Decimal('-123E-12'))
'-123E-12'
>>> ExtendedContext.to_eng_string(Decimal('7E-7'))
'700E-9'
>>> ExtendedContext.to_eng_string(Decimal('7E+1'))
'70'
>>> ExtendedContext.to_eng_string(Decimal('0E+1'))
'0.00E+3'
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
b, a = a, a--n//a>>1
return a
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1 << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and abs(y) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest((M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in range(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((x<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = M<<R
for i in range(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in range(R-1, -1, -1):
Mshift = M<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, int):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
def _convert_for_comparison(self, other, equality_op=False):
"""Given a Decimal instance self and a Python object other, return
a pair (s, o) of Decimal instances such that "s op o" is
equivalent to "self op other" for any of the 6 comparison
operators "op".
"""
if isinstance(other, Decimal):
return self, other
# Comparison with a Rational instance (also includes integers):
# self op n/d <=> self*d op n (for n and d integers, d positive).
# A NaN or infinity can be left unchanged without affecting the
# comparison result.
if isinstance(other, _numbers.Rational):
if not self._is_special:
self = _dec_from_triple(self._sign,
str(int(self._int) * other.denominator),
self._exp)
return self, Decimal(other.numerator)
# Comparisons with float and complex types. == and != comparisons
# with complex numbers should succeed, returning either True or False
# as appropriate. Other comparisons return NotImplemented.
if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
other = other.real
if isinstance(other, float):
context = getcontext()
if equality_op:
context.flags[FloatOperation] = 1
else:
context._raise_error(FloatOperation,
"strict semantics for mixing floats and Decimals are enabled")
return self, Decimal.from_float(other)
return NotImplemented, NotImplemented
##### Setup Specific Contexts ############################################
# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
DefaultContext = Context(
prec=28, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=999999,
Emin=-999999,
capitals=1,
clamp=0
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
import re
_parser = re.compile(r""" # A numeric string consists of:
# \s*
(?P<sign>[-+])? # an optional sign, followed by either...
(
(?=\d|\.\d) # ...a number (with at least one digit)
(?P<int>\d*) # having a (possibly empty) integer part
(\.(?P<frac>\d*))? # followed by an optional fractional part
(E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
|
Inf(inity)? # ...an infinity, or...
|
(?P<signal>s)? # ...an (optionally signaling)
NaN # NaN
(?P<diag>\d*) # with (possibly empty) diagnostic info.
)
# \s*
\Z
""", re.VERBOSE | re.IGNORECASE).match
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
_parse_format_specifier_regex = re.compile(r"""\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<alt>\#)?
(?P<zeropad>0)?
(?P<minimumwidth>(?!0)\d+)?
(?P<thousands_sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""", re.VERBOSE|re.DOTALL)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gGn':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
return format_dict
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
# (1) an empty list, or
# (2) nonempty list of positive integers + [0]
# (3) list of positive integers + [locale.CHAR_MAX], or
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart or spec['alt']:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
# Constants related to the hash implementation; hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
del sys
| {
"content_hash": "36c6c6caf0879f6808f0c7362ced0cda",
"timestamp": "",
"source": "github",
"line_count": 6391,
"max_line_length": 100,
"avg_line_length": 35.65780003129401,
"alnum_prop": 0.5511367376222635,
"repo_name": "xyuanmu/XX-Net",
"id": "c14d8ca86a1181d951048b2fb939560793b1bb64",
"size": "228511",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python3.8.2/Lib/_pydecimal.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4145"
},
{
"name": "C",
"bytes": "53301"
},
{
"name": "CSS",
"bytes": "94951"
},
{
"name": "HTML",
"bytes": "252022"
},
{
"name": "JavaScript",
"bytes": "22405"
},
{
"name": "Python",
"bytes": "15474534"
},
{
"name": "Shell",
"bytes": "10208"
},
{
"name": "Visual Basic",
"bytes": "1795"
}
],
"symlink_target": ""
} |
"""settings.py
Udacity conference server-side Python App Engine app user settings
$Id$
created/forked from conference.py by wesc on 2014 may 24
"""
# Replace the following lines with client IDs obtained from the APIs
# Console or Cloud Console.
WEB_CLIENT_ID = '897025632095-df4358n84k27aaf0rj0dff52db4dtgh8.apps.googleusercontent.com'
ANDROID_CLIENT_ID = 'replace with Android client ID'
IOS_CLIENT_ID = 'replace with iOS client ID'
ANDROID_AUDIENCE = WEB_CLIENT_ID
| {
"content_hash": "d2d403575dcb61824f171a66a195814e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 90,
"avg_line_length": 29.5,
"alnum_prop": 0.7860169491525424,
"repo_name": "zacharytamas/udacity-courses",
"id": "31663ec939a5bea412ceabbdb8ee48e4be6be71a",
"size": "495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UD858/settings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "32836"
},
{
"name": "Python",
"bytes": "34475"
}
],
"symlink_target": ""
} |
from blogMiddleware import BlogMiddleware
urls = [(r"^/blog/(\d{4})/(\d{2})/(\d{2})$", BlogMiddleware)]
| {
"content_hash": "4d74ad8c5e14b6bfa44471fe72c20123",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 61,
"avg_line_length": 35,
"alnum_prop": 0.638095238095238,
"repo_name": "tobi-weber/levitas",
"id": "d8f46b2adec3dd16225560865c9a4749f584425f",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/examples/regex/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1090"
},
{
"name": "Python",
"bytes": "168238"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
} |
"""
Created on Thu Apr 16 07:59:47 2015
@author: Richard Decal
bootstrapping on upwind-left vs -right quadrants.
To compare upwind positional distributions, we examined the percentage of trajectory
positions found in the left half of the wind tunnel. For each experimental condition,
we can calculate this left side bias exactly once. We use this bias to determine
if the experimental upwind distributions of position are likely to be drawn from
the upwind control distribution of position. We used bootstrapping methods to
collect samples from the control distribution of position, and these samples
are repeatedly drawn in order to find the probability distribution of the left
side bias values.
Position Samples - To build our sample set, positions from the control trajectories
were randomly selected. We built our samples such that the number of points drawn
equal the number of points seen in either of the experimental conditions.
Samples of positions were collected in two ways: individual points and whole
trajectories.
Individual points were selected randomly from any trajectory.
This sampling strategy allows us to compare probabilities in a truly random fashion,
however, position values found along a mosquito trajectory are highly correlated,
and thus are not truly random. To ensure that the correlation seen along a trajectory
does not influence the bias or distribution seen, we randomly selected entire trajectories.
The point or trajectory samples were collected in order to get a single side
bias value. However, to determine the probability of seeing the side bias by chance,
one needs to know the distribution of side bias values. Thus, the samples were
redrawn and the left side biases were calculated 10 000 times. The distribution
was well-fit by a Guassian, which, by assuming the observed mean and standard
deviation are close or equal to the expected mean and standard deviation, can
be used to calculate statistical significance of the observed left side bias.
Here, solving for the 95% confidence interval of the distribution of left side
biases equivocates to the probability of drawing a given left side bias value
from chance. That is, if the left side biases calculated from the experiments
are greater than twice the standard deviation of the distribution, the p-value
is less than 0.05.
"""
upwind_slice = ensemble.loc[(ensemble['position_x'] > 0.6) & (ensemble['position_x'] < 0.95)]
upwind_left_quad = upwind_slice.loc[(ensemble['position_y'] < 0)]
upwind_right_quad = upwind_slice.loc[(ensemble['position_y'] > 0)] | {
"content_hash": "a1c4e29a5a6c08a6a96dfd148dff6a78",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 93,
"avg_line_length": 57.22222222222222,
"alnum_prop": 0.8003883495145631,
"repo_name": "isomerase/mozziesniff",
"id": "5bec6e728c6c20f104ff3a399ea2bcbfa9263e38",
"size": "2599",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "roboskeeter/math/optimizers/bootstrapping.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "272171"
}
],
"symlink_target": ""
} |
from random import choice
from flask import Flask, json, Response, request
from flask.views import MethodView
from flask_sqlalchemy import SQLAlchemy
class ApiFlask(Flask):
def make_response(self, rv):
if isinstance(rv, ApiResult):
return rv.to_response()
return Flask.make_response(self, rv)
class ApiResult:
def __init__(self, value, status=200):
self.value = value
self.status = status
def to_response(self):
return Response(json.dumps(self.value),
status=self.status,
mimetype='application/json')
class ApiException(Exception):
def __init__(self, message, status=400):
Exception.__init__(self)
self.message = message
self.status = status
def to_result(self):
return ApiResult({'message': self.message},
status=self.status)
app = ApiFlask(__name__)
app.config.from_object('prod')
db = SQLAlchemy(app)
class FortuneApi:
def commit(self, model):
db.session.add(model)
db.session.commit()
return model
def get_user(self, ip):
result = User.query.filter_by(ip=ip).first()
if result:
return result
raise ApiException('IP Not Found')
def create_user(self, ip):
return self.commit(User(ip))
def get_or_create_user(self, ip):
try:
return self.get_user(ip)
except ApiException:
return self.create_user(ip)
def get_key(self, token):
result = Key.query.filter_by(token=token).first()
if result:
return result
raise ApiException('Invalid Token')
def create_key(self, user):
return self.commit(Key(user, self.new_token()))
def random_fortune(self, token):
result = Fortune.query.filter(Fortune.key.has(token=token))\
.order_by(db.func.random()).limit(1).first()
if result:
return result
raise ApiException('Invalid Token or No Fortunes On Token')
def add_fortune(self, token, text):
result = Key.query.filter_by(token=token).first()
key = self.get_key(token)
return self.commit(Fortune(key, text))
def new_token(self):
alphanum = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
result = True
while result:
token = ''.join(choice(alphanum) for x in range(16))
try:
result = self.get_key(token)
except Exception:
result = None
return token
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
ip = db.Column(db.String(15))
def __init__(self, ip):
self.ip = ip
class Key(db.Model):
id = db.Column(db.Integer, primary_key=True)
token = db.Column(db.String(16), unique=True)
private = db.Column(db.Boolean)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref=db.backref('keys', lazy='joined'))
def __init__(self, user, token):
self.user = user
self.token = token
class Fortune(db.Model):
id = db.Column(db.Integer, primary_key=True)
text = db.Column(db.Text)
pulls = db.Column(db.Integer)
key_id = db.Column(db.Integer, db.ForeignKey('key.id'))
key = db.relationship('Key', backref=db.backref('fortunes', lazy='joined'))
def __init__(self, key, text):
self.text = text
self.key = key
api = FortuneApi()
class TokenAPI(MethodView):
def get(self):
user = api.get_or_create_user(request.remote_addr)
key = api.create_key(user)
return ApiResult({'token':key.token})
class FortuneAPI(MethodView):
def get(self):
token = request.form.get('token', None)
if not token:
raise ApiException('Token Required')
fortune = api.random_fortune(token)
return ApiResult({'fortune':fortune.text})
def post(self):
token = request.form.get('token', None)
fortune = request.form.get('fortune', None)
if not token or not fortune:
raise ApiException('Token and Fortune Required')
fortune = api.add_fortune(token, fortune)
return ApiResult({'fortune':fortune.text})
routes = [{'rule': '/', 'view_func': lambda: "Hello World"},
{'rule': '/token/', 'view_func': TokenAPI.as_view('token')},
{'rule': '/fortune/', 'view_func': FortuneAPI.as_view('fortune')}]
for route in routes:
app.add_url_rule(**route)
@app.route('/mkdb')
def makedb():
db.create_all()
app.register_error_handler(ApiException, lambda e: e.to_result())
if __name__ == '__main__':
app.run()
| {
"content_hash": "8581899b323359ca3497ef3862e28e78",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 83,
"avg_line_length": 27.396551724137932,
"alnum_prop": 0.5980700650304175,
"repo_name": "kryptn/Fortunate",
"id": "d3491eaf730c34d896488ba2016d584e74572c2e",
"size": "4767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19215"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from imp import load_source
setup(
name='webassets-traceur',
version=load_source('', 'webassets_traceur/_version.py').__version__,
description='An additional webassets filter to compile ES6 to ES5 '
'using traceur.',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Flask',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
author='Concordus Applications',
author_email='[email protected]',
url='http://github.com/concordusapps/webassets-traceur',
packages=find_packages('.'),
install_requires=[
'flask >= 0.9.0',
'webassets >= 0.9.0',
]
)
| {
"content_hash": "04d1a1de4e629af6a3750b4119baf941",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 73,
"avg_line_length": 34.37931034482759,
"alnum_prop": 0.6168505516549649,
"repo_name": "concordusapps/webassets-traceur",
"id": "f99893137f1eeaf6d85824a17d945a6560a9d80a",
"size": "1044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2165"
}
],
"symlink_target": ""
} |
import sys
firstline = sys.stdin.readline()
current_ID = firstline.split()[0]
distance_buffer=firstline.split()[2]
gene_buffer=[firstline.split()[1]]
strand_buffer={firstline.split()[3]}
for line in sys.stdin:
ID=line.split()[0]
distance=line.split()[2]
gene=line.split()[1]
strand=line.split()[3]
if ID==current_ID:
gene_buffer.append(gene)
strand_buffer.add(strand)
else:
buffered_line=';'.join(gene_buffer)+'\t'+distance_buffer+'\t'+';'.join(strand_buffer)
print buffered_line
gene_buffer=[gene]
current_ID = ID
distance_buffer = distance
strand_buffer = {strand}
buffered_line=';'.join(gene_buffer)+'\t'+distance_buffer+'\t'+strand
print buffered_line
| {
"content_hash": "fd2ff48d81b68afcbbd70f983999e89c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 93,
"avg_line_length": 29.84,
"alnum_prop": 0.6420911528150134,
"repo_name": "corcra/feabhsa-here",
"id": "df4eaf032816bad1d6952fd435abf830277a72dc",
"size": "855",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeline/post_bedtools_join_closest.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15237"
},
{
"name": "R",
"bytes": "52804"
},
{
"name": "Shell",
"bytes": "48159"
}
],
"symlink_target": ""
} |
User_Specified_Output_Folder = "BingWallpapers/"
# Iterate through days
for whatday in range(-7,2):
whatday = 0-whatday
# Import modules
import os,datetime,re,calendar,urllib.request,shutil
# Print starting message
print("------Process Started------")
# Function to find out "Month" and "Day" value
def findmonthday():
global Year, Month, Day
now = datetime.datetime.now()
todaydate = datetime.date(now.year, now.month, now.day)
DayDelta = datetime.timedelta(days=whatday)
DayDate = str(todaydate - DayDelta)
DayDateApply = re.split("-", DayDate)
Year = DayDateApply[0]
Month = int(DayDateApply[1])
Month = str(calendar.month_abbr[(Month)])
Day = DayDateApply[2]
if len(Day) == 1:
Day = "0" + Day
# Import wallpaper url xml
urlBing = "https://www.bing.com/HPImageArchive.aspx?format=xml&idx=" + str(whatday) + "&n=1&mkt=zh-CN"
print("Getting URL")
responseBing = urllib.request.urlopen(urlBing)
dataBing = responseBing.read()
textBing = dataBing.decode('utf-8')
html = textBing
# Extract wallpaper url (https)
a = html.find("<urlBase>")
b = html.find("</urlBase>")
Wallpaper = ""
for i in range(a+9,b):
Wallpaper = Wallpaper+html[i]
WallpaperURL = "https://www.bing.com" + Wallpaper + "_1920x1080.jpg"
# Download
print("Downloading")
FileName = re.split("/", Wallpaper)
FileName = (FileName[-1] + "_1920x1080.jpg")
urllib.request.urlretrieve(WallpaperURL, FileName)
'''
except:
try:
findmonthday()
print("Error: Bing.com may not have the 1920x1200 version of the picture on " + Month + "." + Day + ", trying fetching the 1920x1080 version")
WallpaperURL = "https://www.bing.com" + Wallpaper + "_1920x1080.jpg"
FileName = re.split("/", Wallpaper)
FileName = (FileName[-1] + "_1920x1080.jpg")
urllib.request.urlretrieve(WallpaperURL, FileName)
except:
print("File does not exist on Bing.com server, skipping")
continue
'''
#os.system("axel " + WallpaperURL + "> /dev/null")
# Rename to the date of the day
findmonthday()
ToFileName = (Month + Day + ".jpg")
shutil.move(FileName, ToFileName)
print("Renamed to: " + ToFileName)
# Sort file into user specified folder (ends with "/")
print("Sorting file based on date")
NewDirectory = User_Specified_Output_Folder + Month + Year + "/"
if not os.path.exists(NewDirectory):
os.makedirs(NewDirectory)
# Try moving file, if exist, delete existing file then move
try:
shutil.move(ToFileName, NewDirectory)
except:
os.remove(NewDirectory + ToFileName)
shutil.move(ToFileName, NewDirectory)
print("Done.")
| {
"content_hash": "de56c3ad5ce3326a07bada2108e3d1bd",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 145,
"avg_line_length": 35.416666666666664,
"alnum_prop": 0.6945098039215686,
"repo_name": "dlfbdnd/Bing-Wallpaper-Downloader",
"id": "11ea9817d6b7ef9f38f704a419677c7077276c35",
"size": "2621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Bing_Wallpaper_Downloader_Batch1080.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8060"
}
],
"symlink_target": ""
} |
import unittest
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import numpy
class BaseParallelForTest(unittest.TestCase):
def run_test(self, callback, feed, fetch):
"""
Run the unittest for parallel.for
Args:
callback(callable): A callable function returns a generator. There
are two yields in the generator function. The first yield
returns the data layers, and the second yield returns the loss.
The modified data variables will be sent back during the first
yield.
feed(dict): The executor feeding dictionary.
fetch(list|basestr): The fetch name lists.
Returns:
None
Raises:
AssertionError when the computation of cpu, parallel.for in cpu,
gpu, parallel.for in gpu are different.
"""
cpu = fluid.CPUPlace()
result_cpu = self._run_test_impl_(
callback=callback,
feed=feed,
fetch=fetch,
place=cpu,
use_parallel=False)
result_cpu_parallel = self._run_test_impl_(
callback=callback,
feed=feed,
fetch=fetch,
place=cpu,
use_parallel=True)
if fluid.core.is_compiled_with_cuda():
gpu = fluid.CUDAPlace(0)
result_gpu = self._run_test_impl_(
callback=callback,
feed=feed,
fetch=fetch,
place=gpu,
use_parallel=False,
use_gpu=True)
result_gpu_parallel = self._run_test_impl_(
callback=callback,
feed=feed,
fetch=fetch,
place=gpu,
use_parallel=True,
use_gpu=True)
result_gpu_nccl = self._run_test_impl_(
callback=callback,
feed=feed,
fetch=fetch,
place=gpu,
use_parallel=True,
use_nccl=True,
use_gpu=True)
self._assert_same_(fetch, result_cpu, result_cpu_parallel,
result_gpu, result_gpu_parallel, result_gpu_nccl)
else:
self._assert_same_(fetch, result_cpu, result_cpu_parallel)
def _run_test_impl_(self,
callback,
feed,
fetch,
place,
use_parallel=False,
use_nccl=False,
use_gpu=False):
"""
Run a single test, returns the fetch values
Args:
place(Place): the computation place.
use_parallel(bool): Whether use parallel.for or not.
Returns:
Fetched numpy arrays.
"""
if isinstance(fetch, basestring):
fetch = [fetch]
main = fluid.Program()
startup = fluid.Program()
# Fix seed
main.random_seed = 10
startup.random_seed = 10
with fluid.program_guard(main, startup):
generator = callback()
# Automatically insert parallel do if use_parallel = True
if use_parallel:
places = fluid.layers.get_places()
pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl)
data = next(generator)
if isinstance(data, fluid.Variable):
data = [data]
with pd.do():
ins = map(pd.read_input, data)
if len(ins) == 1:
ins = ins[0]
loss = generator.send(ins) # patch input
pd.write_output(loss)
loss = pd()
else:
data = next(generator)
loss = generator.send(data)
self.assertIsNotNone(loss)
avg_loss = fluid.layers.mean(loss)
fluid.backward.append_backward(loss=avg_loss)
exe = fluid.Executor(place)
exe.run(startup)
if use_gpu:
profile_type = 'GPU'
else:
profile_type = 'CPU'
with profiler.profiler(profile_type, 'total', '/tmp/profiler'):
return exe.run(main, feed=feed, fetch_list=fetch)
def _assert_same_(self, fetch, *args):
"""
Assert the return values of `run_test` are same.
Args:
fetch: Fetch list. Used for print error message
*args: The fetch result lists of each situations.
Returns:
None
Raises:
AssertionError
"""
def _impl_(a, b, fetch_id, item_id):
item_str = [
'CPU', 'ParallelCPU', 'GPU', 'ParallelGPU', 'ParallelGPUNCCL'
]
flag = numpy.allclose(a, b, rtol=0.1, atol=1e-3)
self.assertTrue(flag,
"The {0} are different in {1}, {2} vs {3}".format(
fetch[fetch_id], item_str[item_id], a, b))
for i, items in enumerate(zip(*args)):
self.assertGreater(len(items), 0)
for j in range(1, len(items)):
_impl_(items[0], items[j], fetch_id=i, item_id=j)
class ParallelOpTest(BaseParallelForTest):
@staticmethod
def __network__():
x = fluid.layers.data(shape=[784], dtype='float32', name='img')
x = yield x
hidden = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden = fluid.layers.batch_norm(input=hidden)
loss = fluid.layers.mean(hidden)
yield loss
def test_simple_fc(self):
self.run_test(
callback=self.__network__,
feed={
'img': numpy.random.random(size=(51, 784)).astype('float32')
},
fetch=['fc1.w@GRAD'])
def test_fc_with_tiny_data(self):
self.run_test(
callback=self.__network__,
feed={'img': numpy.random.random(size=(1, 784)).astype('float32')},
fetch=['fc1.w@GRAD'])
class ParallelOpTestMultipleInput(BaseParallelForTest):
@staticmethod
def __network__():
x = fluid.layers.data(
shape=[784], dtype='float32', name='img1', stop_gradient=False)
y = fluid.layers.data(
shape=[784], dtype='float32', name='img2', stop_gradient=False)
yield [x, y]
x = x + y
hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
hidden2 = fluid.layers.fc(input=hidden1, size=200, param_attr='fc2.w')
hidden3 = fluid.layers.fc(input=hidden2, size=200, param_attr='fc3.w')
loss = fluid.layers.mean(hidden3)
yield loss
def test_simple_fc(self):
self.run_test(
callback=self.__network__,
feed={
'img1': numpy.random.random(size=(51, 784)).astype('float32'),
'img2': numpy.random.random(size=(51, 784)).astype('float32')
},
fetch=['fc1.w@GRAD', 'fc2.w@GRAD', 'fc3.w@GRAD'])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "66a98e2d126b210c325e40ff962ce546",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 80,
"avg_line_length": 33.700934579439256,
"alnum_prop": 0.5073488630061009,
"repo_name": "lcy-seso/Paddle",
"id": "79bea148f9398152a02d70946cdc5fff1f47ba6b",
"size": "7825",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_parallel_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "278852"
},
{
"name": "C++",
"bytes": "7213431"
},
{
"name": "CMake",
"bytes": "258158"
},
{
"name": "Cuda",
"bytes": "1077180"
},
{
"name": "Go",
"bytes": "109501"
},
{
"name": "Perl",
"bytes": "11456"
},
{
"name": "Python",
"bytes": "3337838"
},
{
"name": "Shell",
"bytes": "147571"
}
],
"symlink_target": ""
} |
import unittest
from telemetry.timeline import tracing_category_filter
from telemetry.timeline import tracing_config
class TracingConfigTests(unittest.TestCase):
def testDefault(self):
config = tracing_config.TracingConfig()
config_string = config.GetChromeTraceConfigJsonString()
self.assertEquals(
'{'
'"record_mode": "record-as-much-as-possible"'
'}',
config_string)
def testBasic(self):
category_filter = tracing_category_filter.TracingCategoryFilter(
'x,-y,disabled-by-default-z,DELAY(7;foo)')
config = tracing_config.TracingConfig()
config.SetTracingCategoryFilter(category_filter)
config.enable_systrace = True
config.record_mode = tracing_config.RECORD_UNTIL_FULL
config_string = config.GetChromeTraceConfigJsonString()
self.assertEquals(
'{'
'"enable_systrace": true, '
'"excluded_categories": ["y"], '
'"included_categories": ["x", "disabled-by-default-z"], '
'"record_mode": "record-until-full", '
'"synthetic_delays": ["DELAY(7;foo)"]'
'}',
config_string)
| {
"content_hash": "d2fbcaa78bb56f71a40922b31e32d24b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 68,
"avg_line_length": 34.303030303030305,
"alnum_prop": 0.6563604240282686,
"repo_name": "XiaosongWei/chromium-crosswalk",
"id": "dc79c56b8d6795dbc5eb2a3e380f73db03f12bfe",
"size": "1295",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/timeline/tracing_config_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.shortcuts import render
from django.http import HttpResponse
from qiniuyun.backend import QiniuPush
from qiniuyun.models import ImageAtQiniu
from .indexView import ImgList
from DJangoHotel.models import Order,RoomInfo,Customer
import time
import datetime
from random import randint
def orderResult(request):
imgObjs=ImageAtQiniu.objects.all()
imgUrls=[QiniuPush.private_download_url(i.fullname) for i in imgObjs]
imgs=ImgList()
for i in imgUrls:
if 'hotel-logo' in i:
imgs.logo=i
tel,name,IDcard= request.GET['tel'],request.GET['name'],request.GET['IDcard']
if Customer.objects.all():
cc=Customer.objects.filter(IDcard=IDcard)
else:
cc=[]
for c in cc:
if c and c.tel==tel and c.name==name:
tempCustomer=c
break
else:
tempCustomer=Customer(tel=tel,name=name,IDcard=IDcard)
tempCustomer.save()
tempOrder=Order()
tempOrder.customer = tempCustomer
tempOrder.roomtype = request.GET['roomtype']
begin,end = request.GET['begin'],request.GET['end']
tempOrder.begin = (datetime.datetime.strptime(begin , '%Y-%m-%d')).date()
tempOrder.end = (datetime.datetime.strptime(end , '%Y-%m-%d')).date()
period = (tempOrder.end - tempOrder.begin).days
if period==0:period=1
price = 0
if tempOrder.roomtype == 'standard':
price = (RoomInfo.objects.get(name='标准间')).price
elif tempOrder.roomtype =='better':
price = (RoomInfo.objects.get(name='豪华间')).price
elif tempOrder.roomtype =='president':
price = (RoomInfo.objects.get(name='总统间')).price
tempOrder.roomnum=randint(1,10)
tempOrder.totalprice = period * price
tempOrder.save()
return render(request,'orderresult.html',{'order':tempOrder,'img':imgs})
| {
"content_hash": "d906bb9bea1310cb8ef8fa55a9dd5b3d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 81,
"avg_line_length": 31.383333333333333,
"alnum_prop": 0.648964418481147,
"repo_name": "williezh/hotelbooksystem",
"id": "6047ab3e66d799e42d114ff716cc37996679c605",
"size": "1925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DJangoHotel/viewspackage/orderResultView.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "61734"
},
{
"name": "HTML",
"bytes": "10175"
},
{
"name": "JavaScript",
"bytes": "141507"
},
{
"name": "Python",
"bytes": "55246"
},
{
"name": "Shell",
"bytes": "156"
}
],
"symlink_target": ""
} |
import abc
from typing import Awaitable, Callable, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v10.services.types import customer_customizer_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-ads",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class CustomerCustomizerServiceTransport(abc.ABC):
"""Abstract transport class for CustomerCustomizerService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
DEFAULT_HOST: str = "googleads.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id,
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(
service_account.Credentials, "with_always_use_jwt_access"
)
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.mutate_customer_customizers: gapic_v1.method.wrap_method(
self.mutate_customer_customizers,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def mutate_customer_customizers(
self,
) -> Callable[
[customer_customizer_service.MutateCustomerCustomizersRequest],
Union[
customer_customizer_service.MutateCustomerCustomizersResponse,
Awaitable[
customer_customizer_service.MutateCustomerCustomizersResponse
],
],
]:
raise NotImplementedError()
__all__ = ("CustomerCustomizerServiceTransport",)
| {
"content_hash": "12c226a37a940985bcab9c9dd563c292",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 101,
"avg_line_length": 38.43356643356643,
"alnum_prop": 0.6235443959243085,
"repo_name": "googleads/google-ads-python",
"id": "9980c26bd7b050d8d81e2d3719dcd311a671f40d",
"size": "6096",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v10/services/services/customer_customizer_service/transports/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
import base64
import datetime
import StringIO
import time
import urllib
from apiclient.http import MediaIoBaseUpload
from google.appengine.api import app_identity
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
import config
import utils
class CloudStorage(object):
"""A class to use Google Cloud Storage.
It uses the application default credentials and the default Cloud Storage
bucket for the App Engine app.
If you use this class in the dev app server, you need to:
1. Manually set up the application default credentials using:
$ gcloud auth application-default login
2. Point config "gcs_bucket_name" to a valid Cloud Storage bucket name,
which allows read/write access from the credentials above.
This class currently only supports a single object lifetime applied to all
objects in the bucket, by set_objects_lifetime().
If you want to store objects with different lifetimes, you may extend this
class to either:
- store objects in multiple buckets
- set object holds:
https://cloud.google.com/storage/docs/bucket-lock#object-holds
"""
def __init__(self):
credentials = GoogleCredentials.get_application_default()
self.service = build('storage', 'v1', credentials=credentials)
self.bucket_name = (
config.get('gcs_bucket_name') or
app_identity.get_default_gcs_bucket_name()).encode('utf-8')
def insert_object(self, object_name, content_type, data):
"""Uploads an object to the bucket.
Args:
object_name (str): Name of the object.
content_type (str): MIME type string of the object.
data (str): The content of the object.
"""
media = MediaIoBaseUpload(StringIO.StringIO(data), mimetype=content_type)
self.service.objects().insert(
bucket=self.bucket_name,
name=object_name,
body={
# This let browsers to download the file instead of opening
# it in a browser.
'contentDisposition':
'attachment; filename=%s' % object_name,
},
media_body=media).execute()
def compose_objects(
self,
source_object_names,
destination_object_name,
destination_content_type):
"""Concatenates the source objects to generate the destination object.
Args:
source_object_names (list of str): Names of the source objects.
destination_object_name (str): Name of the destination object.
destination_content_type (str): MIME type of the destination object.
"""
self.service.objects().compose(
destinationBucket=self.bucket_name,
destinationObject=destination_object_name,
body={
'sourceObjects': [{'name': name} for name in source_object_names],
'destination': {
'contentType': destination_content_type,
# This let browsers to download the file instead of opening
# it in a browser.
'contentDisposition':
'attachment; filename=%s' % destination_object_name,
},
}).execute()
def sign_url(self, object_name, url_lifetime):
""" Generates Cloud Storage signed URL to download Google Cloud Storage
object without sign in.
See: https://cloud.google.com/storage/docs/access-control/signed-urls
This only works on a real App Engine app, not in a dev app server.
Args:
object_name (str): The name of the object which is signed.
url_lifetime (datetime.timedelta): Lifetime of the signed URL. The
server rejects any requests received after this time from now.
"""
if utils.is_dev_app_server():
# Not working on a dev app server because it doesn't support
# app_identity.sign_blob(). An alternative implementation would
# be needed to make it work on a dev app server.
raise Exception(
'sign_url only works on a real App Engine app, not on a dev '
'app server.')
method = 'GET'
expiration_time = utils.get_utcnow() + url_lifetime
expiration_sec = int(time.mktime(expiration_time.timetuple()))
path = '/%s/%s' % (self.bucket_name, object_name)
# These are unused in our use case.
content_md5 = ''
content_type = ''
signed_text = '\n'.join([
method,
content_md5,
content_type,
str(expiration_sec),
path,
])
(_, signature) = app_identity.sign_blob(signed_text.encode('utf-8'))
query_params = {
'GoogleAccessId': app_identity.get_service_account_name(),
'Expires': str(expiration_sec),
'Signature': base64.b64encode(signature),
}
return 'https://storage.googleapis.com%s?%s' % (path, urllib.urlencode(query_params))
def set_objects_lifetime(self, lifetime_days):
"""Sets lifetime of all objects in the bucket in days.
An object is deleted the specified days after it is created.
lifetime_days (int): Lifetime of objects in a number of days.
"""
self.service.buckets().patch(bucket=self.bucket_name, body={
'lifecycle': {
'rule': [
{
'action': {'type': 'Delete'},
'condition': {'age': lifetime_days},
},
],
},
}).execute()
| {
"content_hash": "420e4eee681ca333bf7238aeb15b5260",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 93,
"avg_line_length": 38.32236842105263,
"alnum_prop": 0.590557939914163,
"repo_name": "gimite/personfinder",
"id": "e10880959ec8188fa2afead97ac69385523278b7",
"size": "6423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/cloud_storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21011"
},
{
"name": "Dockerfile",
"bytes": "1428"
},
{
"name": "HTML",
"bytes": "4527"
},
{
"name": "JavaScript",
"bytes": "90402"
},
{
"name": "Python",
"bytes": "1768355"
},
{
"name": "Shell",
"bytes": "30308"
}
],
"symlink_target": ""
} |
import theano
import theano.tensor as T
import numpy
import unittest
from yann.core.operators import copy_params
class TestOperators(unittest.TestCase):
def test_copy_params(self):
numpy.random.seed(0)
self.verbose = 3
self.input_ndarray = numpy.random.rand(1, 1, 10, 10)
self.input_tensor = theano.shared(self.input_ndarray)
self.output_ndarray = numpy.zeros((1,1,10,10))
self.output_tensor = theano.shared(self.output_ndarray)
self.source = [self.input_tensor]
self.dest = [self.output_tensor]
copy_params(source=self.source, destination= self.dest, borrow= True, verbose= self.verbose)
self.assertTrue(numpy.allclose(self.dest[0].eval(),self.source[0].eval()))
| {
"content_hash": "0158f60360443222146542f3c6c36a5f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 100,
"avg_line_length": 39.526315789473685,
"alnum_prop": 0.6830892143808256,
"repo_name": "ragavvenkatesan/Convolutional-Neural-Networks",
"id": "b4f64f9d1eef0d3c19a1d305bae88b48af1caa0d",
"size": "751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/test_operators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "2002"
},
{
"name": "Python",
"bytes": "171871"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import signal
import sys
from contextlib import contextmanager
from pants.java.nailgun_client import NailgunClient
from pants.java.nailgun_protocol import NailgunProtocol
from pants.pantsd.process_manager import ProcessMetadataManager
class RemotePantsRunner(object):
"""A thin client variant of PantsRunner."""
class PortNotFound(Exception): pass
PANTS_COMMAND = 'pants'
RECOVERABLE_EXCEPTIONS = (PortNotFound, NailgunClient.NailgunConnectionError)
def __init__(self, exiter, args, env, process_metadata_dir=None,
stdin=None, stdout=None, stderr=None):
"""
:param Exiter exiter: The Exiter instance to use for this run.
:param list args: The arguments (e.g. sys.argv) for this run.
:param dict env: The environment (e.g. os.environ) for this run.
:param str process_metadata_dir: The directory in which process metadata is kept.
:param file stdin: The stream representing stdin.
:param file stdout: The stream representing stdout.
:param file stderr: The stream representing stderr.
"""
self._exiter = exiter
self._args = args
self._env = env
self._process_metadata_dir = process_metadata_dir
self._stdin = stdin or sys.stdin
self._stdout = stdout or sys.stdout
self._stderr = stderr or sys.stderr
self._port = self._retrieve_pailgun_port()
if not self._port:
raise self.PortNotFound('unable to locate pailgun port!')
@staticmethod
def _combine_dicts(*dicts):
"""Combine one or more dicts into a new, unified dict (dicts to the right take precedence)."""
return {k: v for d in dicts for k, v in d.items()}
@contextmanager
def _trapped_control_c(self, client):
"""A contextmanager that overrides the SIGINT (control-c) handler and handles it remotely."""
def handle_control_c(signum, frame):
client.send_control_c()
existing_sigint_handler = signal.signal(signal.SIGINT, handle_control_c)
signal.siginterrupt(signal.SIGINT, False) # Retry interrupted system calls.
try:
yield
finally:
signal.signal(signal.SIGINT, existing_sigint_handler)
def _retrieve_pailgun_port(self):
return ProcessMetadataManager(
self._process_metadata_dir).read_metadata_by_name('pantsd', 'socket_pailgun', int)
def run(self, args=None):
# Merge the nailgun TTY capability environment variables with the passed environment dict.
ng_env = NailgunProtocol.isatty_to_env(self._stdin, self._stdout, self._stderr)
modified_env = self._combine_dicts(self._env, ng_env)
# Instantiate a NailgunClient.
client = NailgunClient(port=self._port, ins=self._stdin, out=self._stdout, err=self._stderr)
with self._trapped_control_c(client):
# Execute the command on the pailgun.
result = client.execute(self.PANTS_COMMAND, *self._args, **modified_env)
# Exit.
self._exiter.exit(result)
| {
"content_hash": "ca37bd64905bb8f83a2235a43b3b8bc4",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 98,
"avg_line_length": 38.87179487179487,
"alnum_prop": 0.7074538258575198,
"repo_name": "manasapte/pants",
"id": "fa8325bb2b18bba2ef89ea26c8e61fe0567a2bdb",
"size": "3179",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/python/pants/bin/remote_pants_runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "438730"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "5084384"
},
{
"name": "Scala",
"bytes": "84585"
},
{
"name": "Shell",
"bytes": "58748"
},
{
"name": "Thrift",
"bytes": "1966"
}
],
"symlink_target": ""
} |
"""
Configurator for Home Assistant.
https://github.com/danielperna84/hass-configurator
"""
import os
import sys
import json
import ssl
import socketserver
import base64
import ipaddress
import signal
import cgi
import shlex
import subprocess
import logging
import fnmatch
from string import Template
from http.server import BaseHTTPRequestHandler, HTTPServer
import urllib.request
from urllib.parse import urlparse, parse_qs, unquote
### Some options for you to change
LISTENIP = "0.0.0.0"
LISTENPORT = 3218
# Set BASEPATH to something like "/home/hass/.homeassistant/" if you're not running the
# configurator from that path
BASEPATH = None
# Set the paths to a certificate and the key if you're using SSL, e.g "/etc/ssl/certs/mycert.pem"
SSL_CERTIFICATE = None
SSL_KEY = None
# Set the destination where the HASS API is reachable
HASS_API = "http://127.0.0.1:8123/api/"
# If a password is required to access the API, set it in the form of "password"
# if you have HA ignoring SSL locally this is not needed if on same machine.
HASS_API_PASSWORD = None
# To enable authentication, set the credentials in the form of "username:password"
CREDENTIALS = None
# Limit access to the configurator by adding allowed IP addresses / networks to the list,
# e.g ALLOWED_NETWORKS = ["192.168.0.0/24", "172.16.47.23"]
ALLOWED_NETWORKS = []
# List of statically banned IP addresses, e.g. ["1.1.1.1", "2.2.2.2"]
BANNED_IPS = []
# Ban IPs after n failed login attempts. Restart service to reset banning. The default
# of `0` disables this feature.
BANLIMIT = 0
# Enable git integration. GitPython (https://gitpython.readthedocs.io/en/stable/) has
# to be installed.
GIT = False
# Files to ignore in the UI. A good example list that cleans up the UI is
# [".*", "*.log", "deps", "icloud", "*.conf", "*.json", "certs", "__pycache__"]
IGNORE_PATTERN = []
### End of options
LOGLEVEL = logging.INFO
LOG = logging.getLogger(__name__)
LOG.setLevel(LOGLEVEL)
SO = logging.StreamHandler(sys.stdout)
SO.setLevel(LOGLEVEL)
SO.setFormatter(logging.Formatter('%(levelname)s:%(asctime)s:%(name)s:%(message)s'))
LOG.addHandler(SO)
RELEASEURL = "https://api.github.com/repos/danielperna84/hass-configurator/releases/latest"
VERSION = "0.2.0"
BASEDIR = "."
DEV = False
HTTPD = None
FAIL2BAN_IPS = {}
REPO = False
if GIT:
try:
from git import Repo as REPO
except Exception:
LOG.warning("Unable to import Git module")
INDEX = Template(r"""<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1.0" />
<title>HASS Configurator</title>
<link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
<link href="https://cdnjs.cloudflare.com/ajax/libs/MaterialDesign-Webfont/2.0.46/css/materialdesignicons.min.css" rel="stylesheet">
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.1/css/materialize.min.css">
<style type="text/css" media="screen">
body {
margin: 0;
padding: 0;
background-color: #fafafa;
display: flex;
min-height: 100vh;
flex-direction: column;
}
main {
flex: 1 0 auto;
}
#editor {
position: fixed;
top: 135px;
right: 0;
bottom: 0;
}
@media only screen and (max-width: 600px) {
#editor {
top: 125px;
}
.toolbar_mobile {
margin-bottom: 0;
}
}
.leftellipsis {
overflow: hidden;
direction: rtl;
text-overflow: ellipsis;
white-space: nowrap;
}
.select-wrapper input.select-dropdown {
width: 96%;
overflow: hidden;
direction: ltr;
text-overflow: ellipsis;
white-space: nowrap;
}
#edit_float {
z-index: 10;
}
#filebrowser {
background-color: #fff;
}
#fbheader {
display: block;
cursor: initial;
pointer-events: none;
color: #424242 !important;
font-weight: 400;
font-size: .9em;
min-height: 64px;
padding-top: 8px;
margin-left: -5px;
max-width: 250px;
}
#fbheaderbranch {
padding: 5px 10px !important;
display: none;
color: #757575 !important;
}
#branchselector {
font-weight: 400;
}
a.branch_select.active {
color: white !important;
}
#fbelements {
margin: 0;
position: relative;
}
a.collection-item {
color: #616161 !important;
}
.fbtoolbarbutton {
color: #757575 !important;
min-height: 64px !important;
}
.fbmenubutton {
color: #616161 !important;
display: inline-block;
float: right;
min-height: 64px;
padding-top: 8px !important;
padding-left: 20px !important;
}
.filename {
color: #616161 !important;
font-weight: 400;
display: inline-block;
width: 185px;
white-space: nowrap;
text-overflow: ellipsis;
cursor: pointer;
}
.nowrap {
white-space: nowrap;
}
.text_darkgreen {
color: #1b5e20 !important;
}
.text_darkred {
color: #b71c1c !important;
}
span.stats {
margin: -10px 0 0 0;
padding: 0;
font-size: 0.5em;
color: #616161 !important;
line-height: 16px;
display: inherit;
}
.collection-item #uplink {
background-color: #f5f5f5;
width: 323px !important;
margin-left: -3px !important;
}
input.currentfile_input {
margin-bottom: 0;
margin-top: 0;
padding-left: 5px;
border-bottom: 0;
}
.side_tools {
vertical-align: middle;
}
.fbtoolbarbutton_icon {
margin-top: 20px;
}
.collection {
margin: 0;
background-color: #fff;
}
li.collection-item {
border-bottom: 1px solid #eeeeee !important;
}
.fb_side-nav li {
line-height: 36px;
}
.fb_side-nav a {
padding: 0 0 0 16px;
display: inline-block !important;
}
.fb_side-nav li>a>i {
margin-right: 16px !important;
cursor: pointer;
}
.green {
color: #fff;
}
.red {
color: #fff;
}
#dropdown_menu, #dropdown_menu_mobile {
min-width: 235px;
}
#dropdown_gitmenu {
min-width: 140px !important;
}
.dropdown-content li>a,
.dropdown-content li>span {
color: #616161 !important;
}
.fb_dd {
margin-left: -15px !important;
}
.blue_check:checked+label:before {
border-right: 2px solid #03a9f4;
border-bottom: 2px solid #03a9f4;
}
.input-field input:focus+label {
color: #03a9f4 !important;
}
.input-field input[type=text].valid {
border-bottom: 1px solid #03a9f4;;
box-shadow: 0 1px 0 0 #03a9f4;;
}
.row .input-field input:focus {
border-bottom: 1px solid #03a9f4 !important;
box-shadow: 0 1px 0 0 #03a9f4 !important
}
#modal_acekeyboard, #modal_components, #modal_icons {
top: auto;
width: 96%;
min-height: 96%;
border-radius: 0;
margin: auto;
}
.modal .modal-content_nopad {
padding: 0;
}
.waves-effect.waves-blue .waves-ripple {
background-color: #03a9f4;
}
.preloader-background {
display: flex;
align-items: center;
justify-content: center;
background-color: #eee;
position: fixed;
z-index: 10000;
top: 0;
left: 0;
right: 0;
bottom: 0;
}
.modal-content_nopad {
position: relative;
}
.modal-content_nopad .modal_btn {
position: absolute;
top: 2px;
right:0;
}
footer {
z-index: 10;
}
.shadow {
height: 25px;
margin: -26px;
min-width: 320px;
background-color: transparent;
}
.ace_optionsMenuEntry input {
position: relative !important;
left: 0 !important;
opacity: 1 !important;
}
.ace_optionsMenuEntry select {
position: relative !important;
left: 0 !important;
opacity: 1 !important;
display: block !important;
}
.ace_search {
background-color: #eeeeee !important;
border-radius: 0 !important;
border: 0 !important;
box-shadow: 0 6px 10px 0 rgba(0, 0, 0, 0.14), 0 1px 18px 0 rgba(0, 0, 0, 0.12), 0 3px 5px -1px rgba(0, 0, 0, 0.3);
}
.ace_search_form {
background-color: #fafafa;
width: 300px;
border: 0 !important;
border-radius: 0 !important;
outline: none !important;
box-shadow: 0 2px 2px 0 rgba(0, 0, 0, 0.14), 0 1px 5px 0 rgba(0, 0, 0, 0.12), 0 2px 1px -2px rgba(0, 0, 0, 0.2);
margin-bottom: 15px !important;
margin-left: 8px !important;
color: #424242 !important;
}
.ace_search_field {
padding-left: 4px !important;
margin-left: 10px !important;
max-width: 275px !important;
font-family: 'Roboto', sans-serif !important;
border-bottom: 1px solid #03a9f4 !important;
color: #424242 !important;
}
.ace_replace_form {
background-color: #fafafa;
width: 300px;
border: 0 !important;
border-radius: 0 !important;
outline: none !important;
box-shadow: 0 2px 2px 0 rgba(0, 0, 0, 0.14), 0 1px 5px 0 rgba(0, 0, 0, 0.12), 0 2px 1px -2px rgba(0, 0, 0, 0.2);
margin-bottom: 15px !important;
margin-left: 8px !important;
}
.ace_search_options {
background-color: #eeeeee;
text-align: left !important;
letter-spacing: .5px !important;
transition: .2s ease-out;
font-family: 'Roboto', sans-serif !important;
font-size: 130%;
top: 0 !important;
}
.ace_searchbtn {
text-decoration: none !important;
min-width: 40px !important;
min-height: 30px !important;
color: #424242 !important;
text-align: center !important;
letter-spacing: .5px !important;
transition: .2s ease-out;
cursor: pointer;
font-family: 'Roboto', sans-serif !important;
}
.ace_searchbtn:hover {
background-color: #03a9f4;
}
.ace_replacebtn {
text-decoration: none !important;
min-width: 40px !important;
min-height: 30px !important;
color: #424242 !important;
text-align: center !important;
letter-spacing: .5px !important;
transition: .2s ease-out;
cursor: pointer;
font-family: 'Roboto', sans-serif !important;
}
.ace_replacebtn:hover {
background-color: #03a9f4;
}
.ace_button {
text-decoration: none !important;
min-width: 40px !important;
min-height: 30px !important;
border-radius: 0 !important;
outline: none !important;
color: #424242 !important;
background-color: #fafafa;
text-align: center;
letter-spacing: .5px;
transition: .2s ease-out;
cursor: pointer;
font-family: 'Roboto', sans-serif !important;
}
.ace_button:hover {
background-color: #03a9f4 !important;
}
.fbicon_pad {
min-height: 64px !important;
}
.fbmenuicon_pad {
min-height: 64px;
margin-top: 6px !important;
margin-right: 18px !important;
color: #616161 !important;
}
.no-padding {
padding: 0 !important;
}
.branch_select {
min-width: 300px !important;
font-size: 14px !important;
font-weight: 400 !important;
}
a.branch_hover:hover {
background-color: #e0e0e0 !important;
}
.hidesave {
opacity: 0;
-webkit-transition: all 0.5s ease-in-out;
-moz-transition: all 0.5s ease-in-out;
-ms-transition: all 0.5s ease-in-out;
-o-transition: all 0.5s ease-in-out;
transition: all 0.5s ease-in-out;
}
.pathtip_color {
-webkit-animation: fadeinout 1.75s linear 1 forwards;
animation: fadeinout 1.75s linear 1 forwards;
}
@-webkit-keyframes fadeinout {
0% { background-color: #f5f5f5; }
50% { background-color: #ff8a80; }
100% { background-color: #f5f5f5; }
}
@keyframes fadeinout {
0% { background-color: #f5f5f5; }
50% { background-color: #ff8a80; }
100% { background-color: #f5f5f5; }
}
</style>
<script src="https://cdnjs.cloudflare.com/ajax/libs/ace/1.2.8/ace.js" type="text/javascript" charset="utf-8"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/ace/1.2.8/ext-modelist.js" type="text/javascript" charset="utf-8"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/ace/1.2.8/ext-language_tools.js" type="text/javascript" charset="utf-8"></script>
</head>
<body>
<div class="preloader-background">
<div class="preloader-wrapper big active">
<div class="spinner-layer spinner-blue">
<div class="circle-clipper left">
<div class="circle"></div>
</div>
<div class="gap-patch">
<div class="circle"></div>
</div>
<div class="circle-clipper right">
<div class="circle"></div>
</div>
</div>
<div class="spinner-layer spinner-red">
<div class="circle-clipper left">
<div class="circle"></div>
</div>
<div class="gap-patch">
<div class="circle"></div>
</div>
<div class="circle-clipper right">
<div class="circle"></div>
</div>
</div>
<div class="spinner-layer spinner-yellow">
<div class="circle-clipper left">
<div class="circle"></div>
</div>
<div class="gap-patch">
<div class="circle"></div>
</div>
<div class="circle-clipper right">
<div class="circle"></div>
</div>
</div>
<div class="spinner-layer spinner-green">
<div class="circle-clipper left">
<div class="circle"></div>
</div>
<div class="gap-patch">
<div class="circle"></div>
</div>
<div class="circle-clipper right">
<div class="circle"></div>
</div>
</div>
</div>
</div>
<header>
<div class="navbar-fixed">
<nav class="light-blue">
<div class="nav-wrapper">
<ul class="left">
<li><a class="waves-effect waves-light tooltipped files-collapse hide-on-small-only" data-activates="slide-out" data-position="bottom" data-delay="500" data-tooltip="Browse Filesystem" style="padding-left: 25px; padding-right: 25px;"><i class="material-icons">folder</i></a></li>
<li><a class="waves-effect waves-light files-collapse hide-on-med-and-up" data-activates="slide-out" style="padding-left: 25px; padding-right: 25px;"><i class="material-icons">folder</i></a></li>
</ul>
<ul class="right">
<li><a class="waves-effect waves-light tooltipped hide-on-small-only markdirty hidesave" data-position="bottom" data-delay="500" data-tooltip="Save" onclick="save_check()"><i class="material-icons">save</i></a></li>
<li><a class="waves-effect waves-light tooltipped hide-on-small-only modal-trigger" data-position="bottom" data-delay="500" data-tooltip="Close" href="#modal_close"><i class="material-icons">close</i></a></li>
<li><a class="waves-effect waves-light tooltipped hide-on-small-only" data-position="bottom" data-delay="500" data-tooltip="Search" onclick="editor.execCommand('replace')"><i class="material-icons">search</i></a></li>
<li><a class="waves-effect waves-light dropdown-button hide-on-small-only" data-activates="dropdown_menu" data-beloworigin="true"><i class="material-icons right">more_vert</i></a></li>
<li><a class="waves-effect waves-light hide-on-med-and-up markdirty hidesave" onclick="save_check()"><i class="material-icons">save</i></a></li>
<li><a class="waves-effect waves-light hide-on-med-and-up modal-trigger" href="#modal_close"><i class="material-icons">close</i></a></li>
<li><a class="waves-effect waves-light hide-on-med-and-up" onclick="editor.execCommand('replace')"><i class="material-icons">search</i></a></li>
<li><a class="waves-effect waves-light dropdown-button hide-on-med-and-up" data-activates="dropdown_menu_mobile" data-beloworigin="true"><i class="material-icons right">more_vert</i></a></li>
</ul>
</div>
</nav>
</div>
</header>
<main>
<ul id="dropdown_menu" class="dropdown-content z-depth-4">
<li><a class="modal-trigger" target="_blank" href="#modal_components">HASS Components</a></li>
<li><a class="modal-trigger" target="_blank" href="#modal_icons">Material Icons</a></li>
<li><a href="#" data-activates="ace_settings" class="ace_settings-collapse">Editor Settings</a></li>
<li><a class="modal-trigger" href="#modal_about">About HASS-Configurator</a></li>
<li class="divider"></li>
<!--<li><a href="#modal_check_config">Check HASS Configuration</a></li>-->
<li><a class="modal-trigger" href="#modal_reload_automations">Reload automations</a></li>
<li><a class="modal-trigger" href="#modal_reload_scripts">Reload scripts</a></li>
<li><a class="modal-trigger" href="#modal_reload_groups">Reload groups</a></li>
<li><a class="modal-trigger" href="#modal_reload_core">Reload core</a></li>
<li><a class="modal-trigger" href="#modal_restart">Restart HASS</a></li>
<li class="divider"></li>
<li><a class="modal-trigger" href="#modal_exec_command">Execute shell command</a></li>
</ul>
<ul id="dropdown_menu_mobile" class="dropdown-content z-depth-4">
<li><a target="_blank" href="https://home-assistant.io/help/">Need HASS Help?</a></li>
<li><a target="_blank" href="https://home-assistant.io/components/">HASS Components</a></li>
<li><a target="_blank" href="https://materialdesignicons.com/">Material Icons</a></li>
<li><a href="#" data-activates="ace_settings" class="ace_settings-collapse">Editor Settings</a></li>
<li><a class="modal-trigger" href="#modal_about">About HASS-Configurator</a></li>
<li class="divider"></li>
<!--<li><a href="#modal_check_config">Check HASS Configuration</a></li>-->
<li><a class="modal-trigger" href="#modal_reload_automations">Reload automations</a></li>
<li><a class="modal-trigger" href="#modal_reload_scripts">Reload scripts</a></li>
<li><a class="modal-trigger" href="#modal_reload_groups">Reload groups</a></li>
<li><a class="modal-trigger" href="#modal_reload_core">Reload core</a></li>
<li><a class="modal-trigger" href="#modal_restart">Restart HASS</a></li>
<li class="divider"></li>
<li><a class="modal-trigger" href="#modal_exec_command">Execute shell command</a></li>
</ul>
<ul id="dropdown_gitmenu" class="dropdown-content z-depth-4">
<li><a class="modal-trigger" href="#modal_init" class="nowrap waves-effect">git init</a></li>
<li><a class="modal-trigger" href="#modal_commit" class="nowrap waves-effect">git commit</a></li>
<li><a class="modal-trigger" href="#modal_push" class="nowrap waves-effect">git push</a></li>
</ul>
<ul id="dropdown_gitmenu_mobile" class="dropdown-content z-depth-4">
<li><a class="modal-trigger" href="#modal_init" class="nowrap waves-effect">git init</a></li>
<li><a class="modal-trigger" href="#modal_commit" class="nowrap waves-effect">git commit</a></li>
<li><a class="modal-trigger" href="#modal_push" class="nowrap waves-effect">git push</a></li>
</ul>
<div id="modal_components" class="modal bottom-sheet modal-fixed-footer">
<div class="modal-content_nopad">
<iframe src="https://home-assistant.io/components/" style="height: 90vh; width: 100vw"> </iframe>
<a target="_blank" href="https://home-assistant.io/components/" class="hide-on-med-and-down modal_btn waves-effect btn-large btn-flat left"><i class="material-icons">launch</i></a>
</div>
<div class="modal-footer">
<a class="modal-action modal-close waves-effect btn-flat Right light-blue-text">Close</a>
</div>
</div>
<div id="modal_icons" class="modal bottom-sheet modal-fixed-footer">
<div class="modal-content_nopad">
<iframe src="https://materialdesignicons.com/" style="height: 90vh; width: 100vw"> </iframe>
<a target="_blank" href="https://materialdesignicons.com/" class="hide-on-med-and-down modal_btn waves-effect btn-large btn-flat left"><i class="material-icons">launch</i></a>
</div>
<div class="modal-footer">
<a class="modal-action modal-close waves-effect btn-flat Right light-blue-text">Close</a>
</div>
</div>
<div id="modal_acekeyboard" class="modal bottom-sheet modal-fixed-footer">
<div class="modal-content centered">
<h4 class="grey-text text-darken-3">Ace Keyboard Shortcuts<i class="mdi mdi-keyboard right grey-text text-darken-3" style="font-size: 2rem;"></i></h4>
<br>
<ul class="collapsible popout" data-collapsible="expandable">
<li>
<div class="collapsible-header"><i class="material-icons">view_headline</i>Line Operations</div>
<div class="collapsible-body">
<table class="bordered highlight centered">
<thead>
<tr>
<th>Windows/Linux</th>
<th>Mac</th>
<th>Action</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ctrl-D</td>
<td>Command-D</td>
<td>Remove line</td>
</tr>
<tr>
<td>Alt-Shift-Down</td>
<td>Command-Option-Down</td>
<td>Copy lines down</td>
</tr>
<tr>
<td>Alt-Shift-Up</td>
<td>Command-Option-Up</td>
<td>Copy lines up</td>
</tr>
<tr>
<td>Alt-Down</td>
<td>Option-Down</td>
<td>Move lines down</td>
</tr>
<tr>
<td>Alt-Up</td>
<td>Option-Up</td>
<td>Move lines up</td>
</tr>
<tr>
<td>Alt-Delete</td>
<td>Ctrl-K</td>
<td>Remove to line end</td>
</tr>
<tr>
<td>Alt-Backspace</td>
<td>Command-Backspace</td>
<td>Remove to linestart</td>
</tr>
<tr>
<td>Ctrl-Backspace</td>
<td>Option-Backspace, Ctrl-Option-Backspace</td>
<td>Remove word left</td>
</tr>
<tr>
<td>Ctrl-Delete</td>
<td>Option-Delete</td>
<td>Remove word right</td>
</tr>
<tr>
<td>---</td>
<td>Ctrl-O</td>
<td>Split line</td>
</tr>
</tbody>
</table>
</div>
</li>
<li>
<div class="collapsible-header"><i class="material-icons">photo_size_select_small</i>Selection</div>
<div class="collapsible-body">
<table class="bordered highlight centered">
<thead>
<tr>
<th >Windows/Linux</th>
<th >Mac</th>
<th >Action</th>
</tr>
</thead>
<tbody>
<tr>
<td >Ctrl-A</td>
<td >Command-A</td>
<td >Select all</td>
</tr>
<tr>
<td >Shift-Left</td>
<td >Shift-Left</td>
<td >Select left</td>
</tr>
<tr>
<td >Shift-Right</td>
<td >Shift-Right</td>
<td >Select right</td>
</tr>
<tr>
<td >Ctrl-Shift-Left</td>
<td >Option-Shift-Left</td>
<td >Select word left</td>
</tr>
<tr>
<td >Ctrl-Shift-Right</td>
<td >Option-Shift-Right</td>
<td >Select word right</td>
</tr>
<tr>
<td >Shift-Home</td>
<td >Shift-Home</td>
<td >Select line start</td>
</tr>
<tr>
<td >Shift-End</td>
<td >Shift-End</td>
<td >Select line end</td>
</tr>
<tr>
<td >Alt-Shift-Right</td>
<td >Command-Shift-Right</td>
<td >Select to line end</td>
</tr>
<tr>
<td >Alt-Shift-Left</td>
<td >Command-Shift-Left</td>
<td >Select to line start</td>
</tr>
<tr>
<td >Shift-Up</td>
<td >Shift-Up</td>
<td >Select up</td>
</tr>
<tr>
<td >Shift-Down</td>
<td >Shift-Down</td>
<td >Select down</td>
</tr>
<tr>
<td >Shift-PageUp</td>
<td >Shift-PageUp</td>
<td >Select page up</td>
</tr>
<tr>
<td >Shift-PageDown</td>
<td >Shift-PageDown</td>
<td >Select page down</td>
</tr>
<tr>
<td >Ctrl-Shift-Home</td>
<td >Command-Shift-Up</td>
<td >Select to start</td>
</tr>
<tr>
<td >Ctrl-Shift-End</td>
<td >Command-Shift-Down</td>
<td >Select to end</td>
</tr>
<tr>
<td >Ctrl-Shift-D</td>
<td >Command-Shift-D</td>
<td >Duplicate selection</td>
</tr>
<tr>
<td >Ctrl-Shift-P</td>
<td >---</td>
<td >Select to matching bracket</td>
</tr>
</tbody>
</table>
</div>
</li>
<li>
<div class="collapsible-header"><i class="material-icons">multiline_chart</i>Multicursor</div>
<div class="collapsible-body">
<table class="bordered highlight centered">
<thead>
<tr>
<th>Windows/Linux</th>
<th>Mac</th>
<th>Action</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ctrl-Alt-Up</td>
<td>Ctrl-Option-Up</td>
<td>Add multi-cursor above</td>
</tr>
<tr>
<td>Ctrl-Alt-Down</td>
<td>Ctrl-Option-Down</td>
<td>Add multi-cursor below</td>
</tr>
<tr>
<td>Ctrl-Alt-Right</td>
<td>Ctrl-Option-Right</td>
<td>Add next occurrence to multi-selection</td>
</tr>
<tr>
<td>Ctrl-Alt-Left</td>
<td>Ctrl-Option-Left</td>
<td>Add previous occurrence to multi-selection</td>
</tr>
<tr>
<td>Ctrl-Alt-Shift-Up</td>
<td>Ctrl-Option-Shift-Up</td>
<td>Move multicursor from current line to the line above</td>
</tr>
<tr>
<td>Ctrl-Alt-Shift-Down</td>
<td>Ctrl-Option-Shift-Down</td>
<td>Move multicursor from current line to the line below</td>
</tr>
<tr>
<td>Ctrl-Alt-Shift-Right</td>
<td>Ctrl-Option-Shift-Right</td>
<td>Remove current occurrence from multi-selection and move to next</td>
</tr>
<tr>
<td>Ctrl-Alt-Shift-Left</td>
<td>Ctrl-Option-Shift-Left</td>
<td>Remove current occurrence from multi-selection and move to previous</td>
</tr>
<tr>
<td>Ctrl-Shift-L</td>
<td>Ctrl-Shift-L</td>
<td>Select all from multi-selection</td>
</tr>
</tbody>
</table>
</div>
</li>
<li>
<div class="collapsible-header"><i class="material-icons">call_missed_outgoing</i>Go To</div>
<div class="collapsible-body">
<table class="bordered highlight centered">
<thead>
<tr>
<th>Windows/Linux</th>
<th>Mac</th>
<th>Action</th>
</tr>
</thead>
<tbody>
<tr>
<td>Left</td>
<td>Left, Ctrl-B</td>
<td>Go to left</td>
</tr>
<tr>
<td>Right</td>
<td>Right, Ctrl-F</td>
<td>Go to right</td>
</tr>
<tr>
<td>Ctrl-Left</td>
<td>Option-Left</td>
<td>Go to word left</td>
</tr>
<tr>
<td>Ctrl-Right</td>
<td>Option-Right</td>
<td>Go to word right</td>
</tr>
<tr>
<td>Up</td>
<td>Up, Ctrl-P</td>
<td>Go line up</td>
</tr>
<tr>
<td>Down</td>
<td>Down, Ctrl-N</td>
<td>Go line down</td>
</tr>
<tr>
<td>Alt-Left, Home</td>
<td>Command-Left, Home, Ctrl-A</td>
<td>Go to line start</td>
</tr>
<tr>
<td>Alt-Right, End</td>
<td>Command-Right, End, Ctrl-E</td>
<td>Go to line end</td>
</tr>
<tr>
<td>PageUp</td>
<td>Option-PageUp</td>
<td>Go to page up</td>
</tr>
<tr>
<td>PageDown</td>
<td>Option-PageDown, Ctrl-V</td>
<td>Go to page down</td>
</tr>
<tr>
<td>Ctrl-Home</td>
<td>Command-Home, Command-Up</td>
<td>Go to start</td>
</tr>
<tr>
<td>Ctrl-End</td>
<td>Command-End, Command-Down</td>
<td>Go to end</td>
</tr>
<tr>
<td>Ctrl-L</td>
<td>Command-L</td>
<td>Go to line</td>
</tr>
<tr>
<td>Ctrl-Down</td>
<td>Command-Down</td>
<td>Scroll line down</td>
</tr>
<tr>
<td>Ctrl-Up</td>
<td>---</td>
<td>Scroll line up</td>
</tr>
<tr>
<td>Ctrl-P</td>
<td>---</td>
<td>Go to matching bracket</td>
</tr>
<tr>
<td>---</td>
<td>Option-PageDown</td>
<td>Scroll page down</td>
</tr>
<tr>
<td>---</td>
<td>Option-PageUp</td>
<td>Scroll page up</td>
</tr>
</tbody>
</table>
</div>
</li>
<li>
<div class="collapsible-header"><i class="material-icons">find_replace</i>Find/Replace</div>
<div class="collapsible-body">
<table class="bordered highlight centered">
<thead>
<tr>
<th>Windows/Linux</th>
<th>Mac</th>
<th>Action</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ctrl-F</td>
<td>Command-F</td>
<td>Find</td>
</tr>
<tr>
<td>Ctrl-H</td>
<td>Command-Option-F</td>
<td>Replace</td>
</tr>
<tr>
<td>Ctrl-K</td>
<td>Command-G</td>
<td>Find next</td>
</tr>
<tr>
<td>Ctrl-Shift-K</td>
<td>Command-Shift-G</td>
<td>Find previous</td>
</tr>
</tbody>
</table>
</div>
</li>
<li>
<div class="collapsible-header"><i class="material-icons">all_out</i>Folding</div>
<div class="collapsible-body">
<table class="bordered highlight centered">
<thead>
<tr>
<th>Windows/Linux</th>
<th>Mac</th>
<th>Action</th>
</tr>
</thead>
<tbody>
<tr>
<td>Alt-L, Ctrl-F1</td>
<td>Command-Option-L, Command-F1</td>
<td>Fold selection</td>
</tr>
<tr>
<td>Alt-Shift-L, Ctrl-Shift-F1</td>
<td>Command-Option-Shift-L, Command-Shift-F1</td>
<td>Unfold</td>
</tr>
<tr>
<td>Alt-0</td>
<td>Command-Option-0</td>
<td>Fold all</td>
</tr>
<tr>
<td>Alt-Shift-0</td>
<td>Command-Option-Shift-0</td>
<td>Unfold all</td>
</tr>
</tbody>
</table>
</div>
</li>
<li>
<div class="collapsible-header"><i class="material-icons">devices_other</i>Other</div>
<div class="collapsible-body">
<table class="bordered highlight centered">
<thead>
<tr>
<th>Windows/Linux</th>
<th>Mac</th>
<th>Action</th>
</tr>
</thead>
<tbody>
<tr>
<td>Tab</td>
<td>Tab</td>
<td>Indent</td>
</tr>
<tr>
<td>Shift-Tab</td>
<td>Shift-Tab</td>
<td>Outdent</td>
</tr>
<tr>
<td>Ctrl-Z</td>
<td>Command-Z</td>
<td>Undo</td>
</tr>
<tr>
<td>Ctrl-Shift-Z, Ctrl-Y</td>
<td>Command-Shift-Z, Command-Y</td>
<td>Redo</td>
</tr>
<tr>
<td>Ctrl-,</td>
<td>Command-,</td>
<td>Show the settings menu</td>
</tr>
<tr>
<td>Ctrl-/</td>
<td>Command-/</td>
<td>Toggle comment</td>
</tr>
<tr>
<td>Ctrl-T</td>
<td>Ctrl-T</td>
<td>Transpose letters</td>
</tr>
<tr>
<td>Ctrl-Enter</td>
<td>Command-Enter</td>
<td>Enter full screen</td>
</tr>
<tr>
<td>Ctrl-Shift-U</td>
<td>Ctrl-Shift-U</td>
<td>Change to lower case</td>
</tr>
<tr>
<td>Ctrl-U</td>
<td>Ctrl-U</td>
<td>Change to upper case</td>
</tr>
<tr>
<td>Insert</td>
<td>Insert</td>
<td>Overwrite</td>
</tr>
<tr>
<td>Ctrl-Shift-E</td>
<td>Command-Shift-E</td>
<td>Macros replay</td>
</tr>
<tr>
<td>Ctrl-Alt-E</td>
<td>---</td>
<td>Macros recording</td>
</tr>
<tr>
<td>Delete</td>
<td>---</td>
<td>Delete</td>
</tr>
<tr>
<td>---</td>
<td>Ctrl-L</td>
<td>Center selection</td>
</tr>
</tbody>
</table>
</div>
</li>
</ul>
</div>
<div class="modal-footer">
<a class="modal-action modal-close waves-effect btn-flat light-blue-text">Close</a>
</div>
</div>
<div id="modal_save" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Save<i class="grey-text text-darken-3 material-icons right" style="font-size: 2rem;">save</i></h4>
<p>Do you really want to save?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="save()" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_upload" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Upload File<i class="grey-text text-darken-3 material-icons right" style="font-size: 2.28rem;">file_upload</i></h4>
<p>Please choose a file to upload</p>
<form action="#" id="uploadform">
<div class="file-field input-field">
<div class="btn light-blue waves-effect">
<span>File</span>
<input type="file" id="uploadfile" />
</div>
<div class="file-path-wrapper">
<input class="file-path validate" type="text">
</div>
</div>
</form>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">Cancel</a>
<a onclick="upload()" class="modal-action modal-close waves-effect waves-green btn-flat light-blue-text">OK</a>
</div>
</div>
<div id="modal_init" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">git init<i class="mdi mdi-git right grey-text text-darken-3" style="font-size: 2.48rem;"></i></h4>
<p>Are you sure you want to initialize a repository at the current path?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">Cancel</a>
<a onclick="gitinit()" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">OK</a>
</div>
</div>
<div id="modal_commit" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">git commit<i class="mdi mdi-git right grey-text text-darken-3" style="font-size: 2.48rem;"></i></h4>
<div class="row">
<div class="input-field col s12">
<input type="text" id="commitmessage">
<label class="active" for="commitmessage">Commit message</label>
</div>
</div>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">Cancel</a>
<a onclick="commit(document.getElementById('commitmessage').value)" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">OK</a>
</div>
</div>
<div id="modal_push" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">git push<i class="mdi mdi-git right grey-text text-darken-3" style="font-size: 2.48rem;"></i></h4>
<p>Are you sure you want to push your commited changes to the configured remote / origin?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">Cancel</a>
<a onclick="gitpush()" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">OK</a>
</div>
</div>
<div id="modal_close" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Close File<i class="grey-text text-darken-3 material-icons right" style="font-size: 2.28rem;">close</i></h4>
<p>Are you sure you want to close the current file? Unsaved changes will be lost.</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="document.getElementById('currentfile').value='';editor.getSession().setValue('');$('.markdirty').each(function(i, o){o.classList.remove('red');});" class="modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_delete" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Delete</h4>
<p>Are you sure you want to delete <span class="fb_currentfile"></span>?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="delete_element()" class="modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_gitadd" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">git add<i class="mdi mdi-git right grey-text text-darken-3" style="font-size: 2.48rem;"></i></h4>
<p>Are you sure you want to add <span class="fb_currentfile"></span> to the index?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="gitadd()" class="modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_check_config" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Check configuration<i class="mdi mdi-settings right grey-text text-darken-3" style="font-size: 2rem;"></i></h4>
<p>Do you want to check the configuration?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="check_config()" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_reload_automations" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Reload automations<i class="mdi mdi-settings right grey-text text-darken-3" style="font-size: 2rem;"></i></h4>
<p>Do you want to reload the automations?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="reload_automations()" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_reload_scripts" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Reload scripts<i class="mdi mdi-settings right grey-text text-darken-3" style="font-size: 2rem;"></i></h4>
<p>Do you want to reload the scripts?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="reload_scripts()" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_reload_groups" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Reload groups<i class="mdi mdi-settings right grey-text text-darken-3" style="font-size: 2rem;"></i></h4>
<p>Do you want to reload the groups?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="reload_groups()" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_reload_core" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Reload core<i class="mdi mdi-settings right grey-text text-darken-3" style="font-size: 2rem;"></i></h4>
<p>Do you want to reload the core?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="reload_core()" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_restart" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Restart<i class="mdi mdi-restart right grey-text text-darken-3" style="font-size: 2rem;"></i></h4>
<p>Do you really want to restart Home Assistant?</p>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">No</a>
<a onclick="restart()" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Yes</a>
</div>
</div>
<div id="modal_exec_command" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Execute shell command<i class="mdi mdi-laptop right grey-text text-darken-3" style="font-size: 2rem;"></i></h4>
<pre class="col s6" id="command_history"></pre>
<br>
<div class="row">
<div class="input-field col s12">
<input placeholder="/bin/ls -l /var/log" id="commandline" type="text">
<label for="commandline">Command</label>
</div>
</div>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">Close</a>
<a onclick="document.getElementById('command_history').innerText='';" class=" modal-action waves-effect waves-green btn-flat light-blue-text">Clear</a>
<a onclick="exec_command()" class=" modal-action waves-effect waves-green btn-flat light-blue-text">Execute</a>
</div>
</div>
<div id="modal_markdirty" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">Unsaved Changes<i class="grey-text text-darken-3 material-icons right" style="font-size: 2rem;">save</i></h4>
<p>You have unsaved changes in the current file. Please save the changes or close the file before opening a new one.</p>
</div>
<div class="modal-footer">
<a class="modal-action modal-close waves-effect waves-red btn-flat light-blue-text">Abort</a>
<a onclick="document.getElementById('currentfile').value='';editor.getSession().setValue('');$('.markdirty').each(function(i, o){o.classList.remove('red');});" class="modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Close file</a>
<a onclick="save()" class="modal-action modal-close waves-effect waves-green btn-flat light-blue-text">Save changes</a>
</div>
</div>
<div id="modal_newfolder" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">New Folder<i class="grey-text text-darken-3 material-icons right" style="font-size: 2rem;">create_new_folder</i></h4>
<br>
<div class="row">
<div class="input-field col s12">
<input type="text" id="newfoldername">
<label class="active" for="newfoldername">New Folder Name</label>
</div>
</div>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">Cancel</a>
<a onclick="newfolder(document.getElementById('newfoldername').value)" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">OK</a>
</div>
</div>
<div id="modal_newfile" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">New File<i class="grey-text text-darken-3 material-icons right" style="font-size: 2rem;">note_add</i></h4>
<br>
<div class="row">
<div class="input-field col s12">
<input type="text" id="newfilename">
<label class="active" for="newfilename">New File Name</label>
</div>
</div>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">Cancel</a>
<a onclick="newfile(document.getElementById('newfilename').value)" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">OK</a>
</div>
</div>
<div id="modal_newbranch" class="modal">
<div class="modal-content">
<h4 class="grey-text text-darken-3">New Branch<i class="mdi mdi-git right grey-text text-darken-3" style="font-size: 2.48rem;"></i></h4>
<div class="row">
<div class="input-field col s12">
<input type="text" id="newbranch">
<label class="active" for="newbranch">New Branch Name</label>
</div>
</div>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect waves-red btn-flat light-blue-text">Cancel</a>
<a onclick="newbranch(document.getElementById('newbranch').value)" class=" modal-action modal-close waves-effect waves-green btn-flat light-blue-text">OK</a>
</div>
</div>
<div id="modal_about" class="modal modal-fixed-footer">
<div class="modal-content">
<h4 class="grey-text text-darken-3"><a class="black-text" href="https://github.com/danielperna84/hass-configurator/" target="_blank">HASS Configurator</a></h4>
<p>Version: <a class="$versionclass" href="https://github.com/danielperna84/hass-configurator/releases/" target="_blank">$current</a></p>
<p>Web-based file editor designed to modify configuration files of <a class="light-blue-text" href="https://home-assistant.io/" target="_blank">Home Assistant</a> or other textual files. Use at your own risk.</p>
<p>Published under the MIT license</p>
<p>Developed by:</p>
<ul>
<li>
<div class="chip"> <img src="https://avatars3.githubusercontent.com/u/7396998?v=3&s=400" alt="Contact Person"> <a class="black-text" href="https://github.com/danielperna84" target="_blank">Daniel Perna</a> </div>
</li>
<li>
<div class="chip"> <img src="https://avatars2.githubusercontent.com/u/1509640?v=3&s=460" alt="Contact Person"> <a class="black-text" href="https://github.com/jmart518" target="_blank">JT Martinez</a> </div>
</li>
</ul>
<p>Libraries used:</p>
<div class="row">
<div class="col s6 m3 l3">
<a href="https://ace.c9.io/" target="_blank">
<div class="card grey lighten-3 hoverable waves-effect">
<div class="card-image">
<img src="https://drive.google.com/uc?export=view&id=0B6wTGzSOtvNBeld4U09LQkV0c2M">
</div>
<div class="card-content">
<p class="grey-text text-darken-2">Ace Editor</p>
</div>
</div>
</a>
</div>
<div class="col s6 m3 l3">
<a class="light-blue-text" href="http://materializecss.com/" target="_blank">
<div class="card grey lighten-3 hoverable">
<div class="card-image">
<img src="https://evwilkin.github.io/images/materializecss.png">
</div>
<div class="card-content">
<p class="grey-text text-darken-2">Materialize</p>
</div>
</div>
</a>
</div>
<div class="col s6 m3 l3">
<a class="light-blue-text" href="https://jquery.com/" target="_blank">
<div class="card grey lighten-3 hoverable">
<div class="card-image">
<img src="https://drive.google.com/uc?export=view&id=0B6wTGzSOtvNBdFI0ZXRGb01xNzQ">
</div>
<div class="card-content">
<p class="grey-text text-darken-2">JQuery</p>
</div>
</div>
</a>
</div>
<div class="col s6 m3 l3">
<a class="light-blue-text" href="https://gitpython.readthedocs.io" target="_blank">
<div class="card grey lighten-3 hoverable">
<div class="card-image">
<img src="https://drive.google.com/uc?export=view&id=0B6wTGzSOtvNBakk4ek1uRGxqYVE">
</div>
<div class="card-content">
<p class="grey-text text-darken-2">GitPython</p>
</div>
</div>
</a>
</div>
</div>
</div>
<div class="modal-footer">
<a class=" modal-action modal-close waves-effect btn-flat light-blue-text">OK</a>
</div>
</div>
<!-- Main Editor Area -->
<div class="row">
<div class="col m4 l3 hide-on-small-only">
<br>
<div class="input-field col s12">
<select onchange="insert(this.value)">
<option value="" disabled selected>Select trigger platform</option>
<option value="event">Event</option>
<option value="mqtt">MQTT</option>
<option value="numeric_state">Numeric State</option>
<option value="state">State</option>
<option value="sun">Sun</option>
<option value="template">Template</option>
<option value="time">Time</option>
<option value="zone">Zone</option>
</select>
<label>Trigger platforms</label>
</div>
<div class="input-field col s12">
<select id="events" onchange="insert(this.value)"></select>
<label>Events</label>
</div>
<div class="input-field col s12">
<select id="entities" onchange="insert(this.value)"></select>
<label>Entities</label>
</div>
<div class="input-field col s12">
<select onchange="insert(this.value)">
<option value="" disabled selected>Select condition</option>
<option value="numeric_state">Numeric state</option>
<option value="state">State</option>
<option value="sun">Sun</option>
<option value="template">Template</option>
<option value="time">Time</option>
<option value="zone">Zone</option>
</select>
<label>Conditions</label>
</div>
<div class="input-field col s12">
<select id="services" onchange="insert(this.value)"> </select>
<label>Services</label>
</div>
</div>
<div class="col s12 m8 l9">
<div class="card input-field col s12 grey lighten-4 hoverable pathtip">
<input class="currentfile_input" value="" id="currentfile" type="text">
</div>
</div>
<div class="col s12 m8 l9 z-depth-2" id="editor"></div>
<div id="edit_float" class="fixed-action-btn vertical click-to-toggle">
<a class="btn-floating btn-large red accent-2 hoverable">
<i class="material-icons">edit</i>
</a>
<ul>
<li><a class="btn-floating yellow tooltipped" data-position="left" data-delay="50" data-tooltip="Undo" onclick="editor.execCommand('undo')"><i class="material-icons">undo</i></a></li>
<li><a class="btn-floating green tooltipped" data-position="left" data-delay="50" data-tooltip="Redo" onclick="editor.execCommand('redo')"><i class="material-icons">redo</i></a></li>
<li><a class="btn-floating blue tooltipped" data-position="left" data-delay="50" data-tooltip="Indent" onclick="editor.execCommand('indent')"><i class="material-icons">format_indent_increase</i></a></li>
<li><a class="btn-floating orange tooltipped" data-position="left" data-delay="50" data-tooltip="Outdent" onclick="editor.execCommand('outdent')"><i class="material-icons">format_indent_decrease</i></a></li>
<li><a class="btn-floating brown tooltipped" data-position="left" data-delay="50" data-tooltip="Fold" onclick="toggle_fold()"><i class="material-icons">all_out</i></a></li>
</ul>
</div>
</div>
<!-- Left filebrowser sidenav -->
<div class="row">
<ul id="slide-out" class="side-nav grey lighten-4">
<li class="no-padding">
<ul class="row no-padding center hide-on-small-only grey lighten-4" style="margin-bottom: 0;">
<a class="col s3 waves-effect fbtoolbarbutton tooltipped modal-trigger" href="#modal_newfile" data-position="bottom" data-delay="500" data-tooltip="New File"><i class="grey-text text-darken-2 material-icons fbtoolbarbutton_icon">note_add</i></a>
<a class="col s3 waves-effect fbtoolbarbutton tooltipped modal-trigger" href="#modal_newfolder" data-position="bottom" data-delay="500" data-tooltip="New Folder"><i class="grey-text text-darken-2 material-icons fbtoolbarbutton_icon">create_new_folder</i></a>
<a class="col s3 waves-effect fbtoolbarbutton tooltipped modal-trigger" href="#modal_upload" data-position="bottom" data-delay="500" data-tooltip="Upload File"><i class="grey-text text-darken-2 material-icons fbtoolbarbutton_icon">file_upload</i></a>
<a class="col s3 waves-effect fbtoolbarbutton tooltipped dropdown-button" data-activates="dropdown_gitmenu" data-alignment='right' data-beloworigin='true' data-delay='500' data-position="bottom" data-tooltip="Git"><i class="mdi mdi-git grey-text text-darken-2 material-icons" style="padding-top: 17px;"></i></a>
</ul>
<ul class="row center toolbar_mobile hide-on-med-and-up grey lighten-4" style="margin-bottom: 0;">
<a class="col s3 waves-effect fbtoolbarbutton modal-trigger" href="#modal_newfile"><i class="grey-text text-darken-2 material-icons fbtoolbarbutton_icon">note_add</i></a>
<a class="col s3 waves-effect fbtoolbarbutton modal-trigger" href="#modal_newfolder"><i class="grey-text text-darken-2 material-icons fbtoolbarbutton_icon">create_new_folder</i></a>
<a class="col s3 waves-effect fbtoolbarbutton modal-trigger" href="#modal_upload"><i class="grey-text text-darken-2 material-icons fbtoolbarbutton_icon">file_upload</i></a>
<a class="col s3 waves-effect fbtoolbarbutton dropdown-button" data-activates="dropdown_gitmenu_mobile" data-alignment='right' data-beloworigin='true'><i class="mdi mdi-git grey-text text-darken-2 material-icons" style="padding-top: 17px;"></i></a>
</ul>
</li>
<li>
<div class="col s2 no-padding" style="min-height: 64px">
<a id="uplink" class="col s12 waves-effect" style="min-height: 64px; padding-top: 15px; cursor: pointer;"><i class="arrow grey-text text-darken-2 material-icons">arrow_back</i></a>
</div>
<div class="col s10 " style="white-space: nowrap; overflow: auto; min-height: 64px">
<div id="fbheader" class="leftellipsis"></div>
</div>
</li>
<ul id='branches' class="dropdown-content branch_select z-depth-2 grey lighten-4">
<ul id="branchlist"></ul>
</ul>
<li>
<ul class="row no-padding" style="margin-bottom: 0;">
<a id="branchselector" class="col s10 dropdown-button waves-effect truncate grey-text text-darken-2" data-beloworigin="true" data-activates='branches'><i class="grey-text text-darken-2 left material-icons" style="margin-left: 0; margin-right: 0; padding-top: 12px; padding-right: 8px;">arrow_drop_down</i>Branch:<span id="fbheaderbranch"></span></a>
<a id="newbranchbutton" class="waves-effect col s2 center modal-trigger" href="#modal_newbranch"><i class="grey-text text-darken-2 center material-icons" style="padding-top: 12px;">add</i></a>
</ul>
<div class="divider" style="margin-top: 0;"></div>
</li>
<li>
<ul id="fbelements"></ul>
</li>
<div class="row col s12 shadow"></div>
<div class="z-depth-3 hide-on-med-and-up">
<div class="input-field col s12" style="margin-top: 30px;">
<select onchange="insert(this.value)">
<option value="" disabled selected>Select trigger platform</option>
<option value="event">Event</option>
<option value="mqtt">MQTT</option>
<option value="numeric_state">Numeric State</option>
<option value="state">State</option>
<option value="sun">Sun</option>
<option value="template">Template</option>
<option value="time">Time</option>
<option value="zone">Zone</option>
</select>
<label>Trigger Platforms</label>
</div>
<div class="input-field col s12">
<select id="events_side" onchange="insert(this.value)"></select>
<label>Events</label>
</div>
<div class="input-field col s12">
<select id="entities_side" onchange="insert(this.value)"></select>
<label>Entities</label>
</div>
<div class="input-field col s12">
<select onchange="insert(this.value)">
<option value="" disabled selected>Select condition</option>
<option value="numeric_state">Numeric state</option>
<option value="state">State</option>
<option value="sun">Sun</option>
<option value="template">Template</option>
<option value="time">Time</option>
<option value="zone">Zone</option>
</select>
<label>Conditions</label>
</div>
<div class="input-field col s12">
<select id="services_side" onchange="insert(this.value)"></select>
<label>Services</label>
</div>
</div>
</ul>
</div>
<!-- Ace Editor SideNav -->
<div class="row">
<ul id="ace_settings" class="side-nav">
<li class="center s12 grey lighten-3 z-depth-1 subheader">Editor Settings</li>
<div class="row col s12">
<p class="col s12"> <a class="waves-effect waves-light btn light-blue modal-trigger" href="#modal_acekeyboard">Keyboard Shortcuts</a> </p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('animatedScroll', !editor.getOptions().animatedScroll)" id="animatedScroll" />
<Label for="animatedScroll">Animated Scroll</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('behavioursEnabled', !editor.getOptions().behavioursEnabled)" id="behavioursEnabled" />
<Label for="behavioursEnabled">Behaviour Enabled</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('displayIndentGuides', !editor.getOptions().displayIndentGuides)" id="displayIndentGuides" />
<Label for="displayIndentGuides">Display Indent Guides</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('fadeFoldWidgets', !editor.getOptions().fadeFoldWidgets)" id="fadeFoldWidgets" />
<Label for="fadeFoldWidgets">Fade Fold Widgets</label>
</p>
<div class="input-field col s12">
<input type="number" onchange="editor.setOption('fontSize', parseInt(this.value))" min="6" id="fontSize">
<label class="active" for="fontSize">Font Size</label>
</div>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('highlightActiveLine', !editor.getOptions().highlightActiveLine)" id="highlightActiveLine" />
<Label for="highlightActiveLine">Hightlight Active Line</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('highlightGutterLine', !editor.getOptions().highlightGutterLine)" id="highlightGutterLine" />
<Label for="highlightGutterLine">Hightlight Gutter Line</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('highlightSelectedWord', !editor.getOptions().highlightSelectedWord)" id="highlightSelectedWord" />
<Label for="highlightSelectedWord">Hightlight Selected Word</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('hScrollBarAlwaysVisible', !editor.getOptions().hScrollBarAlwaysVisible)" id="hScrollBarAlwaysVisible" />
<Label for="hScrollBarAlwaysVisible">H Scroll Bar Always Visible</label>
</p>
<div class="input-field col s12">
<select onchange="editor.setKeyboardHandler(this.value)" id="setKeyboardHandler">
<option value="">ace</option>
<option value="ace/keyboard/vim">vim</option>
<option value="ace/keyboard/emacs">emacs</option>
</select>
<label for="setKeyboardHandler">Keyboard Handler</label>
</div>
<div class="input-field col s12">
<select onchange="editor.setOption('mode', this.value)" id="mode">
<option value="ace/mode/abap">abap</option>
<option value="ace/mode/abc">abc</option>
<option value="ace/mode/actionscript">actionscript</option>
<option value="ace/mode/ada">ada</option>
<option value="ace/mode/apache_conf">apache_conf</option>
<option value="ace/mode/asciidoc">asciidoc</option>
<option value="ace/mode/assembly_x86">assembly_x86</option>
<option value="ace/mode/autohotkey">autohotkey</option>
<option value="ace/mode/batchfile">batchfile</option>
<option value="ace/mode/bro">bro</option>
<option value="ace/mode/c_cpp">c_cpp</option>
<option value="ace/mode/c9search">c9search</option>
<option value="ace/mode/cirru">cirru</option>
<option value="ace/mode/clojure">clojure</option>
<option value="ace/mode/cobol">cobol</option>
<option value="ace/mode/coffee">coffee</option>
<option value="ace/mode/coldfusion">coldfusion</option>
<option value="ace/mode/csharp">csharp</option>
<option value="ace/mode/css">css</option>
<option value="ace/mode/curly">curly</option>
<option value="ace/mode/d">d</option>
<option value="ace/mode/dart">dart</option>
<option value="ace/mode/diff">diff</option>
<option value="ace/mode/django">django</option>
<option value="ace/mode/dockerfile">dockerfile</option>
<option value="ace/mode/dot">dot</option>
<option value="ace/mode/drools">drools</option>
<option value="ace/mode/dummy">dummy</option>
<option value="ace/mode/dummysyntax">dummysyntax</option>
<option value="ace/mode/eiffel">eiffel</option>
<option value="ace/mode/ejs">ejs</option>
<option value="ace/mode/elixir">elixir</option>
<option value="ace/mode/elm">elm</option>
<option value="ace/mode/erlang">erlang</option>
<option value="ace/mode/forth">forth</option>
<option value="ace/mode/fortran">fortran</option>
<option value="ace/mode/ftl">ftl</option>
<option value="ace/mode/gcode">gcode</option>
<option value="ace/mode/gherkin">gherkin</option>
<option value="ace/mode/gitignore">gitignore</option>
<option value="ace/mode/glsl">glsl</option>
<option value="ace/mode/gobstones">gobstones</option>
<option value="ace/mode/golang">golang</option>
<option value="ace/mode/groovy">groovy</option>
<option value="ace/mode/haml">haml</option>
<option value="ace/mode/handlebars">handlebars</option>
<option value="ace/mode/haskell">haskell</option>
<option value="ace/mode/haskell_cabal">haskell_cabal</option>
<option value="ace/mode/haxe">haxe</option>
<option value="ace/mode/hjson">hjson</option>
<option value="ace/mode/html">html</option>
<option value="ace/mode/html_elixir">html_elixir</option>
<option value="ace/mode/html_ruby">html_ruby</option>
<option value="ace/mode/ini">ini</option>
<option value="ace/mode/io">io</option>
<option value="ace/mode/jack">jack</option>
<option value="ace/mode/jade">jade</option>
<option value="ace/mode/java">java</option>
<option value="ace/mode/javascript">javascript</option>
<option value="ace/mode/json">json</option>
<option value="ace/mode/jsoniq">jsoniq</option>
<option value="ace/mode/jsp">jsp</option>
<option value="ace/mode/jsx">jsx</option>
<option value="ace/mode/julia">julia</option>
<option value="ace/mode/kotlin">kotlin</option>
<option value="ace/mode/latex">latex</option>
<option value="ace/mode/less">less</option>
<option value="ace/mode/liquid">liquid</option>
<option value="ace/mode/lisp">lisp</option>
<option value="ace/mode/livescript">livescript</option>
<option value="ace/mode/logiql">logiql</option>
<option value="ace/mode/lsl">lsl</option>
<option value="ace/mode/lua">lua</option>
<option value="ace/mode/luapage">luapage</option>
<option value="ace/mode/lucene">lucene</option>
<option value="ace/mode/makefile">makefile</option>
<option value="ace/mode/markdown">markdown</option>
<option value="ace/mode/mask">mask</option>
<option value="ace/mode/matlab">matlab</option>
<option value="ace/mode/maze">maze</option>
<option value="ace/mode/mel">mel</option>
<option value="ace/mode/mushcode">mushcode</option>
<option value="ace/mode/mysql">mysql</option>
<option value="ace/mode/nix">nix</option>
<option value="ace/mode/nsis">nsis</option>
<option value="ace/mode/objectivec">objectivec</option>
<option value="ace/mode/ocaml">ocaml</option>
<option value="ace/mode/pascal">pascal</option>
<option value="ace/mode/perl">perl</option>
<option value="ace/mode/pgsql">pgsql</option>
<option value="ace/mode/php">php</option>
<option value="ace/mode/powershell">powershell</option>
<option value="ace/mode/praat">praat</option>
<option value="ace/mode/prolog">prolog</option>
<option value="ace/mode/properties">properties</option>
<option value="ace/mode/protobuf">protobuf</option>
<option value="ace/mode/python">python</option>
<option value="ace/mode/r">r</option>
<option value="ace/mode/razor">razor</option>
<option value="ace/mode/rdoc">rdoc</option>
<option value="ace/mode/rhtml">rhtml</option>
<option value="ace/mode/rst">rst</option>
<option value="ace/mode/ruby">ruby</option>
<option value="ace/mode/rust">rust</option>
<option value="ace/mode/sass">sass</option>
<option value="ace/mode/scad">scad</option>
<option value="ace/mode/scala">scala</option>
<option value="ace/mode/scheme">scheme</option>
<option value="ace/mode/scss">scss</option>
<option value="ace/mode/sh">sh</option>
<option value="ace/mode/sjs">sjs</option>
<option value="ace/mode/smarty">smarty</option>
<option value="ace/mode/snippets">snippets</option>
<option value="ace/mode/soy_template">soy_template</option>
<option value="ace/mode/space">space</option>
<option value="ace/mode/sql">sql</option>
<option value="ace/mode/sqlserver">sqlserver</option>
<option value="ace/mode/stylus">stylus</option>
<option value="ace/mode/svg">svg</option>
<option value="ace/mode/swift">swift</option>
<option value="ace/mode/tcl">tcl</option>
<option value="ace/mode/tex">tex</option>
<option value="ace/mode/text">text</option>
<option value="ace/mode/textile">textile</option>
<option value="ace/mode/toml">toml</option>
<option value="ace/mode/tsx">tsx</option>
<option value="ace/mode/twig">twig</option>
<option value="ace/mode/typescript">typescript</option>
<option value="ace/mode/vala">vala</option>
<option value="ace/mode/vbscript">vbscript</option>
<option value="ace/mode/velocity">velocity</option>
<option value="ace/mode/verilog">verilog</option>
<option value="ace/mode/vhdl">vhdl</option>
<option value="ace/mode/wollok">wollok</option>
<option value="ace/mode/xml">xml</option>
<option value="ace/mode/xquery">xquery</option>
<option value="ace/mode/yaml">yaml</option>
</select>
<label for="mode">Mode</label>
</div>
<div class="input-field col s12">
<select onchange="editor.setOption('newLineMode', this.value)" id="newLineMode">
<option value="auto">Auto</option>
<option value="windows">Windows</option>
<option value="unix">Unix</option>
</select>
<label for="newLineMode">New Line Mode</label>
</div>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('overwrite', !editor.getOptions().overwrite)" id="overwrite" />
<Label for="overwrite">Overwrite</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('readOnly', !editor.getOptions().readOnly)" id="readOnly" />
<Label for="readOnly">Read Only</label>
</p>
<div class="input-field col s12">
<input value="2" type="number" onchange="editor.setOption('scrollSpeed', parseInt(this.value))" id="scrollSpeed">
<label class="active" for="scrollSpeed">Scroll Speed</label>
</div>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('showFoldWidgets', !editor.getOptions().showFoldWidgets)" id="showFoldWidgets" />
<Label for="showFoldWidgets">Show Fold Widgets</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('showGutter', !editor.getOptions().showGutter)" id="showGutter" />
<Label for="showGutter">Show Gutter</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('showInvisibles', !editor.getOptions().showInvisibles)" id="showInvisibles" />
<Label for="showInvisibles">Show Invisibles</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('showPrintMargin', !editor.getOptions().showPrintMargin)" id="showPrintMargin" />
<Label for="showPrintMargin">Show Print Margin</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('showLineNumbers', !editor.getOptions().showLineNumbers)" id="showLineNumbers" />
<Label for="showLineNumbers">Show Line Numbers</label>
</p>
<div class="input-field col s12">
<input type="number" onchange="editor.setOption('tabSize', parseInt(this.value))" min="1" id="tabSize">
<label class="active" for="tabSize">Tab Size</label>
</div>
<div class="input-field col s12">
<select onchange="editor.setTheme(this.value)" id="theme">
<optgroup label="Light Themes">
<option value="ace/theme/chrome">Chrome</option>
<option value="ace/theme/clouds">Clouds</option>
<option value="ace/theme/crimson_editor">Crimson Editor</option>
<option value="ace/theme/dawn">Dawn</option>
<option value="ace/theme/dreamweaver">Dreamweaver</option>
<option value="ace/theme/eclipse">Eclipse</option>
<option value="ace/theme/github">GitHub</option>
<option value="ace/theme/iplastic">IPlastic</option>
<option value="ace/theme/solarized_light">Solarized Light</option>
<option value="ace/theme/textmate">TextMate</option>
<option value="ace/theme/tomorrow">Tomorrow</option>
<option value="ace/theme/xcode">XCode</option>
<option value="ace/theme/kuroir">Kuroir</option>
<option value="ace/theme/katzenmilch">KatzenMilch</option>
<option value="ace/theme/sqlserver">SQL Server</option>
</optgroup>
<optgroup label="Dark Themes">
<option value="ace/theme/ambiance">Ambiance</option>
<option value="ace/theme/chaos">Chaos</option>
<option value="ace/theme/clouds_midnight">Clouds Midnight</option>
<option value="ace/theme/cobalt">Cobalt</option>
<option value="ace/theme/gruvbox">Gruvbox</option>
<option value="ace/theme/idle_fingers">idle Fingers</option>
<option value="ace/theme/kr_theme">krTheme</option>
<option value="ace/theme/merbivore">Merbivore</option>
<option value="ace/theme/merbivore_soft">Merbivore Soft</option>
<option value="ace/theme/mono_industrial">Mono Industrial</option>
<option value="ace/theme/monokai">Monokai</option>
<option value="ace/theme/pastel_on_dark">Pastel on dark</option>
<option value="ace/theme/solarized_dark">Solarized Dark</option>
<option value="ace/theme/terminal">Terminal</option>
<option value="ace/theme/tomorrow_night">Tomorrow Night</option>
<option value="ace/theme/tomorrow_night_blue">Tomorrow Night Blue</option>
<option value="ace/theme/tomorrow_night_bright">Tomorrow Night Bright</option>
<option value="ace/theme/tomorrow_night_eighties">Tomorrow Night 80s</option>
<option value="ace/theme/twilight">Twilight</option>
<option value="ace/theme/vibrant_ink">Vibrant Ink</option>
</optgroup>
</select>
<label for="theme">Theme</label>
</div>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('useSoftTabs', !editor.getOptions().useSoftTabs)" id="useSoftTabs" />
<Label for="useSoftTabs">Use Soft Tabs</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('useWorker', !editor.getOptions().useWorker)" id="useWorker" />
<Label for="useWorker">Use Worker</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('vScrollBarAlwaysVisible', !editor.getOptions().vScrollBarAlwaysVisible)" id="vScrollBarAlwaysVisible" />
<Label for="vScrollBarAlwaysVisible">V Scroll Bar Always Visible</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.setOption('wrapBehavioursEnabled', !editor.getOptions().wrapBehavioursEnabled)" id="wrapBehavioursEnabled" />
<Label for="wrapBehavioursEnabled">Wrap Behaviours Enabled</label>
</p>
<p class="col s12">
<input type="checkbox" class="blue_check" onclick="editor.getSession().setUseWrapMode(!editor.getSession().getUseWrapMode());if(editor.getSession().getUseWrapMode()){document.getElementById('wrap_limit').focus();document.getElementById('wrap_limit').onchange();}" id="wrap" />
<Label for="wrap">Wrap Mode</label>
</p>
<div class="input-field col s12">
<input id="wrap_limit" type="number" onchange="editor.setOption('wrap', parseInt(this.value))" min="1" value="80">
<label class="active" for="wrap_limit">Wrap Limit</label>
</div> <a class="waves-effect waves-light btn light-blue" onclick="save_ace_settings()">Save Settings Locally</a>
<p class="center col s12"> Ace Editor 1.2.8 </p>
</div>
</ul>
</div>
</main>
<input type="hidden" id="fb_currentfile" value="" />
<!-- Scripts -->
<script src="https://code.jquery.com/jquery-3.2.1.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/materialize/0.100.1/js/materialize.min.js"></script>
<script type="text/javascript">
$(document).ready(function () {
$('select').material_select();
$('.modal').modal();
$('ul.tabs').tabs();
$('.collapsible').collapsible({
onOpen: function(el) {
$('#branch_tab').click();
},
});
$('.dropdown-button').dropdown({
inDuration: 300,
outDuration: 225,
constrainWidth: false,
hover: false,
gutter: 0,
belowOrigin: true,
alignment: 'right',
stopPropagation: false
});
$('.files-collapse').sideNav({
menuWidth: 320,
edge: 'left',
closeOnClick: false,
draggable: true
});
$('.ace_settings-collapse').sideNav({
menuWidth: 300,
edge: 'right',
closeOnClick: true,
draggable: true
});
listdir('.');
});
</script>
<script type="text/javascript">
document.addEventListener("DOMContentLoaded", function(){
$('.preloader-background').delay(800).fadeOut('slow');
$('.preloader-wrapper')
.delay(800)
.fadeOut('slow');
});
</script>
<script>
var modemapping = new Object();
modemapping['c'] = 'ace/mode/c_cpp';
modemapping['cpp'] = 'ace/mode/c_cpp';
modemapping['css'] = 'ace/mode/css';
modemapping['gitignore'] = 'ace/mode/gitignore';
modemapping['htm'] = 'ace/mode/html';
modemapping['html'] = 'ace/mode/html';
modemapping['js'] = 'ace/mode/javascript';
modemapping['json'] = 'ace/mode/json';
modemapping['php'] = 'ace/mode/php';
modemapping['py'] = 'ace/mode/python';
modemapping['sh'] = 'ace/mode/sh';
modemapping['sql'] = 'ace/mode/sql';
modemapping['txt'] = 'ace/mode/text';
modemapping['xml'] = 'ace/mode/xml';
modemapping['yaml'] = 'ace/mode/yaml';
var separator = '$separator';
var bootstrap = $bootstrap;
if (bootstrap.hasOwnProperty("events")) {
var events = document.getElementById("events");
for (var i = 0; i < bootstrap.events.length; i++) {
var option = document.createElement("option");
option.value = bootstrap.events[i].event;
option.text = bootstrap.events[i].event;
events.add(option);
}
var events = document.getElementById("events_side");
for (var i = 0; i < bootstrap.events.length; i++) {
var option = document.createElement("option");
option.value = bootstrap.events[i].event;
option.text = bootstrap.events[i].event;
events.add(option);
}
var entities = document.getElementById("entities");
for (var i = 0; i < bootstrap.states.length; i++) {
var option = document.createElement("option");
option.value = bootstrap.states[i].entity_id;
option.text = bootstrap.states[i].attributes.friendly_name + ' (' + bootstrap.states[i].entity_id + ')';
entities.add(option);
}
var entities = document.getElementById("entities_side");
for (var i = 0; i < bootstrap.states.length; i++) {
var option = document.createElement("option");
option.value = bootstrap.states[i].entity_id;
option.text = bootstrap.states[i].attributes.friendly_name + ' (' + bootstrap.states[i].entity_id + ')';
entities.add(option);
}
var services = document.getElementById("services");
for (var i = 0; i < bootstrap.services.length; i++) {
for (var k in bootstrap.services[i].services) {
var option = document.createElement("option");
option.value = bootstrap.services[i].domain + '.' + k;
option.text = bootstrap.services[i].domain + '.' + k;
services.add(option);
}
}
var services = document.getElementById("services_side");
for (var i = 0; i < bootstrap.services.length; i++) {
for (var k in bootstrap.services[i].services) {
var option = document.createElement("option");
option.value = bootstrap.services[i].domain + '.' + k;
option.text = bootstrap.services[i].domain + '.' + k;
services.add(option);
}
}
function sort_select(id) {
var options = $('#' + id + ' option');
var arr = options.map(function (_, o) {
return {
t: $(o).text(), v: o.value
};
}).get();
arr.sort(function (o1, o2) {
var t1 = o1.t.toLowerCase(), t2 = o2.t.toLowerCase();
return t1 > t2 ? 1 : t1 < t2 ? -1 : 0;
});
options.each(function (i, o) {
o.value = arr[i].v;
$(o).text(arr[i].t);
});
}
sort_select('events');
sort_select('events_side');
sort_select('entities');
sort_select('entities_side');
sort_select('services');
sort_select('services_side');
}
function listdir(path) {
$.get(encodeURI("api/listdir?path=" + path), function(data) {
renderpath(data);
});
document.getElementById("slide-out").scrollTop = 0;
}
function renderitem(itemdata, index) {
var li = document.createElement('li');
li.classList.add("collection-item", "fbicon_pad", "col", "s12", "no-padding", "white");
var item = document.createElement('a');
item.classList.add("waves-effect", "col", "s10", "fbicon_pad");
var iicon = document.createElement('i');
iicon.classList.add("material-icons", "fbmenuicon_pad");
var stats = document.createElement('span');
date = new Date(itemdata.modified*1000);
stats.classList.add('stats');
if (itemdata.type == 'dir') {
iicon.innerHTML = 'folder';
item.setAttribute("onclick", "listdir('" + encodeURI(itemdata.fullpath) + "')");
stats.innerHTML = "Mod.: " + date.toUTCString();
}
else {
nameparts = itemdata.name.split('.');
extension = nameparts[nameparts.length -1];
if (['c', 'cpp', 'css', 'htm', 'html', 'js', 'json', 'php', 'py', 'sh', 'sql', 'xml', 'yaml'].indexOf(extension.toLocaleLowerCase()) > +1 ) {
iicon.classList.add('mdi', 'mdi-file-xml');
}
else if (['txt', 'doc', 'docx'].indexOf(extension.toLocaleLowerCase()) > -1 ) {
iicon.classList.add('mdi', 'mdi-file-document');
}
else if (['bmp', 'gif', 'jpg', 'jpeg', 'png', 'tif', 'webp'].indexOf(extension.toLocaleLowerCase()) > -1 ) {
iicon.classList.add('mdi', 'mdi-file-image');
}
else if (['mp3', 'ogg', 'wav'].indexOf(extension) > -1 ) {
iicon.classList.add('mdi', 'mdi-file-music');
}
else if (['avi', 'flv', 'mkv', 'mp4', 'mpg', 'mpeg', 'webm'].indexOf(extension.toLocaleLowerCase()) > -1 ) {
iicon.classList.add('mdi', 'mdi-file-video');
}
else if (['pdf'].indexOf(extension.toLocaleLowerCase()) > -1 ) {
iicon.classList.add('mdi', 'mdi-file-pdf');
}
else {
iicon.classList.add('mdi', 'mdi-file');
}
item.setAttribute("onclick", "loadfile('" + encodeURI(itemdata.fullpath) + "')");
stats.innerHTML = "Mod.: " + date.toUTCString() + " Size: " + (itemdata.size/1024).toFixed(1) + " KiB";
}
item.appendChild(iicon);
var itext = document.createElement('div');
itext.innerHTML = itemdata.name;
itext.classList.add("filename");
var hasgitadd = false;
if (itemdata.gitstatus) {
if (itemdata.gittracked == 'untracked') {
itext.classList.add('text_darkred');
hasgitadd = true;
}
else {
if(itemdata.gitstatus == 'unstaged') {
itext.classList.add('text_darkred');
hasgitadd = true;
}
else if (itemdata.gitstatus == 'staged') {
itext.classList.add('text_darkgreen');
}
}
}
item.appendChild(itext);
item.appendChild(stats);
var dropdown = document.createElement('ul');
dropdown.id = 'fb_dropdown_' + index;
dropdown.classList.add('dropdown-content');
dropdown.classList.add("z-depth-4");
// Download button
var dd_download = document.createElement('li');
var dd_download_a = document.createElement('a');
dd_download_a.classList.add("waves-effect", "fb_dd");
dd_download_a.setAttribute('onclick', "download_file('" + encodeURI(itemdata.fullpath) + "')");
dd_download_a.innerHTML = "Download";
dd_download.appendChild(dd_download_a);
dropdown.appendChild(dd_download);
// Delete button
var dd_delete = document.createElement('li');
dd_delete.classList.add("waves-effect", "fb_dd");
var dd_delete_a = document.createElement('a');
dd_delete_a.setAttribute('href', "#modal_delete");
dd_delete_a.classList.add("modal-trigger");
dd_delete_a.innerHTML = "Delete";
dd_delete.appendChild(dd_delete_a);
dropdown.appendChild(dd_delete);
if (itemdata.gitstatus) {
if (hasgitadd) {
var divider = document.createElement('li');
divider.classList.add('divider');
dropdown.appendChild(divider);
// git add button
var dd_gitadd = document.createElement('li');
var dd_gitadd_a = document.createElement('a');
dd_gitadd_a.classList.add('waves-effect', 'fb_dd', 'modal-trigger');
dd_gitadd_a.setAttribute('href', "#modal_gitadd");
dd_gitadd_a.innerHTML = "git add";
dd_gitadd.appendChild(dd_gitadd_a);
dropdown.appendChild(dd_gitadd);
}
}
var menubutton = document.createElement('a');
menubutton.classList.add("fbmenubutton", "waves-effect", "dropdown-button", "col", "s2", "fbicon_pad");
menubutton.classList.add('waves-effect');
menubutton.classList.add('dropdown-button');
menubutton.setAttribute('data-activates', dropdown.id);
menubutton.setAttribute('data-alignment', 'right');
var menubuttonicon = document.createElement('i');
menubutton.classList.add('material-icons');
menubutton.classList.add("right");
menubutton.innerHTML = 'more_vert';
menubutton.setAttribute('onclick', "document.getElementById('fb_currentfile').value='" + encodeURI(itemdata.fullpath) + "';$('span.fb_currentfile').html('" + itemdata.name + "')");
li.appendChild(item);
li.appendChild(menubutton);
li.setAttribute("title", itemdata.name)
li.appendChild(dropdown);
return li;
}
function renderpath(dirdata) {
var newbranchbutton = document.getElementById('newbranchbutton');
newbranchbutton.style.cssText = "display: none !important"
var fbelements = document.getElementById("fbelements");
while (fbelements.firstChild) {
fbelements.removeChild(fbelements.firstChild);
}
var fbheader = document.getElementById('fbheader');
fbheader.innerHTML = dirdata.abspath;
var branchselector = document.getElementById('branchselector');
var fbheaderbranch = document.getElementById('fbheaderbranch');
var branchlist = document.getElementById('branchlist');
while (branchlist.firstChild) {
branchlist.removeChild(branchlist.firstChild);
}
if (dirdata.activebranch) {
newbranchbutton.style.display = "inline-block";
fbheaderbranch.innerHTML = dirdata.activebranch;
fbheaderbranch.style.display = "inline";
branchselector.style.display = "block";
for (var i = 0; i < dirdata.branches.length; i++) {
var branch = document.createElement('li');
var link = document.createElement('a');
link.classList.add("branch_select", "truncate");
link.innerHTML = dirdata.branches[i];
link.href = '#';
link.setAttribute('onclick', 'checkout("' + dirdata.branches[i] + '");collapseAll()')
branch.appendChild(link);
if (dirdata.branches[i] == dirdata.activebranch) {
link.classList.add("active", "grey", "darken-1");
}
else {
link.classList.add("grey-text", "text-darken-3", "branch_hover", "waves-effect", "grey", "lighten-4");
}
branchlist.appendChild(branch);
}
}
else {
fbheaderbranch.innerHTML = "";
fbheaderbranch.style.display = "";
branchselector.style.display = "none";
}
var uplink = document.getElementById('uplink');
uplink.setAttribute("onclick", "listdir('" + encodeURI(dirdata.parent) + "')")
for (var i = 0; i < dirdata.content.length; i++) {
fbelements.appendChild(renderitem(dirdata.content[i], i));
}
$(".dropdown-button").dropdown();
}
function collapseAll() {
$(".collapsible-header").removeClass(function() { return "active"; });
$(".collapsible").collapsible({accordion: true});
$(".collapsible").collapsible({accordion: false});
}
function checkout(){
$(".collapsible-header").removeClass(function(){
return "active";
});
$(".collapsible").collapsible({accordion: true});
$(".collapsible").collapsible({accordion: false});
}
function loadfile(filepath) {
if ($('.markdirty.red').length) {
$('#modal_markdirty').modal('open');
}
else {
$.get("api/file?filename=" + filepath, function(data) {
fileparts = filepath.split('.');
extension = fileparts[fileparts.length -1];
if (modemapping.hasOwnProperty(extension)) {
editor.setOption('mode', modemapping[extension]);
}
else {
editor.setOption('mode', "ace/mode/text");
}
editor.getSession().setValue(data, -1);
document.getElementById('currentfile').value = decodeURI(filepath);
editor.session.getUndoManager().markClean();
$('.markdirty').each(function(i, o){o.classList.remove('red');});
$('.hidesave').css('opacity', 0);
});
}
}
function check_config() {
$.get("api/check_config", function (resp) {
if (resp.length == 0) {
var $toastContent = $("<div><pre>Configuration seems valid.</pre></div>");
Materialize.toast($toastContent, 2000);
}
else {
var $toastContent = $("<div><pre>" + resp[0].state + "</pre></div>");
Materialize.toast($toastContent, 2000);
}
});
}
function reload_automations() {
$.get("api/reload_automations", function (resp) {
var $toastContent = $("<div>Automations reloaded</div>");
Materialize.toast($toastContent, 2000);
});
}
function reload_scripts() {
$.get("api/reload_scripts", function (resp) {
var $toastContent = $("<div>Scripts reloaded</div>");
Materialize.toast($toastContent, 2000);
});
}
function reload_groups() {
$.get("api/reload_groups", function (resp) {
var $toastContent = $("<div><pre>Groups reloaded</pre></div>");
Materialize.toast($toastContent, 2000);
});
}
function reload_core() {
$.get("api/reload_core", function (resp) {
var $toastContent = $("<div><pre>Core reloaded</pre></div>");
Materialize.toast($toastContent, 2000);
});
}
function restart() {
$.get("api/restart", function (resp) {
if (resp.length == 0) {
var $toastContent = $("<div><pre>Restarting HASS</pre></div>");
Materialize.toast($toastContent, 2000);
}
else {
var $toastContent = $("<div><pre>" + resp + "</pre></div>");
Materialize.toast($toastContent, 2000);
}
});
}
function save() {
var filepath = document.getElementById('currentfile').value;
if (filepath.length > 0) {
data = new Object();
data.filename = filepath;
data.text = editor.getValue()
$.post("api/save", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML);
$('.markdirty').each(function(i, o){o.classList.remove('red');});
$('.hidesave').css('opacity', 0);
editor.session.getUndoManager().markClean();
}
});
}
else {
Materialize.toast('Error: Please provide a filename', 5000);
}
}
function save_check() {
var filepath = document.getElementById('currentfile').value;
if (filepath.length > 0) {
$('#modal_save').modal('open');
}
else {
Materialize.toast('Error: Please provide a filename', 5000);
$(".pathtip").bind("animationend webkitAnimationEnd oAnimationEnd MSAnimationEnd", function(){
$(this).removeClass("pathtip_color");
}).addClass("pathtip_color");
}
}
function download_file(filepath) {
window.open("/api/download?filename="+encodeURI(filepath));
}
function delete_file() {
var path = document.getElementById('currentfile').value;
if (path.length > 0) {
data = new Object();
data.path= path;
$.post("api/delete", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML)
document.getElementById('currentfile').value='';
editor.setValue('');
}
});
}
}
function exec_command() {
var command = document.getElementById('commandline').value;
if (command.length > 0) {
data = new Object();
data.command = command;
data.timeout = 15;
$.post("api/exec_command", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var history = document.getElementById('command_history');
history.innerText += resp.message + ': ' + resp.returncode + "\n";
if (resp.stdout) {
history.innerText += resp.stdout;
}
if (resp.stderr) {
history.innerText += resp.stderr;
}
}
});
}
}
function delete_element() {
var path = document.getElementById('fb_currentfile').value;
if (path.length > 0) {
data = new Object();
data.path= path;
$.post("api/delete", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML);
if (document.getElementById('currentfile').value == path) {
document.getElementById('currentfile').value='';
editor.setValue('');
}
}
});
}
}
function gitadd() {
var path = document.getElementById('fb_currentfile').value;
if (path.length > 0) {
data = new Object();
data.path = path;
$.post("api/gitadd", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML);
}
});
}
}
function gitinit() {
var path = document.getElementById("fbheader").innerHTML;
if (path.length > 0) {
data = new Object();
data.path = path;
$.post("api/init", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML);
}
});
}
}
function commit(message) {
var path = document.getElementById("fbheader").innerHTML;
if (path.length > 0) {
data = new Object();
data.path = path;
data.message = message;
$.post("api/commit", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML);
document.getElementById('commitmessage').value = "";
}
});
}
}
function gitpush() {
var path = document.getElementById("fbheader").innerHTML;
if (path.length > 0) {
data = new Object();
data.path = path;
$.post("api/push", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML);
}
});
}
}
function checkout(branch) {
var path = document.getElementById("fbheader").innerHTML;
if (path.length > 0) {
data = new Object();
data.path = path;
data.branch = branch;
$.post("api/checkout", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML);
}
});
}
}
function newbranch(branch) {
var path = document.getElementById("fbheader").innerHTML;
if (path.length > 0) {
data = new Object();
data.path = path;
data.branch = branch;
$.post("api/newbranch", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML);
}
});
}
}
function newfolder(foldername) {
var path = document.getElementById('fbheader').innerHTML;
if (path.length > 0 && foldername.length > 0) {
data = new Object();
data.path = path;
data.name = foldername;
$.post("api/newfolder", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
}
listdir(document.getElementById('fbheader').innerHTML);
document.getElementById('newfoldername').value = '';
});
}
}
function newfile(filename) {
var path = document.getElementById('fbheader').innerHTML;
if (path.length > 0 && filename.length > 0) {
data = new Object();
data.path = path;
data.name = filename;
$.post("api/newfile", data).done(function(resp) {
if (resp.error) {
var $toastContent = $("<div><pre>" + resp.message + "\n" + resp.path + "</pre></div>");
Materialize.toast($toastContent, 5000);
}
else {
var $toastContent = $("<div><pre>" + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
}
listdir(document.getElementById('fbheader').innerHTML);
document.getElementById('newfilename').value = '';
});
}
}
function upload() {
var file_data = $('#uploadfile').prop('files')[0];
var form_data = new FormData();
form_data.append('file', file_data);
form_data.append('path', document.getElementById('fbheader').innerHTML);
$.ajax({
url: 'api/upload',
dataType: 'json',
cache: false,
contentType: false,
processData: false,
data: form_data,
type: 'post',
success: function(resp){
if (resp.error) {
var $toastContent = $("<div><pre>Error: " + resp.message + "</pre></div>");
Materialize.toast($toastContent, 2000);
}
else {
var $toastContent = $("<div><pre>Upload succesful</pre></div>");
Materialize.toast($toastContent, 2000);
listdir(document.getElementById('fbheader').innerHTML);
document.getElementById('uploadform').reset();
}
}
});
}
</script>
<script>
ace.require("ace/ext/language_tools");
var editor = ace.edit("editor");
editor.on("input", function() {
if (editor.session.getUndoManager().isClean()) {
$('.markdirty').each(function(i, o){o.classList.remove('red');});
$('.hidesave').css('opacity', 0);
}
else {
$('.markdirty').each(function(i, o){o.classList.add('red');});
$('.hidesave').css('opacity', 1);
}
});
if (localStorage.hasOwnProperty("pochass")) {
editor.setOptions(JSON.parse(localStorage.pochass));
editor.setOptions({
enableBasicAutocompletion: true,
enableSnippets: true
})
editor.$blockScrolling = Infinity;
}
else {
editor.getSession().setMode("ace/mode/yaml");
editor.setOptions({
showInvisibles: true,
useSoftTabs: true,
displayIndentGuides: true,
highlightSelectedWord: true,
enableBasicAutocompletion: true,
enableSnippets: true
})
editor.$blockScrolling = Infinity;
}
function apply_settings() {
var options = editor.getOptions();
for (var key in options) {
if (options.hasOwnProperty(key)) {
var target = document.getElementById(key);
if (target) {
if (typeof(options[key]) == "boolean" && target.type === 'checkbox') {
target.checked = options[key];
target.setAttribute("checked", options[key]);
}
else if (typeof(options[key]) == "number" && target.type === 'number') {
target.value = options[key];
}
else if (typeof(options[key]) == "string" && target.tagName == 'SELECT') {
target.value = options[key];
}
}
}
}
}
apply_settings();
function save_ace_settings() {
localStorage.pochass = JSON.stringify(editor.getOptions())
Materialize.toast("Ace Settings Saved", 2000);
}
function insert(text) {
var pos = editor.selection.getCursor();
var end = editor.session.insert(pos, text);
editor.selection.setRange({
start: pos,
end: end
});
editor.focus();
}
var foldstatus = true;
function toggle_fold() {
if (foldstatus) {
editor.getSession().foldAll();
}
else {
editor.getSession().unfold();
}
foldstatus = !foldstatus;
}
</script>
</body>
</html>""")
def signal_handler(sig, frame):
global HTTPD
LOG.info("Got signal: %s. Shutting down server", str(sig))
HTTPD.server_close()
sys.exit(0)
def load_settings(settingsfile):
global LISTENIP, LISTENPORT, BASEPATH, SSL_CERTIFICATE, SSL_KEY, HASS_API, \
HASS_API_PASSWORD, CREDENTIALS, ALLOWED_NETWORKS, BANNED_IPS, BANLIMIT, DEV, \
IGNORE_PATTERN
try:
if os.path.isfile(settingsfile):
with open(settingsfile) as fptr:
settings = json.loads(fptr.read())
LISTENIP = settings.get("LISTENIP", LISTENIP)
LISTENPORT = settings.get("LISTENPORT", LISTENPORT)
BASEPATH = settings.get("BASEPATH", BASEPATH)
SSL_CERTIFICATE = settings.get("SSL_CERTIFICATE", SSL_CERTIFICATE)
SSL_KEY = settings.get("SSL_KEY", SSL_KEY)
HASS_API = settings.get("HASS_API", HASS_API)
HASS_API_PASSWORD = settings.get("HASS_API_PASSWORD", HASS_API_PASSWORD)
CREDENTIALS = settings.get("CREDENTIALS", CREDENTIALS)
ALLOWED_NETWORKS = settings.get("ALLOWED_NETWORKS", ALLOWED_NETWORKS)
BANNED_IPS = settings.get("BANNED_IPS", BANNED_IPS)
BANLIMIT = settings.get("BANLIMIT", BANLIMIT)
DEV = settings.get("DEV", DEV)
IGNORE_PATTERN = settings.get("IGNORE_PATTERN", IGNORE_PATTERN)
except Exception as err:
LOG.warning(err)
LOG.warning("Not loading static settings")
return False
def get_dircontent(path, repo=None):
dircontent = []
if repo:
untracked = [
"%s%s%s"%(repo.working_dir, os.sep, e) for e in \
["%s"%os.sep.join(f.split('/')) for f in repo.untracked_files]
]
staged = {}
unstaged = {}
try:
for element in repo.index.diff("HEAD"):
staged["%s%s%s" % (repo.working_dir, os.sep, "%s"%os.sep.join(element.b_path.split('/')))] = element.change_type
except Exception as err:
LOG.warning("Exception: %s", str(err))
for element in repo.index.diff(None):
unstaged["%s%s%s" % (repo.working_dir, os.sep, "%s"%os.sep.join(element.b_path.split('/')))] = element.change_type
else:
untracked = []
staged = {}
unstaged = {}
for elem in sorted(os.listdir(path), key=lambda x: x.lower()):
edata = {}
edata['name'] = elem
edata['dir'] = path
edata['fullpath'] = os.path.abspath(os.path.join(path, elem))
edata['type'] = 'dir' if os.path.isdir(edata['fullpath']) else 'file'
try:
stats = os.stat(os.path.join(path, elem))
edata['size'] = stats.st_size
edata['modified'] = stats.st_mtime
edata['created'] = stats.st_ctime
except Exception:
edata['size'] = 0
edata['modified'] = 0
edata['created'] = 0
edata['changetype'] = None
edata['gitstatus'] = bool(repo)
edata['gittracked'] = 'untracked' if edata['fullpath'] in untracked else 'tracked'
if edata['fullpath'] in unstaged:
edata['gitstatus'] = 'unstaged'
edata['changetype'] = unstaged.get(edata['name'], None)
elif edata['fullpath'] in staged:
edata['gitstatus'] = 'staged'
edata['changetype'] = staged.get(edata['name'], None)
hidden = False
if IGNORE_PATTERN is not None:
for file_pattern in IGNORE_PATTERN:
if fnmatch.fnmatch(edata['name'], file_pattern):
hidden = True
if not hidden:
dircontent.append(edata)
return dircontent
def get_html():
if DEV:
try:
with open("dev.html") as fptr:
html = Template(fptr.read())
return html
except Exception as err:
LOG.warning(err)
LOG.warning("Delivering embedded HTML")
return INDEX
def check_access(clientip):
global BANNED_IPS
if clientip in BANNED_IPS:
return False
if not ALLOWED_NETWORKS:
return True
for net in ALLOWED_NETWORKS:
ipobject = ipaddress.ip_address(clientip)
if ipobject in ipaddress.ip_network(net):
return True
BANNED_IPS.append(clientip)
return False
class RequestHandler(BaseHTTPRequestHandler):
def log_message(self, format, *args):
LOG.info("%s - %s" % (self.client_address[0], format % args))
return
def do_BLOCK(self):
self.send_response(420)
self.end_headers()
self.wfile.write(bytes("Policy not fulfilled", "utf8"))
def do_GET(self):
if not check_access(self.client_address[0]):
self.do_BLOCK()
return
req = urlparse(self.path)
query = parse_qs(req.query)
self.send_response(200)
if req.path == '/api/file':
content = ""
self.send_header('Content-type', 'text/text')
self.end_headers()
filename = query.get('filename', None)
try:
if filename:
filename = unquote(filename[0]).encode('utf-8')
if os.path.isfile(os.path.join(BASEDIR.encode('utf-8'), filename)):
with open(os.path.join(BASEDIR.encode('utf-8'), filename)) as fptr:
content += fptr.read()
else:
content = "File not found"
except Exception as err:
LOG.warning(err)
content = str(err)
self.wfile.write(bytes(content, "utf8"))
return
elif req.path == '/api/download':
content = ""
filename = query.get('filename', None)
try:
if filename:
filename = unquote(filename[0]).encode('utf-8')
LOG.info(filename)
if os.path.isfile(os.path.join(BASEDIR.encode('utf-8'), filename)):
with open(os.path.join(BASEDIR.encode('utf-8'), filename), 'rb') as fptr:
filecontent = fptr.read()
self.send_header('Content-Disposition', 'attachment; filename=%s' % filename.decode('utf-8').split(os.sep)[-1])
self.end_headers()
self.wfile.write(filecontent)
return
else:
content = "File not found"
except Exception as err:
LOG.warning(err)
content = str(err)
self.send_header('Content-type', 'text/text')
self.wfile.write(bytes(content, "utf8"))
return
elif req.path == '/api/listdir':
content = ""
self.send_header('Content-type', 'text/json')
self.end_headers()
dirpath = query.get('path', None)
try:
if dirpath:
dirpath = unquote(dirpath[0]).encode('utf-8')
if os.path.isdir(dirpath):
repo = None
activebranch = None
dirty = False
branches = []
if REPO:
try:
repo = REPO(dirpath.decode('utf-8'), search_parent_directories=True)
activebranch = repo.active_branch.name
dirty = repo.is_dirty()
for branch in repo.branches:
branches.append(branch.name)
except Exception as err:
LOG.debug("Exception (no repo): %s" % str(err))
dircontent = get_dircontent(dirpath.decode('utf-8'), repo)
filedata = {'content': dircontent,
'abspath': os.path.abspath(dirpath).decode('utf-8'),
'parent': os.path.dirname(os.path.abspath(dirpath)).decode('utf-8'),
'branches': branches,
'activebranch': activebranch,
'dirty': dirty
}
self.wfile.write(bytes(json.dumps(filedata), "utf8"))
except Exception as err:
LOG.warning(err)
content = str(err)
self.wfile.write(bytes(content, "utf8"))
return
elif req.path == '/api/abspath':
content = ""
self.send_header('Content-type', 'text/text')
self.end_headers()
dirpath = query.get('path', None)
if dirpath:
dirpath = unquote(dirpath[0]).encode('utf-8')
LOG.debug(dirpath)
absp = os.path.abspath(dirpath)
LOG.debug(absp)
if os.path.isdir(dirpath):
self.wfile.write(os.path.abspath(dirpath))
return
elif req.path == '/api/parent':
content = ""
self.send_header('Content-type', 'text/text')
self.end_headers()
dirpath = query.get('path', None)
if dirpath:
dirpath = unquote(dirpath[0]).encode('utf-8')
LOG.debug(dirpath)
absp = os.path.abspath(dirpath)
LOG.debug(absp)
if os.path.isdir(dirpath):
self.wfile.write(os.path.abspath(os.path.dirname(dirpath)))
return
elif req.path == '/api/restart':
LOG.info("/api/restart")
self.send_header('Content-type', 'text/json')
self.end_headers()
res = {"restart": False}
try:
headers = {
"Content-Type": "application/json"
}
if HASS_API_PASSWORD:
headers["x-ha-access"] = HASS_API_PASSWORD
req = urllib.request.Request("%sservices/homeassistant/restart" % HASS_API, headers=headers, method='POST')
with urllib.request.urlopen(req) as response:
res = json.loads(response.read().decode('utf-8'))
LOG.debug(res)
except Exception as err:
LOG.warning(err)
res['restart'] = str(err)
self.wfile.write(bytes(json.dumps(res), "utf8"))
return
elif req.path == '/api/check_config':
LOG.info("/api/check_config")
self.send_header('Content-type', 'text/json')
self.end_headers()
res = {"check_config": False}
try:
headers = {
"Content-Type": "application/json"
}
if HASS_API_PASSWORD:
headers["x-ha-access"] = HASS_API_PASSWORD
req = urllib.request.Request("%sservices/homeassistant/check_config" % HASS_API, headers=headers, method='POST')
# with urllib.request.urlopen(req) as response:
# print(json.loads(response.read().decode('utf-8')))
# res['service'] = "called successfully"
except Exception as err:
LOG.warning(err)
res['restart'] = str(err)
self.wfile.write(bytes(json.dumps(res), "utf8"))
return
elif req.path == '/api/reload_automations':
LOG.info("/api/reload_automations")
self.send_header('Content-type', 'text/json')
self.end_headers()
res = {"reload_automations": False}
try:
headers = {
"Content-Type": "application/json"
}
if HASS_API_PASSWORD:
headers["x-ha-access"] = HASS_API_PASSWORD
req = urllib.request.Request("%sservices/automation/reload" % HASS_API, headers=headers, method='POST')
with urllib.request.urlopen(req) as response:
LOG.debug(json.loads(response.read().decode('utf-8')))
res['service'] = "called successfully"
except Exception as err:
LOG.warning(err)
res['restart'] = str(err)
self.wfile.write(bytes(json.dumps(res), "utf8"))
return
elif req.path == '/api/reload_scripts':
LOG.info("/api/reload_scripts")
self.send_header('Content-type', 'text/json')
self.end_headers()
res = {"reload_scripts": False}
try:
headers = {
"Content-Type": "application/json"
}
if HASS_API_PASSWORD:
headers["x-ha-access"] = HASS_API_PASSWORD
req = urllib.request.Request("%sservices/script/reload" % HASS_API, headers=headers, method='POST')
with urllib.request.urlopen(req) as response:
LOG.debug(json.loads(response.read().decode('utf-8')))
res['service'] = "called successfully"
except Exception as err:
LOG.warning(err)
res['restart'] = str(err)
self.wfile.write(bytes(json.dumps(res), "utf8"))
return
elif req.path == '/api/reload_groups':
LOG.info("/api/reload_groups")
self.send_header('Content-type', 'text/json')
self.end_headers()
res = {"reload_groups": False}
try:
headers = {
"Content-Type": "application/json"
}
if HASS_API_PASSWORD:
headers["x-ha-access"] = HASS_API_PASSWORD
req = urllib.request.Request("%sservices/group/reload" % HASS_API, headers=headers, method='POST')
with urllib.request.urlopen(req) as response:
LOG.debug(json.loads(response.read().decode('utf-8')))
res['service'] = "called successfully"
except Exception as err:
LOG.warning(err)
res['restart'] = str(err)
self.wfile.write(bytes(json.dumps(res), "utf8"))
return
elif req.path == '/api/reload_core':
LOG.info("/api/reload_core")
self.send_header('Content-type', 'text/json')
self.end_headers()
res = {"reload_core": False}
try:
headers = {
"Content-Type": "application/json"
}
if HASS_API_PASSWORD:
headers["x-ha-access"] = HASS_API_PASSWORD
req = urllib.request.Request("%sservices/homeassistant/reload_core_config" % HASS_API, headers=headers, method='POST')
with urllib.request.urlopen(req) as response:
LOG.debug(json.loads(response.read().decode('utf-8')))
res['service'] = "called successfully"
except Exception as err:
LOG.warning(err)
res['restart'] = str(err)
self.wfile.write(bytes(json.dumps(res), "utf8"))
return
elif req.path == '/':
self.send_header('Content-type', 'text/html')
self.end_headers()
boot = "{}"
try:
headers = {
"Content-Type": "application/json"
}
if HASS_API_PASSWORD:
headers["x-ha-access"] = HASS_API_PASSWORD
req = urllib.request.Request("%sbootstrap" % HASS_API, headers=headers, method='GET')
with urllib.request.urlopen(req) as response:
boot = response.read().decode('utf-8')
except Exception as err:
LOG.warning("Exception getting bootstrap")
LOG.warning(err)
color = "green"
try:
response = urllib.request.urlopen(RELEASEURL)
latest = json.loads(response.read().decode('utf-8'))['tag_name']
if VERSION != latest:
color = "red"
except Exception as err:
LOG.warning("Exception getting release")
LOG.warning(err)
html = get_html().safe_substitute(bootstrap=boot,
current=VERSION,
versionclass=color,
separator="\%s" % os.sep if os.sep == "\\" else os.sep)
self.wfile.write(bytes(html, "utf8"))
return
else:
self.send_response(404)
self.end_headers()
self.wfile.write(bytes("File not found", "utf8"))
def do_POST(self):
if not check_access(self.client_address[0]):
self.do_BLOCK()
return
req = urlparse(self.path)
response = {
"error": True,
"message": "Generic failure"
}
length = int(self.headers['content-length'])
if req.path == '/api/save':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'filename' in postvars.keys() and 'text' in postvars.keys():
if postvars['filename'] and postvars['text']:
try:
filename = unquote(postvars['filename'][0])
response['file'] = filename
with open(filename, 'wb') as fptr:
fptr.write(bytes(postvars['text'][0], "utf-8"))
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
response['error'] = False
response['message'] = "File saved successfully"
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
response['message'] = "%s" % (str(err))
LOG.warning(err)
else:
response['message'] = "Missing filename or text"
elif req.path == '/api/upload':
if length > 104857600: #100 MB for now
read = 0
while read < length:
read += len(self.rfile.read(min(66556, length - read)))
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
response['error'] = True
response['message'] = "File too big: %i" % read
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
else:
form = cgi.FieldStorage(
fp=self.rfile,
headers=self.headers,
environ={'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': self.headers['Content-Type'],
})
filename = form['file'].filename
filepath = form['path'].file.read()
data = form['file'].file.read()
open("%s%s%s" % (filepath, os.sep, filename), "wb").write(data)
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
response['error'] = False
response['message'] = "Upload successful"
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
elif req.path == '/api/delete':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'path' in postvars.keys():
if postvars['path']:
try:
delpath = unquote(postvars['path'][0])
response['path'] = delpath
try:
if os.path.isdir(delpath):
os.rmdir(delpath)
else:
os.unlink(delpath)
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
response['error'] = False
response['message'] = "Deletetion successful"
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
LOG.warning(err)
response['error'] = True
response['message'] = str(err)
except Exception as err:
response['message'] = "%s" % (str(err))
LOG.warning(err)
else:
response['message'] = "Missing filename or text"
elif req.path == '/api/exec_command':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'command' in postvars.keys():
if postvars['command']:
try:
command = shlex.split(postvars['command'][0])
timeout = 15
if 'timeout' in postvars.keys():
if postvars['timeout']:
timeout = int(postvars['timeout'][0])
try:
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate(timeout=timeout)
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
response['error'] = False
response['message'] = "Command executed: %s" % postvars['command'][0]
response['returncode'] = proc.returncode
try:
response['stdout'] = stdout.decode(sys.getdefaultencoding())
except Exception as err:
LOG.warning(err)
response['stdout'] = stdout.decode("utf-8", errors="replace")
try:
response['stderr'] = stderr.decode(sys.getdefaultencoding())
except Exception as err:
LOG.warning(err)
response['stderr'] = stderr.decode("utf-8", errors="replace")
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
LOG.warning(err)
response['error'] = True
response['message'] = str(err)
except Exception as err:
response['message'] = "%s" % (str(err))
LOG.warning(err)
else:
response['message'] = "Missing command"
elif req.path == '/api/gitadd':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'path' in postvars.keys():
if postvars['path']:
try:
addpath = unquote(postvars['path'][0])
repo = REPO(addpath, search_parent_directories=True)
filepath = "/".join(addpath.split(os.sep)[len(repo.working_dir.split(os.sep)):])
response['path'] = filepath
try:
repo.index.add([filepath])
response['error'] = False
response['message'] = "Added file to index"
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
LOG.warning(err)
response['error'] = True
response['message'] = str(err)
except Exception as err:
response['message'] = "%s" % (str(err))
LOG.warning(err)
else:
response['message'] = "Missing filename"
elif req.path == '/api/commit':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'path' in postvars.keys() and 'message' in postvars.keys():
if postvars['path'] and postvars['message']:
try:
commitpath = unquote(postvars['path'][0])
response['path'] = commitpath
message = unquote(postvars['message'][0])
repo = REPO(commitpath, search_parent_directories=True)
try:
repo.index.commit(message)
response['error'] = False
response['message'] = "Changes commited"
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
response['error'] = True
response['message'] = str(err)
LOG.debug(response)
except Exception as err:
response['message'] = "Not a git repository: %s" % (str(err))
LOG.warning("Exception (no repo): %s" % str(err))
else:
response['message'] = "Missing path"
elif req.path == '/api/checkout':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'path' in postvars.keys() and 'branch' in postvars.keys():
if postvars['path'] and postvars['branch']:
try:
branchpath = unquote(postvars['path'][0])
response['path'] = branchpath
branch = unquote(postvars['branch'][0])
repo = REPO(branchpath, search_parent_directories=True)
try:
head = [h for h in repo.heads if h.name == branch][0]
head.checkout()
response['error'] = False
response['message'] = "Checked out %s" % branch
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
response['error'] = True
response['message'] = str(err)
LOG.warning(response)
except Exception as err:
response['message'] = "Not a git repository: %s" % (str(err))
LOG.warning("Exception (no repo): %s" % str(err))
else:
response['message'] = "Missing path or branch"
elif req.path == '/api/newbranch':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'path' in postvars.keys() and 'branch' in postvars.keys():
if postvars['path'] and postvars['branch']:
try:
branchpath = unquote(postvars['path'][0])
response['path'] = branchpath
branch = unquote(postvars['branch'][0])
repo = REPO(branchpath, search_parent_directories=True)
try:
repo.git.checkout("HEAD", b=branch)
response['error'] = False
response['message'] = "Created and checked out %s" % branch
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
response['error'] = True
response['message'] = str(err)
LOG.warning(response)
except Exception as err:
response['message'] = "Not a git repository: %s" % (str(err))
LOG.warning("Exception (no repo): %s" % str(err))
else:
response['message'] = "Missing path or branch"
elif req.path == '/api/init':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'path' in postvars.keys():
if postvars['path']:
try:
repopath = unquote(postvars['path'][0])
response['path'] = repopath
try:
repo = REPO.init(repopath)
response['error'] = False
response['message'] = "Initialized repository in %s" % repopath
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
response['error'] = True
response['message'] = str(err)
LOG.warning(response)
except Exception as err:
response['message'] = "Not a git repository: %s" % (str(err))
LOG.warning("Exception (no repo): %s" % str(err))
else:
response['message'] = "Missing path or branch"
elif req.path == '/api/push':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'path' in postvars.keys():
if postvars['path']:
try:
repopath = unquote(postvars['path'][0])
response['path'] = repopath
try:
repo = REPO(repopath)
urls = []
if repo.remotes:
for url in repo.remotes.origin.urls:
urls.append(url)
if not urls:
response['error'] = True
response['message'] = "No remotes configured for %s" % repopath
else:
repo.remotes.origin.push()
response['error'] = False
response['message'] = "Pushed to %s" % urls[0]
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
response['error'] = True
response['message'] = str(err)
LOG.warning(response)
except Exception as err:
response['message'] = "Not a git repository: %s" % (str(err))
LOG.warning("Exception (no repo): %s" % str(err))
else:
response['message'] = "Missing path or branch"
elif req.path == '/api/newfolder':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'path' in postvars.keys() and 'name' in postvars.keys():
if postvars['path'] and postvars['name']:
try:
basepath = unquote(postvars['path'][0])
name = unquote(postvars['name'][0])
response['path'] = os.path.join(basepath, name)
try:
os.makedirs(response['path'])
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
response['error'] = False
response['message'] = "Folder created"
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
LOG.warning(err)
response['error'] = True
response['message'] = str(err)
except Exception as err:
response['message'] = "%s" % (str(err))
LOG.warning(err)
elif req.path == '/api/newfile':
try:
postvars = parse_qs(self.rfile.read(length).decode('utf-8'), keep_blank_values=1)
except Exception as err:
LOG.warning(err)
response['message'] = "%s" % (str(err))
postvars = {}
if 'path' in postvars.keys() and 'name' in postvars.keys():
if postvars['path'] and postvars['name']:
try:
basepath = unquote(postvars['path'][0])
name = unquote(postvars['name'][0])
response['path'] = os.path.join(basepath, name)
try:
with open(response['path'], 'w') as fptr:
fptr.write("")
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
response['error'] = False
response['message'] = "File created"
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
except Exception as err:
LOG.warning(err)
response['error'] = True
response['message'] = str(err)
except Exception as err:
response['message'] = "%s" % (str(err))
LOG.warning(err)
else:
response['message'] = "Missing filename or text"
else:
response['message'] = "Invalid method"
self.send_response(200)
self.send_header('Content-type', 'text/json')
self.end_headers()
self.wfile.write(bytes(json.dumps(response), "utf8"))
return
class AuthHandler(RequestHandler):
def do_AUTHHEAD(self):
LOG.info("Requesting authorization")
self.send_response(401)
self.send_header('WWW-Authenticate', 'Basic realm=\"HASS-Configurator\"')
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
global CREDENTIALS
authorization = self.headers.get('Authorization', None)
if authorization is None:
self.do_AUTHHEAD()
self.wfile.write(bytes('no auth header received', 'utf-8'))
pass
elif authorization == 'Basic %s' % CREDENTIALS.decode('utf-8'):
if BANLIMIT:
FAIL2BAN_IPS.pop(self.client_address[0], None)
super().do_GET()
pass
else:
if BANLIMIT:
bancounter = FAIL2BAN_IPS.get(self.client_address[0], 1)
if bancounter >= BANLIMIT:
LOG.warning("Blocking access from %s" % self.client_address[0])
self.do_BLOCK()
return
else:
FAIL2BAN_IPS[self.client_address[0]] = bancounter + 1
self.do_AUTHHEAD()
self.wfile.write(bytes('Authentication required', 'utf-8'))
pass
def do_POST(self):
global CREDENTIALS
authorization = self.headers.get('Authorization', None)
if authorization is None:
self.do_AUTHHEAD()
self.wfile.write(bytes('no auth header received', 'utf-8'))
pass
elif authorization == 'Basic %s' % CREDENTIALS.decode('utf-8'):
if BANLIMIT:
FAIL2BAN_IPS.pop(self.client_address[0], None)
super().do_POST()
pass
else:
if BANLIMIT:
bancounter = FAIL2BAN_IPS.get(self.client_address[0], 1)
if bancounter >= BANLIMIT:
LOG.warning("Blocking access from %s" % self.client_address[0])
self.do_BLOCK()
return
else:
FAIL2BAN_IPS[self.client_address[0]] = bancounter + 1
self.do_AUTHHEAD()
self.wfile.write(bytes('Authentication required', 'utf-8'))
pass
def main(args):
global HTTPD, CREDENTIALS
if args:
load_settings(args[0])
LOG.info("Starting server")
server_address = (LISTENIP, LISTENPORT)
if CREDENTIALS:
CREDENTIALS = base64.b64encode(bytes(CREDENTIALS, "utf-8"))
Handler = AuthHandler
else:
Handler = RequestHandler
if not SSL_CERTIFICATE:
HTTPD = HTTPServer(server_address, Handler)
else:
HTTPD = socketserver.TCPServer(server_address, Handler)
HTTPD.socket = ssl.wrap_socket(HTTPD.socket,
certfile=SSL_CERTIFICATE,
keyfile=SSL_KEY,
server_side=True)
LOG.info('Listening on: %s://%s:%i' % ('https' if SSL_CERTIFICATE else 'http',
LISTENIP,
LISTENPORT))
if BASEPATH:
os.chdir(BASEPATH)
HTTPD.serve_forever()
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
main(sys.argv[1:])
| {
"content_hash": "5ff76de227d01db24bda2e28315b38fd",
"timestamp": "",
"source": "github",
"line_count": 3623,
"max_line_length": 363,
"avg_line_length": 45.12669058791057,
"alnum_prop": 0.4956451001259985,
"repo_name": "vinjex/home-assistant",
"id": "42e8eafc28edcc045ed221de9953abe582202866",
"size": "163537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".homeassistant/configurator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "169676"
}
],
"symlink_target": ""
} |
"""
This module contains useful utilities for GeoDjango.
"""
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.utils.wkt import precision_wkt # NOQA
from django.core.exceptions import ImproperlyConfigured
if HAS_GDAL:
from django.contrib.gis.utils.ogrinfo import ogrinfo # NOQA
from django.contrib.gis.utils.ogrinspect import mapping, ogrinspect # NOQA
from django.contrib.gis.utils.srs import add_srs_entry # NOQA
try:
# LayerMapping requires DJANGO_SETTINGS_MODULE to be set,
# so this needs to be in try/except.
from django.contrib.gis.utils.layermapping import LayerMapping, LayerMapError # NOQA
except ImproperlyConfigured:
pass
| {
"content_hash": "ebbb1a704529a4c15ef0ce1396c52448",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 93,
"avg_line_length": 41.88235294117647,
"alnum_prop": 0.7429775280898876,
"repo_name": "tlksio/tlksio",
"id": "78b221663fc139ca0af41fdd8ae837080647adcf",
"size": "712",
"binary": false,
"copies": "8",
"ref": "refs/heads/develop",
"path": "env/lib/python3.4/site-packages/django/contrib/gis/utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1668"
},
{
"name": "HTML",
"bytes": "65037"
},
{
"name": "JavaScript",
"bytes": "450"
},
{
"name": "Makefile",
"bytes": "1075"
},
{
"name": "Python",
"bytes": "42727"
}
],
"symlink_target": ""
} |
from robot.utils import is_string, py2to3, PY3
from .comments import Comment
if PY3:
unicode = str
@py2to3
class Setting(object):
def __init__(self, setting_name, parent=None, comment=None):
self.setting_name = setting_name
self.parent = parent
self._set_initial_value()
self._set_comment(comment)
self._populated = False
def _set_initial_value(self):
self.value = []
def _set_comment(self, comment):
self.comment = Comment(comment)
def reset(self):
self.__init__(self.setting_name, self.parent)
@property
def source(self):
return self.parent.source if self.parent is not None else None
@property
def directory(self):
return self.parent.directory if self.parent is not None else None
def populate(self, value, comment=None):
"""Mainly used at parsing time, later attributes can be set directly."""
if not self._populated:
self._populate(value)
self._set_comment(comment)
self._populated = True
else:
self._set_initial_value()
self._set_comment(None)
self.report_invalid_syntax("Setting '%s' used multiple times."
% self.setting_name, 'ERROR')
def _populate(self, value):
self.value = value
def is_set(self):
return bool(self.value)
def is_for_loop(self):
return False
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax(message, level)
def _string_value(self, value):
return value if is_string(value) else ' '.join(value)
def _concat_string_with_value(self, string, value):
if string:
return string + ' ' + self._string_value(value)
return self._string_value(value)
def as_list(self):
return self._data_as_list() + self.comment.as_list()
def _data_as_list(self):
ret = [self.setting_name]
if self.value:
ret.extend(self.value)
return ret
def __nonzero__(self):
return self.is_set()
def __iter__(self):
return iter(self.value or ())
def __unicode__(self):
return unicode(self.value or '')
class StringValueJoiner(object):
def __init__(self, separator):
self._separator = separator
def join_string_with_value(self, string, value):
if string:
return string + self._separator + self.string_value(value)
return self.string_value(value)
def string_value(self, value):
if is_string(value):
return value
return self._separator.join(value)
class Documentation(Setting):
def _set_initial_value(self):
self.value = ''
def _populate(self, value):
self.value = self._concat_string_with_value(self.value, value)
def _string_value(self, value):
return value if is_string(value) else ''.join(value)
def _data_as_list(self):
return [self.setting_name, self.value]
class Template(Setting):
def _set_initial_value(self):
self.value = None
def _populate(self, value):
self.value = self._concat_string_with_value(self.value, value)
def is_set(self):
return self.value is not None
def is_active(self):
return self.value and self.value.upper() != 'NONE'
def _data_as_list(self):
ret = [self.setting_name]
if self.value:
ret.append(self.value)
return ret
class Fixture(Setting):
# `keyword`, `is_comment` and `assign` make the API compatible with Step.
@property
def keyword(self):
return self.name or ''
def is_comment(self):
return False
def _set_initial_value(self):
self.name = None
self.args = []
self.assign = ()
def _populate(self, value):
if not self.name:
self.name = value[0] if value else ''
value = value[1:]
self.args.extend(value)
def is_set(self):
return self.name is not None
def is_active(self):
return self.name and self.name.upper() != 'NONE'
def _data_as_list(self):
ret = [self.setting_name]
if self.name or self.args:
ret.append(self.name or '')
if self.args:
ret.extend(self.args)
return ret
class Timeout(Setting):
def _set_initial_value(self):
self.value = None
self.message = ''
def _populate(self, value):
if not self.value:
self.value = value[0] if value else ''
value = value[1:]
self.message = self._concat_string_with_value(self.message, value)
def is_set(self):
return self.value is not None
def _data_as_list(self):
ret = [self.setting_name]
if self.value or self.message:
ret.append(self.value or '')
if self.message:
ret.append(self.message)
return ret
class Tags(Setting):
def _set_initial_value(self):
self.value = None
def _populate(self, value):
self.value = (self.value or []) + value
def is_set(self):
return self.value is not None
def __add__(self, other):
if not isinstance(other, Tags):
raise TypeError('Tags can only be added with tags')
tags = Tags('Tags')
tags.value = (self.value or []) + (other.value or [])
return tags
class Arguments(Setting):
pass
class Return(Setting):
pass
class Metadata(Setting):
setting_name = 'Metadata'
def __init__(self, parent, name, value, comment=None, joined=False):
self.parent = parent
self.name = name
joiner = StringValueJoiner('' if joined else ' ')
self.value = joiner.join_string_with_value('', value)
self._set_comment(comment)
def reset(self):
pass
def is_set(self):
return True
def _data_as_list(self):
return [self.setting_name, self.name, self.value]
class _Import(Setting):
def __init__(self, parent, name, args=None, alias=None, comment=None):
self.parent = parent
self.name = name
self.args = args or []
self.alias = alias
self._set_comment(comment)
def reset(self):
pass
@property
def type(self):
return type(self).__name__
def is_set(self):
return True
def _data_as_list(self):
return [self.type, self.name] + self.args
def report_invalid_syntax(self, message, level='ERROR', parent=None):
parent = parent or getattr(self, 'parent', None)
if parent:
parent.report_invalid_syntax(message, level)
else:
from robot.api import logger
logger.write(message, level)
class Library(_Import):
def __init__(self, parent, name, args=None, alias=None, comment=None):
if args and not alias:
args, alias = self._split_alias(args, parent)
_Import.__init__(self, parent, name, args, alias, comment)
def _split_alias(self, args, parent):
if len(args) > 1 and is_string(args[-2]):
with_name = args[-2]
if with_name.upper() == 'WITH NAME':
# TODO: Require all uppercase 'WITH NAME' in RF 3.1.
# https://github.com/robotframework/robotframework/issues/2263
if with_name != 'WITH NAME':
self._deprecation_warning(with_name, parent)
return args[:-2], args[-1]
return args, None
def _deprecation_warning(self, with_name, parent):
message = ("Using 'WITH NAME' syntax when importing libraries case "
"insensitively like '%s' is deprecated. Use all upper case "
"format 'WITH NAME' instead." % with_name)
self.report_invalid_syntax(message, 'WARN', parent)
def _data_as_list(self):
data = ['Library', self.name] + self.args
if self.alias:
data += ['WITH NAME', self.alias]
return data
class Resource(_Import):
def __init__(self, parent, name, invalid_args=None, comment=None):
if invalid_args:
name += ' ' + ' '.join(invalid_args)
_Import.__init__(self, parent, name, comment=comment)
class Variables(_Import):
def __init__(self, parent, name, args=None, comment=None):
_Import.__init__(self, parent, name, args, comment=comment)
class _DataList(object):
def __init__(self, parent):
self._parent = parent
self.data = []
def add(self, meta):
self._add(meta)
def _add(self, meta):
self.data.append(meta)
def _parse_name_and_value(self, value):
name = value[0] if value else ''
return name, value[1:]
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, item):
self.data[index] = item
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
class ImportList(_DataList):
def populate_library(self, data, comment):
self._populate(Library, data, comment)
def populate_resource(self, data, comment):
self._populate(Resource, data, comment)
def populate_variables(self, data, comment):
self._populate(Variables, data, comment)
def _populate(self, item_class, data, comment):
name, value = self._parse_name_and_value(data)
self._add(item_class(self._parent, name, value, comment=comment))
class MetadataList(_DataList):
def populate(self, name, value, comment):
self._add(Metadata(self._parent, name, value, comment, joined=True))
| {
"content_hash": "4d0976dccd3b0856b1607e7b110309e0",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 80,
"avg_line_length": 26.258064516129032,
"alnum_prop": 0.5854832104832105,
"repo_name": "snyderr/robotframework",
"id": "86916e7338171b482d5ab5e325b63336772f10c4",
"size": "10376",
"binary": false,
"copies": "2",
"ref": "refs/heads/Robotframework_SkipExecution",
"path": "src/robot/parsing/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23646"
},
{
"name": "HTML",
"bytes": "988253"
},
{
"name": "Java",
"bytes": "57542"
},
{
"name": "JavaScript",
"bytes": "163896"
},
{
"name": "Python",
"bytes": "2232719"
},
{
"name": "RobotFramework",
"bytes": "2061354"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
import pytest
from libqtile import layout
import libqtile.manager
import libqtile.config
from ..conftest import no_xinerama
from .layout_utils import assertFocused, assertFocusPath
class MatrixConfig(object):
auto_fullscreen = True
main = None
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [
layout.Matrix(columns=2)
]
floating_layout = libqtile.layout.floating.Floating()
keys = []
mouse = []
screens = []
def matrix_config(x):
return no_xinerama(pytest.mark.parametrize("qtile", [MatrixConfig], indirect=True)(x))
@matrix_config
def test_matrix_simple(qtile):
qtile.testWindow("one")
assert qtile.c.layout.info()["rows"] == [["one"]]
qtile.testWindow("two")
assert qtile.c.layout.info()["rows"] == [["one", "two"]]
qtile.testWindow("three")
assert qtile.c.layout.info()["rows"] == [["one", "two"], ["three"]]
@matrix_config
def test_matrix_navigation(qtile):
qtile.testWindow("one")
qtile.testWindow("two")
qtile.testWindow("three")
qtile.testWindow("four")
qtile.testWindow("five")
qtile.c.layout.right()
assert qtile.c.layout.info()["current_window"] == (0, 2)
qtile.c.layout.up()
assert qtile.c.layout.info()["current_window"] == (0, 1)
qtile.c.layout.up()
assert qtile.c.layout.info()["current_window"] == (0, 0)
qtile.c.layout.up()
assert qtile.c.layout.info()["current_window"] == (0, 2)
qtile.c.layout.down()
assert qtile.c.layout.info()["current_window"] == (0, 0)
qtile.c.layout.down()
assert qtile.c.layout.info()["current_window"] == (0, 1)
qtile.c.layout.right()
assert qtile.c.layout.info()["current_window"] == (1, 1)
qtile.c.layout.right()
assert qtile.c.layout.info()["current_window"] == (0, 1)
@matrix_config
def test_matrix_add_remove_columns(qtile):
qtile.testWindow("one")
qtile.testWindow("two")
qtile.testWindow("three")
qtile.testWindow("four")
qtile.testWindow("five")
qtile.c.layout.add()
assert qtile.c.layout.info()["rows"] == [["one", "two", "three"], ["four", "five"]]
qtile.c.layout.delete()
assert qtile.c.layout.info()["rows"] == [["one", "two"], ["three", "four"], ["five"]]
@matrix_config
def test_matrix_window_focus_cycle(qtile):
# setup 3 tiled and two floating clients
qtile.testWindow("one")
qtile.testWindow("two")
qtile.testWindow("float1")
qtile.c.window.toggle_floating()
qtile.testWindow("float2")
qtile.c.window.toggle_floating()
qtile.testWindow("three")
# test preconditions
assert qtile.c.layout.info()['clients'] == ['one', 'two', 'three']
# last added window has focus
assertFocused(qtile, "three")
# assert window focus cycle, according to order in layout
assertFocusPath(qtile, 'float1', 'float2', 'one', 'two', 'three')
@matrix_config
def test_matrix_next_no_clients(qtile):
qtile.c.layout.next()
@matrix_config
def test_matrix_previous_no_clients(qtile):
qtile.c.layout.previous()
| {
"content_hash": "92f341327a391a57bd97fa14d405ff4a",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 90,
"avg_line_length": 29.317757009345794,
"alnum_prop": 0.641695887790883,
"repo_name": "cortesi/qtile",
"id": "5c09eb8d37a128325515e0054b3c8f029a430040",
"size": "4475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/layouts/test_matrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1142"
},
{
"name": "Python",
"bytes": "1170921"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "5643"
}
],
"symlink_target": ""
} |
""" Copyright (c) <2014> Author Vance King Saxbe. A, and contributors Power Dominion Enterprise, Precieux Consulting and other contributors. Modelled, Architected and designed by Vance King Saxbe. A. with the geeks from GoldSax Consulting and GoldSax Technologies email @[email protected]. Development teams from Power Dominion Enterprise, Precieux Consulting. Project sponsored by GoldSax Foundation, GoldSax Group and executed by GoldSax Manager."""#!/usr/bin/env python3
import _thread
import os
import sys
import time
import gc
from src import googlequotemachine
from src import yahooquotemachine
from src import bloombergquotemachine
from src import createtablesgooglefinance
from src import createtablesyahoofinance
from src import createtablesbloomberg
from time import localtime, strftime
start1 = []
sys.setrecursionlimit(1000000)
database = "data/"
markettime = {}
with open("conf/MarketTimings.conf") as fillees:
mlist = fillees.read().splitlines()
fillees.close()
for line in mlist:
items = line.split(", ")
key, values = items[0], items[1]
markettime[key] = values
with open('conf/symbolname.conf') as fille:
synamelist = fille.read().splitlines()
fille.close()
timetorun = 1800
cycle = 1
while("TRUE"):
with open('conf/urls.conf') as openedfile:
fileaslist = openedfile.read().splitlines()
openedfile.close()
a_lock = _thread.allocate_lock()
thr = []
with a_lock:
print("locks placed and Market engine is running for the...", cycle)
for lines in fileaslist:
lisj = lines.split('", "')
mtime = markettime[lisj[2].replace('"','')]
mktime = mtime.split("-")
if mktime[1] < mktime[0]:
righto = mktime[1].split(":")
close = str(str(int(righto[0])+24)+":"+righto[1])
else:
close = mktime[1]
rightnow = strftime("%H:%M", localtime())
if rightnow < strftime("04:00"):
right = rightnow.split(":")
rightnow = str(str(int(right[0])+24)+":"+right[1])
if (close > rightnow > mktime[0]):
print("Market ", lisj[2].replace('.db"',''), " is starting at cycle ", cycle)
if lisj[1] =='g':
thr.append(_thread.start_new_thread(googlequotemachine.actionking, (a_lock, start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,synamelist,1,0,timetorun) ))
elif lisj[1] =='y':
thr.append(_thread.start_new_thread(yahooquotemachine.actionking, (a_lock,start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,synamelist,1,0,timetorun) ))
else:
thr.append(_thread.start_new_thread(bloombergquotemachine.actionking, (a_lock,start1, lisj[0].replace('"',''),database+lisj[2].replace('"',''),0,) ))
time.sleep(0.00001)
print("locks placed and Market engine is running for the....", cycle, " time...with threads", thr )
time.sleep(timetorun)
gc.collect()
print("locks released and Market engine is restarting for the...", cycle, " time...")
cycle = cycle + 1
/*email to provide support at [email protected], [email protected], For donations please write to [email protected]*/////// | {
"content_hash": "ef9432f6614e60f785bca7ecbe30a606",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 472,
"avg_line_length": 53.625,
"alnum_prop": 0.5625485625485626,
"repo_name": "VanceKingSaxbeA/EconometricIntelligence",
"id": "492689bbf8566f500cbd6eee77e25c29c11376ad",
"size": "3905",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "398"
},
{
"name": "Python",
"bytes": "39305"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Logo.visibility_level'
db.add_column(u'communication_logo', 'visibility_level',
self.gf('django.db.models.fields.CharField')(default='default', max_length=32),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Logo.visibility_level'
db.delete_column(u'communication_logo', 'visibility_level')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'communication.agepslide': {
'Meta': {'object_name': 'AgepSlide'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"})
},
u'communication.agepslidelogging': {
'Meta': {'object_name': 'AgepSlideLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['communication.AgepSlide']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'communication.logo': {
'Meta': {'object_name': 'Logo'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"}),
'visibility_level': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '32'})
},
u'communication.logologging': {
'Meta': {'object_name': 'LogoLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['communication.Logo']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'communication.websitenews': {
'Meta': {'object_name': 'WebsiteNews'},
'content': ('django.db.models.fields.TextField', [], {}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'0_draft'", 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '255'})
},
u'communication.websitenewslogging': {
'Meta': {'object_name': 'WebsiteNewsLogging'},
'extra_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': u"orm['communication.WebsiteNews']"}),
'what': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'who': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['users.TruffeUser']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'units.unit': {
'Meta': {'object_name': 'Unit'},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_epfl': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'is_commission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_equipe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent_hierarchique': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['units.Unit']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'users.truffeuser': {
'Meta': {'object_name': 'TruffeUser'},
'adresse': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'body': ('django.db.models.fields.CharField', [], {'default': "'.'", 'max_length': '1'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255'}),
'email_perso': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'iban_ou_ccp': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'nom_banque': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['communication'] | {
"content_hash": "d043eb7046219207545da93028b16022",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 195,
"avg_line_length": 74.29411764705883,
"alnum_prop": 0.5503760886777513,
"repo_name": "ArcaniteSolutions/truffe2",
"id": "a0d59a059a4949baf5d52e9e3df25a25718eab16",
"size": "10128",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "truffe2/communication/migrations/0005_auto__add_field_logo_visibility_level.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "552855"
},
{
"name": "HTML",
"bytes": "742372"
},
{
"name": "JavaScript",
"bytes": "1859724"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3048852"
}
],
"symlink_target": ""
} |
"""Consume and serialize all of the data from a running TensorBoard instance.
This program connects to a live TensorBoard backend at given port, and saves
all of the data to local disk JSON in a predictable format.
This makes it easy to mock out the TensorBoard backend so that the frontend
may be tested in isolation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import json
import os
import os.path
import shutil
import StringIO
import threading
import urllib
import six
from six.moves import http_client
import tensorflow as tf
from tensorflow.python.summary import event_multiplexer
from tensorflow.tensorboard.backend import server
tf.flags.DEFINE_string('logdir', None, """the logdir to pass to the TensorBoard
backend; data will be read from this logdir for serialization.""")
tf.flags.DEFINE_string('target', None, """The directoy where serialized data
will be written""")
tf.flags.DEFINE_boolean('overwrite', False, """Whether to remove and overwrite
TARGET if it already exists.""")
tf.flags.DEFINE_boolean(
'purge_orphaned_data', True, 'Whether to purge data that '
'may have been orphaned due to TensorBoard restarts. '
'Disabling purge_orphaned_data can be used to debug data '
'disappearance.')
FLAGS = tf.flags.FLAGS
BAD_CHARACTERS = "#%&{}\\/<>*? $!'\":@+`|="
DEFAULT_SUFFIX = '.json'
IMAGE_SUFFIX = '.png'
AUDIO_SUFFIX = '.wav'
GRAPH_SUFFIX = '.pbtxt'
def Url(route, params):
"""Takes route and query params, and produce encoded url for that asset."""
out = route
if params:
# sorting ensures a unique filename for each query
sorted_params = sorted(six.iteritems(params))
out += '?' + urllib.urlencode(sorted_params)
return out
def Clean(s):
"""Clean a string so it can be used as a filepath."""
for c in BAD_CHARACTERS:
s = s.replace(c, '_')
return s
class TensorBoardStaticSerializer(object):
"""Serialize all the routes from a TensorBoard server to static json."""
def __init__(self, connection, target_path):
self.connection = connection
EnsureDirectoryExists(target_path)
self.path = target_path
def GetAndSave(self, url, save_suffix, unzip=False):
"""GET the given url. Serialize the result at clean path version of url."""
self.connection.request('GET',
'/data/' + url,
headers={'content-type': 'text/plain'})
response = self.connection.getresponse()
file_name = Clean(url) + save_suffix
destination = os.path.join(self.path, file_name)
if response.status != 200:
raise IOError(url)
if unzip:
s = StringIO.StringIO(response.read())
content = gzip.GzipFile(fileobj=s).read()
else:
content = response.read()
with open(destination, 'w') as f:
f.write(content)
return content
def GetRouteAndSave(self, route, params=None):
"""GET given route and params. Serialize the result. Return as JSON."""
url = Url(route, params)
return json.loads(self.GetAndSave(url, DEFAULT_SUFFIX))
def Run(self):
"""Serialize everything from a TensorBoard backend."""
# get the runs object, which is an index for every tag.
runs = self.GetRouteAndSave('runs')
# collect sampled data.
self.GetRouteAndSave('scalars')
# now let's just download everything!
for run, tag_type_to_tags in six.iteritems(runs):
for tag_type, tags in six.iteritems(tag_type_to_tags):
try:
if tag_type == 'graph':
# in this case, tags is a bool which specifies if graph is present.
if tags:
url = Url('graph', {'run': run})
self.GetAndSave(url, GRAPH_SUFFIX, unzip=True)
elif tag_type == 'images':
for t in tags:
images = self.GetRouteAndSave('images', {'run': run, 'tag': t})
for im in images:
url = 'individualImage?' + im['query']
# pull down the images themselves.
self.GetAndSave(url, IMAGE_SUFFIX)
elif tag_type == 'audio':
for t in tags:
audio = self.GetRouteAndSave('audio', {'run': run, 'tag': t})
for snd in audio:
url = 'individualAudio?' + snd['query']
# pull down the audio clips themselves
self.GetAndSave(url, AUDIO_SUFFIX)
elif tag_type == 'run_metadata':
for t in tags:
url = Url('run_metadata', {'run': run, 'tag': t})
self.GetAndSave(url, GRAPH_SUFFIX, unzip=True)
elif tag_type == 'firstEventTimestamp':
pass
else:
for t in tags:
# Save this, whatever it is :)
self.GetRouteAndSave(tag_type, {'run': run, 'tag': t})
except IOError as e:
PrintAndLog('Retrieval failed for %s/%s/%s' % (tag_type, run, tags),
tf.logging.WARN)
PrintAndLog('Got Exception: %s' % e, tf.logging.WARN)
PrintAndLog('continuing...', tf.logging.WARN)
continue
def EnsureDirectoryExists(path):
if not os.path.exists(path):
os.makedirs(path)
def PrintAndLog(msg, lvl=tf.logging.INFO):
tf.logging.log(lvl, msg)
print(msg)
def main(unused_argv=None):
target = FLAGS.target
logdir = FLAGS.logdir
if not target or not logdir:
PrintAndLog('Both --target and --logdir are required.', tf.logging.ERROR)
return -1
if os.path.exists(target):
if FLAGS.overwrite:
if os.path.isdir(target):
shutil.rmtree(target)
else:
os.remove(target)
else:
PrintAndLog('Refusing to overwrite target %s without --overwrite' %
target, tf.logging.ERROR)
return -2
path_to_run = server.ParseEventFilesSpec(FLAGS.logdir)
PrintAndLog('About to load Multiplexer. This may take some time.')
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=server.TENSORBOARD_SIZE_GUIDANCE,
purge_orphaned_data=FLAGS.purge_orphaned_data)
server.ReloadMultiplexer(multiplexer, path_to_run)
PrintAndLog('Multiplexer load finished. Starting TensorBoard server.')
s = server.BuildServer(multiplexer, 'localhost', 0)
server_thread = threading.Thread(target=s.serve_forever)
server_thread.daemon = True
server_thread.start()
connection = http_client.HTTPConnection('localhost', s.server_address[1])
PrintAndLog('Server setup! Downloading data from the server.')
x = TensorBoardStaticSerializer(connection, target)
x.Run()
PrintAndLog('Done downloading data.')
connection.close()
s.shutdown()
s.server_close()
if __name__ == '__main__':
tf.app.run()
| {
"content_hash": "2eb14be1ad9c435af4a5c5f7893b13e0",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 79,
"avg_line_length": 32.63592233009709,
"alnum_prop": 0.6486687490703555,
"repo_name": "Lab603/PicEncyclopedias",
"id": "3feca406cc59adf71d4bbfa25da2fdadd6184d65",
"size": "7412",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "jni-build/jni-build/jni/include/tensorflow/tensorboard/scripts/serialize_tensorboard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "361482"
},
{
"name": "C++",
"bytes": "22994090"
},
{
"name": "CMake",
"bytes": "72924"
},
{
"name": "CSS",
"bytes": "1548"
},
{
"name": "HTML",
"bytes": "1040352"
},
{
"name": "Java",
"bytes": "252082"
},
{
"name": "JavaScript",
"bytes": "25902"
},
{
"name": "Jupyter Notebook",
"bytes": "3547008"
},
{
"name": "Makefile",
"bytes": "47206"
},
{
"name": "Objective-C",
"bytes": "10664"
},
{
"name": "Objective-C++",
"bytes": "91354"
},
{
"name": "Python",
"bytes": "19063444"
},
{
"name": "Shell",
"bytes": "476334"
},
{
"name": "TypeScript",
"bytes": "1264488"
}
],
"symlink_target": ""
} |
import json
from typing import Optional
import pytest
from mock import Mock
from pygls.lsp.types import (DidCloseTextDocumentParams,
DidOpenTextDocumentParams, TextDocumentIdentifier,
TextDocumentItem)
from pygls.workspace import Document, Workspace
from ...server import completions, did_close, did_open
class FakeServer():
"""We don't need real server to unit test features."""
publish_diagnostics = None
show_message = None
show_message_log = None
def __init__(self):
self.workspace = Workspace('', None)
fake_document_uri = 'file://fake_doc.txt'
fake_document_content = 'text'
fake_document = Document(fake_document_uri, fake_document_content)
server = FakeServer()
server.publish_diagnostics = Mock()
server.show_message = Mock()
server.show_message_log = Mock()
server.workspace.get_document = Mock(return_value=fake_document)
def _reset_mocks():
server.publish_diagnostics.reset_mock()
server.show_message.reset_mock()
server.show_message_log.reset_mock()
def test_completions():
completion_list = completions()
labels = [i.label for i in completion_list.items]
assert '"' in labels
assert '[' in labels
assert ']' in labels
assert '{' in labels
assert '}' in labels
def test_did_close():
_reset_mocks()
params = DidCloseTextDocumentParams(
text_document=TextDocumentIdentifier(uri=fake_document_uri))
did_close(server, params)
# Check if show message is called
server.show_message.assert_called_once()
@pytest.mark.asyncio
async def test_did_open():
_reset_mocks()
expected_msg = None
# Get expected error message
try:
json.loads(fake_document_content)
except json.JSONDecodeError as err:
expected_msg = err.msg
params = DidOpenTextDocumentParams(
text_document=TextDocumentItem(uri=fake_document_uri,
language_id='json',
version=1,
text=fake_document_content))
await did_open(server, params)
# Check publish diagnostics is called
server.publish_diagnostics.assert_called_once()
# Check publish diagnostics args message
args = server.publish_diagnostics.call_args
assert args[0][1][0].message is expected_msg
# Check other methods are called
server.show_message.assert_called_once()
server.show_message_log.assert_called_once()
| {
"content_hash": "16ad97f2f9e8df51ffef4d9f62e21d48",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 79,
"avg_line_length": 26.79787234042553,
"alnum_prop": 0.6621675267963477,
"repo_name": "openlawlibrary/pygls",
"id": "6453c51304501245f85758f535c561903ff86ed5",
"size": "3751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/json-extension/server/tests/unit/test_features.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1368"
},
{
"name": "JavaScript",
"bytes": "1117"
},
{
"name": "Python",
"bytes": "446640"
}
],
"symlink_target": ""
} |
import bibleIn
import tldrnewsIn
import weatherIn
import calebIn
import bibleProc
import tldrnewsProc
import weatherProc
import calebAmountsProc
import calebSupportersProc
import textOut
kindle = "/media/toben/Kindle/documents/"
#kindle = '/home/toben/Code/Little-News-Processor/'
def run():
caleb_data = calebIn.fetch()
data = []
print("fectching bible")
data.append(bibleProc.consume(bibleIn.fetch()))
print("done. Fetching weather")
data.append(weatherProc.consume(weatherIn.fetch()))
print("done. Fetching support stats")
data.append(calebAmountsProc.consume(caleb_data))
data.append(calebSupportersProc.consume(caleb_data))
print("done. Fetching news")
data.append(tldrnewsProc.consume(tldrnewsIn.fetch()))
print("done. outputing")
textOut.put(data,kindle+"dailyNews.txt")
print("Network complete")
if __name__ == "__main__":
run()
| {
"content_hash": "1ca1f0e18ec099b85fdcef7526a03f2a",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 54,
"avg_line_length": 22.102564102564102,
"alnum_prop": 0.7587006960556845,
"repo_name": "Narcolapser/Little-News-Processor",
"id": "1decfc89b40100b4af98e2e6e461a1dac51e53db",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kindleNetwork.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "72699"
}
],
"symlink_target": ""
} |
"""
Sample Google App Engine application that demonstrates using the Users API
For more information about App Engine, see README.md under /appengine.
"""
# [START all]
from google.appengine.api import users
import webapp2
class MainPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
nickname = user.nickname()
logout_url = users.create_logout_url('/')
greeting = 'Welcome, {}! (<a href="{}">sign out</a>)'.format(
nickname, logout_url)
else:
login_url = users.create_login_url('/')
greeting = '<a href="{}">Sign in</a>'.format(login_url)
self.response.write(
'<html><body>{}</body></html>'.format(greeting))
class AdminPage(webapp2.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
if users.is_current_user_admin():
self.response.write('You are an administrator.')
else:
self.response.write('You are not an administrator.')
else:
self.response.write('You are not logged in.')
app = webapp2.WSGIApplication([
('/', MainPage),
('/admin', AdminPage)
], debug=True)
# [END all]
| {
"content_hash": "3c4772bf2fb231c5e5c565fa4e849109",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 74,
"avg_line_length": 27.630434782608695,
"alnum_prop": 0.5837922895357985,
"repo_name": "clarko1/Cramd",
"id": "7904a20c44284db3241f6b6fcfb29b1197cb2d9b",
"size": "1847",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "appengine/standard/users/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2924"
},
{
"name": "HTML",
"bytes": "23592"
},
{
"name": "JavaScript",
"bytes": "11222"
},
{
"name": "Makefile",
"bytes": "881"
},
{
"name": "Protocol Buffer",
"bytes": "8810"
},
{
"name": "Python",
"bytes": "1055640"
},
{
"name": "Shell",
"bytes": "8344"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from typing import Any, Iterable, List, Mapping, Set, Text, Tuple
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_stream_name, create_streams_if_needed
from zerver.lib.request import JsonableError
from zerver.models import UserProfile, Stream, Subscription, \
Realm, Recipient, bulk_get_recipients, get_recipient, get_stream, \
bulk_get_streams
def access_stream_common(user_profile, stream, error):
# type: (UserProfile, Stream, Text) -> Tuple[Recipient, Subscription]
"""Common function for backend code where the target use attempts to
access the target stream, returning all the data fetched along the
way. If that user does not have permission to access that stream,
we throw an exception. A design goal is that the error message is
the same for streams you can't access and streams that don't exist."""
# First, we don't allow any access to streams in other realms.
if stream.realm_id != user_profile.realm_id:
raise JsonableError(error)
recipient = get_recipient(Recipient.STREAM, stream.id)
try:
sub = Subscription.objects.get(user_profile=user_profile,
recipient=recipient,
active=True)
except Subscription.DoesNotExist:
sub = None
# If the stream is in your realm and public, you can access it.
if stream.is_public():
return (recipient, sub)
# Or if you are subscribed to the stream, you can access it.
if sub is not None:
return (recipient, sub)
# Otherwise it is a private stream and you're not on it, so throw
# an error.
raise JsonableError(error)
def access_stream_by_id(user_profile, stream_id):
# type: (UserProfile, int) -> Tuple[Stream, Recipient, Subscription]
error = _("Invalid stream id")
try:
stream = Stream.objects.get(id=stream_id)
except Stream.DoesNotExist:
raise JsonableError(error)
(recipient, sub) = access_stream_common(user_profile, stream, error)
return (stream, recipient, sub)
def check_stream_name_available(realm, name):
# type: (Realm, Text) -> None
check_stream_name(name)
try:
get_stream(name, realm)
raise JsonableError(_("Stream name '%s' is already taken") % (name,))
except Stream.DoesNotExist:
pass
def access_stream_by_name(user_profile, stream_name):
# type: (UserProfile, Text) -> Tuple[Stream, Recipient, Subscription]
error = _("Invalid stream name '%s'" % (stream_name,))
try:
stream = get_stream(stream_name, user_profile.realm)
except Stream.DoesNotExist:
raise JsonableError(error)
(recipient, sub) = access_stream_common(user_profile, stream, error)
return (stream, recipient, sub)
def filter_stream_authorization(user_profile, streams):
# type: (UserProfile, Iterable[Stream]) -> Tuple[List[Stream], List[Stream]]
streams_subscribed = set() # type: Set[int]
recipients_map = bulk_get_recipients(Recipient.STREAM, [stream.id for stream in streams])
subs = Subscription.objects.filter(user_profile=user_profile,
recipient__in=list(recipients_map.values()),
active=True)
for sub in subs:
streams_subscribed.add(sub.recipient.type_id)
unauthorized_streams = [] # type: List[Stream]
for stream in streams:
# The user is authorized for his own streams
if stream.id in streams_subscribed:
continue
# The user is not authorized for invite_only streams
if stream.invite_only:
unauthorized_streams.append(stream)
authorized_streams = [stream for stream in streams if
stream.id not in set(stream.id for stream in unauthorized_streams)]
return authorized_streams, unauthorized_streams
def list_to_streams(streams_raw, user_profile, autocreate=False):
# type: (Iterable[Mapping[str, Any]], UserProfile, bool) -> Tuple[List[Stream], List[Stream]]
"""Converts list of dicts to a list of Streams, validating input in the process
For each stream name, we validate it to ensure it meets our
requirements for a proper stream name using check_stream_name.
This function in autocreate mode should be atomic: either an exception will be raised
during a precheck, or all the streams specified will have been created if applicable.
@param streams_raw The list of stream dictionaries to process;
names should already be stripped of whitespace by the caller.
@param user_profile The user for whom we are retreiving the streams
@param autocreate Whether we should create streams if they don't already exist
"""
# Validate all streams, getting extant ones, then get-or-creating the rest.
stream_set = set(stream_dict["name"] for stream_dict in streams_raw)
for stream_name in stream_set:
# Stream names should already have been stripped by the
# caller, but it makes sense to verify anyway.
assert stream_name == stream_name.strip()
check_stream_name(stream_name)
existing_streams = [] # type: List[Stream]
missing_stream_dicts = [] # type: List[Mapping[str, Any]]
existing_stream_map = bulk_get_streams(user_profile.realm, stream_set)
for stream_dict in streams_raw:
stream_name = stream_dict["name"]
stream = existing_stream_map.get(stream_name.lower())
if stream is None:
missing_stream_dicts.append(stream_dict)
else:
existing_streams.append(stream)
if len(missing_stream_dicts) == 0:
# This is the happy path for callers who expected all of these
# streams to exist already.
created_streams = [] # type: List[Stream]
else:
# autocreate=True path starts here
if not user_profile.can_create_streams():
raise JsonableError(_('User cannot create streams.'))
elif not autocreate:
raise JsonableError(_("Stream(s) (%s) do not exist") % ", ".join(
stream_dict["name"] for stream_dict in missing_stream_dicts))
# We already filtered out existing streams, so dup_streams
# will normally be an empty list below, but we protect against somebody
# else racing to create the same stream. (This is not an entirely
# paranoid approach, since often on Zulip two people will discuss
# creating a new stream, and both people eagerly do it.)
created_streams, dup_streams = create_streams_if_needed(realm=user_profile.realm,
stream_dicts=missing_stream_dicts)
existing_streams += dup_streams
return existing_streams, created_streams
| {
"content_hash": "667b9d302200e30e881989d4c2e4f185",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 98,
"avg_line_length": 42.98757763975155,
"alnum_prop": 0.6657997399219766,
"repo_name": "christi3k/zulip",
"id": "96c22be0106fead4dce433b636b7bd87638f891e",
"size": "6921",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "zerver/lib/streams.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "241509"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "457023"
},
{
"name": "JavaScript",
"bytes": "1471696"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "82466"
},
{
"name": "Python",
"bytes": "2960440"
},
{
"name": "Ruby",
"bytes": "249748"
},
{
"name": "Shell",
"bytes": "37195"
}
],
"symlink_target": ""
} |
from fake_useragent import UserAgent
def get_random_agent_headers():
headers = {
"User-Agent": UserAgent().chrome,
"Referer": "https://www.baidu.com/"
}
return headers | {
"content_hash": "0ca3631383cedbc0e4f6b5d5382d2e3d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 43,
"avg_line_length": 24.5,
"alnum_prop": 0.6275510204081632,
"repo_name": "hbbhbbh/TmallSingleCrawler",
"id": "2f5e9ca07ffa902f198533702a1e3bf02cd31299",
"size": "196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TmallSingleCrawler/utils/UserAgentUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23913"
}
],
"symlink_target": ""
} |
import collections
import os
import re
import tracing_project
from tracing_build import check_common
class _Token(object):
def __init__(self, data, token_id=None):
self.data = data
if token_id:
self.token_id = token_id
else:
self.token_id = 'plain'
class BuildFile(object):
def __init__(self, text, file_groups):
self._file_groups = file_groups
self._tokens = list(self._Tokenize(text))
def _Tokenize(self, text):
rest = text
token_regex = self._TokenRegex()
while len(rest):
m = token_regex.search(rest)
if not m:
# In `rest', we couldn't find a match.
# So, lump the entire `rest' into a token
# and stop producing any more tokens.
yield _Token(rest)
return
min_index, end_index, matched_token = self._ProcessMatch(m)
if min_index > 0:
yield _Token(rest[:min_index])
yield matched_token
rest = rest[end_index:]
def Update(self, files_by_group):
for token in self._tokens:
if token.token_id in files_by_group:
token.data = self._GetReplacementListAsString(
token.data,
files_by_group[token.token_id])
def Write(self, f):
for token in self._tokens:
f.write(token.data)
def _ProcessMatch(self, match):
raise NotImplementedError
def _TokenRegex(self):
raise NotImplementedError
def _GetReplacementListAsString(self, existing_list_as_string, filelist):
raise NotImplementedError
class GniFile(BuildFile):
def _ProcessMatch(self, match):
min_index = match.start(2)
end_index = match.end(2)
token = _Token(match.string[min_index:end_index],
token_id=match.groups()[0])
return min_index, end_index, token
def _TokenRegex(self):
# regexp to match the following:
# file_group_name = [
# "path/to/one/file.extension",
# "another/file.ex",
# ]
# In the match,
# group 1 is : 'file_group_name'
# group 2 is : ' "path/to/one/file.extension",\n "another/file.ex",\n'
regexp_str = r'(%s) = \[\n(.+?) +\],?\n' % '|'.join(self._file_groups)
return re.compile(regexp_str, re.MULTILINE | re.DOTALL)
def _GetReplacementListAsString(self, existing_list_as_string, filelist):
list_entry = existing_list_as_string.splitlines()[0]
prefix, _, suffix = list_entry.split('"')
return ''.join(['"'.join([prefix, filename, suffix + '\n'])
for filename in filelist])
def _GroupFiles(file_name_to_group_name_func, filenames):
file_groups = collections.defaultdict(lambda: [])
for filename in filenames:
file_groups[file_name_to_group_name_func(filename)].append(filename)
for group in file_groups:
file_groups[group].sort()
return file_groups
def _UpdateBuildFile(filename, build_file_class):
with open(filename, 'r') as f:
build_file = build_file_class(f.read(), check_common.FILE_GROUPS)
files_by_group = _GroupFiles(check_common.GetFileGroupFromFileName,
check_common.GetKnownFiles())
build_file.Update(files_by_group)
with open(filename, 'w') as f:
build_file.Write(f)
def UpdateGni():
tvp = tracing_project.TracingProject()
_UpdateBuildFile(
os.path.join(tvp.tracing_root_path, 'trace_viewer.gni'), GniFile)
def Update():
UpdateGni()
| {
"content_hash": "32f6e19944a010ca37e51f9e735e0da4",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 75,
"avg_line_length": 28.10924369747899,
"alnum_prop": 0.6385650224215247,
"repo_name": "catapult-project/catapult",
"id": "4151ea7a6ac38a681d74a42cc77fd03aea81fcff",
"size": "3512",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "tracing/tracing_build/update_gni.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
import copy
import datetime
import os
import uuid
import eventlet.event
from oslo_config import cfg
from oslo_log import log as logging
from six.moves import urllib
from yaql import specs
import murano.common.exceptions as exceptions
import murano.common.messaging as messaging
from murano.dsl import dsl
import murano.engine.system.common as common
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class AgentException(Exception):
pass
@dsl.name('io.murano.system.Agent')
class Agent(object):
def __init__(self, interfaces, host):
self._enabled = False
if CONF.engine.disable_murano_agent:
LOG.debug('Use of murano-agent is disallowed '
'by the server configuration')
return
self._environment = host.find_owner('io.murano.Environment')
self._enabled = True
self._queue = str('e%s-h%s' % (
self._environment.id, host.id)).lower()
@property
def enabled(self):
return self._enabled
def prepare(self):
# (sjmc7) - turn this into a no-op if agents are disabled
if CONF.engine.disable_murano_agent:
LOG.debug('Use of murano-agent is disallowed '
'by the server configuration')
return
with common.create_rmq_client() as client:
client.declare(self._queue, enable_ha=True, ttl=86400000)
def queue_name(self):
return self._queue
def _check_enabled(self):
if CONF.engine.disable_murano_agent:
raise exceptions.PolicyViolationException(
'Use of murano-agent is disallowed '
'by the server configuration')
def _prepare_message(self, template, msg_id):
msg = messaging.Message()
msg.body = template
msg.id = msg_id
return msg
def _send(self, template, wait_results, timeout):
"""Send a message over the MQ interface."""
msg_id = template.get('ID', uuid.uuid4().hex)
if wait_results:
event = eventlet.event.Event()
listener = self._environment['agentListener']
listener().subscribe(msg_id, event)
msg = self._prepare_message(template, msg_id)
with common.create_rmq_client() as client:
client.send(message=msg, key=self._queue)
if wait_results:
try:
with eventlet.Timeout(timeout):
result = event.wait()
except eventlet.Timeout:
listener().unsubscribe(msg_id)
raise exceptions.TimeoutException(
'The murano-agent did not respond '
'within {0} seconds'.format(timeout))
if not result:
return None
if result.get('FormatVersion', '1.0.0').startswith('1.'):
return self._process_v1_result(result)
else:
return self._process_v2_result(result)
else:
return None
@specs.parameter(
'resources', dsl.MuranoType('io.murano.system.Resources'))
def call(self, template, resources, timeout=None):
if timeout is None:
timeout = CONF.engine.agent_timeout
self._check_enabled()
plan = self.build_execution_plan(template, resources())
return self._send(plan, True, timeout)
@specs.parameter(
'resources', dsl.MuranoType('io.murano.system.Resources'))
def send(self, template, resources):
self._check_enabled()
plan = self.build_execution_plan(template, resources())
return self._send(plan, False, 0)
def call_raw(self, plan, timeout=None):
if timeout is None:
timeout = CONF.engine.agent_timeout
self._check_enabled()
return self._send(plan, True, timeout)
def send_raw(self, plan):
self._check_enabled()
return self._send(plan, False, 0)
def is_ready(self, timeout=100):
try:
self.wait_ready(timeout)
except exceptions.TimeoutException:
return False
else:
return True
def wait_ready(self, timeout=100):
self._check_enabled()
template = {'Body': 'return', 'FormatVersion': '2.0.0', 'Scripts': {}}
self.call(template, False, timeout)
def _process_v1_result(self, result):
if result['IsException']:
raise AgentException(dict(self._get_exception_info(
result.get('Result', [])), source='execution_plan'))
else:
results = result.get('Result', [])
if not result:
return None
value = results[-1]
if value['IsException']:
raise AgentException(dict(self._get_exception_info(
value.get('Result', [])), source='command'))
else:
return value.get('Result')
def _process_v2_result(self, result):
error_code = result.get('ErrorCode', 0)
if not error_code:
return result.get('Body')
else:
body = result.get('Body') or {}
err = {
'message': body.get('Message'),
'details': body.get('AdditionalInfo'),
'errorCode': error_code,
'time': result.get('Time')
}
for attr in ('Message', 'AdditionalInfo'):
if attr in body:
del body[attr]
err['extra'] = body if body else None
raise AgentException(err)
def _get_array_item(self, array, index):
return array[index] if len(array) > index else None
def _get_exception_info(self, data):
data = data or []
return {
'type': self._get_array_item(data, 0),
'message': self._get_array_item(data, 1),
'command': self._get_array_item(data, 2),
'details': self._get_array_item(data, 3),
'timestamp': datetime.datetime.now().isoformat()
}
def build_execution_plan(self, template, resources):
template = copy.deepcopy(template)
if not isinstance(template, dict):
raise ValueError('Incorrect execution plan ')
format_version = template.get('FormatVersion')
if not format_version or format_version.startswith('1.'):
return self._build_v1_execution_plan(template, resources)
else:
return self._build_v2_execution_plan(template, resources)
def _build_v1_execution_plan(self, template, resources):
scripts_folder = 'scripts'
script_files = template.get('Scripts', [])
scripts = []
for script in script_files:
script_path = os.path.join(scripts_folder, script)
scripts.append(resources.string(
script_path).encode('base64'))
template['Scripts'] = scripts
return template
def _build_v2_execution_plan(self, template, resources):
scripts_folder = 'scripts'
plan_id = uuid.uuid4().hex
template['ID'] = plan_id
if 'Action' not in template:
template['Action'] = 'Execute'
if 'Files' not in template:
template['Files'] = {}
files = {}
for file_id, file_descr in template['Files'].items():
files[file_descr['Name']] = file_id
for name, script in template.get('Scripts', {}).items():
if 'EntryPoint' not in script:
raise ValueError('No entry point in script ' + name)
if 'Application' in script['Type']:
script['EntryPoint'] = self._place_file(scripts_folder,
script['EntryPoint'],
template, resources,
files)
if 'Files' in script:
for i, file in enumerate(script['Files']):
if self._get_name(file) not in files:
script['Files'][i] = self._place_file(
scripts_folder, file, template, resources, files)
else:
script['Files'][i] = files[file]
return template
def _is_url(self, file):
file = self._get_url(file)
parts = urllib.parse.urlsplit(file)
if not parts.scheme or not parts.netloc:
return False
else:
return True
def _get_url(self, file):
if isinstance(file, dict):
return list(file.values())[0]
else:
return file
def _get_name(self, file):
if isinstance(file, dict):
name = list(file.keys())[0]
else:
name = file
if self._is_url(name):
name = name[name.rindex('/') + 1:len(name)]
elif name.startswith('<') and name.endswith('>'):
name = name[1: -1]
return name
def _get_file_value(self, file):
if isinstance(file, dict):
file = list(file.values())[0]
return file
def _get_body(self, file, resources, folder):
use_base64 = self._is_base64(file)
if use_base64 and file.startswith('<') and file.endswith('>'):
file = file[1: -1]
body = resources.string(os.path.join(folder, file))
if use_base64:
body = body.encode('base64')
return body
def _is_base64(self, file):
return file.startswith('<') and file.endswith('>')
def _get_body_type(self, file):
return 'Base64' if self._is_base64(file) else 'Text'
def _place_file(self, folder, file, template, resources, files):
file_value = self._get_file_value(file)
name = self._get_name(file)
file_id = uuid.uuid4().hex
if self._is_url(file_value):
template['Files'][file_id] = self._get_file_des_downloadable(file)
files[name] = file_id
else:
template['Files'][file_id] = self._get_file_description(
file, resources, folder)
files[name] = file_id
return file_id
def _get_file_des_downloadable(self, file):
name = self._get_name(file)
file = self._get_file_value(file)
return {
'Name': str(name),
'URL': file,
'Type': 'Downloadable'
}
def _get_file_description(self, file, resources, folder):
name = self._get_name(file)
file_value = self._get_file_value(file)
body_type = self._get_body_type(file_value)
body = self._get_body(file_value, resources, folder)
return {
'Name': name,
'BodyType': body_type,
'Body': body
}
| {
"content_hash": "45ce59d22afa486c153aa30f23baf104",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 78,
"avg_line_length": 33.80625,
"alnum_prop": 0.5501016823812165,
"repo_name": "olivierlemasle/murano",
"id": "32ef701de3adf3a5f60c9b2815f0875432a35952",
"size": "11401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/engine/system/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "152"
},
{
"name": "Mako",
"bytes": "1013"
},
{
"name": "PowerShell",
"bytes": "2772"
},
{
"name": "Puppet",
"bytes": "86"
},
{
"name": "Python",
"bytes": "1267810"
},
{
"name": "Ruby",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "25578"
}
],
"symlink_target": ""
} |
import random
import re
import unittest
import warnings
import requests
from ruamel.yaml import YAML
from pymatgen.analysis.phase_diagram import PhaseDiagram
from pymatgen.analysis.pourbaix_diagram import PourbaixDiagram, PourbaixEntry
from pymatgen.analysis.reaction_calculator import Reaction
from pymatgen.analysis.wulff import WulffShape
from pymatgen.core import SETTINGS
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Composition, Structure
from pymatgen.electronic_structure.bandstructure import (
BandStructure,
BandStructureSymmLine,
)
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.entries.compatibility import MaterialsProject2020Compatibility
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.ext.matproj import MP_LOG_FILE, MPRester, MPRestError, TaskType
from pymatgen.io.cif import CifParser
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.util.testing import PymatgenTest
try:
website_is_up = requests.get("https://www.materialsproject.org").status_code == 200
except requests.exceptions.ConnectionError:
website_is_up = False
@unittest.skipIf(
(not SETTINGS.get("PMG_MAPI_KEY")) or (not website_is_up),
"PMG_MAPI_KEY environment variable not set or MP is down.",
)
class MPResterTest(PymatgenTest):
_multiprocess_shared_ = True
def setUp(self):
self.rester = MPRester()
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
self.rester.session.close()
def test_get_all_materials_ids_doc(self):
mids = self.rester.get_materials_ids("Al2O3")
random.shuffle(mids)
doc = self.rester.get_doc(mids.pop(0))
self.assertEqual(doc["pretty_formula"], "Al2O3")
def test_get_xas_data(self):
# Test getting XAS data
data = self.rester.get_xas_data("mp-19017", "Li")
self.assertEqual("mp-19017,Li", data["mid_and_el"])
self.assertAlmostEqual(data["spectrum"]["x"][0], 55.178, places=2)
self.assertAlmostEqual(data["spectrum"]["y"][0], 0.0164634, places=2)
def test_get_data(self):
props = {
"energy",
"energy_per_atom",
"formation_energy_per_atom",
"nsites",
"unit_cell_formula",
"pretty_formula",
"is_hubbard",
"elements",
"nelements",
"e_above_hull",
"hubbards",
"is_compatible",
"task_ids",
"density",
"icsd_ids",
"total_magnetization",
}
mpid = "mp-1143"
vals = requests.get(f"http://legacy.materialsproject.org/materials/{mpid}/json/")
expected_vals = vals.json()
for prop in props:
if prop not in [
"hubbards",
"unit_cell_formula",
"elements",
"icsd_ids",
"task_ids",
]:
val = self.rester.get_data(mpid, prop=prop)[0][prop]
if prop in ["energy", "energy_per_atom"]:
prop = "final_" + prop
self.assertAlmostEqual(expected_vals[prop], val, 2, f"Failed with property {prop}")
elif prop in ["elements", "icsd_ids", "task_ids"]:
upstream_vals = set(self.rester.get_data(mpid, prop=prop)[0][prop])
self.assertLessEqual(set(expected_vals[prop]), upstream_vals)
else:
self.assertEqual(
expected_vals[prop],
self.rester.get_data(mpid, prop=prop)[0][prop],
)
props = ["structure", "initial_structure", "final_structure", "entry"]
for prop in props:
obj = self.rester.get_data(mpid, prop=prop)[0][prop]
if prop.endswith("structure"):
self.assertIsInstance(obj, Structure)
elif prop == "entry":
obj = self.rester.get_data(mpid, prop=prop)[0][prop]
self.assertIsInstance(obj, ComputedEntry)
# Test chemsys search
data = self.rester.get_data("Fe-Li-O", prop="unit_cell_formula")
self.assertTrue(len(data) > 1)
elements = {Element("Li"), Element("Fe"), Element("O")}
for d in data:
self.assertTrue(set(Composition(d["unit_cell_formula"]).elements).issubset(elements))
self.assertRaises(MPRestError, self.rester.get_data, "Fe2O3", "badmethod")
def test_get_materials_id_from_task_id(self):
self.assertEqual(self.rester.get_materials_id_from_task_id("mp-540081"), "mp-19017")
def test_get_materials_id_references(self):
# nosetests pymatgen/matproj/tests/test_matproj.py:MPResterTest.test_get_materials_id_references
m = MPRester()
data = m.get_materials_id_references("mp-123")
self.assertTrue(len(data) > 1000)
def test_find_structure(self):
# nosetests pymatgen/matproj/tests/test_matproj.py:MPResterTest.test_find_structure
m = MPRester()
ciffile = self.TEST_FILES_DIR / "Fe3O4.cif"
data = m.find_structure(str(ciffile))
self.assertTrue(len(data) > 1)
s = CifParser(ciffile).get_structures()[0]
data = m.find_structure(s)
self.assertTrue(len(data) > 1)
def test_get_entries_in_chemsys(self):
syms = ["Li", "Fe", "O"]
syms2 = "Li-Fe-O"
entries = self.rester.get_entries_in_chemsys(syms)
entries2 = self.rester.get_entries_in_chemsys(syms2)
elements = {Element(sym) for sym in syms}
for e in entries:
self.assertIsInstance(e, ComputedEntry)
self.assertTrue(set(e.composition.elements).issubset(elements))
e1 = {i.entry_id for i in entries}
e2 = {i.entry_id for i in entries2}
self.assertTrue(e1 == e2)
stable_entries = self.rester.get_entries_in_chemsys(syms, additional_criteria={"e_above_hull": {"$lte": 0.001}})
self.assertTrue(len(stable_entries) < len(entries))
def test_get_structure_by_material_id(self):
s1 = self.rester.get_structure_by_material_id("mp-1")
self.assertEqual(s1.formula, "Cs1")
# requesting via task-id instead of mp-id
self.assertWarns(Warning, self.rester.get_structure_by_material_id, "mp-698856")
# requesting unknown mp-id
self.assertRaises(MPRestError, self.rester.get_structure_by_material_id, "mp-does-not-exist")
def test_get_entry_by_material_id(self):
e = self.rester.get_entry_by_material_id("mp-19017")
self.assertIsInstance(e, ComputedEntry)
self.assertTrue(e.composition.reduced_formula, "LiFePO4")
def test_query(self):
criteria = {"elements": {"$in": ["Li", "Na", "K"], "$all": ["O"]}}
props = ["pretty_formula", "energy"]
data = self.rester.query(criteria=criteria, properties=props, chunk_size=0)
self.assertTrue(len(data) > 6)
data = self.rester.query(criteria="*2O", properties=props, chunk_size=0)
self.assertGreaterEqual(len(data), 52)
self.assertIn("Li2O", (d["pretty_formula"] for d in data))
def test_query_chunk_size(self):
criteria = {"nelements": 2, "elements": "O"}
props = ["pretty_formula"]
data1 = self.rester.query(criteria=criteria, properties=props, chunk_size=0)
data2 = self.rester.query(criteria=criteria, properties=props, chunk_size=500)
self.assertEqual({d["pretty_formula"] for d in data1}, {d["pretty_formula"] for d in data2})
self.assertIn("Al2O3", {d["pretty_formula"] for d in data1})
def test_get_exp_thermo_data(self):
data = self.rester.get_exp_thermo_data("Fe2O3")
self.assertTrue(len(data) > 0)
for d in data:
self.assertEqual(d.formula, "Fe2O3")
def test_get_dos_by_id(self):
dos = self.rester.get_dos_by_material_id("mp-2254")
self.assertIsInstance(dos, CompleteDos)
def test_get_bandstructure_by_material_id(self):
bs = self.rester.get_bandstructure_by_material_id("mp-2254")
self.assertIsInstance(bs, BandStructureSymmLine)
bs_unif = self.rester.get_bandstructure_by_material_id("mp-2254", line_mode=False)
self.assertIsInstance(bs_unif, BandStructure)
self.assertNotIsInstance(bs_unif, BandStructureSymmLine)
def test_get_phonon_data_by_material_id(self):
bs = self.rester.get_phonon_bandstructure_by_material_id("mp-661")
self.assertIsInstance(bs, PhononBandStructureSymmLine)
dos = self.rester.get_phonon_dos_by_material_id("mp-661")
self.assertIsInstance(dos, CompletePhononDos)
ddb_str = self.rester.get_phonon_ddb_by_material_id("mp-661")
self.assertIsInstance(ddb_str, str)
def test_get_structures(self):
structs = self.rester.get_structures("Mn3O4")
self.assertTrue(len(structs) > 0)
def test_get_entries(self):
entries = self.rester.get_entries("TiO2")
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.composition.reduced_formula, "TiO2")
entries = self.rester.get_entries("TiO2", inc_structure=True)
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.structure.composition.reduced_formula, "TiO2")
# all_entries = self.rester.get_entries("Fe", compatible_only=False)
# entries = self.rester.get_entries("Fe", compatible_only=True)
# self.assertTrue(len(entries) < len(all_entries))
entries = self.rester.get_entries("Fe", compatible_only=True, property_data=["cif"])
self.assertIn("cif", entries[0].data)
for e in self.rester.get_entries("CdO2", inc_structure=False):
self.assertIsNotNone(e.data["oxide_type"])
# test if it will retrieve the conventional unit cell of Ni
entry = self.rester.get_entry_by_material_id("mp-23", inc_structure=True, conventional_unit_cell=True)
Ni = entry.structure
self.assertEqual(Ni.lattice.a, Ni.lattice.b)
self.assertEqual(Ni.lattice.a, Ni.lattice.c)
self.assertEqual(Ni.lattice.alpha, 90)
self.assertEqual(Ni.lattice.beta, 90)
self.assertEqual(Ni.lattice.gamma, 90)
# Ensure energy per atom is same
primNi = self.rester.get_entry_by_material_id("mp-23", inc_structure=True, conventional_unit_cell=False)
self.assertEqual(primNi.energy_per_atom, entry.energy_per_atom)
Ni = self.rester.get_structure_by_material_id("mp-23", conventional_unit_cell=True)
self.assertEqual(Ni.lattice.a, Ni.lattice.b)
self.assertEqual(Ni.lattice.a, Ni.lattice.c)
self.assertEqual(Ni.lattice.alpha, 90)
self.assertEqual(Ni.lattice.beta, 90)
self.assertEqual(Ni.lattice.gamma, 90)
# Test case where convs are different from initial and final
# th = self.rester.get_structure_by_material_id(
# "mp-37", conventional_unit_cell=True)
# th_entry = self.rester.get_entry_by_material_id(
# "mp-37", inc_structure=True, conventional_unit_cell=True)
# th_entry_initial = self.rester.get_entry_by_material_id(
# "mp-37", inc_structure="initial", conventional_unit_cell=True)
# self.assertEqual(th, th_entry.structure)
# self.assertEqual(len(th_entry.structure), 4)
# self.assertEqual(len(th_entry_initial.structure), 2)
# Test if the polymorphs of Fe are properly sorted
# by e_above_hull when sort_by_e_above_hull=True
Fe_entries = self.rester.get_entries("Fe", sort_by_e_above_hull=True)
self.assertEqual(Fe_entries[0].data["e_above_hull"], 0)
def test_get_pourbaix_entries(self):
# test input chemsys as a list of elements
pbx_entries = self.rester.get_pourbaix_entries(["Fe", "Cr"])
for pbx_entry in pbx_entries:
self.assertTrue(isinstance(pbx_entry, PourbaixEntry))
# test input chemsys as a string
pbx_entries = self.rester.get_pourbaix_entries("Fe-Cr")
for pbx_entry in pbx_entries:
self.assertTrue(isinstance(pbx_entry, PourbaixEntry))
# fe_two_plus = [e for e in pbx_entries if e.entry_id == "ion-0"][0]
# self.assertAlmostEqual(fe_two_plus.energy, -1.12369, places=3)
#
# feo2 = [e for e in pbx_entries if e.entry_id == "mp-25332"][0]
# self.assertAlmostEqual(feo2.energy, 3.56356, places=3)
#
# # Test S, which has Na in reference solids
# pbx_entries = self.rester.get_pourbaix_entries(["S"])
# so4_two_minus = pbx_entries[9]
# self.assertAlmostEqual(so4_two_minus.energy, 0.301511, places=3)
# Ensure entries are Pourbaix compatible
PourbaixDiagram(pbx_entries)
def test_get_exp_entry(self):
entry = self.rester.get_exp_entry("Fe2O3")
self.assertEqual(entry.energy, -825.5)
# def test_submit_query_delete_snl(self):
# s = Structure([[5, 0, 0], [0, 5, 0], [0, 0, 5]], ["Fe"], [[0, 0, 0]])
# d = self.rester.submit_snl(
# [s, s], remarks=["unittest"],
# authors="Test User <[email protected]>")
# self.assertEqual(len(d), 2)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 2)
# snlids = [d["_id"] for d in data]
# self.rester.delete_snl(snlids)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 0)
def test_get_stability(self):
entries = self.rester.get_entries_in_chemsys(["Fe", "O"])
modified_entries = []
for entry in entries:
# Create modified entries with energies that are 0.01eV higher
# than the corresponding entries.
if entry.composition.reduced_formula == "Fe2O3":
modified_entries.append(
ComputedEntry(
entry.composition,
entry.uncorrected_energy + 0.01,
parameters=entry.parameters,
entry_id=f"mod_{entry.entry_id}",
)
)
rest_ehulls = self.rester.get_stability(modified_entries)
all_entries = entries + modified_entries
compat = MaterialsProject2020Compatibility()
all_entries = compat.process_entries(all_entries)
pd = PhaseDiagram(all_entries)
for e in all_entries:
if str(e.entry_id).startswith("mod"):
for d in rest_ehulls:
if d["entry_id"] == e.entry_id:
data = d
break
self.assertAlmostEqual(pd.get_e_above_hull(e), data["e_above_hull"])
def test_get_reaction(self):
rxn = self.rester.get_reaction(["Li", "O"], ["Li2O"])
self.assertIn("Li2O", rxn["Experimental_references"])
def test_get_substrates(self):
substrate_data = self.rester.get_substrates("mp-123", 5, [1, 0, 0])
substrates = [sub_dict["sub_id"] for sub_dict in substrate_data]
self.assertIn("mp-2534", substrates)
def test_get_surface_data(self):
data = self.rester.get_surface_data("mp-126") # Pt
one_surf = self.rester.get_surface_data("mp-129", miller_index=[-2, -3, 1])
self.assertAlmostEqual(one_surf["surface_energy"], 2.99156963, places=2)
self.assertArrayAlmostEqual(one_surf["miller_index"], [3, 2, 1])
self.assertIn("surfaces", data)
surfaces = data["surfaces"]
self.assertTrue(len(surfaces) > 0)
surface = surfaces.pop()
self.assertIn("miller_index", surface)
self.assertIn("surface_energy", surface)
self.assertIn("is_reconstructed", surface)
data_inc = self.rester.get_surface_data("mp-126", inc_structures=True)
self.assertIn("structure", data_inc["surfaces"][0])
def test_get_wulff_shape(self):
ws = self.rester.get_wulff_shape("mp-126")
self.assertTrue(isinstance(ws, WulffShape))
def test_get_cohesive_energy(self):
ecoh = self.rester.get_cohesive_energy("mp-13")
self.assertTrue(ecoh, 5.04543279)
def test_get_gb_data(self):
mo_gbs = self.rester.get_gb_data(chemsys="Mo")
self.assertEqual(len(mo_gbs), 10)
mo_gbs_s5 = self.rester.get_gb_data(pretty_formula="Mo", sigma=5)
self.assertEqual(len(mo_gbs_s5), 3)
mo_s3_112 = self.rester.get_gb_data(
material_id="mp-129",
sigma=3,
gb_plane=[1, -1, -2],
include_work_of_separation=True,
)
self.assertEqual(len(mo_s3_112), 1)
gb_f = mo_s3_112[0]["final_structure"]
self.assertArrayAlmostEqual(gb_f.rotation_axis, [1, 1, 0])
self.assertAlmostEqual(gb_f.rotation_angle, 109.47122, places=4)
self.assertAlmostEqual(mo_s3_112[0]["gb_energy"], 0.47965, places=2)
self.assertAlmostEqual(mo_s3_112[0]["work_of_separation"], 6.318144, places=2)
self.assertIn("Mo24", gb_f.formula)
hcp_s7 = self.rester.get_gb_data(material_id="mp-87", gb_plane=[0, 0, 0, 1], include_work_of_separation=True)
self.assertAlmostEqual(hcp_s7[0]["gb_energy"], 1.12, places=2)
self.assertAlmostEqual(hcp_s7[0]["work_of_separation"], 2.47, places=2)
def test_get_interface_reactions(self):
kinks = self.rester.get_interface_reactions("LiCoO2", "Li3PS4")
self.assertTrue(len(kinks) > 0)
kink = kinks[0]
self.assertIn("energy", kink)
self.assertIn("ratio_atomic", kink)
self.assertIn("rxn", kink)
self.assertTrue(isinstance(kink["rxn"], Reaction))
kinks_open_O = self.rester.get_interface_reactions("LiCoO2", "Li3PS4", open_el="O", relative_mu=-1)
self.assertTrue(len(kinks_open_O) > 0)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings("always", message="The reactant.+")
self.rester.get_interface_reactions("LiCoO2", "MnO9")
self.assertTrue("The reactant" in str(w[-1].message))
def test_download_info(self):
material_ids = ["mvc-2970"]
task_types = [TaskType.GGA_OPT, TaskType.GGAU_UNIFORM]
file_patterns = ["vasprun*", "OUTCAR*"]
meta, urls = self.rester.get_download_info(material_ids, task_types=task_types, file_patterns=file_patterns)
self.assertDictEqual(
dict(meta),
{
"mvc-2970": [{"task_id": "mp-1738602", "task_type": "GGA+U NSCF Uniform"}],
},
)
self.assertEqual(
urls[0],
"https://nomad-lab.eu/prod/rae/api/raw/query?file_pattern=vasprun*&file_pattern=OUTCAR*&external_id=mp"
"-1738602",
)
def test_parse_criteria(self):
crit = MPRester.parse_criteria("mp-1234 Li-*")
self.assertIn("Li-O", crit["$or"][1]["chemsys"]["$in"])
self.assertIn({"task_id": "mp-1234"}, crit["$or"])
crit = MPRester.parse_criteria("Li2*")
self.assertIn("Li2O", crit["pretty_formula"]["$in"])
self.assertIn("Li2I", crit["pretty_formula"]["$in"])
self.assertIn("CsLi2", crit["pretty_formula"]["$in"])
crit = MPRester.parse_criteria("Li-*-*")
self.assertIn("Li-Re-Ru", crit["chemsys"]["$in"])
self.assertNotIn("Li-Li", crit["chemsys"]["$in"])
comps = MPRester.parse_criteria("**O3")["pretty_formula"]["$in"]
for c in comps:
self.assertEqual(len(Composition(c)), 3, f"Failed in {c}")
chemsys = MPRester.parse_criteria("{Fe,Mn}-O")["chemsys"]["$in"]
self.assertEqual(len(chemsys), 2)
comps = MPRester.parse_criteria("{Fe,Mn,Co}O")["pretty_formula"]["$in"]
self.assertEqual(len(comps), 3, comps)
# Let's test some invalid symbols
self.assertRaises(ValueError, MPRester.parse_criteria, "li-fe")
self.assertRaises(ValueError, MPRester.parse_criteria, "LO2")
crit = MPRester.parse_criteria("POPO2")
self.assertIn("P2O3", crit["pretty_formula"]["$in"])
def test_include_user_agent(self):
headers = self.rester.session.headers
self.assertIn("user-agent", headers, msg="Include user-agent header by default")
m = re.match(
r"pymatgen/(\d+)\.(\d+)\.(\d+)\.?(\d+)? \(Python/(\d+)\.(\d)+\.(\d+) ([^\/]*)/([^\)]*)\)",
headers["user-agent"],
)
self.assertIsNotNone(m, msg=f"Unexpected user-agent value {headers['user-agent']}")
self.rester = MPRester(include_user_agent=False)
self.assertNotIn("user-agent", self.rester.session.headers, msg="user-agent header unwanted")
def test_database_version(self):
with MPRester(notify_db_version=True) as mpr:
db_version = mpr.get_database_version()
self.assertIsInstance(db_version, str)
yaml = YAML()
with open(MP_LOG_FILE) as f:
d = yaml.load(f)
self.assertEqual(d["MAPI_DB_VERSION"]["LAST_ACCESSED"], db_version)
self.assertIsInstance(d["MAPI_DB_VERSION"]["LOG"][db_version], int)
def test_pourbaix_heavy(self):
entries = self.rester.get_pourbaix_entries(["Li", "Mg", "Sn", "Pd"])
_ = PourbaixDiagram(entries, nproc=4, filter_solids=False)
entries = self.rester.get_pourbaix_entries(["Ba", "Ca", "V", "Cu", "F"])
_ = PourbaixDiagram(entries, nproc=4, filter_solids=False)
entries = self.rester.get_pourbaix_entries(["Ba", "Ca", "V", "Cu", "F", "Fe"])
_ = PourbaixDiagram(entries, nproc=4, filter_solids=False)
entries = self.rester.get_pourbaix_entries(["Na", "Ca", "Nd", "Y", "Ho", "F"])
_ = PourbaixDiagram(entries, nproc=4, filter_solids=False)
def test_pourbaix_mpr_pipeline(self):
data = self.rester.get_pourbaix_entries(["Zn"])
pbx = PourbaixDiagram(data, filter_solids=True, conc_dict={"Zn": 1e-8})
pbx.find_stable_entry(10, 0)
data = self.rester.get_pourbaix_entries(["Ag", "Te"])
pbx = PourbaixDiagram(data, filter_solids=True, conc_dict={"Ag": 1e-8, "Te": 1e-8})
self.assertEqual(len(pbx.stable_entries), 29)
test_entry = pbx.find_stable_entry(8, 2)
self.assertEqual(sorted(test_entry.entry_id), ["ion-10", "mp-499"])
# Test against ion sets with multiple equivalent ions (Bi-V regression)
entries = self.rester.get_pourbaix_entries(["Bi", "V"])
pbx = PourbaixDiagram(entries, filter_solids=True, conc_dict={"Bi": 1e-8, "V": 1e-8})
self.assertTrue(all(["Bi" in entry.composition and "V" in entry.composition for entry in pbx.all_entries]))
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "e42b68c0501893faf161d4fd95c7ffe4",
"timestamp": "",
"source": "github",
"line_count": 519,
"max_line_length": 120,
"avg_line_length": 44.14643545279383,
"alnum_prop": 0.6158781424581006,
"repo_name": "gVallverdu/pymatgen",
"id": "4668511dca2f18610de127d7bf88a0ac8490fb05",
"size": "23005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/ext/tests/test_matproj.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "87"
},
{
"name": "CSS",
"bytes": "7572"
},
{
"name": "Cython",
"bytes": "39738"
},
{
"name": "HTML",
"bytes": "12642493"
},
{
"name": "OpenEdge ABL",
"bytes": "312"
},
{
"name": "Python",
"bytes": "9200904"
},
{
"name": "Roff",
"bytes": "1407429"
},
{
"name": "Shell",
"bytes": "12027"
}
],
"symlink_target": ""
} |
"""
使用协程实现生产者-消费者的例子
"""
import asyncio
import random
async def consumer(queue, id):
while True:
val = await queue.get()
print('{} get a val: {}'.format(id, val))
await asyncio.sleep(1)
async def producer(queue, id):
for i in range(5):
val = random.randint(1, 10)
await queue.put(val)
print('{} put a val: {}'.format(id, val))
await asyncio.sleep(1)
async def main():
queue = asyncio.Queue()
consumer_1 = asyncio.create_task(consumer(queue, 'consumer_1'))
consumer_2 = asyncio.create_task(consumer(queue, 'consumer_2'))
producer_1 = asyncio.create_task(producer(queue, 'producer_1'))
producer_2 = asyncio.create_task(producer(queue, 'producer_2'))
await asyncio.sleep(10)
consumer_1.cancel()
consumer_2.cancel()
await asyncio.gather(consumer_1, consumer_2, producer_1, producer_2, return_exceptions=True)
asyncio.run(main())
########## 输出 ##########
| {
"content_hash": "7b996daefead394bc3b25de2bc2a638a",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 96,
"avg_line_length": 22.952380952380953,
"alnum_prop": 0.6224066390041494,
"repo_name": "yidao620c/core-python",
"id": "221c6d69b86d5c82c8e760d54d81138f526aa85e",
"size": "1024",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ch11asyncio/my_producer_consumer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "132153"
}
],
"symlink_target": ""
} |
import unittest
import tkinter
from tkinter import font
from test.support import requires, run_unittest
from tkinter.test.support import AbstractTkTest
requires('gui')
fontname = "TkDefaultFont"
class FontTest(AbstractTkTest, unittest.TestCase):
@classmethod
def setUpClass(cls):
AbstractTkTest.setUpClass()
try:
cls.font = font.Font(root=cls.root, name=fontname, exists=True)
except tkinter.TclError:
cls.font = font.Font(root=cls.root, name=fontname, exists=False)
def test_configure(self):
options = self.font.configure()
self.assertGreaterEqual(set(options),
{'family', 'size', 'weight', 'slant', 'underline', 'overstrike'})
for key in options:
self.assertEqual(self.font.cget(key), options[key])
self.assertEqual(self.font[key], options[key])
for key in 'family', 'weight', 'slant':
self.assertIsInstance(options[key], str)
self.assertIsInstance(self.font.cget(key), str)
self.assertIsInstance(self.font[key], str)
sizetype = int if self.wantobjects else str
for key in 'size', 'underline', 'overstrike':
self.assertIsInstance(options[key], sizetype)
self.assertIsInstance(self.font.cget(key), sizetype)
self.assertIsInstance(self.font[key], sizetype)
def test_actual(self):
options = self.font.actual()
self.assertGreaterEqual(set(options),
{'family', 'size', 'weight', 'slant', 'underline', 'overstrike'})
for key in options:
self.assertEqual(self.font.actual(key), options[key])
for key in 'family', 'weight', 'slant':
self.assertIsInstance(options[key], str)
self.assertIsInstance(self.font.actual(key), str)
sizetype = int if self.wantobjects else str
for key in 'size', 'underline', 'overstrike':
self.assertIsInstance(options[key], sizetype)
self.assertIsInstance(self.font.actual(key), sizetype)
def test_name(self):
self.assertEqual(self.font.name, fontname)
self.assertEqual(str(self.font), fontname)
def test_eq(self):
font1 = font.Font(root=self.root, name=fontname, exists=True)
font2 = font.Font(root=self.root, name=fontname, exists=True)
self.assertIsNot(font1, font2)
self.assertEqual(font1, font2)
self.assertNotEqual(font1, font1.copy())
self.assertNotEqual(font1, 0)
def test_measure(self):
self.assertIsInstance(self.font.measure('abc'), int)
def test_metrics(self):
metrics = self.font.metrics()
self.assertGreaterEqual(set(metrics),
{'ascent', 'descent', 'linespace', 'fixed'})
for key in metrics:
self.assertEqual(self.font.metrics(key), metrics[key])
self.assertIsInstance(metrics[key], int)
self.assertIsInstance(self.font.metrics(key), int)
def test_families(self):
families = font.families(self.root)
self.assertIn(self.font.actual('family'), families)
def test_names(self):
names = font.names(self.root)
self.assertIn(fontname, names)
tests_gui = (FontTest, )
if __name__ == "__main__":
run_unittest(*tests_gui)
| {
"content_hash": "a7216e67782f723318dd87b8420f1f6c",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 77,
"avg_line_length": 37.97701149425287,
"alnum_prop": 0.6343825665859564,
"repo_name": "juanyaw/python",
"id": "ba3b9da498b770c28fef60f2032fd52f9c624783",
"size": "3304",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "cpython/Lib/tkinter/test/test_tkinter/test_font.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "470920"
},
{
"name": "Batchfile",
"bytes": "35551"
},
{
"name": "C",
"bytes": "16518323"
},
{
"name": "C#",
"bytes": "1231"
},
{
"name": "C++",
"bytes": "343272"
},
{
"name": "CSS",
"bytes": "2839"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "Groff",
"bytes": "254942"
},
{
"name": "HTML",
"bytes": "130698"
},
{
"name": "JavaScript",
"bytes": "10616"
},
{
"name": "Makefile",
"bytes": "25026"
},
{
"name": "Objective-C",
"bytes": "1390263"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "1420"
},
{
"name": "Prolog",
"bytes": "557"
},
{
"name": "Python",
"bytes": "24911704"
},
{
"name": "R",
"bytes": "5378"
},
{
"name": "Shell",
"bytes": "437386"
},
{
"name": "TeX",
"bytes": "323102"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from functools import partial
from django.template import RequestContext
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
from django import template
from django.utils.html import escape
from couchdbkit.exceptions import ResourceNotFound
from corehq import privileges
from corehq.apps.cloudcare import CLOUDCARE_DEVICE_ID
from corehq.apps.hqwebapp.templatetags.hq_shared_tags import toggle_enabled
from corehq.apps.receiverwrapper.auth import AuthContext
from corehq.apps.hqwebapp.doc_info import get_doc_info_by_id, DocInfo
from corehq.apps.locations.permissions import can_edit_form_location
from corehq.apps.reports.formdetails.readable import get_readable_data_for_submission
from corehq import toggles
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.util.timezones.utils import get_timezone_for_request
from corehq.util.xml_utils import indent_xml
from casexml.apps.case.xform import extract_case_blocks
from casexml.apps.case import const
from casexml.apps.case.templatetags.case_tags import case_inline_display
from corehq.apps.hqwebapp.templatetags.proptable_tags import (
get_tables_as_columns, get_default_definition)
from dimagi.utils.parsing import json_format_datetime
from django_prbac.utils import has_privilege
register = template.Library()
@register.simple_tag
def render_form_xml(form):
xml = form.get_xml()
if isinstance(xml, unicode):
xml.encode('utf-8', errors='replace')
formatted_xml = indent_xml(xml) if xml else ''
return '<pre class="fancy-code prettyprint linenums"><code class="no-border language-xml">%s</code></pre>' \
% escape(formatted_xml)
def sorted_case_update_keys(keys):
"""Put common @ attributes at the bottom"""
return sorted(keys, key=lambda k: (k[0] == '@', k))
def sorted_form_metadata_keys(keys):
def mycmp(x, y):
foo = ('timeStart', 'timeEnd')
bar = ('username', 'userID')
if x in foo and y in foo:
return -1 if foo.index(x) == 0 else 1
elif x in foo or y in foo:
return 0
if x in bar and y in bar:
return -1 if bar.index(x) == 0 else 1
elif x in bar and y in bar:
return 0
return cmp(x, y)
return sorted(keys, cmp=mycmp)
@register.simple_tag
def render_form(form, domain, options):
"""
Uses options since Django 1.3 doesn't seem to support templatetag kwargs.
Change to kwargs when we're on a version of Django that does.
"""
timezone = get_timezone_for_request()
case_id = options.get('case_id')
side_pane = options.get('side_pane', False)
user = options.get('user', None)
request = options.get('request', None)
support_enabled = toggle_enabled(request, toggles.SUPPORT)
_get_tables_as_columns = partial(get_tables_as_columns, timezone=timezone)
# Form Data tab
form_data, question_list_not_found = get_readable_data_for_submission(form)
# Case Changes tab
case_blocks = extract_case_blocks(form)
for i, block in enumerate(list(case_blocks)):
if case_id and block.get(const.CASE_ATTR_ID) == case_id:
case_blocks.pop(i)
case_blocks.insert(0, block)
cases = []
for b in case_blocks:
this_case_id = b.get(const.CASE_ATTR_ID)
try:
this_case = CaseAccessors(domain).get_case(this_case_id) if this_case_id else None
valid_case = True
except ResourceNotFound:
this_case = None
valid_case = False
if this_case and this_case.case_id:
url = reverse('case_details', args=[domain, this_case.case_id])
else:
url = "#"
definition = get_default_definition(
sorted_case_update_keys(b.keys()),
assume_phonetimes=(not form.metadata or
(form.metadata.deviceID != CLOUDCARE_DEVICE_ID)),
)
cases.append({
"is_current_case": case_id and this_case_id == case_id,
"name": case_inline_display(this_case),
"table": _get_tables_as_columns(b, definition),
"url": url,
"valid_case": valid_case
})
# Form Metadata tab
meta = _top_level_tags(form).get('meta', None) or {}
meta['received_on'] = json_format_datetime(form.received_on)
if support_enabled:
meta['last_sync_token'] = form.last_sync_token
definition = get_default_definition(sorted_form_metadata_keys(meta.keys()))
form_meta_data = _get_tables_as_columns(meta, definition)
if getattr(form, 'auth_context', None):
auth_context = AuthContext(form.auth_context)
auth_context_user_id = auth_context.user_id
auth_user_info = get_doc_info_by_id(domain, auth_context_user_id)
else:
auth_user_info = get_doc_info_by_id(domain, None)
auth_context = AuthContext(
user_id=None,
authenticated=False,
domain=domain,
)
meta_userID = meta.get('userID')
meta_username = meta.get('username')
if meta_userID == 'demo_user':
user_info = DocInfo(
domain=domain,
display='demo_user',
)
elif meta_username == 'admin':
user_info = DocInfo(
domain=domain,
display='admin',
)
else:
user_info = get_doc_info_by_id(domain, meta_userID)
user_can_edit = (
request and user and request.domain
and (user.can_edit_data() or user.is_commcare_user())
)
show_edit_options = (
user_can_edit
and can_edit_form_location(domain, user, form)
)
show_edit_submission = (
user_can_edit
and has_privilege(request, privileges.DATA_CLEANUP)
and not form.is_deprecated
)
show_resave = (
user_can_edit and support_enabled
)
def _get_edit_info(instance):
info = {
'was_edited': False,
'is_edit': False,
}
if instance.is_deprecated:
info.update({
'was_edited': True,
'latest_version': instance.orig_id,
})
if getattr(instance, 'edited_on', None) and getattr(instance, 'deprecated_form_id', None):
info.update({
'is_edit': True,
'edited_on': instance.edited_on,
'previous_version': instance.deprecated_form_id
})
return info
return render_to_string("reports/form/partials/single_form.html", {
"context_case_id": case_id,
"instance": form,
"is_archived": form.is_archived,
"edit_info": _get_edit_info(form),
"domain": domain,
'question_list_not_found': question_list_not_found,
"form_data": form_data,
"cases": cases,
"form_table_options": {
# todo: wells if display config has more than one column
"put_loners_in_wells": False
},
"form_meta_data": form_meta_data,
"auth_context": auth_context,
"auth_user_info": auth_user_info,
"user_info": user_info,
"side_pane": side_pane,
"show_edit_options": show_edit_options,
"show_edit_submission": show_edit_submission,
"show_resave": show_resave,
}, RequestContext(request))
def _top_level_tags(form):
"""
Returns a OrderedDict of the top level tags found in the xml, in the
order they are found.
"""
to_return = OrderedDict()
element = form.get_xml_element()
if not element:
return OrderedDict(sorted(form.form_data.items()))
for child in element:
# fix {namespace}tag format forced by ElementTree in certain cases (eg, <reg> instead of <n0:reg>)
key = child.tag.split('}')[1] if child.tag.startswith("{") else child.tag
if key == "Meta":
key = "meta"
to_return[key] = form.get_data('form/' + key)
return to_return
| {
"content_hash": "1474bee07ded36cd98866ac588d3e748",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 112,
"avg_line_length": 34.50212765957447,
"alnum_prop": 0.6185249136655155,
"repo_name": "qedsoftware/commcare-hq",
"id": "7575fd3756a6ea46781f550847a093de5365ef4b",
"size": "8108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/reports/templatetags/xform_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
"""
:mod:`packet_base` --- Packet base class
========================================
"""
# Stdlib
import struct
from abc import ABCMeta, abstractmethod
# External
import capnp
# SCION
import proto.scion_capnp as P
from lib.errors import SCIONParseError
from lib.util import hex_str
class Serializable(object, metaclass=ABCMeta): # pragma: no cover
"""
Base class for all objects which serialize into raw bytes.
"""
def __init__(self, raw=None):
if raw:
self._parse(raw)
@abstractmethod
def _parse(self, raw):
raise NotImplementedError
@abstractmethod
def from_values(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def pack(self):
raise NotImplementedError
@abstractmethod
def __len__(self):
raise NotImplementedError
@abstractmethod
def __str__(self):
raise NotImplementedError
class Cerealizable(object, metaclass=ABCMeta):
"""
Base class for all objects which serialize to Cap'n Proto.
Each subclass needs to specify a class attribute for the corresponding
proto file (P) and the proto message name (P_CLS), e.g.,
P = capnp.load("proto/foo.capnp")
P_CLS = P.Foo
"""
def __init__(self, p):
assert not isinstance(p, bytes)
self.p = p
self._packed = False
@classmethod
def from_raw(cls, raw):
assert isinstance(raw, bytes), type(raw)
try:
return cls(cls.P_CLS.from_bytes_packed(raw).as_builder())
except capnp.lib.capnp.KjException as e:
raise SCIONParseError("Unable to parse %s capnp message: %s" %
(cls, e)) from None
@classmethod
def from_raw_multiple(cls, raw):
assert isinstance(raw, bytes), type(raw)
try:
for p in cls.P_CLS.read_multiple_bytes_packed(raw):
yield cls(p.as_builder())
except capnp.lib.capnp.KjException as e:
raise SCIONParseError("Unable to parse %s capnp message: %s" %
(cls, e)) from None
@abstractmethod
def from_values(self, *args, **kwargs):
raise NotImplementedError
@classmethod
def from_dict(cls, d):
return cls(cls.P_CLS.new_message(**d))
def to_dict(self):
return self.p.to_dict()
def pack(self, *args, **kwargs):
assert not self._packed, "May only be packed once"
self._packed = True
return self._pack(*args, **kwargs)
def _pack(self):
return self.p.to_bytes_packed()
def __bool__(self):
return True
def __len__(self):
raise NotImplementedError
def copy(self):
return type(self)(self.p.copy())
def __copy__(self):
return type(self)(self.p.copy())
def __deepcopy__(self, memo):
# http://stackoverflow.com/a/15774013
inst = type(self)(self.p.copy())
memo[id(self)] = inst
return inst
def __eq__(self, other): # pragma: no cover
raise NotImplementedError
def short_desc(self):
return str(self.p)
def __str__(self):
return "%s: %s" % (self.NAME, self.short_desc())
class L4HeaderBase(Serializable, metaclass=ABCMeta): # pragma: no cover
"""
Base class for L4 headers.
"""
TYPE = None
def pack(self, payload, checksum=None):
self.total_len = self.LEN + len(payload)
if checksum is None:
checksum = self._calc_checksum(payload)
return self._pack(checksum)
@abstractmethod
def validate(self, payload):
raise NotImplementedError
class PacketBase(Serializable): # pragma: no cover
"""
Base class for packets.
"""
def __init__(self, raw=None):
"""
Initialize an instance of the class PacketBase.
"""
self._payload = b""
super().__init__(raw)
def get_payload(self):
return self._payload
def set_payload(self, new_payload):
assert isinstance(new_payload, (PayloadBase, SCIONPayloadBaseProto))
self._payload = new_payload
class PayloadBase(Serializable): # pragma: no cover
"""
Interface that payloads of packets must implement.
"""
METADATA_LEN = 0
def pack_meta(self):
return b""
def pack_full(self):
return self.pack_meta() + self.pack()
def total_len(self):
return self.METADATA_LEN + len(self)
class PayloadRaw(PayloadBase): # pragma: no cover
SNIPPET_LEN = 32
def __init__(self, raw=None):
self._raw = b""
super().__init__(raw)
def _parse(self, raw):
self._raw = raw or b""
def from_values(cls, raw):
assert isinstance(raw, bytes)
inst = cls()
inst._raw = raw
return inst
def pack(self):
return self._raw
def __eq__(self, other):
return self._raw == other._raw
def __len__(self):
return len(self._raw)
def __str__(self):
s = "PayloadRaw(%dB): %s" % (len(self._raw),
hex_str(self._raw[:self.SNIPPET_LEN]))
if len(self._raw) > self.SNIPPET_LEN:
s += "[...]"
return s
class SCIONPayloadBaseProto(Cerealizable): # pragma: no cover
"""
All child classes must define the PAYLOAD_CLASS attributed, defined by
lib.types.PayloadClass
"""
# 4B length prepended to the capnp block
METADATA_LEN = 4
PAYLOAD_TYPE = None
def pack_full(self):
assert not self._packed, "May only be packed once"
self._packed = True
return self._pack_full(self.p)
def _pack_full(self, p):
wrapper = P.SCION.new_message(**{self.PAYLOAD_CLASS: p})
raw = wrapper.to_bytes_packed()
meta = struct.pack("!I", len(raw))
return meta + raw
| {
"content_hash": "f5a735a1ca978d117f733e41ad526930",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 76,
"avg_line_length": 25.393939393939394,
"alnum_prop": 0.5801227412205933,
"repo_name": "dmpiergiacomo/scion",
"id": "9d34e04667e9463e13cef90384d7417059ea01ff",
"size": "6438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/lib/packet/packet_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "182811"
},
{
"name": "C++",
"bytes": "222397"
},
{
"name": "Cap'n Proto",
"bytes": "9737"
},
{
"name": "Go",
"bytes": "341602"
},
{
"name": "Makefile",
"bytes": "19041"
},
{
"name": "Python",
"bytes": "1396489"
},
{
"name": "Ruby",
"bytes": "2520"
},
{
"name": "Shell",
"bytes": "17290"
}
],
"symlink_target": ""
} |
import ibis
from ibis.compat import unittest
from ibis.expr.tests.mocks import BasicTestCase
import ibis.expr.analysis as L
import ibis.expr.operations as ops
import ibis.common as com
from ibis.tests.util import assert_equal
# Place to collect esoteric expression analysis bugs and tests
class TestTableExprBasics(BasicTestCase, unittest.TestCase):
def test_rewrite_substitute_distinct_tables(self):
t = self.con.table('test1')
tt = self.con.table('test1')
expr = t[t.c > 0]
expr2 = tt[tt.c > 0]
metric = t.f.sum().name('metric')
expr3 = expr.aggregate(metric)
result = L.sub_for(expr3, [(expr2, t)])
expected = t.aggregate(metric)
assert_equal(result, expected)
def test_rewrite_join_projection_without_other_ops(self):
# See #790, predicate pushdown in joins not supported
# Star schema with fact table
table = self.con.table('star1')
table2 = self.con.table('star2')
table3 = self.con.table('star3')
filtered = table[table['f'] > 0]
pred1 = table['foo_id'] == table2['foo_id']
pred2 = filtered['bar_id'] == table3['bar_id']
j1 = filtered.left_join(table2, [pred1])
j2 = j1.inner_join(table3, [pred2])
# Project out the desired fields
view = j2[[filtered, table2['value1'], table3['value2']]]
# Construct the thing we expect to obtain
ex_pred2 = table['bar_id'] == table3['bar_id']
ex_expr = (table.left_join(table2, [pred1])
.inner_join(table3, [ex_pred2]))
rewritten_proj = L.substitute_parents(view)
op = rewritten_proj.op()
assert not op.table.equals(ex_expr)
def test_rewrite_past_projection(self):
table = self.con.table('test1')
# Rewrite past a projection
table3 = table[['c', 'f']]
expr = table3['c'] == 2
result = L.substitute_parents(expr)
expected = table['c'] == 2
assert_equal(result, expected)
# Unsafe to rewrite past projection
table5 = table[(table.f * 2).name('c'), table.f]
expr = table5['c'] == 2
result = L.substitute_parents(expr)
assert result is expr
def test_multiple_join_deeper_reference(self):
# Join predicates down the chain might reference one or more root
# tables in the hierarchy.
table1 = ibis.table({'key1': 'string', 'key2': 'string',
'value1': 'double'})
table2 = ibis.table({'key3': 'string', 'value2': 'double'})
table3 = ibis.table({'key4': 'string', 'value3': 'double'})
joined = table1.inner_join(table2, [table1['key1'] == table2['key3']])
joined2 = joined.inner_join(table3, [table1['key2'] == table3['key4']])
# it works, what more should we test here?
materialized = joined2.materialize()
repr(materialized)
def test_filter_on_projected_field(self):
# See #173. Impala and other SQL engines do not allow filtering on a
# just-created alias in a projection
region = self.con.table('tpch_region')
nation = self.con.table('tpch_nation')
customer = self.con.table('tpch_customer')
orders = self.con.table('tpch_orders')
fields_of_interest = [customer,
region.r_name.name('region'),
orders.o_totalprice.name('amount'),
orders.o_orderdate
.cast('timestamp').name('odate')]
all_join = (
region.join(nation, region.r_regionkey == nation.n_regionkey)
.join(customer, customer.c_nationkey == nation.n_nationkey)
.join(orders, orders.o_custkey == customer.c_custkey))
tpch = all_join[fields_of_interest]
# Correlated subquery, yikes!
t2 = tpch.view()
conditional_avg = t2[(t2.region == tpch.region)].amount.mean()
# `amount` is part of the projection above as an aliased field
amount_filter = tpch.amount > conditional_avg
result = tpch.filter([amount_filter])
# Now then! Predicate pushdown here is inappropriate, so we check that
# it didn't occur.
assert isinstance(result.op(), ops.Selection)
assert result.op().table is tpch
def test_bad_join_predicate_raises(self):
# Join predicate references a derived table, but we can salvage and
# rewrite it to get the join semantics out
# see ibis #74
table = ibis.table([
('c', 'int32'),
('f', 'double'),
('g', 'string')
], 'foo_table')
table2 = ibis.table([
('key', 'string'),
('value', 'double')
], 'bar_table')
filter_pred = table['f'] > 0
table3 = table[filter_pred]
with self.assertRaises(com.ExpressionError):
table.inner_join(table2, [table3['g'] == table2['key']])
# expected = table.inner_join(table2, [table['g'] == table2['key']])
# assert_equal(result, expected)
def test_filter_self_join(self):
# GH #667
purchases = ibis.table([('region', 'string'),
('kind', 'string'),
('user', 'int64'),
('amount', 'double')], 'purchases')
metric = purchases.amount.sum().name('total')
agged = (purchases.group_by(['region', 'kind'])
.aggregate(metric))
left = agged[agged.kind == 'foo']
right = agged[agged.kind == 'bar']
cond = left.region == right.region
joined = left.join(right, cond)
# unmodified by analysis
assert_equal(joined.op().predicates[0], cond)
metric = (left.total - right.total).name('diff')
what = [left.region, metric]
projected = joined.projection(what)
proj_exprs = projected.op().selections
# proj exprs unaffected by analysis
assert_equal(proj_exprs[0], left.region)
assert_equal(proj_exprs[1], metric)
# def test_fuse_filter_projection(self):
# data = ibis.table([('kind', 'string'),
# ('year', 'int64')], 'data')
# pred = data.year == 2010
# result = data.projection(['kind'])[pred]
# expected = data.filter(pred).kind
# assert isinstance(result, ops.Selection)
# assert result.equals(expected)
def test_fuse_projection_sort_by(self):
pass
def test_fuse_filter_sort_by(self):
pass
# Refactoring deadpool
def test_no_rewrite(self):
table = self.con.table('test1')
# Substitution not fully possible if we depend on a new expr in a
# projection
table4 = table[['c', (table['c'] * 2).name('foo')]]
expr = table4['c'] == table4['foo']
result = L.substitute_parents(expr)
expected = table['c'] == table4['foo']
assert_equal(result, expected)
# def test_projection_with_join_pushdown_rewrite_refs(self):
# # Observed this expression IR issue in a TopK-rewrite context
# table1 = ibis.table([
# ('a_key1', 'string'),
# ('a_key2', 'string'),
# ('a_value', 'double')
# ], 'foo')
# table2 = ibis.table([
# ('b_key1', 'string'),
# ('b_name', 'string'),
# ('b_value', 'double')
# ], 'bar')
# table3 = ibis.table([
# ('c_key2', 'string'),
# ('c_name', 'string')
# ], 'baz')
# proj = (table1.inner_join(table2, [('a_key1', 'b_key1')])
# .inner_join(table3, [(table1.a_key2, table3.c_key2)])
# [table1, table2.b_name.name('b'), table3.c_name.name('c'),
# table2.b_value])
# cases = [
# (proj.a_value > 0, table1.a_value > 0),
# (proj.b_value > 0, table2.b_value > 0)
# ]
# for higher_pred, lower_pred in cases:
# result = proj.filter([higher_pred])
# op = result.op()
# assert isinstance(op, ops.Selection)
# new_pred = op.predicates[0]
# assert_equal(new_pred, lower_pred)
# def test_rewrite_expr_with_parent(self):
# table = self.con.table('test1')
# table2 = table[table['f'] > 0]
# expr = table2['c'] == 2
# result = L.substitute_parents(expr)
# expected = table['c'] == 2
# assert_equal(result, expected)
# def test_rewrite_distinct_but_equal_objects(self):
# t = self.con.table('test1')
# t_copy = self.con.table('test1')
# table2 = t[t_copy['f'] > 0]
# expr = table2['c'] == 2
# result = L.substitute_parents(expr)
# expected = t['c'] == 2
# assert_equal(result, expected)
| {
"content_hash": "068a5fdbedfaa28d81256f9c7b3176b8",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 79,
"avg_line_length": 33.13703703703704,
"alnum_prop": 0.5471107633843747,
"repo_name": "mariusvniekerk/ibis",
"id": "5a3ebc02c8e88912922f7af3132b060f626226b3",
"size": "9521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibis/expr/tests/test_analysis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1730"
},
{
"name": "C",
"bytes": "3684"
},
{
"name": "C++",
"bytes": "44943"
},
{
"name": "CMake",
"bytes": "4383"
},
{
"name": "Makefile",
"bytes": "186"
},
{
"name": "Python",
"bytes": "1179745"
},
{
"name": "Shell",
"bytes": "5808"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from setuptools.command import sdist
del sdist.finders[:]
CPSVERSION = '1.2.0'
long_description = """
ConPaaS: an integrated runtime environment for elastic Cloud applications
=========================================================================
"""
setup(name='cpslib',
version=CPSVERSION,
description='ConPaaS: an integrated runtime environment for elastic Cloud applications',
author='Emanuele Rocca',
author_email='[email protected]',
url='http://www.conpaas.eu/',
download_url='http://www.conpaas.eu/download/',
license='BSD',
packages=find_packages(exclude=["*taskfarm"]),
install_requires=[ 'simplejson', 'pycurl', 'pyopenssl' ])
| {
"content_hash": "8e06832ba6e08a5f592c80d1b6a5d794",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 94,
"avg_line_length": 36.85,
"alnum_prop": 0.6268656716417911,
"repo_name": "mihaisoloi/conpaas",
"id": "4b31adc3ff039926690e2d729b33dedae3a1ecc5",
"size": "760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conpaas-services/src/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "399657"
},
{
"name": "JavaScript",
"bytes": "105439"
},
{
"name": "PHP",
"bytes": "1811901"
},
{
"name": "Python",
"bytes": "2047262"
},
{
"name": "Shell",
"bytes": "136501"
}
],
"symlink_target": ""
} |
from keystoneclient import exceptions
from keystoneclient.tests.v3 import utils
from keystoneclient.v3 import role_assignments
class RoleAssignmentsTests(utils.TestCase, utils.CrudTests):
def setUp(self):
super(RoleAssignmentsTests, self).setUp()
self.key = 'role_assignment'
self.collection_key = 'role_assignments'
self.model = role_assignments.RoleAssignment
self.manager = self.client.role_assignments
self.TEST_USER_DOMAIN_LIST = [{
'role': {
'id': self.TEST_ROLE_ID
},
'scope': {
'domain': {
'id': self.TEST_DOMAIN_ID
}
},
'user': {
'id': self.TEST_USER_ID
}
}]
self.TEST_GROUP_PROJECT_LIST = [{
'group': {
'id': self.TEST_GROUP_ID
},
'role': {
'id': self.TEST_ROLE_ID
},
'scope': {
'project': {
'id': self.TEST_TENANT_ID
}
}
}]
self.TEST_USER_PROJECT_LIST = [{
'user': {
'id': self.TEST_USER_ID
},
'role': {
'id': self.TEST_ROLE_ID
},
'scope': {
'project': {
'id': self.TEST_TENANT_ID
}
}
}]
self.TEST_ALL_RESPONSE_LIST = (self.TEST_USER_PROJECT_LIST +
self.TEST_GROUP_PROJECT_LIST +
self.TEST_USER_DOMAIN_LIST)
def _assert_returned_list(self, ref_list, returned_list):
self.assertEqual(len(ref_list), len(returned_list))
[self.assertIsInstance(r, self.model) for r in returned_list]
def test_list_params(self):
ref_list = self.TEST_USER_PROJECT_LIST
self.stub_entity('GET',
[self.collection_key,
'?scope.project.id=%s&user.id=%s' %
(self.TEST_TENANT_ID, self.TEST_USER_ID)],
entity=ref_list)
returned_list = self.manager.list(user=self.TEST_USER_ID,
project=self.TEST_TENANT_ID)
self._assert_returned_list(ref_list, returned_list)
kwargs = {'scope.project.id': self.TEST_TENANT_ID,
'user.id': self.TEST_USER_ID}
self.assertQueryStringContains(**kwargs)
def test_all_assignments_list(self):
ref_list = self.TEST_ALL_RESPONSE_LIST
self.stub_entity('GET',
[self.collection_key],
entity=ref_list)
returned_list = self.manager.list()
self._assert_returned_list(ref_list, returned_list)
kwargs = {}
self.assertQueryStringContains(**kwargs)
def test_project_assignments_list(self):
ref_list = self.TEST_GROUP_PROJECT_LIST + self.TEST_USER_PROJECT_LIST
self.stub_entity('GET',
[self.collection_key,
'?scope.project.id=%s' % self.TEST_TENANT_ID],
entity=ref_list)
returned_list = self.manager.list(project=self.TEST_TENANT_ID)
self._assert_returned_list(ref_list, returned_list)
kwargs = {'scope.project.id': self.TEST_TENANT_ID}
self.assertQueryStringContains(**kwargs)
def test_domain_assignments_list(self):
ref_list = self.TEST_USER_DOMAIN_LIST
self.stub_entity('GET',
[self.collection_key,
'?scope.domain.id=%s' % self.TEST_DOMAIN_ID],
entity=ref_list)
returned_list = self.manager.list(domain=self.TEST_DOMAIN_ID)
self._assert_returned_list(ref_list, returned_list)
kwargs = {'scope.domain.id': self.TEST_DOMAIN_ID}
self.assertQueryStringContains(**kwargs)
def test_group_assignments_list(self):
ref_list = self.TEST_GROUP_PROJECT_LIST
self.stub_entity('GET',
[self.collection_key,
'?group.id=%s' % self.TEST_GROUP_ID],
entity=ref_list)
returned_list = self.manager.list(group=self.TEST_GROUP_ID)
self._assert_returned_list(ref_list, returned_list)
kwargs = {'group.id': self.TEST_GROUP_ID}
self.assertQueryStringContains(**kwargs)
def test_user_assignments_list(self):
ref_list = self.TEST_USER_DOMAIN_LIST + self.TEST_USER_PROJECT_LIST
self.stub_entity('GET',
[self.collection_key,
'?user.id=%s' % self.TEST_USER_ID],
entity=ref_list)
returned_list = self.manager.list(user=self.TEST_USER_ID)
self._assert_returned_list(ref_list, returned_list)
kwargs = {'user.id': self.TEST_USER_ID}
self.assertQueryStringContains(**kwargs)
def test_effective_assignments_list(self):
ref_list = self.TEST_USER_PROJECT_LIST + self.TEST_USER_DOMAIN_LIST
self.stub_entity('GET',
[self.collection_key,
'?effective=True'],
entity=ref_list)
returned_list = self.manager.list(effective=True)
self._assert_returned_list(ref_list, returned_list)
kwargs = {'effective': 'True'}
self.assertQueryStringContains(**kwargs)
def test_role_assignments_list(self):
ref_list = self.TEST_ALL_RESPONSE_LIST
self.stub_entity('GET',
[self.collection_key,
'?role.id=' + self.TEST_ROLE_ID],
entity=ref_list)
returned_list = self.manager.list(role=self.TEST_ROLE_ID)
self._assert_returned_list(ref_list, returned_list)
kwargs = {'role.id': self.TEST_ROLE_ID}
self.assertQueryStringContains(**kwargs)
def test_domain_and_project_list(self):
# Should only accept either domain or project, never both
self.assertRaises(exceptions.ValidationError,
self.manager.list,
domain=self.TEST_DOMAIN_ID,
project=self.TEST_TENANT_ID)
def test_user_and_group_list(self):
# Should only accept either user or group, never both
self.assertRaises(exceptions.ValidationError, self.manager.list,
user=self.TEST_USER_ID, group=self.TEST_GROUP_ID)
def test_create(self):
# Create not supported for role assignments
self.assertRaises(exceptions.MethodNotImplemented, self.manager.create)
def test_update(self):
# Update not supported for role assignments
self.assertRaises(exceptions.MethodNotImplemented, self.manager.update)
def test_delete(self):
# Delete not supported for role assignments
self.assertRaises(exceptions.MethodNotImplemented, self.manager.delete)
def test_get(self):
# Get not supported for role assignments
self.assertRaises(exceptions.MethodNotImplemented, self.manager.get)
def test_find(self):
# Find not supported for role assignments
self.assertRaises(exceptions.MethodNotImplemented, self.manager.find)
| {
"content_hash": "712bc63ca39a98af00bdf0c4b779a90b",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 79,
"avg_line_length": 37.378787878787875,
"alnum_prop": 0.5501959194703419,
"repo_name": "alexpilotti/python-keystoneclient",
"id": "f47d9ec6096106123c8bcd21c4144686b3643b28",
"size": "7974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneclient/tests/v3/test_role_assignments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1298136"
},
{
"name": "Shell",
"bytes": "5326"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
urlpatterns = patterns(
'home.views',
url(r'^$', 'home', name='home'),
)
| {
"content_hash": "754afecd28c8e74e37130432e35970e4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 42,
"avg_line_length": 18,
"alnum_prop": 0.626984126984127,
"repo_name": "Codectivo/Incubu",
"id": "73b24b7969a56cd25041d9caec84eb401c5de1a0",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "home/urls.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8248"
},
{
"name": "JavaScript",
"bytes": "24803"
},
{
"name": "Perl",
"bytes": "48"
},
{
"name": "Python",
"bytes": "27423"
}
],
"symlink_target": ""
} |
import os
import asyncio
import discord
import logging
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from cogs.utils import checks
from __main__ import settings
from cogs.utils.chat_formatting import box, pagify
log = logging.getLogger('red.ServerWhitelist')
class ServerWhitelist:
"""
Lets a bot owner create a list of servers that the bot will immediately
leave any other server it joins. defaults to allowing
It does not require you to make the bot private"""
__author__ = "mikeshardmind"
__version__ = "0.1"
def __init__(self, bot):
self.bot = bot
self.settings = dataIO.load_json('data/serverwhitelist/settings.json')
self.whitelist = dataIO.load_json('data/serverwhitelist/list.json')
def save_json(self):
dataIO.save_json("data/serverwhitelist/settings.json", self.settings)
dataIO.save_json("data/serverwhitelist/list.json", self.whitelist)
@checks.is_owner()
@commands.group(name="serverwhitelist", pass_context=True)
async def serverwhitelist(self, ctx):
"""Manage the server whitelist
These commands will fail if not in direct message"""
if ctx.invoked_subcommand is None:
await self.bot.send_cmd_help(ctx)
@checks.is_owner()
@serverwhitelist.command(name="add", pass_context=True)
async def whitelist_server(self, ctx, server_id=None):
"""
whitelists a server by server ID
because the author is lazy,
this must be used in direct messages
"""
if ctx.message.channel.is_private:
if server_id is None:
await self.bot.say("I can't whitelist a server without the ID")
else:
if server_id not in self.whitelist:
self.whitelist[server_id] = {}
self.save_json()
await self.bot.say("Server with ID: {} "
"whitelisted.".format(server_id))
else:
await self.bot.say("That server is already "
"in the whitelist")
else:
try:
await self.bot.say("You can't use that here")
except discord.Forbidden:
log.debug("Some Dumbass tried to use me in a "
"place I couldn't repsond")
@checks.is_owner()
@serverwhitelist.command(name="remove", pass_context=True)
async def un_whitelist_server(self, ctx, server_id=None):
"""
un-whitelists a server by ID
for the sake of consistency,
this can only be used in direct messages
"""
if ctx.message.channel.is_private:
if server_id is None:
await self.bot.say("I can't remove a server from the whitelist"
" without an ID")
else:
if server_id in list(self.whitelist):
del self.whitelist[server_id]
self.save_json()
await self.bot.say("Server with ID: {} no longer "
"in whitelist".format(server_id))
else:
await self.bot.say("There wasn't a server with that ID "
"in the whitelist")
else:
try:
await self.bot.say("You can't use that here")
except discord.errors.Forbidden:
log.debug("Some Dumbass didn't RTFM and tried to use me in a "
"place I couldn't resond")
@checks.is_owner()
@serverwhitelist.command(name="list", pass_context=True)
async def fetch_whitelist(self, ctx):
"""
get a list of whitelisted server's IDs
attempts to get the corresponding name if the bot is also in a
whitelisted server
"""
if ctx.message.channel.is_private:
if len(self.whitelist) > 0:
output = "Whitelist\n"
for k, v in self.whitelist:
s = self.bot.get_server(k)
if s is None:
output += "\n{}".format(k)
else:
output += "\n{0.id} : {0.name}".format(s)
for page in pagify(output, delims=["\n", ","]):
await self.bot.say(box(page))
else:
await self.bot.say("There are no servers in the whitelist.")
else:
await self.bot.say("You can't use that here.")
@checks.is_owner()
@serverwhitelist.command(name="setmsg", pass_context=True)
async def setleaveonwhite(self, ctx, msg=None):
"""
sets (or clears) the message to send when leaving
like the rest of this cog, direct message only,
message must be enclosed in quotes
"""
if ctx.message.channel.is_private:
self.settings['msg'] = msg
self.save_json
if msg:
await self.bot.say("Message set to: \n```{}```".format(msg))
else:
await self.bot.say("Leave message disabled")
else:
await self.bot.say("You can't use that here.")
@checks.is_owner()
@serverwhitelist.command(name="runnow", pass_context=True)
async def runnow(self, ctx):
"""
processes all servers the bot is in
"""
for server in self.bot.servers:
await self.whitelist_routine(server)
async def whitelist_routine(self, server):
"""do the thing"""
if server.owner.id == settings.owner:
return # If the bot is joining your own server, you should know
if server.id not in self.whitelist:
channel = server.default_channel
if channel is None:
chan_list = [
c for c in sorted(
server.channels, key=lambda ch: ch.position
) if c.type.name == "text"
]
for ch in chan_list:
if ch.permissions_for(server.me).read_messages and \
ch.permissions_for(server.me).send_messages:
channel = ch
break
else:
log.debug("Did not have permission to leave exit message "
"for any channel in server named {0.name} "
"with ID of {0.id} ".format(server))
return
msg = self.settings.get('msg', None)
if msg:
try:
await self.bot.send_message(channel, "{}".format(msg))
except discord.Forbidden:
log.debug("Did not have permission to leave exit message "
"for server named {0.name} with ID of {0.id} "
"".format(server))
except Exception as e:
log.debug(e)
await asyncio.sleep(1)
await self.bot.leave_server(server)
log.debug("I left a server named {} with an ID of {} "
"".format(server.name, server.id))
def check_folder():
f = 'data/serverwhitelist'
if not os.path.exists(f):
os.makedirs(f)
def check_file():
f = 'data/serverwhitelist/settings.json'
if dataIO.is_valid_json(f) is False:
dataIO.save_json(f, {})
f = 'data/serverwhitelist/list.json'
if dataIO.is_valid_json(f) is False:
dataIO.save_json(f, {})
def setup(bot):
check_folder()
check_file()
n = ServerWhitelist(bot)
bot.add_listener(n.whitelist_routine, "on_server_join")
bot.add_cog(n)
| {
"content_hash": "b13f19a89b68138cf72cbcee58ddf97a",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 79,
"avg_line_length": 36.71830985915493,
"alnum_prop": 0.5350978135788262,
"repo_name": "Garcia1008/tournament",
"id": "3d141f11982f35902453aec985ac1e0f1a677ecc",
"size": "7821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "serverwhitelist/serverwhitelist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "177304"
}
],
"symlink_target": ""
} |
from proton import Message, symbol
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, unittest, TestTimeout
from proton.handlers import MessagingHandler
from proton.reactor import Container
class AddrTimer(object):
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.check_address()
class RouterTest(TestCase):
inter_router_port = None
@classmethod
def setUpClass(cls):
"""Start a router"""
super(RouterTest, cls).setUpClass()
def router(name, mode, connection, extra=None):
config = [
('router', {'mode': mode, 'id': name, "helloMaxAgeSeconds": '10'}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),
('address', {'prefix': 'queue', 'waypoint': 'yes'}),
('address', {'prefix': 'multi', 'ingressPhase': '0', 'egressPhase': '9'}),
connection
]
if extra:
config.append(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
cls.routers = []
inter_router_port = cls.tester.get_port()
edge_port_A = cls.tester.get_port()
edge_port_B = cls.tester.get_port()
router('INT.A', 'interior', ('listener', {'role': 'inter-router', 'port': inter_router_port}),
('listener', {'role': 'edge', 'port': edge_port_A}))
router('INT.B', 'interior', ('connector', {'name': 'connectorToA', 'role': 'inter-router', 'port': inter_router_port}),
('listener', {'role': 'edge', 'port': edge_port_B}))
router('EA1', 'edge', ('connector', {'name': 'edge', 'role': 'edge', 'port': edge_port_A}))
router('EA2', 'edge', ('connector', {'name': 'edge', 'role': 'edge', 'port': edge_port_A}))
router('EB1', 'edge', ('connector', {'name': 'edge', 'role': 'edge', 'port': edge_port_B}))
router('EB2', 'edge', ('connector', {'name': 'edge', 'role': 'edge', 'port': edge_port_B}))
cls.routers[0].wait_router_connected('INT.B')
cls.routers[1].wait_router_connected('INT.A')
def test_01_waypoint_same_interior(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
self.routers[0].addresses[0],
'queue.01')
test.run()
self.assertIsNone(test.error)
def test_02_waypoint_same_edge(self):
test = WaypointTest(self.routers[2].addresses[0],
self.routers[2].addresses[0],
self.routers[2].addresses[0],
'queue.02')
test.run()
self.assertIsNone(test.error)
def test_03_waypoint_edge_interior(self):
test = WaypointTest(self.routers[2].addresses[0],
self.routers[2].addresses[0],
self.routers[0].addresses[0],
'queue.03')
test.run()
self.assertIsNone(test.error)
def test_04_waypoint_interior_edge(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
self.routers[2].addresses[0],
'queue.04')
test.run()
self.assertIsNone(test.error)
def test_05_waypoint_interior_interior(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[0].addresses[0],
self.routers[1].addresses[0],
'queue.05')
test.run()
self.assertIsNone(test.error)
def test_06_waypoint_edge_edge(self):
test = WaypointTest(self.routers[2].addresses[0],
self.routers[5].addresses[0],
self.routers[0].addresses[0],
'queue.06')
test.run()
self.assertIsNone(test.error)
def test_07_waypoint_edge_endpoints_int_1(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
self.routers[2].addresses[0],
'queue.07')
test.run()
self.assertIsNone(test.error)
def test_08_waypoint_edge_endpoints_int_2(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
self.routers[5].addresses[0],
'queue.08')
test.run()
self.assertIsNone(test.error)
def test_09_waypoint_int_endpoints_edge_1(self):
test = WaypointTest(self.routers[2].addresses[0],
self.routers[5].addresses[0],
self.routers[0].addresses[0],
'queue.09')
test.run()
self.assertIsNone(test.error)
def test_10_waypoint_int_endpoints_edge_2(self):
test = WaypointTest(self.routers[2].addresses[0],
self.routers[5].addresses[0],
self.routers[1].addresses[0],
'queue.10')
test.run()
self.assertIsNone(test.error)
def test_11_waypoint_int_endpoints_int_1(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
self.routers[0].addresses[0],
'queue.11')
test.run()
self.assertIsNone(test.error)
def test_12_waypoint_int_endpoints_int_2(self):
test = WaypointTest(self.routers[0].addresses[0],
self.routers[1].addresses[0],
self.routers[1].addresses[0],
'queue.12')
test.run()
self.assertIsNone(test.error)
def test_13_waypoint_edge_endpoints_edge_1(self):
test = WaypointTest(self.routers[2].addresses[0],
self.routers[5].addresses[0],
self.routers[3].addresses[0],
'queue.13')
test.run()
self.assertIsNone(test.error)
def test_14_waypoint_edge_endpoints_edge_2(self):
test = WaypointTest(self.routers[2].addresses[0],
self.routers[5].addresses[0],
self.routers[4].addresses[0],
'queue.14')
test.run()
self.assertIsNone(test.error)
def test_15_multiphase_1(self):
test = MultiPhaseTest(self.routers[2].addresses[0],
self.routers[5].addresses[0],
[
self.routers[0].addresses[0],
self.routers[1].addresses[0],
self.routers[2].addresses[0],
self.routers[3].addresses[0],
self.routers[4].addresses[0],
self.routers[5].addresses[0],
self.routers[0].addresses[0],
self.routers[1].addresses[0],
self.routers[2].addresses[0]
],
'multi.15')
test.run()
self.assertIsNone(test.error)
def test_16_multiphase_2(self):
test = MultiPhaseTest(self.routers[2].addresses[0],
self.routers[5].addresses[0],
[
self.routers[5].addresses[0],
self.routers[3].addresses[0],
self.routers[1].addresses[0],
self.routers[4].addresses[0],
self.routers[2].addresses[0],
self.routers[0].addresses[0],
self.routers[5].addresses[0],
self.routers[3].addresses[0],
self.routers[1].addresses[0]
],
'multi.16')
test.run()
self.assertIsNone(test.error)
def test_17_multiphase_3(self):
test = MultiPhaseTest(self.routers[1].addresses[0],
self.routers[0].addresses[0],
[
self.routers[0].addresses[0],
self.routers[1].addresses[0],
self.routers[2].addresses[0],
self.routers[3].addresses[0],
self.routers[4].addresses[0],
self.routers[5].addresses[0],
self.routers[0].addresses[0],
self.routers[1].addresses[0],
self.routers[2].addresses[0]
],
'multi.17')
test.run()
self.assertIsNone(test.error)
class WaypointTest(MessagingHandler):
def __init__(self, sender_host, receiver_host, waypoint_host, addr):
super(WaypointTest, self).__init__()
self.sender_host = sender_host
self.receiver_host = receiver_host
self.waypoint_host = waypoint_host
self.addr = addr
self.count = 300
self.sender_conn = None
self.receiver_conn = None
self.waypoint_conn = None
self.error = None
self.n_tx = 0
self.n_rx = 0
self.n_thru = 0
def timeout(self):
self.error = "Timeout Expired - n_tx=%d, n_rx=%d, n_thru=%d" % (self.n_tx, self.n_rx, self.n_thru)
self.sender_conn.close()
self.receiver_conn.close()
self.waypoint_conn.close()
def fail(self, error):
self.error = error
self.sender_conn.close()
self.receiver_conn.close()
self.waypoint_conn.close()
self.timer.cancel()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.sender_conn = event.container.connect(self.sender_host)
self.receiver_conn = event.container.connect(self.receiver_host)
self.waypoint_conn = event.container.connect(self.waypoint_host)
self.sender = event.container.create_sender(self.sender_conn, self.addr)
self.receiver = event.container.create_receiver(self.receiver_conn, self.addr)
self.wp_sender = event.container.create_sender(self.waypoint_conn, self.addr)
self.wp_receiver = event.container.create_receiver(self.waypoint_conn, self.addr)
self.wp_sender.target.capabilities.put_object(symbol("qd.waypoint"))
self.wp_receiver.source.capabilities.put_object(symbol("qd.waypoint"))
def on_sendable(self, event):
if event.sender == self.sender:
while self.sender.credit > 0 and self.n_tx < self.count:
self.sender.send(Message("Message %d" % self.n_tx))
self.n_tx += 1
def on_message(self, event):
if event.receiver == self.receiver:
self.n_rx += 1
if self.n_rx == self.count and self.n_thru == self.count:
self.fail(None)
elif event.receiver == self.wp_receiver:
self.n_thru += 1
self.wp_sender.send(Message(event.message.body))
def run(self):
Container(self).run()
class MultiPhaseTest(MessagingHandler):
def __init__(self, sender_host, receiver_host, waypoint_hosts, addr):
super(MultiPhaseTest, self).__init__()
self.sender_host = sender_host
self.receiver_host = receiver_host
self.waypoint_hosts = waypoint_hosts
self.addr = addr
self.count = 300
self.sender_conn = None
self.receiver_conn = None
self.waypoint_conns = []
self.wp_senders = []
self.wp_receivers = []
self.error = None
self.n_tx = 0
self.n_rx = 0
self.n_thru = [0, 0, 0, 0, 0, 0, 0, 0, 0]
def timeout(self):
self.error = "Timeout Expired - n_tx=%d, n_rx=%d, n_thru=%r" % (self.n_tx, self.n_rx, self.n_thru)
self.sender_conn.close()
self.receiver_conn.close()
for c in self.waypoint_conns:
c.close()
def fail(self, error):
self.error = error
self.sender_conn.close()
self.receiver_conn.close()
for c in self.waypoint_conns:
c.close()
self.timer.cancel()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
self.sender_conn = event.container.connect(self.sender_host)
self.receiver_conn = event.container.connect(self.receiver_host)
self.sender = event.container.create_sender(self.sender_conn, self.addr)
self.receiver = event.container.create_receiver(self.receiver_conn, self.addr)
for host in self.waypoint_hosts:
self.waypoint_conns.append(event.container.connect(host))
ordinal = 1
for conn in self.waypoint_conns:
sender = event.container.create_sender(conn, self.addr)
receiver = event.container.create_receiver(conn, self.addr)
sender.target.capabilities.put_object(symbol("qd.waypoint.%d" % ordinal))
receiver.source.capabilities.put_object(symbol("qd.waypoint.%d" % ordinal))
self.wp_senders.append(sender)
self.wp_receivers.append(receiver)
ordinal += 1
def on_sendable(self, event):
if event.sender == self.sender:
while self.sender.credit > 0 and self.n_tx < self.count:
self.sender.send(Message("Message %d" % self.n_tx))
self.n_tx += 1
def on_message(self, event):
if event.receiver == self.receiver:
self.n_rx += 1
if self.n_rx == self.count:
self.fail(None)
else:
idx = 0
for receiver in self.wp_receivers:
if event.receiver == receiver:
self.n_thru[idx] += 1
self.wp_senders[idx].send(Message(event.message.body))
return
idx += 1
def run(self):
Container(self).run()
if __name__ == '__main__':
unittest.main(main_module())
| {
"content_hash": "d2279f100a39c5c6ce566947836a1306",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 127,
"avg_line_length": 40.832417582417584,
"alnum_prop": 0.5091838794321469,
"repo_name": "mgoulish/qpid-dispatch",
"id": "29171f0c07c5d40918ffe7710a945138d516f63e",
"size": "15653",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/system_tests_multi_phase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2831990"
},
{
"name": "C++",
"bytes": "354723"
},
{
"name": "CMake",
"bytes": "57520"
},
{
"name": "CSS",
"bytes": "49129"
},
{
"name": "Dockerfile",
"bytes": "3323"
},
{
"name": "HTML",
"bytes": "2320"
},
{
"name": "JavaScript",
"bytes": "733506"
},
{
"name": "Python",
"bytes": "2736603"
},
{
"name": "Shell",
"bytes": "34107"
}
],
"symlink_target": ""
} |
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_display_units06.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [93548544, 93550464]
data = [
[10000000, 20000000, 30000000, 20000000, 10000000],
]
worksheet.write_column(0, 0, data[0])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.set_y_axis({'display_units': 'millions', 'display_units_visible': 0})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| {
"content_hash": "a5e423dce91b41b48db92ef347aabcee",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 83,
"avg_line_length": 26.404255319148938,
"alnum_prop": 0.6107977437550363,
"repo_name": "applicationdevm/XlsxWriter",
"id": "8a78b17425deea24f5bf6cf1d1576328175ee133",
"size": "1414",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/comparison/test_chart_display_units06.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7365"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2512197"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
} |
import unittest
from hwt.hdl.statements.assignmentContainer import HdlAssignmentContainer
from hwt.hdl.statements.statement import HdlStatement
from hwtLib.examples.axi.oooOp.counterHashTable import OooOpExampleCounterHashTable
from hwtLib.examples.mem.ram import SimpleAsyncRam
from hwtLib.examples.statements.ifStm import SimpleIfStatement3
from hwtLib.mem.atomic.flipReg import FlipRegister
from hwtLib.mem.cuckooHashTable import CuckooHashTable
from hwtLib.peripheral.displays.segment7 import Segment7
from hwtLib.peripheral.i2c.masterBitCntrl import I2cMasterBitCtrl
from hwtLib.tests.synthesizer.interfaceLevel.subunitsSynthesisTC import synthesised
class StatementsConsystencyTC(unittest.TestCase):
def check_consystency(self, u):
synthesised(u)
c = u._ctx
for s in c.signals:
for e in s.endpoints:
if isinstance(e, HdlStatement):
self.assertIs(e.parentStm, None, (s, e))
self.assertIn(e, c.statements)
for d in s.drivers:
if isinstance(d, HdlStatement):
self.assertIs(d.parentStm, None, (s, d))
self.assertIn(d, c.statements)
for stm in c.statements:
self.assertIs(stm.parentStm, None)
def test_if_stm_merging(self):
u = FlipRegister()
self.check_consystency(u)
def test_comples_stm_ops(self):
u = CuckooHashTable()
self.check_consystency(u)
def test_rm_statement(self):
u = SimpleIfStatement3()
self.check_consystency(u)
stms = u._ctx.statements
self.assertEqual(len(stms), 1)
self.assertIsInstance(list(stms)[0], HdlAssignmentContainer)
def test_index_inputs_with_assignment_has_endpoint(self):
u = SimpleAsyncRam()
self.check_consystency(u)
self.assertEqual(len(u.addr_in._sigInside.endpoints), 1)
self.assertEqual(len(u.addr_out._sigInside.endpoints), 1)
def test_if_inputs_correc(self):
u = Segment7()
self.check_consystency(u)
def test_unconnected_slices_removed_from_inputs_of_statements(self):
u = OooOpExampleCounterHashTable()
self.check_consystency(u)
def test_stm_enclosure_consystency(self):
u = I2cMasterBitCtrl()
self.check_consystency(u)
# test if there is not a latch
for stm in u._ctx.statements:
if stm._event_dependent_from_branch != 0:
diff = stm._enclosed_for.symmetric_difference(stm._outputs)
self.assertEqual(diff, set(), f"\n{stm}")
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "1d65607067f9bf6468a871cb593f29af",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 83,
"avg_line_length": 35.851351351351354,
"alnum_prop": 0.6637768563889936,
"repo_name": "Nic30/hwtLib",
"id": "80f67d51cf6491bf031e35825032f12819ed98c1",
"size": "2701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtLib/tests/synthesizer/rtlLevel/statements_consystency_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41560"
},
{
"name": "Python",
"bytes": "2523349"
},
{
"name": "VHDL",
"bytes": "117346"
},
{
"name": "Verilog",
"bytes": "36444"
}
],
"symlink_target": ""
} |
class CachingBase(object):
class __metaclass__(type):
def __new__(cls, name, bases, dct):
dct = dict(dct)
dct['_cache'] = dict()
return type.__new__(cls, name, bases, dct)
def __new__(cls, *args):
key = cls.get_key(*args)
try:
return cls._cache[key]
except KeyError:
self = cls._cache[key] = object.__new__(cls)
self._init_cached(*key)
return self
| {
"content_hash": "a8da3da5491fed8d279a003c858ac0c7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 27.941176470588236,
"alnum_prop": 0.48,
"repo_name": "matthagy/Jamenson",
"id": "0f10210b6209ad6eb4bc9aa0a786f28149df0744",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jamenson/runtime/bases.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "444789"
}
],
"symlink_target": ""
} |
import cv2
import os
def save_frame_play(video_path, dir_path, basename, ext='jpg', delay=1, window_name='frame'):
cap = cv2.VideoCapture(video_path)
if not cap.isOpened():
return
os.makedirs(dir_path, exist_ok=True)
base_path = os.path.join(dir_path, basename)
digit = len(str(int(cap.get(cv2.CAP_PROP_FRAME_COUNT))))
n = 0
while True:
ret, frame = cap.read()
if ret:
cv2.imshow(window_name, frame)
key = cv2.waitKey(delay) & 0xFF
if key == ord('c'):
cv2.imwrite('{}_{}.{}'.format(base_path, str(n).zfill(digit), ext), frame)
elif key == ord('q'):
break
n += 1
else:
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
n = 0
cv2.destroyWindow(window_name)
save_frame_play('data/temp/sample_video.mp4', 'data/temp', 'sample_video_cap', delay=0)
| {
"content_hash": "39015ab1f85f6b1b642559ed6a39e32e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 93,
"avg_line_length": 27.352941176470587,
"alnum_prop": 0.543010752688172,
"repo_name": "nkmk/python-snippets",
"id": "1aa35b42fd1ffb914598d84538ea54d001f74c45",
"size": "930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebook/opencv_video_to_still_play.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5734214"
},
{
"name": "Python",
"bytes": "1619882"
},
{
"name": "Shell",
"bytes": "12097"
}
],
"symlink_target": ""
} |
import time
import unittest
from unittest.mock import MagicMock
from unittest.mock import patch
import apache_beam as beam
from apache_beam import coders
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.portability.api import beam_interactive_api_pb2
from apache_beam.runners.interactive import background_caching_job as bcj
from apache_beam.runners.interactive import interactive_beam as ib
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive.caching.cacheable import CacheKey
from apache_beam.runners.interactive.interactive_runner import InteractiveRunner
from apache_beam.runners.interactive.options.capture_limiters import Limiter
from apache_beam.runners.interactive.recording_manager import ElementStream
from apache_beam.runners.interactive.recording_manager import Recording
from apache_beam.runners.interactive.recording_manager import RecordingManager
from apache_beam.runners.interactive.testing.test_cache_manager import FileRecordsBuilder
from apache_beam.runners.interactive.testing.test_cache_manager import InMemoryCache
from apache_beam.runners.runner import PipelineState
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.test_stream import WindowedValueHolder
from apache_beam.transforms.window import GlobalWindow
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.windowed_value import WindowedValue
class MockPipelineResult(beam.runners.runner.PipelineResult):
"""Mock class for controlling a PipelineResult."""
def __init__(self):
self._state = PipelineState.RUNNING
def wait_until_finish(self):
pass
def set_state(self, state):
self._state = state
@property
def state(self):
return self._state
def cancel(self):
self._state = PipelineState.CANCELLED
class ElementStreamTest(unittest.TestCase):
def setUp(self):
self.cache = InMemoryCache()
self.p = beam.Pipeline()
self.pcoll = self.p | beam.Create([])
self.cache_key = str(CacheKey('pcoll', '', '', ''))
# Create a MockPipelineResult to control the state of a fake run of the
# pipeline.
self.mock_result = MockPipelineResult()
ie.current_env().add_user_pipeline(self.p)
ie.current_env().set_pipeline_result(self.p, self.mock_result)
ie.current_env().set_cache_manager(self.cache, self.p)
def test_read(self):
"""Test reading and if a stream is done no more elements are returned."""
self.mock_result.set_state(PipelineState.DONE)
self.cache.write(['expected'], 'full', self.cache_key)
self.cache.save_pcoder(None, 'full', self.cache_key)
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=1, max_duration_secs=1)
self.assertFalse(stream.is_done())
self.assertEqual(list(stream.read())[0], 'expected')
self.assertTrue(stream.is_done())
def test_done_if_terminated(self):
"""Test that terminating the job sets the stream as done."""
self.cache.write(['expected'], 'full', self.cache_key)
self.cache.save_pcoder(None, 'full', self.cache_key)
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=10)
self.assertFalse(stream.is_done())
self.assertEqual(list(stream.read(tail=False))[0], 'expected')
# The limiters were not reached, so the stream is not done yet.
self.assertFalse(stream.is_done())
self.mock_result.set_state(PipelineState.DONE)
self.assertEqual(list(stream.read(tail=False))[0], 'expected')
# The underlying pipeline is terminated, so the stream won't yield new
# elements.
self.assertTrue(stream.is_done())
def test_read_n(self):
"""Test that the stream only reads 'n' elements."""
self.mock_result.set_state(PipelineState.DONE)
self.cache.write(list(range(5)), 'full', self.cache_key)
self.cache.save_pcoder(None, 'full', self.cache_key)
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=1, max_duration_secs=1)
self.assertEqual(list(stream.read()), [0])
self.assertTrue(stream.is_done())
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=2, max_duration_secs=1)
self.assertEqual(list(stream.read()), [0, 1])
self.assertTrue(stream.is_done())
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=5, max_duration_secs=1)
self.assertEqual(list(stream.read()), list(range(5)))
self.assertTrue(stream.is_done())
# Test that if the user asks for more than in the cache it still returns.
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=10, max_duration_secs=1)
self.assertEqual(list(stream.read()), list(range(5)))
self.assertTrue(stream.is_done())
def test_read_duration(self):
"""Test that the stream only reads a 'duration' of elements."""
def as_windowed_value(element):
return WindowedValueHolder(WindowedValue(element, 0, []))
values = (FileRecordsBuilder(tag=self.cache_key)
.advance_processing_time(1)
.add_element(element=as_windowed_value(0), event_time_secs=0)
.advance_processing_time(1)
.add_element(element=as_windowed_value(1), event_time_secs=1)
.advance_processing_time(1)
.add_element(element=as_windowed_value(2), event_time_secs=3)
.advance_processing_time(1)
.add_element(element=as_windowed_value(3), event_time_secs=4)
.advance_processing_time(1)
.add_element(element=as_windowed_value(4), event_time_secs=5)
.build()) # yapf: disable
values = [
v.recorded_event for v in values
if isinstance(v, beam_interactive_api_pb2.TestStreamFileRecord)
]
self.mock_result.set_state(PipelineState.DONE)
self.cache.write(values, 'full', self.cache_key)
self.cache.save_pcoder(coders.FastPrimitivesCoder(), 'full', self.cache_key)
# The following tests a progression of reading different durations from the
# cache.
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=1)
self.assertSequenceEqual([e.value for e in stream.read()], [0])
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=2)
self.assertSequenceEqual([e.value for e in stream.read()], [0, 1])
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=10)
self.assertSequenceEqual([e.value for e in stream.read()], [0, 1, 2, 3, 4])
class RecordingTest(unittest.TestCase):
def test_computed(self):
"""Tests that a PCollection is marked as computed only in a complete state.
Because the background caching job is now long-lived, repeated runs of a
PipelineFragment may yield different results for the same PCollection.
"""
p = beam.Pipeline(InteractiveRunner())
elems = p | beam.Create([0, 1, 2])
ib.watch(locals())
# Create a MockPipelineResult to control the state of a fake run of the
# pipeline.
mock_result = MockPipelineResult()
ie.current_env().track_user_pipelines()
ie.current_env().set_pipeline_result(p, mock_result)
# Create a mock BackgroundCachingJob that will control whether to set the
# PCollections as computed or not.
bcj_mock_result = MockPipelineResult()
background_caching_job = bcj.BackgroundCachingJob(bcj_mock_result, [])
# Create a recording.
recording = Recording(
p, [elems], mock_result, max_n=10, max_duration_secs=60)
# The background caching job and the recording isn't done yet so there may
# be more elements to be recorded.
self.assertFalse(recording.is_computed())
self.assertFalse(recording.computed())
self.assertTrue(recording.uncomputed())
# The recording is finished but the background caching job is not. There
# may still be more elements to record, or the intermediate PCollection may
# have stopped caching in an incomplete state, e.g. before a window could
# fire.
mock_result.set_state(PipelineState.DONE)
recording.wait_until_finish()
self.assertFalse(recording.is_computed())
self.assertFalse(recording.computed())
self.assertTrue(recording.uncomputed())
# The background caching job finished before we started a recording which
# is a sure signal that there will be no more elements.
bcj_mock_result.set_state(PipelineState.DONE)
ie.current_env().set_background_caching_job(p, background_caching_job)
recording = Recording(
p, [elems], mock_result, max_n=10, max_duration_secs=60)
recording.wait_until_finish()
# There are no more elements and the recording finished, meaning that the
# intermediate PCollections are in a complete state. They can now be marked
# as computed.
self.assertTrue(recording.is_computed())
self.assertTrue(recording.computed())
self.assertFalse(recording.uncomputed())
def test_describe(self):
p = beam.Pipeline(InteractiveRunner())
numbers = p | 'numbers' >> beam.Create([0, 1, 2])
letters = p | 'letters' >> beam.Create(['a', 'b', 'c'])
ib.watch(locals())
# Create a MockPipelineResult to control the state of a fake run of the
# pipeline.
mock_result = MockPipelineResult()
ie.current_env().track_user_pipelines()
ie.current_env().set_pipeline_result(p, mock_result)
cache_manager = InMemoryCache()
ie.current_env().set_cache_manager(cache_manager, p)
# Create a recording with an arbitrary start time.
recording = Recording(
p, [numbers, letters], mock_result, max_n=10, max_duration_secs=60)
# Get the cache key of the stream and write something to cache. This is
# so that a pipeline doesn't have to run in the test.
numbers_stream = recording.stream(numbers)
cache_manager.write([0, 1, 2], 'full', numbers_stream.cache_key)
cache_manager.save_pcoder(None, 'full', numbers_stream.cache_key)
letters_stream = recording.stream(letters)
cache_manager.write(['a', 'b', 'c'], 'full', letters_stream.cache_key)
cache_manager.save_pcoder(None, 'full', letters_stream.cache_key)
# Get the description.
description = recording.describe()
size = description['size']
self.assertEqual(
size,
cache_manager.size('full', numbers_stream.cache_key) +
cache_manager.size('full', letters_stream.cache_key))
class RecordingManagerTest(unittest.TestCase):
def test_basic_execution(self):
"""A basic pipeline to be used as a smoke test."""
# Create the pipeline that will emit 0, 1, 2.
p = beam.Pipeline(InteractiveRunner())
numbers = p | 'numbers' >> beam.Create([0, 1, 2])
letters = p | 'letters' >> beam.Create(['a', 'b', 'c'])
# Watch the pipeline and PCollections. This is normally done in a notebook
# environment automatically, but we have to do it manually here.
ib.watch(locals())
ie.current_env().track_user_pipelines()
# Create the recording objects. By calling `record` a new PipelineFragment
# is started to compute the given PCollections and cache to disk.
rm = RecordingManager(p)
numbers_recording = rm.record([numbers], max_n=3, max_duration=500)
numbers_stream = numbers_recording.stream(numbers)
numbers_recording.wait_until_finish()
# Once the pipeline fragment completes, we can read from the stream and know
# that all elements were written to cache.
elems = list(numbers_stream.read())
expected_elems = [
WindowedValue(i, MIN_TIMESTAMP, [GlobalWindow()]) for i in range(3)
]
self.assertListEqual(elems, expected_elems)
# Make an extra recording and test the description.
letters_recording = rm.record([letters], max_n=3, max_duration=500)
letters_recording.wait_until_finish()
self.assertEqual(
rm.describe()['size'],
numbers_recording.describe()['size'] +
letters_recording.describe()['size'])
rm.cancel()
def test_duration_parsing(self):
p = beam.Pipeline(InteractiveRunner())
elems = p | beam.Create([0, 1, 2])
# Watch the pipeline and PCollections. This is normally done in a notebook
# environment automatically, but we have to do it manually here.
ib.watch(locals())
ie.current_env().track_user_pipelines()
# Create the recording objects.
rm = RecordingManager(p)
recording = rm.record([elems], max_n=3, max_duration='500s')
recording.wait_until_finish()
# Assert that the duration was parsed correctly to integer seconds.
self.assertEqual(recording.describe()['duration'], 500)
def test_cancel_stops_recording(self):
# Add the TestStream so that it can be cached.
ib.options.recordable_sources.add(TestStream)
p = beam.Pipeline(
InteractiveRunner(), options=PipelineOptions(streaming=True))
elems = (
p
| TestStream().advance_watermark_to(0).advance_processing_time(
1).add_elements(list(range(10))).advance_processing_time(1))
squares = elems | beam.Map(lambda x: x**2)
# Watch the local scope for Interactive Beam so that referenced PCollections
# will be cached.
ib.watch(locals())
# This is normally done in the interactive_utils when a transform is
# applied but needs an IPython environment. So we manually run this here.
ie.current_env().track_user_pipelines()
class SemaphoreLimiter(Limiter):
def __init__(self):
self.triggered = False
def is_triggered(self):
return self.triggered
# Get the recording then the BackgroundCachingJob.
semaphore_limiter = SemaphoreLimiter()
rm = RecordingManager(p, test_limiters=[semaphore_limiter])
rm.record([squares], max_n=10, max_duration=500)
# The BackgroundCachingJob is still waiting for more elements, so it isn't
# done yet.
bcj = ie.current_env().get_background_caching_job(p)
self.assertFalse(bcj.is_done())
# Assert that something was read and that the BackgroundCachingJob was
# sucessfully stopped.
# self.assertTrue(list(recording.stream(squares).read()))
semaphore_limiter.triggered = True
rm.cancel()
self.assertTrue(bcj.is_done())
def test_recording_manager_clears_cache(self):
"""Tests that the RecordingManager clears the cache before recording.
A job may have incomplete PCollections when the job terminates. Clearing the
cache ensures that correct results are computed every run.
"""
# Add the TestStream so that it can be cached.
ib.options.recordable_sources.add(TestStream)
p = beam.Pipeline(
InteractiveRunner(), options=PipelineOptions(streaming=True))
elems = (
p
| TestStream().advance_watermark_to(0).advance_processing_time(
1).add_elements(list(range(10))).advance_processing_time(1))
squares = elems | beam.Map(lambda x: x**2)
# Watch the local scope for Interactive Beam so that referenced PCollections
# will be cached.
ib.watch(locals())
# This is normally done in the interactive_utils when a transform is
# applied but needs an IPython environment. So we manually run this here.
ie.current_env().track_user_pipelines()
# Do the first recording to get the timestamp of the first time the fragment
# was run.
rm = RecordingManager(p)
# Set up a mock for the Cache's clear function which will be used to clear
# uncomputed PCollections.
rm._clear_pcolls = MagicMock()
rm.record([squares], max_n=1, max_duration=500)
rm.cancel()
# Assert that the cache cleared the PCollection.
rm._clear_pcolls.assert_any_call(
unittest.mock.ANY,
# elems is unbounded source populated by the background job, thus not
# cleared.
{CacheKey.from_pcoll('squares', squares).to_str()})
def test_clear(self):
p1 = beam.Pipeline(InteractiveRunner())
elems_1 = p1 | 'elems 1' >> beam.Create([0, 1, 2])
ib.watch(locals())
ie.current_env().track_user_pipelines()
recording_manager = RecordingManager(p1)
recording = recording_manager.record([elems_1], max_n=3, max_duration=500)
recording.wait_until_finish()
record_describe = recording_manager.describe()
self.assertGreater(record_describe['size'], 0)
recording_manager.clear()
self.assertEqual(recording_manager.describe()['size'], 0)
def test_clear_specific_pipeline(self):
"""Tests that clear can empty the cache for a specific pipeline."""
# Create two pipelines so we can check that clearing the cache won't clear
# all defined pipelines.
p1 = beam.Pipeline(InteractiveRunner())
elems_1 = p1 | 'elems 1' >> beam.Create([0, 1, 2])
p2 = beam.Pipeline(InteractiveRunner())
elems_2 = p2 | 'elems 2' >> beam.Create([0, 1, 2])
# Watch the pipeline and PCollections. This is normally done in a notebook
# environment automatically, but we have to do it manually here.
ib.watch(locals())
ie.current_env().track_user_pipelines()
# Create the recording objects. By calling `record` a new PipelineFragment
# is started to compute the given PCollections and cache to disk.
rm_1 = RecordingManager(p1)
recording = rm_1.record([elems_1], max_n=3, max_duration=500)
recording.wait_until_finish()
rm_2 = RecordingManager(p2)
recording = rm_2.record([elems_2], max_n=3, max_duration=500)
recording.wait_until_finish()
# Assert that clearing only one recording clears that recording.
if rm_1.describe()['state'] == PipelineState.STOPPED \
and rm_2.describe()['state'] == PipelineState.STOPPED:
self.assertGreater(rm_1.describe()['size'], 0)
self.assertGreater(rm_2.describe()['size'], 0)
rm_1.clear()
self.assertEqual(rm_1.describe()['size'], 0)
self.assertGreater(rm_2.describe()['size'], 0)
rm_2.clear()
self.assertEqual(rm_2.describe()['size'], 0)
def test_record_pipeline(self):
# Add the TestStream so that it can be cached.
ib.options.recordable_sources.add(TestStream)
p = beam.Pipeline(
InteractiveRunner(), options=PipelineOptions(streaming=True))
# pylint: disable=unused-variable
_ = (p
| TestStream()
.advance_watermark_to(0)
.advance_processing_time(1)
.add_elements(list(range(10)))
.advance_processing_time(1)) # yapf: disable
# Watch the local scope for Interactive Beam so that referenced PCollections
# will be cached.
ib.watch(locals())
# This is normally done in the interactive_utils when a transform is
# applied but needs an IPython environment. So we manually run this here.
ie.current_env().track_user_pipelines()
# Create a lmiter that stops the background caching job when something is
# written to cache. This is used to make ensure that the pipeline is
# functioning properly and that there are no data races with the test.
class SizeLimiter(Limiter):
def __init__(self, p):
self.pipeline = p
self._rm = None
def set_recording_manager(self, rm):
self._rm = rm
def is_triggered(self):
return self._rm.describe()['size'] > 0 if self._rm else False
# Do the first recording to get the timestamp of the first time the fragment
# was run.
size_limiter = SizeLimiter(p)
rm = RecordingManager(p, test_limiters=[size_limiter])
size_limiter.set_recording_manager(rm)
self.assertEqual(rm.describe()['state'], PipelineState.STOPPED)
self.assertTrue(rm.record_pipeline())
# A recording is in progress, no need to start another one.
self.assertFalse(rm.record_pipeline())
for _ in range(60):
if rm.describe()['state'] == PipelineState.CANCELLED:
break
time.sleep(1)
self.assertTrue(
rm.describe()['state'] == PipelineState.CANCELLED,
'Test timed out waiting for pipeline to be cancelled. This indicates '
'that the BackgroundCachingJob did not cache anything.')
@patch(
'apache_beam.runners.interactive.recording_manager.'
'RecordingManager._clear_pcolls',
return_value=None)
@patch(
'apache_beam.runners.interactive.pipeline_fragment.'
'PipelineFragment.run',
return_value=None)
def test_record_detects_remote_runner(
self, mock_pipeline_fragment, mock_clear_pcolls):
"""Tests that a remote runner is detected, resulting in the
PipelineFragment instance to have blocking enabled."""
# Create the pipeline that will emit 0, 1, 2.
p = beam.Pipeline(InteractiveRunner())
numbers = p | 'numbers' >> beam.Create([0, 1, 2])
# Set the cache directory for Interactive Beam to be in a GCS bucket.
ib.options.cache_root = 'gs://test-bucket/'
# Create the recording objects. By calling `record` a new PipelineFragment
# is started to compute the given PCollections and cache to disk.
rm = RecordingManager(p)
# Run record() and check if the PipelineFragment.run had blocking set to
# True due to the GCS cache_root value.
rm.record([numbers], max_n=3, max_duration=500)
mock_pipeline_fragment.assert_called_with(blocking=True)
# Reset cache_root value.
ib.options.cache_root = None
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "87a26d99bd2a39a601d0f8d8a06080fa",
"timestamp": "",
"source": "github",
"line_count": 553,
"max_line_length": 89,
"avg_line_length": 38.67088607594937,
"alnum_prop": 0.6884732288987608,
"repo_name": "apache/beam",
"id": "6d1b3f3ebb1712732fd5b851defa45c5853f539a",
"size": "22168",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/interactive/recording_manager_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "70760"
},
{
"name": "Dart",
"bytes": "912687"
},
{
"name": "Dockerfile",
"bytes": "59805"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "5508697"
},
{
"name": "Groovy",
"bytes": "936956"
},
{
"name": "HCL",
"bytes": "103872"
},
{
"name": "HTML",
"bytes": "184151"
},
{
"name": "Java",
"bytes": "41223435"
},
{
"name": "JavaScript",
"bytes": "119576"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "220768"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "10728612"
},
{
"name": "Rust",
"bytes": "5168"
},
{
"name": "SCSS",
"bytes": "318364"
},
{
"name": "Sass",
"bytes": "25954"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "375834"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "1997829"
}
],
"symlink_target": ""
} |
import unittest
from hagelslag.data.MRMSGrid import MRMSGrid
from datetime import datetime
class TestMRMSGrid(unittest.TestCase):
def setUp(self):
self.path = "../../testdata/mrms_3km/"
self.variable = "MESH_Max_60min_00.50"
self.start_date = datetime(2015, 5, 1, 18, 0)
self.end_date = datetime(2015, 5, 2, 15, 0)
self.mrms = MRMSGrid(self.start_date, self.end_date, self.variable, self.path)
def test_constructor(self):
self.assertEquals(self.mrms.all_dates.size, 22, "Number of dates is wrong")
self.assertIsNone(self.mrms.data, "Data already loaded")
self.assertIsNone(self.mrms.valid_dates, "Valid dates already loaded")
def test_loading(self):
self.mrms.load_data()
self.assertEquals(self.mrms.data.shape[0], self.mrms.valid_dates.shape[0], "Data and valid dates unequal length")
self.assertEquals(self.mrms.all_dates.shape[0], self.mrms.valid_dates.shape[0], "All dates were not loaded")
| {
"content_hash": "d2b3d5c39c4bab4aaa77f868c2bf3511",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 121,
"avg_line_length": 44.26086956521739,
"alnum_prop": 0.6679764243614931,
"repo_name": "djgagne/hagelslag-unidata",
"id": "5c3828dcae1df358d827e6b92f97666f143f65d7",
"size": "1018",
"binary": false,
"copies": "1",
"ref": "refs/heads/unidata_workshop_2015",
"path": "hagelslag/test/test_data_MRMSGrid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "172294"
}
],
"symlink_target": ""
} |
import unittest2
from ice import tasks
import fabric.api as fabric_api
def a_func(*args, **kwargs):
"""A help message"""
ret_val = kwargs
ret_val['args'] = args
return ret_val
class TestCallable(unittest2.TestCase):
def test_help_message(self):
c = tasks.Callable(a_func)
self.assertEqual(c.help_msg, 'A help message')
def test_call(self):
c = tasks.Callable(a_func)
self.assertDictEqual(c(), {'args': tuple()})
def test_call_with_args(self):
c = tasks.Callable(a_func)
self.assertDictEqual(c('banana', 12),
{'args': tuple(['banana', 12])})
def test_call_with_kwargs(self):
c = tasks.Callable(a_func)
self.assertDictEqual(
c(a_string='banana', a_int=12),
{
'a_string': 'banana',
'a_int': 12,
'args': tuple()
}
)
def test_call_with_args_and_kwargs(self):
c = tasks.Callable(a_func)
self.assertDictEqual(
c('hello_world', a_string='banana', a_int=12),
{
'a_string': 'banana',
'a_int': 12,
'args': tuple(['hello_world'])
}
)
def mock_decorator(func):
def decorated_func(*args, **kwargs):
ret_val = func(*args, **kwargs)
ret_val['is_decorated'] = True
return ret_val
return decorated_func
class TestTask(unittest2.TestCase):
def setUp(self):
self.fa_task = fabric_api.task
fabric_api.task = mock_decorator
def tearDown(self):
fabric_api.task = self.fa_task
def test_callable_is_wrapped(self):
c = tasks.Task(a_func)
self.assertDictEqual(
c('hello_world', a_int=12),
{
'a_int': 12,
'args': tuple(['hello_world']),
'is_decorated': True
}
)
class TestParallelTask(unittest2.TestCase):
def setUp(self):
self.fa_parallel = fabric_api.parallel
fabric_api.parallel = mock_decorator
def tearDown(self):
fabric_api.parallel = self.fa_parallel
def test_callable_is_wrapped(self):
c = tasks.ParallelTask(a_func)
self.assertDictEqual(
c('hello_world', a_int=12),
{
'a_int': 12,
'args': tuple(['hello_world']),
'is_decorated': True
}
)
| {
"content_hash": "996529cfd3a7f40d9d69c2038d09f60c",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 61,
"avg_line_length": 26.16842105263158,
"alnum_prop": 0.5172968624296058,
"repo_name": "ice-stuff/ice",
"id": "bab5802798b958ef3320c0f5a3cfc24977096758",
"size": "2486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ice/test/test_tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "87083"
},
{
"name": "Shell",
"bytes": "5466"
}
],
"symlink_target": ""
} |
from twisted.plugin import IPlugin
from zope.interface import implementer
from desertbot.message import IRCMessage
from desertbot.moduleinterface import IModule
from desertbot.modules.commandinterface import BotCommand
from desertbot.response import IRCResponse
import requests
import http.client
from urllib3.exceptions import LocationParseError
@implementer(IPlugin, IModule)
class Down(BotCommand):
def triggers(self):
return ['down']
def help(self, query):
return 'down <url> - Check if the specified website URL is up'
def execute(self, message: IRCMessage):
if not message.parameterList:
return IRCResponse("You didn't give me a URL to check!", message.replyTo)
url = message.parameterList[0].strip()
if not url.startswith("http://") and not url.startswith("https://"):
url = f"http://{url}"
try:
res = self.bot.moduleHandler.runActionUntilValue("fetch-url", url, handleErrors=False)
except LocationParseError:
return IRCResponse("I don't know how to parse that URL!", message.replyTo)
except requests.exceptions.Timeout:
return IRCResponse(f"{url} looks to be down! It timed out after 10 seconds.", message.replyTo)
except requests.exceptions.SSLError:
return IRCResponse(f"{url} looks to be down! SSL verification failed.", message.replyTo)
except requests.exceptions.ConnectionError:
return IRCResponse(f"{url} looks to be down! I failed to connect to it.", message.replyTo)
except Exception as e:
self.logger.exception(e)
return IRCResponse(f"{url} looks to be down? requests broke on it. Send help.", message.replyTo)
if res.ok:
return IRCResponse(f"{url} looks up to me! It returned {res.status_code} ({http.client.responses[res.status_code]}).", message.replyTo)
else:
return IRCResponse(f"{url} looks to be down! It returned {res.status_code} ({http.client.responses[res.status_code]}).", message.replyTo)
down = Down()
| {
"content_hash": "c38258a635def71db15f7557c141c92a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 149,
"avg_line_length": 41.05882352941177,
"alnum_prop": 0.6848137535816619,
"repo_name": "DesertBot/DesertBot",
"id": "74917e49aeaffe573c6e1649a8a5867008d502cb",
"size": "2094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desertbot/modules/commands/Down.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "938"
},
{
"name": "Python",
"bytes": "514976"
}
],
"symlink_target": ""
} |
'''Prompt user for confirmations based on file names, and keeps track of
previous choices.
'''
import tilutil.systemutils as su
class ConfirmManager:
'''Class to prompt user for file operation confirmations, and remember the
patterns for future prompts.
'''
def __init__(self):
self.approve_list = []
self.reject_list = []
def addapprove(self, value):
'''Add a value to the list of approved values'''
self.approve_list.append(value)
def confirm(self, path, message, choices): #IGNORE:R0911
'''Prompts for confirmation.
An item in the approve list always returns 1.
An item in the reject list always returns 0.
An empty response (hitting just enter) always returns 0.
A response of +... adds a pattern to the approve list and returns 1.
A response of -... adds a pattern to the reject list and returns 0.
A response startring with y returns 1.
The first character of any other response is matched against the letters
in the choices parameters. If a match is found, the position is returned.
For example, if choices is "nyc", entering c... returns 2.
All other input returns 0.
All matching is done without case sensitivity, and choices should be all
lower case.
@param theFile a <code>File</code> value
@param message a <code>String</code> value
@param choices a <code>String</code> value
@return an <code>int</code> value
'''
for pattern in self.approve_list:
if path.find(pattern) != -1:
return 1
for pattern in self.reject_list:
if path.find(pattern) != -1:
return 0
answer = raw_input(su.fsenc(message))
if len(answer) == 0:
return 0
first_char = answer[0].lower()
if len(answer) > 1 and first_char == '+':
self.approve_list.append(answer[1:])
return 1
if len(answer) > 1 and first_char == '-':
self.reject_list.append(answer[1:])
return 0
if first_char == 'y':
return 1
for c in range(0, len(choices)):
if first_char == choices[c]:
return c
return 0
| {
"content_hash": "20f13a35e7ce9576bd8444dc4515284f",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 81,
"avg_line_length": 35.43283582089552,
"alnum_prop": 0.5754001684919966,
"repo_name": "nagyistoce/phoshare",
"id": "79c30b16efef27438973edb316f966fd12375635",
"size": "2374",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tilutil/confirmmanager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "172252"
}
],
"symlink_target": ""
} |
"""
Compliance Checker suite runner
"""
from __future__ import print_function
import sys
import inspect
import itertools
import json
from netCDF4 import Dataset
from lxml import etree as ET
from compliance_checker.base import fix_return_value, Result
from owslib.sos import SensorObservationService
from owslib.swe.sensor.sml import SensorML
try:
from urlparse import urlparse
except:
from urllib.parse import urlparse
from datetime import datetime
import requests
import textwrap
class CheckSuite(object):
checkers = {} # Base dict of checker names to BaseCheck derived types, override this in your CheckSuite implementation
@classmethod
def load_all_available_checkers(cls):
"""
Helper method to retrieve all sub checker classes derived from various
base classes.
"""
from pkg_resources import working_set
for x in working_set.iter_entry_points('compliance_checker.suites'):
try:
xl = x.load()
cls.checkers[xl.name] = xl
except Exception as e:
print("Could not load", x, ":", e, file=sys.stderr)
def _get_checks(self, checkclass):
"""
Helper method to retreive check methods from a Checker class.
The name of the methods in the Checker class should start with "check_" for this
method to find them.
"""
meths = inspect.getmembers(checkclass, inspect.ismethod)
return [x[1] for x in meths if x[0].startswith("check_")]
def _run_check(self, check_method, ds):
val = check_method(ds)
if isinstance(val, list):
return [fix_return_value(v, check_method.__func__.__name__, check_method, check_method.__self__) for v in val]
return [fix_return_value(val, check_method.__func__.__name__, check_method, check_method.__self__)]
def _get_valid_checkers(self, ds, checker_names):
"""
Returns a filtered list of 2-tuples: (name, valid checker) based on the ds object's type and
the user selected names.
"""
if len(checker_names) == 0:
checker_names = list(self.checkers.keys())
args = [(name, self.checkers[name]) for name in checker_names if name in self.checkers]
valid = []
all_checked = set([a[1] for a in args]) # only class types
checker_queue = set(args)
while len(checker_queue):
name, a = checker_queue.pop()
if type(ds) in a().supported_ds:
valid.append((name, a))
# add all to queue
for subc in a.__subclasses__():
if subc not in all_checked:
all_checked.add(subc)
checker_queue.add((name, subc))
return valid
def run(self, ds, *checker_names):
"""
Runs this CheckSuite on the dataset with all the passed Checker instances.
Returns a dictionary mapping checker names to a 2-tuple of their grouped scores and errors/exceptions while running checks.
"""
ret_val = {}
checkers = self._get_valid_checkers(ds, checker_names)
if len(checkers) == 0:
print("No valid checkers found for tests '%s'" % ",".join(checker_names))
for checker_name, checker_class in checkers:
checker = checker_class()
checker.setup(ds)
checks = self._get_checks(checker)
vals = []
errs = {} # check method name -> (exc, traceback)
for c in checks:
try:
vals.extend(self._run_check(c, ds))
except Exception as e:
errs[c.__func__.__name__] = (e, sys.exc_info()[2])
# score the results we got back
groups = self.scores(vals)
ret_val[checker_name] = groups, errs
return ret_val
@classmethod
def passtree(cls, groups, limit):
for r in groups:
if r.children:
x = cls.passtree(r.children, limit)
if r.weight >= limit and x is False:
return False
if r.weight >= limit and r.value[0] != r.value[1]:
return False
return True
def build_structure(self, check_name, groups, source_name, limit=1):
'''
Compiles the checks, results and scores into an aggregate structure which looks like:
{
"scored_points": 396,
"low_count": 0,
"possible_points": 400,
"testname": "gliderdac",
"medium_count": 2,
"source_name": ".//rutgers/ru01-20140120T1444/ru01-20140120T1649.nc",
"high_count": 0,
"all_priorities" : [...],
"high_priorities": [...],
"medium_priorities" : [...],
"low_priorities" : [...]
}
@param check_name The test which was run
@param groups List of results from compliance checker
@param source_name Source of the dataset, used for title
'''
aggregates = {}
aggregates['scored_points'] = 0
aggregates['possible_points'] = 0
high_priorities = []
medium_priorities = []
low_priorities = []
all_priorities = []
aggregates['high_count'] = 0
aggregates['medium_count'] = 0
aggregates['low_count'] = 0
def named_function(result):
for child in result.children:
all_priorities.append(child)
named_function(child)
# For each result, bin them into the appropriate category, put them all
# into the all_priorities category and add up the point values
for res in groups:
if res.weight < limit:
continue
# If the result has 0 possible points, then it was not valid for
# this dataset and contains no meaningful information
if res.value[1] == 0:
continue
aggregates['scored_points'] += res.value[0]
aggregates['possible_points'] += res.value[1]
if res.weight == 3:
high_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates['high_count'] += 1
elif res.weight == 2:
medium_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates['medium_count'] += 1
else:
low_priorities.append(res)
if res.value[0] < res.value[1]:
aggregates['low_count'] += 1
all_priorities.append(res)
# Some results have children
# We don't render children inline with the top three tables, but we
# do total the points and display the messages
named_function(res)
aggregates['high_priorities'] = high_priorities
aggregates['medium_priorities'] = medium_priorities
aggregates['low_priorities'] = low_priorities
aggregates['all_priorities'] = all_priorities
aggregates['testname'] = check_name
aggregates['source_name'] = source_name
return aggregates
def json_output(self, check_name, groups, file_object, source_name, limit):
'''
Builds the results into a JSON structure and writes it to the file buffer.
@param check_name The test which was run
@param groups List of results from compliance checker
@param output_filename Path to file to save output
@param file_object A python file object where the output should be written to
@param source_name Source of the dataset, used for title
@param limit Integer value for limiting output
'''
aggregates = self.build_structure(check_name, groups, source_name, limit)
aggregates = self.serialize(aggregates)
json_string = json.dumps(aggregates, ensure_ascii=False)
file_object.write(str(json_string))
return
def serialize(self, o):
'''
Returns a safe serializable object that can be serialized into JSON.
@param o Python object to serialize
'''
if isinstance(o, (list, tuple)):
return [self.serialize(i) for i in o]
if isinstance(o, dict):
return {k: self.serialize(v) for k, v in o.items()}
if isinstance(o, datetime):
return o.isoformat()
if isinstance(o, Result):
return self.serialize(o.serialize())
return o
def html_output(self, check_name, groups, file_object, source_name, limit):
'''
Renders an HTML file using Jinja2 and saves the output to the file specified.
@param check_name The test which was run
@param groups List of results from compliance checker
@param output_filename Path to file to save output
@param file_object A python file object where the output should be written to
@param source_name Source of the dataset, used for title
@param limit Integer value for limiting output
'''
from jinja2 import Environment, PackageLoader
self.j2 = Environment(loader=PackageLoader('compliance_checker', 'data/templates'))
template = self.j2.get_template('ccheck.html.j2')
template_vars = self.build_structure(check_name, groups, source_name, limit)
buf = template.render(**template_vars)
file_object.write(str(buf))
def get_points(self, groups, limit):
score_list = []
score_only_list = []
for v in range(len(groups)):
score_list.append([groups[v].name, groups[v].weight, groups[v].value, groups[v].children])
if groups[v].weight >= limit:
score_only_list.append(groups[v].value)
points = [x[0] for x in score_only_list]
out_of = [x[1] for x in score_only_list]
points = sum(points)
out_of = sum(out_of)
return score_list, points, out_of
def standard_output(self, limit, check_name, groups):
"""
Generates the Terminal Output for Standard cases
Returns the dataset needed for the verbose output, as well as the failure flags.
"""
score_list, points, out_of = self.get_points(groups, limit)
print('\n')
print("-" * 80)
print('{:^80}'.format("The dataset scored %r out of %r points" % (points, out_of)))
print('{:^80}'.format("during the %s check" % check_name))
print("-" * 80)
return [score_list, points, out_of]
def non_verbose_output_generation(self, score_list, groups, limit, points, out_of):
if points < out_of:
print('{:^80}'.format("Scoring Breakdown:"))
print('\n')
priority_flag = 3
for x in range(len(score_list)):
if score_list[x][1] == 3 and limit <= 3 :
if priority_flag == 3:
print('{:^80}'.format("High Priority"))
print("-" * 80)
print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))
priority_flag -= 1
print('%-40s:%s:%6s/%1s' % (score_list[x][0][0:39], score_list[x][1], score_list[x][2][0], score_list[x][2][1]))
elif score_list[x][1] == 2 and limit <= 2 :
if priority_flag == 2:
print('\n')
print('{:^80}'.format("Medium Priority"))
print("-" * 80)
print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))
priority_flag -= 1
print('%-40s:%s:%6s/%1s' % (score_list[x][0][0:39], score_list[x][1], score_list[x][2][0], score_list[x][2][1]))
elif score_list[x][1] == 1 and limit == 1 :
if priority_flag == 1:
print('\n')
print('{:^80}'.format("Low Priority"))
print("-" * 80)
print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))
priority_flag -= 1
print('%-40s:%s:%6s/%1s' % (score_list[x][0][0:39], score_list[x][1], score_list[x][2][0], score_list[x][2][1]))
elif score_list[x][1] == 1 and limit == 1 and priority_flag == 2:
print('{:^80}'.format('No medium priority tests present'))
print('-' * 80)
priority_flag -= 1
# Catch All for pretty presentation
if priority_flag == 2 and limit == 2:
print('{:^80}'.format('No Medium priority tests present'))
print('-' * 80)
if priority_flag == 2 and limit == 1:
print('{:^80}'.format('No Medium priority tests present'))
print('-' * 80)
print('')
print('{:^80}'.format('No Low priority tests present'))
print('-' * 80)
if priority_flag == 1 and limit == 1:
print('{:^80}'.format('No Low priority tests present'))
print('-' * 80)
print("\n" + "\n" + '-' * 80)
print('{:^80}'.format('Reasoning for the failed tests given below:'))
print('\n')
print('%s%37s:%10s:%8s' % ('Name', 'Priority', ' Score', 'Reasoning'))
print("-" * 80)
self.reasoning_routine(groups, 0)
else:
print("All tests passed!")
def verbose_output_generation(self, groups, limit, points, out_of):
'''
Generates the Terminal Output for Verbose cases
'''
priority_flag = 3
print('{:^80}'.format("Verbose Scoring Breakdown:"), end=' ')
self.print_routine(groups, 0, priority_flag)
if points < out_of:
print("\n" + "\n" + '-' * 80)
print('{:^80}'.format('Reasoning for the failed tests given below:'))
print('\n')
print('%s%37s:%10s:%8s' % ('Name', 'Priority', ' Score', 'Reasoning'))
print("-" * 80)
self.reasoning_routine(groups, 0)
pass
def print_routine(self, list_of_results, indent, priority_flag):
"""
print routine performed
"""
def weight_func(r):
"""
Function that returns the weight, used for sorting by priority
"""
return r.weight
# Sorting method used to properly sort the output by priority.
grouped_sorted = []
grouped_sorted = sorted(list_of_results, key=weight_func, reverse=True)
# Loop over input
for res in grouped_sorted:
# If statements to print the proper Headings
if res.weight == 3 and indent == 0 and priority_flag == 3:
print('\n')
print('{:^80}'.format("High Priority"))
print("-" * 80)
print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))
priority_flag -= 1
if res.weight == 2 and indent == 0 and priority_flag == 2:
print('\n')
print('{:^80}'.format("Medium Priority"))
print("-" * 80)
print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))
priority_flag -= 1
if res.weight == 1 and indent == 0 and priority_flag == 1:
print('\n')
print('{:^80}'.format("Low Priority"))
print("-" * 80)
print('%-36s:%8s:%6s' % (' Name', 'Priority', 'Score'))
priority_flag -= 1
print('%-40s:%s:%s%6s/%1s' % ((indent * ' ' + res.name)[0:39], res.weight, indent * ' ', res.value[0], res.value[1]))
if res.children:
self.print_routine(res.children, indent + 1, priority_flag)
def reasoning_routine(self, list_of_results, indent, line = True):
"""
print routine performed
"""
def weight_func(r):
"""
Function that returns the weight, used for sorting by priority
"""
return r.weight
# Sorting method used to properly sort the output by priority.
grouped_sorted = []
grouped_sorted = sorted(list_of_results, key=weight_func, reverse=True)
wrapper = textwrap.TextWrapper(initial_indent = '', width = 80, subsequent_indent = ' ' * 54)
for res in grouped_sorted:
if (res.value[0] != res.value[1]) and not res.msgs:
print('%-39s:%1s:%6s/%2s : %s' % (str(indent * ' ' + res.name)[0:39], res.weight, str(res.value[0]), str(res.value[1]), ' '))
if (res.value[0] != res.value[1]) and res.msgs:
print(wrapper.fill('%-39s:%1s:%6s/%2s : %s' % (str(indent * ' ' + res.name)[0:39], res.weight, str(res.value[0]), str(res.value[1]), str(", ".join(res.msgs)))))
if res.children:
self.reasoning_routine(res.children, indent + 1, False)
def load_dataset(self, ds_str):
"""
Helper method to load a dataset or SOS GC/DS url.
"""
ds = None
# try to figure out if this is a local NetCDF Dataset, a remote one, or an SOS GC/DS url
doc = None
pr = urlparse(ds_str)
if pr.netloc: # looks like a remote url
rhead = requests.head(ds_str)
# if we get a 400 here, it's likely a Dataset openable OpenDAP url
if rhead.status_code == 400:
pass
elif rhead.status_code == 200 and rhead.headers['content-type'] == 'text/xml':
# probably interesting, grab it
r = requests.get(ds_str)
r.raise_for_status()
doc = r.text
else:
raise Exception("Could not understand response code %s and content-type %s" % (rhead.status_code, rhead.headers.get('content-type', 'none')))
else:
def is_binary_string(bts):
# do a cheap imitation of libmagic
# http://stackoverflow.com/a/7392391/84732
textchars = ''.join(map(chr, [7, 8, 9, 10, 12, 13, 27] + list(range(0x20, 0x100))))
if sys.version_info >= (3, ):
textchars = textchars.encode()
return bool(bts.translate(None, textchars))
with open(ds_str, 'rb') as f:
first_chunk = f.read(1024)
if is_binary_string(first_chunk):
# likely netcdf file
pass
else:
f.seek(0)
doc = "".join(f.readlines())
if doc is not None:
xml_doc = ET.fromstring(str(doc))
if xml_doc.tag == "{http://www.opengis.net/sos/1.0}Capabilities":
ds = SensorObservationService(ds_str, xml=str(doc))
elif xml_doc.tag == "{http://www.opengis.net/sensorML/1.0.1}SensorML":
ds = SensorML(xml_doc)
else:
raise Exception("Unrecognized XML root element: %s" % xml_doc.tag)
else:
# no doc? try the dataset constructor
ds = Dataset(ds_str)
return ds
def scores(self, raw_scores):
"""
Transforms raw scores from a single checker into a fully tallied and grouped scoreline.
"""
grouped = self._group_raw(raw_scores)
return (grouped)
def _group_raw(self, raw_scores, cur=None, level=1):
"""
Internal recursive method to group raw scores into a cascading score summary.
Only top level items are tallied for scores.
"""
def build_group(label=None, weight=None, value=None, sub=None):
label = label
weight = weight
value = self._translate_value(value)
sub = sub or []
return Result(weight=weight,
value=value,
name=label,
children=sub)
def trim_groups(r):
if isinstance(r.name, tuple) or isinstance(r.name, list):
new_name = r.name[1:]
else:
new_name = []
return Result(r.weight, r.value, new_name, r.msgs)
# CHECK FOR TERMINAL CONDITION: all raw_scores.name are single length
# @TODO could have a problem here with scalar name, but probably still works
terminal = [len(x.name) for x in raw_scores]
if terminal == [0] * len(raw_scores):
return []
def group_func(r):
"""
Slices off first element (if list/tuple) of classification or just returns it if scalar.
"""
if isinstance(r.name, tuple) or isinstance(r.name, list):
if len(r.name) == 0:
retval = ''
else:
retval = r.name[0:1][0]
else:
retval = r.name
return retval
grouped = itertools.groupby(sorted(raw_scores, key=group_func), key=group_func)
ret_val = []
for k, v in grouped:
v = list(v)
cv = self._group_raw(list(map(trim_groups, v)), k, level + 1)
if len(cv):
# if this node has children, max weight of children + sum of all the scores
max_weight = max([x.weight for x in cv])
sum_scores = tuple(map(sum, list(zip(*([x.value for x in cv])))))
msgs = []
else:
max_weight = max([x.weight for x in v])
sum_scores = tuple(map(sum, list(zip(*([self._translate_value(x.value) for x in v])))))
msgs = sum([x.msgs for x in v], [])
ret_val.append(Result(name=k, weight=max_weight, value=sum_scores, children=cv, msgs=msgs))
return ret_val
def _translate_value(self, val):
"""
Turns shorthand True/False/None checks into full scores (1, 1)/(0, 1)/(0, 0).
Leaves full scores alone.
"""
if val is True:
return (1, 1)
elif val is False:
return (0, 1)
elif val is None:
return (0, 0)
return val
| {
"content_hash": "e64163ce7522a3cf2408d623ac4fd500",
"timestamp": "",
"source": "github",
"line_count": 589,
"max_line_length": 179,
"avg_line_length": 38.39388794567063,
"alnum_prop": 0.5264437958786592,
"repo_name": "duncombe/compliance-checker",
"id": "8a103aecaf650aeba0e2eee3c37cbcad2631457a",
"size": "22614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compliance_checker/suite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "254866"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.tensor_array_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_grad
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def _make_converter(tf_dtype):
def _converter(x):
if tf_dtype == dtypes.string:
# In Python3, np.str is unicode, while we always want bytes
return np.asarray(x).astype("|S")
x = np.asarray(x).astype(tf_dtype.as_numpy_dtype)
if tf_dtype.is_complex:
# Add a non-zero imaginary component to x.
x -= 1j * x
return x
return _converter
def _make_ta(size, name, dtype=dtypes.float32, infer_shape=False):
return tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name=name, size=size, infer_shape=infer_shape)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class TensorArrayTest(test.TestCase):
@classmethod
def setUpClass(cls):
super(TensorArrayTest, cls).setUpClass()
cls._workers, _ = test.create_local_cluster(num_workers=3, num_ps=0)
@classmethod
def tearDownClass(cls):
super(TensorArrayTest, cls).tearDownClass()
session_lib.Session.reset(cls._workers[0].target)
@test_util.run_in_graph_and_eager_modes
def testTensorArrayWriteRead(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
def _testTensorArrayWritePack(self, tf_dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.stack()
c0 = self.evaluate(c0)
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0)
def _testTensorArrayWritePackMaybeLegacy(self):
self._testTensorArrayWritePack(dtypes.float32)
self._testTensorArrayWritePack(dtypes.float64)
self._testTensorArrayWritePack(dtypes.int32)
self._testTensorArrayWritePack(dtypes.int64)
self._testTensorArrayWritePack(dtypes.complex64)
self._testTensorArrayWritePack(dtypes.complex128)
self._testTensorArrayWritePack(dtypes.string)
def testTensorArrayWritePack(self):
self._testTensorArrayWritePackMaybeLegacy()
def testEmptyTensorArrayPack(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
empty_element = np.zeros((0, 1), dtype=np.float32)
w0 = ta.write(0, empty_element)
w1 = w0.write(1, empty_element)
w2 = w1.write(2, empty_element)
c0 = w2.stack()
c0 = self.evaluate(c0)
self.assertAllEqual([3, 0, 1], c0.shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.concat()
c0 = self.evaluate(c0)
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0], [6.0, 7.0],
[106.0, 107.0], [8.0, 9.0]]), c0)
@test_util.deprecated_graph_mode_only
def testTensorArrayWriteConcat(self):
self._testTensorArrayWriteConcat(dtypes.float32)
self._testTensorArrayWriteConcat(dtypes.float64)
self._testTensorArrayWriteConcat(dtypes.int32)
self._testTensorArrayWriteConcat(dtypes.int64)
self._testTensorArrayWriteConcat(dtypes.complex64)
self._testTensorArrayWriteConcat(dtypes.complex128)
self._testTensorArrayWriteConcat(dtypes.string)
def _testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
element_shape=tensor_shape.TensorShape([1, 2]))
self.assertAllEqual([[0.0, 0.0]], self.evaluate(ta.read(0)))
self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.run_v1_only("b/122324791")
def testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableFillsZeros()
def _testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
self.assertAllEqual(
[[0.0, 0.0]], self.evaluate(ta.write(1, [[4.0, 5.0]]).read(0)))
self.assertAllEqual([[[0.0, 0.0]], [[4.0, 5.0]], [[0.0, 0.0]]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).stack()))
self.assertAllEqual([[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]],
self.evaluate(ta.write(1, [[4.0, 5.0]]).concat()))
@test_util.run_v1_only("b/122324791")
def testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros(self):
self._testTensorArrayReadOrPackNotAllValuesAvailableInferShapeFillsZeros()
@test_util.run_v1_only("Uses placeholders")
def testSkipEagerTensorArrayReadUninitializedInferShapeFillsZeros(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
val = array_ops.placeholder(dtypes.float32)
self.assertAllEqual(
[[0.0, 0.0]], sess.run(ta.write(1, val).read(0), {val: [[4.0, 5.0]]}))
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.cached_session(use_gpu=True):
convert = _make_converter(tf_dtype)
ta = _make_ta(3, "foo", dtype=tf_dtype)
# Unpack a vector into scalars
w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
# Unpack a matrix into vectors
w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
# Try unpacking an empty matrix, which should not cause an error.
w2 = ta.unstack(convert([[], [], []]))
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
self._testTensorArrayUnpackRead(dtypes.float32)
self._testTensorArrayUnpackRead(dtypes.float64)
self._testTensorArrayUnpackRead(dtypes.int32)
self._testTensorArrayUnpackRead(dtypes.int64)
self._testTensorArrayUnpackRead(dtypes.complex64)
self._testTensorArrayUnpackRead(dtypes.complex128)
self._testTensorArrayUnpackRead(dtypes.string)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
with self.cached_session(use_gpu=True):
convert = _make_converter(tf_dtype)
# Split an empty vector
ta = _make_ta(3, "foo", dtype=tf_dtype)
lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([1.0, 2.0]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix
lengths = constant_op.constant([2, 0, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = self.evaluate([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0], [2.0, 201.0]]), d0)
self.assertAllEqual(convert([]).reshape(0, 2), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
@test_util.deprecated_graph_mode_only
def testTensorArraySplitRead(self):
self._testTensorArraySplitRead(dtypes.float32)
self._testTensorArraySplitRead(dtypes.float64)
self._testTensorArraySplitRead(dtypes.int32)
self._testTensorArraySplitRead(dtypes.int64)
self._testTensorArraySplitRead(dtypes.complex64)
self._testTensorArraySplitRead(dtypes.complex128)
self._testTensorArraySplitRead(dtypes.string)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
g_ta = ta.grad("grad")
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradGrad(self):
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.skipTest("Legacy TensorArray does not support double derivatives.")
with self.test_session(use_gpu=True) as session:
x = constant_op.constant(4.0)
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=1,
infer_shape=False)
w0 = ta.write(0, x)
r0 = w0.read(0)
y = r0 * r0
g1 = gradients_impl.gradients(ys=[y], xs=[x])
g2 = gradients_impl.gradients(ys=[g1], xs=[x])
self.assertAllEqual([2.0], session.run(g2))
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradArrayDynamicWriteRead(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, -3.0)
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0, 6.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, -2.0)
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(
[r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual(-3.0, d2)
self.assertAllEqual([[5.0, 6.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual(-2.0, g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorGradAccessTwiceReceiveSameObject(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle.op, g_ta_1.handle.op, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
# Test writing the wrong datatype
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = ("Invalid data types; op elements string but list elements "
"float")
else:
error_msg = (
"TensorArray dtype is (float|float32) but Op is trying to write "
"dtype string")
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(0, "wrong_type_scalar").flow)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to modify element -1 in a list with 3 elements."
else:
error_msg = "index -1"
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(-1, 3.0).flow)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to modify element 3 in a list with 3 elements"
else:
error_msg = ("Tried to write to index 3 but array is not "
"resizeable and size is: 3")
# Test reading from too large an index
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.write(3, 3.0).flow)
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
with self.session(use_gpu=True):
ta = _make_ta(3, "foo", dtype=dtypes.float32)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype (only possible when constructing graphs).
if (not context.executing_eagerly() and
not control_flow_util.ENABLE_CONTROL_FLOW_V2):
r0_bad = gen_data_flow_ops.tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtypes.float64, flow_in=w0.flow)
with self.assertRaisesOpError(
"TensorArray dtype is float but Op requested dtype double."):
self.evaluate(r0_bad)
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to access element -1 in a list with 3 elements."
else:
error_msg = "index -1"
# Test reading from a negative index, which is not allowed
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.read(-1))
if (control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly()):
error_msg = "Trying to access element 3 in a list with 3 elements."
else:
error_msg = "Tried to read from index 3 but array size is: 3"
# Test reading from too large an index
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.read(3))
@test_util.disable_control_flow_v2("v2 allows multiple writes.")
@test_util.run_v1_only("v2 allows multiple writes.")
def testSkipEagerTensorArrayWriteMultipleFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
with self.assertRaisesOpError(
"Could not write to TensorArray index 2 because "
"it has already been written to."):
self.evaluate(ta.write(2, 3.0).write(2, 3.0).flow)
def testTensorArrayConcatIncompatibleShapesFails(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, 3.0)
w2 = w1.write(1, 4.0)
w3 = w2.write(2, [3.0])
with self.assertRaisesOpError(
"Concat saw a scalar shape at index 0 but requires at least vectors"):
self.evaluate(w3.concat())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w1 = ta.write(0, [3.0])
w2 = w1.write(1, [4.0])
w3 = w2.write(2, [[3.0]])
# The exact error messages differ between eager execution and graph
# construction as the former bubbles up the error from array_op.concat.
error_msg = ("Incompatible ranks"
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not context.executing_eagerly() else "shape")
with self.assertRaisesRegexp(errors.InvalidArgumentError, error_msg):
self.evaluate(w3.concat())
def testTensorArraySplitIncompatibleShapesFails(self):
with self.session(use_gpu=True):
in_eager_mode = context.executing_eagerly()
ta = _make_ta(3, "foo")
with self.assertRaisesOpError(
r"Expected lengths to be a vector, received shape: \[\]"):
if in_eager_mode:
self.evaluate(ta.split([1.0, 2.0, 3.0], 1))
else:
lengths = array_ops.placeholder(dtypes.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
error_msg = ("Unused values in tensor. Length of tensor: 3 Values used: 1"
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and
not in_eager_mode else
r"Expected sum of lengths to be equal to values.shape\[0\], "
r"but sum of lengths is 1 and value's shape is: \[3\]")
with self.assertRaisesOpError(error_msg):
self.evaluate(ta.split([1.0, 2.0, 3.0], [1]).flow)
ta = _make_ta(1, "baz")
if control_flow_util.ENABLE_CONTROL_FLOW_V2 and not in_eager_mode:
with self.assertRaisesRegexp(
ValueError, "Shape must be at least rank 1 but is rank 0"):
self.evaluate(ta.split(1.0, [1]).flow)
else:
with self.assertRaisesOpError(
r"Expected value to be at least a vector, but received shape: \[\]"
):
self.evaluate(ta.split(1.0, [1]).flow)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2 or in_eager_mode:
ta = _make_ta(2, "buz")
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(2 vs. 1\), and the TensorArray is not marked as "
r"dynamically resizeable"):
self.evaluate(ta.split([1.0], [1]).flow)
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
ta_grad = ta.grad("grad")
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Assert that if multiple_writes_aggregate is not enabled,
# multiple writes raise an exception.
with self.assertRaisesOpError(
r"TensorArray foo_.*: Could not write to TensorArray index 2 because "
r"it has already been written to."):
w1.flow.eval()
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Could not aggregate to TensorArray index 1 because the "
r"existing shape is \[\] but the new input shape is \[1\]"):
wb1_grad.flow.eval()
@test_util.disable_control_flow_v2("v2 does not support TensorArray.grad.")
@test_util.run_v1_only("v2 does not support TensorArray.grad.")
def testSkipEagerTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in (dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128):
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeKnownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
size=3,
dtype=dtypes.float32,
element_shape=tensor_shape.TensorShape([2, 3]))
handle, flow = data_flow_ops.tensor_array_grad_with_shape(
handle=ta.handle,
flow_in=ta.flow,
shape_to_prepend=tensor_shape.TensorShape([4, 5]),
source="source")
ta_grad = tensor_array_ops.TensorArray(
dtypes.float32, handle=handle, flow=flow)
value = array_ops.placeholder(dtypes.float32)
ta_grad = ta_grad.write(0, value)
read_value = ta_grad.read(0)
# Make sure shape inference worked.
self.assertAllEqual([None, None, 2, 3], read_value.shape.as_list())
# Writing with wrong shape should not work.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Could not write to TensorArray"):
fed_value = np.random.random([2, 3])
sess.run(read_value, feed_dict={value: fed_value})
# Writing with correct shape should work.
fed_value = np.random.random([4, 5, 2, 3])
self.assertAllClose(fed_value,
sess.run(read_value, feed_dict={value: fed_value}))
@test_util.disable_control_flow_v2("Low level legacy TA op test.")
@test_util.run_v1_only("Low level legacy TA op test.")
def testSkipEagerTensorArrayGradWithShapeUnknownElementShape(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
size=3, dtype=dtypes.float32,
element_shape=None) # Note that element_shape is unknown
handle, flow = data_flow_ops.tensor_array_grad_with_shape(
handle=ta.handle,
flow_in=ta.flow,
shape_to_prepend=tensor_shape.TensorShape([4, 5]),
source="source")
ta_grad = tensor_array_ops.TensorArray(
dtypes.float32, handle=handle, flow=flow)
value = array_ops.placeholder(dtypes.float32)
ta_grad = ta_grad.write(0, value)
read_value = ta_grad.read(0)
# Make sure shape inference worked.
self.assertIsNone(read_value.shape.ndims)
# Write with some shape and check read value.
fed_value = np.random.random([4, 5, 7])
self.assertAllClose(fed_value,
sess.run(read_value, feed_dict={value: fed_value}))
def testMultiTensorArray(self):
with self.session(use_gpu=True):
h1 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
val = self.evaluate(r)
self.assertAllClose(9.0, val)
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.as_dtype(dtype),
tensor_array_name="foo",
size=3,
infer_shape=False)
c = lambda x: np.array(x, dtype=dtype)
value_0 = constant_op.constant(c([[4.0, 5.0]]))
value_1 = constant_op.constant(c(3.0))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = gradients_impl.gradients(
ys=[r0, r0_2],
xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c(-2.0)])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c(-2.0), grad_just_r1_vals[0])
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r0_2, r1],
xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c(-2.0)])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c(-2.0), grad_vals[1])
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientWriteRead(self):
for dtype in (np.float32, np.float64, np.complex64, np.complex128):
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value_0 = constant_op.constant([-1.0, 1.0])
value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.stack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat()
with ops.control_dependencies([p0, r0, s0]):
grad_r = gradients_impl.gradients(
ys=[p0, r0, s0],
xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # pack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0]
]) # concat gradient
grad_vals = self.evaluate(grad_r) # 2 + 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
@test_util.disable_control_flow_v2("v2 does not support clear_after_read.")
@test_util.run_v1_only("v2 does not support clear_after_read.")
def testTensorArrayReadTwice(self):
with self.session(use_gpu=True):
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readonce = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
w_readonce = ta_readonce.unstack(value)
r0_readonce = w_readonce.read(0)
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"):
with ops.control_dependencies([r0_readonce]):
self.evaluate(w_readonce.read(0))
ta_readtwice = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], self.evaluate(r1_readtwice))
def _testTensorArrayGradientUnpackRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0, r0_1, r1],
xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientUnpackRead(self):
self._testTensorArrayGradientUnpackRead()
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientSplitConcat(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2,
infer_shape=False)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w = ta.split(value, [2, 1])
r = w.concat()
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r],
xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0]],
grad_vals[0])
def _testTensorArrayGradientDynamicUnpackRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradientDynamicUnpackRead(self):
self._testTensorArrayGradientDynamicUnpackRead()
def testCloseTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
self.evaluate(ta.close())
def testSizeTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, self.evaluate(s))
def testWriteCloseTensorArray(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
self.evaluate(w1.close()) # Expected to run without problems
def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
np_dtype = dtype.as_numpy_dtype
with self.cached_session(use_gpu=True):
def func(v0, state0, var):
ta = tensor_array_ops.TensorArray(
dtype=dtype,
tensor_array_name="foo",
size=0 if dynamic_size else 3,
dynamic_size=dynamic_size)
time_0 = array_ops.identity(0)
def body(time, ta_t, state):
sliced = array_ops.slice(
v0, begin=array_ops.stack([time, 0]), size=[1, -1])
sliced = array_ops.squeeze(sliced)
out = sliced + var + state
state += sliced
ta_t = ta_t.write(time, out)
return (time + 1, ta_t, state)
(unused_0, h_final, unused_2) = control_flow_ops.while_loop(
cond=lambda time, unused_1, unused_2: time < 3,
body=body,
loop_vars=(time_0, ta, state0),
shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()),
parallel_iterations=3)
vout = h_final.stack()
return vout
v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
init_val = np.arange(100, 105, dtype=np_dtype)
var = variable_scope.get_variable(
"var",
shape=init_val.shape,
dtype=np_dtype,
initializer=init_ops.constant_initializer(init_val))
vout = func(v0, state0, var)
grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
if context.executing_eagerly():
grad_fn = backprop.gradients_function(func)
v0_grad, state0_grad, var_grad = grad_fn(v0, state0, var, dy=grad_val)
else:
v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
self.evaluate(variables.global_variables_initializer())
state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
self.evaluate(
([state0, var, v0, vout, v0_grad, var_grad, state0_grad])))
just_v0_grad_t = self.evaluate(v0_grad)
# state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# vout = [ v0[0] + var + state[0] |
# v0[1] + var + state[1] |
# v0[2] + var + state[2] ]
# = [ v0[0] + var + state0 |
# v0[1] + var + state0 + v0[0] |
# v0[2] + var + state0 + v0[0] + v0[1] ]
#
# d(vout[0])/d(v0) = [1 | 0 | 0 ]
# d(vout[1])/d(v0) = [1 | 1 | 0 ]
# d(vout[2])/d(v0) = [1 | 1 | 1 ]
# d(vout)/d(var) = [1 | 1 | 1]
# d(vout)/d(state0) = [ 1 | 1 | 1 ]
state_per_time = np.array(
[state0_t, state0_t + v0_t[0, :], state0_t + v0_t[0, :] + v0_t[1, :]])
# Compare forward prop
self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# Compare backward prop
expected_v0_grad_t = np.array([
grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
grad_val[1, :] + grad_val[2, :], grad_val[2, :]
])
self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
def testWhileLoopWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=False, dtype=dtypes.float32)
# TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=tf.int64)
@test_util.run_v1_only("b/117943489")
def testSkipEagerWhileLoopDynamicWritePackGradients(self):
self._testWhileLoopWritePackGradients(
dynamic_size=True, dtype=dtypes.float32)
def testGradSerialTwoLoops(self):
with self.session(use_gpu=True):
def loop(x):
num_steps = 100
acc = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
size=num_steps,
clear_after_read=False,
element_shape=tensor_shape.scalar())
i = constant_op.constant(0, name="i")
c = lambda i, acc: i < 5
def b(i, acc):
x1 = control_flow_ops.cond(
math_ops.equal(i, 0), lambda: x,
lambda: math_ops.multiply(acc.read(i - 1), 2.0))
return i + 1, acc.write(i, x1)
i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
z = constant_op.constant(0.0)
def fn(i, acc):
return i + 1, acc.write(i, z)
_, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
[i1, acc1])
r = acc2.stack()
return r
x = constant_op.constant(2.0, name="x")
if context.executing_eagerly():
grad = backprop.gradients_function(loop)(x)[0]
else:
grad = gradients_impl.gradients(loop(x), [x])[0]
self.assertAllClose(31.0, self.evaluate(grad))
@test_util.deprecated_graph_mode_only
def testSkipEagerSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.session(use_gpu=True) as session:
a = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
b = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (
ta.read(
0, name="read_a_0") + # a + b
ta.read(
1, name="read_b_0"))
g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def _grad_source_for_name(self, name):
return tensor_array_grad._GetGradSource(constant_op.constant(0, name=name))
@test_util.deprecated_graph_mode_only
def testSkipEagerGetGradSource_Invalid(self):
with self.assertRaises(ValueError):
self._grad_source_for_name("")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo")
with self.assertRaises(ValueError):
self._grad_source_for_name("foo/bar")
@test_util.deprecated_graph_mode_only
def testSkipEagerGetGradSource_NoEnclosingScope(self):
self.assertEqual("gradients:0", self._grad_source_for_name("gradients"))
self.assertEqual("gradients_0:0", self._grad_source_for_name("gradients_0"))
self.assertEqual("gradients", self._grad_source_for_name("gradients/foo"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo"))
self.assertEqual("gradients",
self._grad_source_for_name("gradients/foo/bar"))
self.assertEqual("gradients_0",
self._grad_source_for_name("gradients_0/foo/bar"))
@test_util.deprecated_graph_mode_only
def testSkipEagerGetGradSource_EnclosingScope(self):
self.assertEqual("foo/gradients:0",
self._grad_source_for_name("foo/gradients"))
self.assertEqual("foo/gradients_0:0",
self._grad_source_for_name("foo/gradients_0"))
self.assertEqual("foo/gradients",
self._grad_source_for_name("foo/gradients/bar"))
self.assertEqual("foo/gradients_0",
self._grad_source_for_name("foo/gradients_0/bar"))
self.assertEqual("foo/bar/gradients",
self._grad_source_for_name("foo/bar/gradients/baz"))
self.assertEqual("foo/bar/gradients_0",
self._grad_source_for_name("foo/bar/gradients_0/baz"))
@test_util.deprecated_graph_mode_only
def testSkipEagerGetGradSource_NestedUsesInnermost(self):
self.assertEqual(
"foo/gradients/bar/gradients_0",
self._grad_source_for_name("foo/gradients/bar/gradients_0/baz"))
@test_util.deprecated_graph_mode_only
def testSkipEagerWriteShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
@test_util.deprecated_graph_mode_only
def testSkipEagerPartlyUnknownShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=6)
c0 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual([None, None, None, 3], r0.get_shape().as_list())
c1 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w1 = w0.write(1, c1)
r1 = w1.read(0)
self.assertAllEqual([None, None, None, 3], r1.get_shape().as_list())
# Writing less specific shape (doesn't change type.)
c2 = array_ops.placeholder(dtypes.float32, [None, None, None, None])
w2 = w1.write(2, c2)
r2 = w2.read(0)
self.assertAllEqual([None, None, None, 3], r2.get_shape().as_list())
# Writing more specific shape in one dimension and less specific in
# another.
c3 = array_ops.placeholder(dtypes.float32, [None, None, 2, None])
w3 = w2.write(3, c3)
r3 = w3.read(0)
self.assertAllEqual([None, None, 2, 3], r3.get_shape().as_list())
# Writing partly defined shape using TensorArray.scatter.
c4 = array_ops.placeholder(dtypes.float32, [2, None, 4, 2, 3])
w4 = w3.scatter([4, 5], c4)
r4 = w4.read(0)
self.assertAllEqual([None, 4, 2, 3], r4.get_shape().as_list())
# Writing fully defined shape using TensorArray.split.
c5 = array_ops.placeholder(dtypes.float32, [10, 4, 2, 3])
w5 = w4.split(c5, constant_op.constant([5, 5]))
r5 = w5.read(0)
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
def _testUnpackShape(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unstack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = constant_op.constant([4.0, 5.0])
w1 = w0.write(3, c1)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
# TensorArray v2 does not support clear_after_read.
with self.assertRaisesOpError(
r"Could not read index 0 twice because it was cleared after a "
r"previous read \(perhaps try setting clear_after_read = false\?\)"
):
with ops.control_dependencies([r0]):
self.evaluate(w1.read(0))
r1 = w1.read(1)
self.assertAllEqual(c1.get_shape(), r1.shape)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
@test_util.run_v1_only("b/117943489")
def testUnpackShape(self):
self._testUnpackShape()
@test_util.deprecated_graph_mode_only
def testSplitShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True,
infer_shape=True)
value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo1",
size=0,
dynamic_size=True,
infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
if context.executing_eagerly():
self.assertEqual((1, 2), r0.get_shape())
self.assertEqual((2, 2), w0.read(1).get_shape())
else:
self.assertEqual(r0.get_shape().ndims, None)
if not control_flow_util.ENABLE_CONTROL_FLOW_V2:
self.assertEqual(
tensor_shape.TensorShape(
ta1.handle.op.get_attr("element_shape")).ndims, None)
@test_util.deprecated_graph_mode_only
def testSkipEagerWriteUnknownShape(self):
with self.session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=True)
c0 = array_ops.placeholder(dtypes.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def _testGradientWhenNotAllComponentsRead(self):
with self.cached_session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
# calculate (dr0/dx0, dr0/dx1). since r0 = x0, gradients are (1, 0).
grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
@test_util.deprecated_graph_mode_only
def testSkipEagerGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
@test_util.deprecated_graph_mode_only
def testSkipEagerWriteButNotAllComponentsReadGrad(self):
with self.cached_session(use_gpu=True) as session:
x0 = constant_op.constant(5.0)
x1 = constant_op.constant(10.0)
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2).write(0, x0).write(1, x1)
r0 = ta.read(0)
# calculate (dr0/dx0, dr0/dx1). since r0 = x0, gradients are (1, 0).
grad_r0_x1 = gradients_impl.gradients(ys=[r0], xs=[x0, x1], grad_ys=[1.0])
grad_r0_x1_vals = session.run(grad_r0_x1)
self.assertAllEqual(grad_r0_x1_vals, [1.0, 0.0])
def _testTensorArrayUnpackDynamic(self):
with self.cached_session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.unstack(x)
w1 = w0.write(3, 4.0)
r = w1.stack()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayUnpackDynamic(self):
self._testTensorArrayUnpackDynamic()
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArraySplitDynamic(self):
with self.session(use_gpu=True) as sess:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=3, dynamic_size=True)
x = constant_op.constant([1.0, 2.0, 3.0])
w0 = ta.split(x, [1, 1, 1])
w1 = w0.write(3, [4.0])
r = w1.concat()
self.assertAllEqual(np.array([1.0, 2.0, 3.0, 4.0]), self.evaluate(r))
grad = gradients_impl.gradients(ys=[r], xs=[x])
self.assertAllEqual(np.array([1.0, 1.0, 1.0]), self.evaluate(grad)[0])
def _testTensorArrayEvalEmpty(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=False)
v2_msg = ("Tried to stack elements of an empty list with "
"non-fully-defined element_shape")
v1_msg = (
"TensorArray has size zero, but element shape <unknown> is not "
"fully defined. Currently only static shapes are supported when "
"packing zero-size TensorArrays.")
with self.assertRaisesOpError(
v2_msg if control_flow_util.ENABLE_CONTROL_FLOW_V2 else v1_msg):
ta.stack().eval()
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
# this test is ill-defined for Eager mode --- unpacking an empty tensor
# gives an empty list / there is not equivalent of "mark_used" in Eager
def _testTensorArrayEvalEmptyWithDefault(self):
with self.cached_session(use_gpu=True):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, dynamic_size=False, infer_shape=True)
self.assertEqual(0, ta.size().eval())
# Don't actually perform the pack. This stores the static shape.
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
else:
ta.unstack(array_ops.zeros([0, 3, 5])).mark_used()
packed = ta.stack()
concatenated = ta.concat()
self.assertAllEqual([0, 3, 5], self.evaluate(packed).shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
self.assertAllEqual([0, 5], self.evaluate(concatenated).shape)
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayScatterReadAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
r1 = w.read(8)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
read_vals, grad_vals = session.run([[r0, r1], grad])
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
@test_util.run_v1_only("b/117943489")
def testSkipEagerTensorArrayScatterPartialReadAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
# Test combined gradients + aggregation of read(0)
grad = gradients_impl.gradients(
ys=[r0], xs=[value], grad_ys=[[2.0, 3.0]])[0]
read_val, grad_val = session.run([r0, grad])
self.assertAllEqual([1.0, -1.0], read_val)
self.assertAllEqual([[2.0, 3.0], [0.0, 0.0]], grad_val)
def testScatterIntoExistingList(self):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=5)
ta = ta.scatter(indices=[3, 4], value=array_ops.ones([2]))
self.assertAllEqual(ta.stack(), [0., 0., 0., 1., 1.])
ta = ta.scatter(indices=[1], value=array_ops.ones([1]))
self.assertAllEqual(ta.stack(), [0., 1., 0., 1., 1.])
ta = ta.scatter(indices=[0, 2], value=[5., 6.])
self.assertAllEqual(ta.stack(), [5., 1., 6., 1., 1.])
@test_util.run_v1_only("b/118890905")
def testTensorArrayWriteGatherAndGradients(self):
with self.session(use_gpu=True) as session:
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
dynamic_size=True)
def func(values):
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
return g
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
g = func(values)
grad_ys = [[[2.0, 3.0], [4.0, 5.0]]]
# Test combined gradients + aggregation of read(0)
if context.executing_eagerly():
g_vals = [g]
grad_vals = backprop.gradients_function(func)(
values, dy=constant_op.constant(grad_ys[0], dtype=dtypes.float32))
else:
grad = gradients_impl.gradients(ys=[g], xs=[values], grad_ys=grad_ys)
g_vals, grad_vals = session.run([[g], grad])
# Gradients for 8 of the 10 unread components are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWrite(self):
with ops.device("/job:worker/task:0/cpu:0"):
# this initial device will be ignored.
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
with ops.device("/job:worker/task:1/cpu:0"):
# the first write sets the op's device.
ta = ta.write(0, 1.0)
with ops.device("/job:worker/task:2/cpu:0"):
# subsequent writes do not modify the op's device.
ta = ta.write(1, 1.0)
# The gradient TA will sit on the same device as the forward TA.
ta_grad = ta.grad("grad")
flows = [ta.flow, ta_grad.flow]
# Similar tests for unpack and split
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=3)
with ops.device("/job:worker/task:1/cpu:0"):
ta = ta.unstack([1.0, 2.0])
with ops.device("/job:worker/task:2/cpu:0"):
ta = ta.write(2, 3.0)
flows.append(ta.flow)
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
with ops.device("/job:worker/task:1/cpu:0"):
ta = ta.split([1.0, 2.0], [1, 1])
flows.append(ta.flow)
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(flows, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: d.node_stats
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:1/" in d:
self.assertTrue(
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
elif "/host:CPU" not in d:
self.assertFalse(
[s for s in dev_stats[d] if "/TensorArray" in s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayGetsDeviceFromFirstWriteInWhileLoop(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
def _body(i, ta_i):
with ops.device("/job:worker/task:1/cpu:0"):
return i + 1, ta_i.write(i, constant_op.constant(0.0))
_, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: d.node_stats
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:1/" in d:
self.assertTrue(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
else:
self.assertFalse(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
@test_util.disable_control_flow_v2("colocate_with not supported in v2.")
@test_util.run_v1_only("b/120545219")
def testSkipEagerTensorArrayDisabledColocateWithFirstWriteCall(self):
with ops.device("/job:worker/task:0/cpu:0"):
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=2, colocate_with_first_write_call=False)
def _body(i, ta_i):
with ops.device("/job:worker/task:1/cpu:0"):
return i + 1, ta_i.write(i, constant_op.constant(0.0))
_, ta_out = control_flow_ops.while_loop(
lambda i, ta: i < 2, _body, loop_vars=[0, ta])
session = session_lib.Session(self._workers[0].target)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
session.run(ta_out.flow, options=run_options, run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField("step_stats"))
dev_stats = {d.device: list(d.node_stats)
for d in run_metadata.step_stats.dev_stats}
for d in dev_stats:
if "/task:0/" in d and "CPU" in d: # Skip any GPU node stats
self.assertTrue(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
else:
self.assertFalse(
[s for s in dev_stats[d] if "TensorArray" == s.node_name])
def testTensorArrayIdentity(self):
with self.session(use_gpu=True):
ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
infer_shape=False)
ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
v0 = variable_scope.get_variable(
"v0", shape=(), initializer=init_ops.zeros_initializer())
v1 = variable_scope.get_variable(
"v1", shape=(), initializer=init_ops.zeros_initializer())
with ops.control_dependencies([v0.assign_add(1)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, ta0.dtype)
self.assertEqual(dtypes.int32, ta1.dtype)
if context.executing_eagerly():
self.assertEqual(tensor_shape.scalar(), read0.get_shape())
else:
self.assertEqual(tensor_shape.unknown_shape(), read0.get_shape())
self.assertEqual(tensor_shape.scalar(), read1.get_shape())
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
read0_v, read1_v, size0_v, size1_v = self.evaluate((read0, read1, size0,
size1))
# Tests that the control dependencies was added and executed.
self.assertEqual(1, self.evaluate(v0))
self.assertEqual(1, self.evaluate(v1))
# Tests correct TensorArray.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayGradYsInCorrectScope(self):
n_time = 1
n_dim = 1
x = constant_op.constant([[1.42]])
dy = constant_op.constant([[2.42]])
ta = tensor_array_ops.TensorArray(
dtypes.float32, size=n_time, element_shape=[n_dim])
for t in range(n_time):
ta = ta.write(index=t, value=x[t])
y = ta.stack()
# dy is outside of the gradients name scope; tf.gradients must
# wrap it in the correct name scope.
dx, = gradients_impl.gradients(ys=[y], xs=[x], grad_ys=[dy])
with self.cached_session(use_gpu=True) as sess:
vdx, vdy = self.evaluate([dx, dy])
self.assertAllClose(vdx, vdy)
@test_util.deprecated_graph_mode_only
def testSkipEagerTensorArrayInt64GPU(self):
if not test.is_gpu_available():
return
with self.session(use_gpu=True, force_gpu=True) as sess:
value = array_ops.placeholder(dtypes.int64)
ta = tensor_array_ops.TensorArray(dtype=dtypes.int64, size=2)
ta = ta.scatter([0, 1], value)
r0 = ta.read(0)
r1 = ta.read(1)
v0, v1 = sess.run([r0, r1], feed_dict={value: [-3, 100]})
self.assertAllEqual(v0, -3)
self.assertAllEqual(v1, 100)
if __name__ == "__main__":
test.main()
| {
"content_hash": "fc0ca2a90ea41994e63691d570d50a30",
"timestamp": "",
"source": "github",
"line_count": 1669,
"max_line_length": 80,
"avg_line_length": 38.254643499101256,
"alnum_prop": 0.6159412345137595,
"repo_name": "ageron/tensorflow",
"id": "4d7ae4f24ea04fe840d7f8e6f13e8d9ec95fffea",
"size": "64536",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/tensor_array_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
from demoapp.models import Somemodel
from django.contrib import admin
class SomemodelAdmin(admin.ModelAdmin):
list_display = ('title',)
def save_model(self, request, obj, form, change):
obj.current_user = request.user
super(SomemodelAdmin, self).save_model(request, obj, form, change)
admin.site.register(Somemodel, SomemodelAdmin)
| {
"content_hash": "7caeb21e6acdd3a0aaf8ff22c732d53e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 74,
"avg_line_length": 32.07692307692308,
"alnum_prop": 0.7386091127098321,
"repo_name": "sthzg/django-chatterbox",
"id": "0c41dd329df4db22db444d3d2bce452dbe49e9e3",
"size": "441",
"binary": false,
"copies": "1",
"ref": "refs/heads/active_development",
"path": "django/djangoapp/demoapp/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "702"
},
{
"name": "Python",
"bytes": "57339"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'CategoryTranslation.slug'
db.add_column(u'aldryn_faq_category_translation', 'slug',
self.gf('django.db.models.fields.SlugField')(default='', max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'CategoryTranslation.slug'
db.delete_column(u'aldryn_faq_category_translation', 'slug')
models = {
u'aldryn_faq.category': {
'Meta': {'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'aldryn_faq.categorytranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'CategoryTranslation', 'db_table': "u'aldryn_faq_category_translation'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_faq.Category']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
u'aldryn_faq.latestquestionplugin': {
'Meta': {'object_name': 'LatestQuestionPlugin', 'db_table': "u'cmsplugin_latestquestionplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'latest_questions': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'aldryn_faq.question': {
'Meta': {'ordering': "['order']", 'object_name': 'Question'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'faq_questions'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'category': ('adminsortable.fields.SortableForeignKey', [], {'related_name': "'questions'", 'to': u"orm['aldryn_faq.Category']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['aldryn_faq'] | {
"content_hash": "7d9d532f05c8c9851b76211c1130fbd0",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 162,
"avg_line_length": 65.21126760563381,
"alnum_prop": 0.5691144708423326,
"repo_name": "mkoistinen/aldryn-faq",
"id": "383351dcc4eae62c202a32a46026cd72859e63e7",
"size": "4654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aldryn_faq/migrations/0003_auto__add_field_categorytranslation_slug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "128567"
}
],
"symlink_target": ""
} |
import re, inspect
from revkitui import RevLibEditor
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from core.BaseItemButton import *
from core.BaseItemButtonMaximize import *
from core.BaseItemButtonClose import *
from core.PathSelector import *
from core.PortItem import *
from core.PropertyWidgetManagement import PropertyWidgetManager, toCamelCase
from core.SettingsDialog import *
def item( description, iconname = "unknown", requires = None, provides = None, properties = [], widget = None ):
def f( cls ):
def wrap( *args, **kwargs ):
cls.description = description
cls.iconname = iconname
self = cls( *args, **kwargs )
# Ports
if requires is not None:
self.requires( [ requires ] if isinstance( requires, str ) else requires )
if provides is not None:
self.provides( [ provides ] if isinstance( provides, str ) else provides )
# Actions
self.actions = [ [ m.icon, m.label, m ] for (_,m) in inspect.getmembers( self ) if hasattr( m, 'is_action' ) ]
self.actions.sort( key = lambda x: x[2].order )
# Properties
self.manager = PropertyWidgetManager( self )
for p in properties:
setattr( self, p, QString() )
if not hasattr( self, "set%s" % toCamelCase( p ) ):
setattr( self, "set%s" % toCamelCase( p ), self.manager.createSetter( p ) )
self.properties.append( p )
# Widget
if widget is not None:
# Tuple or class
if isinstance( widget, dict ):
self._widget_class = widget['class']
self.custom_size = QSize( widget['size'][0], widget['size'][1] )
else:
self._widget_class = widget
return self
return wrap
return f
def action( label, icon = None ):
def wrap( f ):
f.order = int( re.search( 'line (\d+)', str( f.__code__ ) ).group( 1 ) )
f.is_action = True
f.label = label
f.icon = icon
return f
return wrap
class BaseItem( QGraphicsObject ):
description = "BaseItem"
iconname = "unknown"
whatsthis = None
actions = []
has_widget = False
custom_size = None
requestTabWidget = pyqtSignal( QWidget, QString, QString )
portAdded = pyqtSignal( PortItem )
portRemoved = pyqtSignal( PortItem )
UNCONFIGURED, CONFIGURED, RUNNING, DONE, ERROR = range( 5 )
def __init__( self, parent = None ):
QGraphicsObject.__init__( self, parent )
self.setAcceptHoverEvents( True )
self.setFlags( QGraphicsItem.ItemIsMovable )
self.setCacheMode( QGraphicsItem.DeviceCoordinateCache )
# properties
self._color = Settings()["color_unconfigured"]
self._text = QString()
self._state = self.UNCONFIGURED
self._height = 40
self._width = 200
self.properties = []
# effect
#effect = QGraphicsDropShadowEffect();
#effect.setOffset( 2, 2 )
#effect.setBlurRadius( 10 )
#self.setGraphicsEffect( effect )
# for the graph and actions
self._requires = []
self._provides = []
self.requiresPorts = []
self.providesPorts = []
self.requiresMapping = dict()
# Buttons
self.close_button = BaseItemButtonClose( self )
self.maximize_button = BaseItemButtonMaximize( self )
self.maximize_button.maximized.connect( self.onMaximized )
self.maximize_button.minimized.connect( self.onMinimized )
self.buttons = [ self.maximize_button, self.close_button ]
# default
#self.setScale( 0.0 )
# Ports
def addPorts( self, l, direction = Qt.AlignTop ):
ports = []
factor = -1 if direction == Qt.AlignTop else 1
y = factor * ( self._height / 2 )
for i, name in enumerate( l ):
port = PortItem( name, direction, self )
port.setPos( ( ( i + 1.0 ) / ( len( l ) + 1 ) ) * 200 - 100, y )
ports.append( port )
self.portAdded.emit( port )
return ports
def addPort( self, name, direction = Qt.AlignTop ):
l = self.requiresPorts if direction == Qt.AlignTop else self.providesPorts
y = 0
if self.maximize_button.isMaximized():
y = -20 if direction == Qt.AlignTop else self._height - 20
else:
factor = -1 if direction == Qt.AlignTop else 1
y = factor * ( self._height / 2 )
# Move other
for i, port in enumerate( l ):
port.setPos( ( ( i + 1.0 ) / ( len( l ) + 2 ) ) * 200 - 100, y )
new_port = PortItem( name, direction, self )
new_port.setPos( ( len( l ) + 1.0 ) / ( len( l ) + 2 ) * 200 - 100, y )
l.append( new_port )
self.portAdded.emit( new_port )
return new_port
def addRequires( self, name ):
port = self.addPort( name )
port.valueChanged.connect( self.requiresValueChanged )
return port
def addProvides( self, name ):
return self.addPort( name, Qt.AlignBottom )
def removePort( self, index, direction = Qt.AlignTop ):
l = self.requiresPorts if direction == Qt.AlignTop else self.providesPorts
if index not in range( len( l ) ): return
port = l[index]
# remove edges
edges = self.scene().graph.inEdges( port ) if direction == Qt.AlignTop else self.scene().graph.outEdges( port )
for e in edges:
self.scene().graph.deleteEdge( e[2] )
# remove port
self.portRemoved.emit( port )
l.remove( port )
# FIXME remove instead of making invisible
#self.scene().removeItem( port )
port.setVisible( False )
# Update Positions
for i, p in enumerate( l ):
p.setPos( ( ( i + 1.0 ) / ( len( l ) + 1 ) ) * 200 - 100, p.pos().y() )
def removeRequires( self, index ):
self.removePort( index )
def removeProvides( self, index ):
self.removePort( index, Qt.AlignBottom )
def requires( self, l ):
self._requires = l
self.requiresPorts = self.addPorts( l )
# prepare for execution
for port in self.requiresPorts:
port.valueChanged.connect( self.requiresValueChanged )
def provides( self, l ):
self._provides = l
self.providesPorts = self.addPorts( l, Qt.AlignBottom )
def requiresValueChanged( self ):
self.requiresMapping[self.sender()] = self.sender().value()
if len( self.requiresMapping ) == len( self.requiresPorts ):
# Execute
inputs = []
for port in self.requiresPorts:
inputs.append( self.requiresMapping[port] )
port.setValue( None )
self.execute( inputs )
self.requiresMapping.clear()
def boundingRect( self ):
return QRectF( -self._width / 2, -20, self._width, self._height )
def paint( self, painter, option, widget = None ):
# rectangle back ground
painter.setBrush( QBrush( self.createButtonGradient( option.rect.height(), self._color ) ) )
painter.setPen( self._color.darker() )
painter.drawRoundedRect( option.rect, 7, 7 )
# text
tr = QRect( option.rect )
tr.setHeight( 40 )
tr2 = QRect( tr )
tr2.translate( 1, 1 )
painter.setFont( QFont( "Helvetica", 8, QFont.Bold ) )
painter.setPen( QColor( "#303030" ) )
painter.drawText( tr2, Qt.AlignCenter, self._text )
painter.setPen( QColor( "#ffffff" ) )
painter.drawText( tr, Qt.AlignCenter, self._text )
def getText( self ):
return self._text
def setText( self, value ):
self._text = value
self.update()
text = QtCore.pyqtProperty( "QString", getText, setText )
def getColor( self ):
return self._color
def setColor( self, value ):
self._color = value
self.update()
color = QtCore.pyqtProperty( "QColor", getColor, setColor )
def getWidth( self ):
return self._width
def setWidth( self, value ):
self._width = value
self.update()
width = QtCore.pyqtProperty( "int", getWidth, setWidth )
def getHeight( self ):
return self._height
def setHeight( self, value ):
self._height = value
self.update()
height = QtCore.pyqtProperty( "int", getHeight, setHeight )
def getState( self ):
return self._state
def setState( self, value ):
self._state = value
new_color = None
if self._state == self.UNCONFIGURED:
new_color = Settings()["color_unconfigured"]
elif self._state == self.CONFIGURED:
new_color = Settings()["color_configured"]
elif self._state == self.RUNNING:
new_color = Settings()["color_processing"]
elif self._state == self.DONE:
new_color = Settings()["color_finished"]
elif self._state == self.ERROR:
new_color = Settings()["color_error"]
else:
assert( False )
self.setColor( new_color )
QApplication.processEvents()
# Operation
def create( self ):
if hasattr( self, '_widget_class' ):
self.widget = self.mainWidget( self._widget_class() )
self.widget.ownerItem = self
if hasattr( self.widget, "onCreate" ):
self.widget.onCreate()
# Connect UI with properties
for p in self.properties:
if hasattr( self, "widget" ) and hasattr( self.widget, p ):
widget = getattr( self.widget, p )
self.manager.updateValue( p ) # Initial Value
key = type( widget )
if PropertyWidgetManager.type_mapping.has_key( key ) and PropertyWidgetManager.type_mapping[key][2] is not None:
signal = PropertyWidgetManager.type_mapping[key][2]
if isinstance( signal, str ):
self.connect( widget, SIGNAL( signal ), self.manager.widgetValueChanged )
else: #pyqtSignal
signal.__get__( widget, widget.__class__ ).connect( self.manager.widgetValueChanged )
self.onCreate()
def onCreate( self ):
pass
def execute( self, inputs = [] ):
self.setState( self.RUNNING )
outputs = self.executeEvent( inputs )
# Error Handling
if isinstance( outputs, str ):
self.setState( self.ERROR )
self.scene().notifyError( self, outputs )
return
# Set data to the ports
for i, o in enumerate( outputs ):
self.providesPorts[i].setValue( o )
self.setState( self.DONE )
return outputs
def executeEvent( self, inputs ):
return []
def initialize( self ): # Called in the beginning before execution
pass
# TODO when and how to call?
def finalize( self ): # Called in the end after execution of all runs
pass
def numRuns( self ): # Number of runs
return 1
# Override
def contextMenuEvent( self, event ):
if len( self.actions ) > 0:
menu = QMenu()
for action in self.actions:
if len( action ) == 2:
menu.addAction( action[0], action[1] )
else:
menu.addAction( QIcon.fromTheme( action[0] ), action[1], action[2] )
menu.exec_( event.screenPos() )
def mouseDoubleClickEvent( self, event ):
"""If there is are actions, execute the first one by default on double click"""
if len( self.actions ) > 0:
self.actions[0][-1]()
def hoverEnterEvent( self, event ):
if self.maximize_button.isMaximized():
return
if self.has_widget:
self.maximize_button.show()
self.close_button.show()
QGraphicsObject.hoverEnterEvent( self, event )
def hoverLeaveEvent( self, event ):
if self.maximize_button.isMaximized():
return
# Prevent items from closing if mouse leaves over them
item = self.scene().itemAt( event.scenePos() )
if item is not None:
item = item.toGraphicsObject()
if item in self.buttons:
QGraphicsObject.hoverLeaveEvent( self, event )
return
if self.has_widget:
self.maximize_button.hide()
self.close_button.hide()
QGraphicsObject.hoverLeaveEvent( self, event )
def onMaximized( self ):
if self.has_widget:
self.maximize_button.show()
self.close_button.hide()
def onMinimized( self ):
if self.has_widget:
self.maximize_button.hide()
self.close_button.hide()
# Main Widget
def mainWidget( self, otherWidget = None ):
self.has_widget = True
return self.maximize_button.widget( otherWidget )
# Load/Save
def load( self, properties ):
for prop in [ properties.childNodes().at( i ).toElement() for i in range( properties.childNodes().length() ) ]:
name = str( prop.attributeNode( "name" ).value() )
value = prop.attributeNode( "value" ).value()
# TODO
#self.setProperty( name, value )
t = self.metaObject().property( self.metaObject().indexOfProperty( "conf_%s" % name ) ).typeName()
if t == "bool":
eval( "self.set%s( value == 'true' )" % toCamelCase( name ) )
else:
eval( "self.set%s( value )" % toCamelCase( name ) )
def save( self, elem ):
elem.setAttribute( "type", self.__class__.__name__ )
pos = elem.ownerDocument().createElement( "pos" )
pos.setAttribute( "x", str( self.scenePos().x() ) )
pos.setAttribute( "y", str( self.scenePos().y() ) )
elem.appendChild( pos )
# Properties
properties = elem.ownerDocument().createElement( "properties" )
for i in range( self.metaObject().propertyCount() ):
name = str( self.metaObject().property( i ).name() )
if name.startswith( "conf_" ):
prop = elem.ownerDocument().createElement( "property" )
prop.setAttribute( "name", name[5:] )
prop.setAttribute( "value", self.metaObject().property( i ).read( self ).toString() )
properties.appendChild( prop )
# New properties
for name in self.properties:
prop = elem.ownerDocument().createElement( "property" )
if hasattr( self, "get%s" % toCamelCase( name ) ):
value = getattr( self, "get%s" % toCamelCase( name ) )()
else:
value = getattr( self, name )
prop.setAttribute( "name", name )
prop.setAttribute( "value", value )
properties.appendChild( prop )
elem.appendChild( properties )
return elem
#private
def createButtonGradient( self, height, color ):
grad = QLinearGradient( QPointF( 0, -height / 2 ), QPointF( 0, height / 2 ) )
grad.setColorAt( 0, color.lighter( 120 ) )
grad.setColorAt( 0.4, color.darker( 120 ) )
grad.setColorAt( 1, color )
return grad
| {
"content_hash": "3e6d10995eb1974b7c4d3c1db709678d",
"timestamp": "",
"source": "github",
"line_count": 463,
"max_line_length": 132,
"avg_line_length": 33.734341252699785,
"alnum_prop": 0.564632819002497,
"repo_name": "ajavadia/ScaffCC",
"id": "3dfe510b8aaf157156b5b74530525ce734da90fc",
"size": "16405",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rkqc/tools/gui/core/BaseItem.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "3493637"
},
{
"name": "Batchfile",
"bytes": "753"
},
{
"name": "C",
"bytes": "20644720"
},
{
"name": "C++",
"bytes": "54622765"
},
{
"name": "CMake",
"bytes": "198301"
},
{
"name": "CSS",
"bytes": "48884"
},
{
"name": "Cuda",
"bytes": "5785"
},
{
"name": "Emacs Lisp",
"bytes": "20994"
},
{
"name": "Groff",
"bytes": "18799"
},
{
"name": "HTML",
"bytes": "3199845"
},
{
"name": "JavaScript",
"bytes": "17391"
},
{
"name": "LLVM",
"bytes": "10223782"
},
{
"name": "M",
"bytes": "219"
},
{
"name": "M4",
"bytes": "189436"
},
{
"name": "Makefile",
"bytes": "414982"
},
{
"name": "Matlab",
"bytes": "21976"
},
{
"name": "Mercury",
"bytes": "1195"
},
{
"name": "OCaml",
"bytes": "340220"
},
{
"name": "Objective-C",
"bytes": "1625435"
},
{
"name": "Objective-C++",
"bytes": "319231"
},
{
"name": "Perl",
"bytes": "184639"
},
{
"name": "Python",
"bytes": "669722"
},
{
"name": "Shell",
"bytes": "558812"
},
{
"name": "SourcePawn",
"bytes": "1128"
},
{
"name": "Standard ML",
"bytes": "2841"
},
{
"name": "VimL",
"bytes": "12476"
}
],
"symlink_target": ""
} |
import numpy.testing as npt
import AFQ.data.fetch as afd
from AFQ.utils.conversion import matlab_tractography, matlab_mori_groups
import os.path as op
DATA_PATH = op.join(op.abspath(op.dirname(__file__)), "../../tests/data")
def test_matlab_tractography():
sft = matlab_tractography(
op.join(DATA_PATH, "WholeBrainFG_test.mat"),
afd.read_mni_template())
npt.assert_equal(len(sft.streamlines), 2)
def test_matlab_mori_groups():
fiber_groups = matlab_mori_groups(
op.join(DATA_PATH, "MoriGroups_Test.mat"),
afd.read_mni_template())
npt.assert_equal(len(fiber_groups.keys()), 20)
npt.assert_equal(len(fiber_groups['CST_R'].streamlines), 2)
| {
"content_hash": "20abdfe3363590ef978954f78a0f2717",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 73,
"avg_line_length": 30.26086956521739,
"alnum_prop": 0.6824712643678161,
"repo_name": "yeatmanlab/pyAFQ",
"id": "3c9b8c58f4f45ee73c761e4fd83c99326ee139b3",
"size": "696",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "AFQ/utils/tests/test_conversions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "374"
},
{
"name": "Python",
"bytes": "640881"
},
{
"name": "Shell",
"bytes": "1831"
}
],
"symlink_target": ""
} |
from sqlalchemy.ext import declarative
from sqlalchemy import orm
class QuantumBase(object):
"""Base class for Quantum Models."""
def __setitem__(self, key, value):
setattr(self, key, value)
def __getitem__(self, key):
return getattr(self, key)
def get(self, key, default=None):
return getattr(self, key, default)
def __iter__(self):
self._i = iter(orm.object_mapper(self).columns)
return self
def next(self):
n = self._i.next().name
return n, getattr(self, n)
def update(self, values):
"""Make the model object behave like a dict"""
for k, v in values.iteritems():
setattr(self, k, v)
def iteritems(self):
"""Make the model object behave like a dict.
Includes attributes from joins."""
local = dict(self)
joined = dict([(k, v) for k, v in self.__dict__.iteritems()
if not k[0] == '_'])
local.update(joined)
return local.iteritems()
class QuantumBaseV2(QuantumBase):
@declarative.declared_attr
def __tablename__(cls):
# NOTE(jkoelker) use the pluralized name of the class as the table
return cls.__name__.lower() + 's'
BASEV2 = declarative.declarative_base(cls=QuantumBaseV2)
| {
"content_hash": "1feeb397f0d3ec18e5b558f37c3e04af",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 74,
"avg_line_length": 27.104166666666668,
"alnum_prop": 0.595695618754804,
"repo_name": "ruijie/quantum",
"id": "e3ffa682c7f42b1d14096c1961bdf2fd85a8d16f",
"size": "1886",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "quantum/db/model_base.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "71602"
},
{
"name": "Perl",
"bytes": "36750"
},
{
"name": "Python",
"bytes": "2684560"
},
{
"name": "Racket",
"bytes": "143"
},
{
"name": "Shell",
"bytes": "8432"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FlatValue.published'
db.add_column(u'profiles_flatvalue', 'published', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'FlatValue.published'
db.delete_column(u'profiles_flatvalue', 'published')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 2, 12, 15, 0, 21, 606618)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 2, 12, 15, 0, 21, 606228)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maps.shapefile': {
'Meta': {'object_name': 'ShapeFile'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geo_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geo_meta_key_column': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'geom_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label_column': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'shape_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'zoom_threshold': ('django.db.models.fields.IntegerField', [], {'default': '5'})
},
u'profiles.customvalue': {
'Meta': {'object_name': 'CustomValue'},
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_value': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'supress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_operator': ('django.db.models.fields.CharField', [], {'max_length': "'255'"})
},
u'profiles.datadomain': {
'Meta': {'ordering': "['weight']", 'object_name': 'DataDomain'},
'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Group']", 'through': u"orm['profiles.DataDomainIndex']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'subdomain_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subdomains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'symmetrical': 'False', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
u'profiles.datadomainindex': {
'Meta': {'ordering': "['order']", 'object_name': 'DataDomainIndex'},
'dataDomain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.datapoint': {
'Meta': {'unique_together': "(('indicator', 'record', 'time'),)", 'object_name': 'DataPoint'},
'change_from_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_from'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
'change_to_time': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'datapoint_as_change_to'", 'null': 'True', 'to': u"orm['profiles.Time']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']", 'null': 'True'})
},
u'profiles.datasource': {
'Meta': {'object_name': 'DataSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'implementation': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'profiles.denominator': {
'Meta': {'object_name': 'Denominator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'multiplier': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'sort': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'table_label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'profiles.denominatorpart': {
'Meta': {'object_name': 'DenominatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'part': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.IndicatorPart']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'profiles.flatvalue': {
'Meta': {'object_name': 'FlatValue'},
'display_title': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'f_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_number': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_numerator': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_numerator_moe': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'f_percent': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'null': 'True', 'blank': 'True'}),
'geography': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
'geography_geo_key': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': "'255'", 'db_index': 'True'}),
'geography_name': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'geography_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'geometry_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'indicator_slug': ('django.db.models.fields.CharField', [], {'max_length': "'255'", 'db_index': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'numerator': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'numerator_moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'time_key': ('django.db.models.fields.CharField', [], {'max_length': "'255'"}),
'value_type': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'profiles.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'data_sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataSource']", 'symmetrical': 'False', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'shapefile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maps.ShapeFile']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200', 'db_index': 'True'}),
'summary_level': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'profiles.georecord': {
'Meta': {'unique_together': "(('slug', 'level'), ('level', 'geo_id', 'custom_name', 'owner'))", 'object_name': 'GeoRecord'},
'components': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'components_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'custom_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'geo_id': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_id_segments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'geo_searchable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoLevel']"}),
'mappings': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mappings_rel_+'", 'blank': 'True', 'to': u"orm['profiles.GeoRecord']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'})
},
u'profiles.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.Indicator']", 'through': u"orm['profiles.GroupIndex']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.groupindex': {
'Meta': {'ordering': "['order']", 'object_name': 'GroupIndex'},
'groups': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicators': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': u"orm['profiles.Indicator']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
u'profiles.indicator': {
'Meta': {'ordering': "['display_name', 'name']", 'object_name': 'Indicator'},
'data_domains': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.DataDomain']", 'through': u"orm['profiles.IndicatorDomain']", 'symmetrical': 'False'}),
'data_type': ('django.db.models.fields.CharField', [], {'default': "'COUNT'", 'max_length': '30'}),
'display_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_distribution': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'display_percent': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_tasks': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'ind_tasks'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['profiles.IndicatorTask']"}),
'last_generated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['profiles.GeoLevel']", 'symmetrical': 'False'}),
'limitations': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'long_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'purpose': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'routine_use': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'short_definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'default': "'U.S. Census Bureau'", 'max_length': '300', 'blank': 'True'}),
'universe': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'})
},
u'profiles.indicatordomain': {
'Meta': {'object_name': 'IndicatorDomain'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataDomain']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.indicatorpart': {
'Meta': {'object_name': 'IndicatorPart'},
'data': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'formula': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"}),
'levels': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['profiles.GeoLevel']", 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'time': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Time']"})
},
u'profiles.indicatortask': {
'Meta': {'object_name': 'IndicatorTask'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']", 'null': 'True', 'blank': 'True'}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'profiles.legendoption': {
'Meta': {'object_name': 'LegendOption'},
'bin_options': ('django.db.models.fields.TextField', [], {'default': "''"}),
'bin_type': ('django.db.models.fields.CharField', [], {'default': "'jenks'", 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Indicator']"})
},
u'profiles.precalculatedvalue': {
'Meta': {'object_name': 'PrecalculatedValue'},
'data_source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataSource']"}),
'geo_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.GeoRecord']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'table': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'profiles.taskstatus': {
'Meta': {'object_name': 'TaskStatus'},
'error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
't_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'task': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'profiles.time': {
'Meta': {'object_name': 'Time'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sort': ('django.db.models.fields.DecimalField', [], {'max_digits': '5', 'decimal_places': '1'})
},
u'profiles.value': {
'Meta': {'object_name': 'Value'},
'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.DataPoint']"}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Denominator']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moe': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'number': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
}
}
complete_apps = ['profiles']
| {
"content_hash": "716ae6d2cb1ecc9d5dbc32fb1812c857",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 217,
"avg_line_length": 87.21212121212122,
"alnum_prop": 0.552660026252799,
"repo_name": "ProvidencePlan/Profiles",
"id": "4e078c2a091230b1c30bfcc6306e2e6429b4345f",
"size": "25920",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "communityprofiles/profiles/oldmigrations/0086_auto__add_field_flatvalue_published.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "132319"
},
{
"name": "HTML",
"bytes": "146060"
},
{
"name": "JavaScript",
"bytes": "188204"
},
{
"name": "Python",
"bytes": "2668150"
},
{
"name": "Ruby",
"bytes": "4727"
},
{
"name": "Shell",
"bytes": "745"
}
],
"symlink_target": ""
} |
from pipeline.param.base_param import BaseParam
from pipeline.param import consts
class SampleWeightParam(BaseParam):
"""
Define sample weight parameters.
Parameters
----------
class_weight : str or dict, default None
class weight dictionary or class weight computation mode, string value only accepts 'balanced';
If dict provided, key should be class(label), and weight will not be normalize, e.g.: {'0': 1, '1': 2}
If both class_weight and sample_weight_name are None, return original input data
sample_weight_name : str, name of column which specifies sample weight.
feature name of sample weight; if both class_weight and sample_weight_name are None, return original input data
normalize : bool, default False
whether to normalize sample weight extracted from `sample_weight_name` column
need_run : bool, default True
whether to run this module or not
"""
def __init__(self, class_weight=None, sample_weight_name=None, normalize=False, need_run=True):
self.class_weight = class_weight
self.sample_weight_name = sample_weight_name
self.normalize = normalize
self.need_run = need_run
def check(self):
descr = "sample weight param's"
if self.class_weight:
if not isinstance(self.class_weight, str) and not isinstance(self.class_weight, dict):
raise ValueError(f"{descr} class_weight must be str, dict, or None.")
if isinstance(self.class_weight, str):
self.class_weight = self.check_and_change_lower(self.class_weight,
[consts.BALANCED],
f"{descr} class_weight")
if self.sample_weight_name:
self.check_string(self.sample_weight_name, f"{descr} sample_weight_name")
self.check_boolean(self.need_run, f"{descr} need_run")
self.check_boolean(self.normalize, f"{descr} normalize")
return True
| {
"content_hash": "463118165b66cb5a0ace0f369f973a50",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 119,
"avg_line_length": 39.86538461538461,
"alnum_prop": 0.627592860588519,
"repo_name": "FederatedAI/FATE",
"id": "394f4050dd13d431c0a96b5ed221e8b0ebb6216d",
"size": "2737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/fate_client/pipeline/param/sample_weight_param.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from bson import ObjectId
import datetime
from tgext.ecommerce.model import models, Product
from tgext.ecommerce.lib.utils import apply_vat, with_currency
class OrderManager(object):
@classmethod
def create(cls, cart, payment_date=None, payer_info=None, status='created', payment_type=None, **details): #create_order
if payer_info is None:
payer_info = {}
if not payment_type:
payment_type = ''
items = []
for cart_item in cart.items.values():
items.append(dict(name=cart_item.get('name'), variety=cart_item.get('variety'),
category_name=cart_item.get('category_name', {}), qty=cart_item.get('qty'),
sku=cart_item.get('sku'), net_price=cart_item.get('price'), vat=cart_item.get('vat'),
rate=cart_item.get('rate'), gross_price=cart_item.get('price') + cart_item.get('vat'),
base_vat=cart_item.get('base_vat'), base_rate=cart_item.base_rate,
details=dict(cart_item.get('product_details').items()+cart_item.get('details').items())))
Product.increase_sold(cart_item.get('sku'), qty=cart_item.get('qty'))
order = models.Order(_id=cart._id,
user_id=cart.user_id,
payment_date=payment_date,
creation_date=datetime.datetime.utcnow(),
shipment_info=cart.order_info.shipment_info,
bill=cart.order_info.bill,
bill_info=cart.order_info.bill_info or {},
payer_info=payer_info,
items=items,
payment_type=payment_type,
net_total=cart.subtotal,
tax=cart.tax,
gross_total=cart.total,
shipping_charges=cart.order_info.shipping_charges,
total=cart.total+cart.order_info.shipping_charges,
due=cart.order_info.due,
discounts=cart.order_info.discounts,
applied_discount= - cart.order_info.applied_discount,
status=status,
notes=cart.order_info.notes,
message=cart.order_info.message,
details=details,
currencies=cart.order_info.currencies)
cart.delete()
models.DBSession.flush()
return order
@classmethod
def get(self, _id): #get_order
return models.Order.query.get(_id=ObjectId(_id))
@classmethod
def get_many(cls, query=dict(), fields=None): #get_products
q_kwargs = {}
if fields:
q_kwargs['fields'] = fields
q = models.Order.query.find(query, **q_kwargs)
return q
@classmethod
def get_user_orders(self, user_id):
"""Retrieves all the past orders of a given user
:param user_id: the user id string to filter for
"""
return models.Order.query.find({'user_id': user_id})
| {
"content_hash": "4ddc1b41cf377a586164419617ac5241",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 124,
"avg_line_length": 45.41095890410959,
"alnum_prop": 0.5185520361990951,
"repo_name": "axant/tgext.ecommerce",
"id": "7bcac6e51e6f2f73c1af5a256c0cd0588ff0c7c6",
"size": "3330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tgext/ecommerce/lib/order.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "822"
},
{
"name": "HTML",
"bytes": "52839"
},
{
"name": "Python",
"bytes": "114697"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from panda.models.base_upload import BaseUpload
class RelatedUpload(BaseUpload):
"""
A file related to a dataset file uploaded to PANDA.
"""
from panda.models.dataset import Dataset
dataset = models.ForeignKey(Dataset, related_name='related_uploads',
help_text=_('The dataset this upload is associated with.'),
verbose_name=_('dataset'))
file_root = settings.MEDIA_ROOT
class Meta:
app_label = 'panda'
ordering = ['creation_date']
verbose_name = _('RelatedUpload')
verbose_name_plural = _('RelatedUploads')
| {
"content_hash": "e2137cd72574bf6000622e1c33f8bce0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 72,
"avg_line_length": 30.695652173913043,
"alnum_prop": 0.6827195467422096,
"repo_name": "PalmBeachPost/panda",
"id": "23122243a5a9d203504bf02b6037f7e6116117f5",
"size": "729",
"binary": false,
"copies": "4",
"ref": "refs/heads/1.2.0",
"path": "panda/models/related_upload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14837"
},
{
"name": "HTML",
"bytes": "51564"
},
{
"name": "Java",
"bytes": "256"
},
{
"name": "JavaScript",
"bytes": "759191"
},
{
"name": "Python",
"bytes": "877718"
},
{
"name": "Shell",
"bytes": "17522"
}
],
"symlink_target": ""
} |
import os, sys
from PyQt5.QtWidgets import (QWizard, QMessageBox)
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import pyqtSlot, Qt
try:
import odmltables
have_odmltables = True
except:
have_odmltables = False
from .settings import Settings
class OdmltablesWizard(QWizard):
def __init__(self, wizname, parent=None):
super(OdmltablesWizard, self).__init__(parent)
self.wizname = wizname
self.settingsfile = os.path.join(os.path.expanduser("~"),
'.odmltables',
wizname.replace(' ', '').lower() + '.conf')
# initialize settings
self.settings = Settings(self.settingsfile)
# setting starting page of wizard
# self.setStartId(0)
self.setOption(self.IndependentPages, False)
# images won't show in Windows 7 if style not set
self.setWizardStyle(self.ModernStyle)
self.setOption(self.HaveHelpButton, True)
logo_filename = "odMLtables_100x100.png"
logo_dirs = [os.path.join(os.path.dirname(__file__), '..', '..', 'logo'),
os.path.join(sys.prefix, 'share/pixmaps')]
for logo_dir in logo_dirs:
filepath = os.path.join(logo_dir, logo_filename)
if os.path.exists(filepath):
self.setPixmap(QWizard.LogoPixmap, QPixmap(filepath))
# set up help messages
self._lastHelpMsg = ''
self._helpMsgs = self._createHelpMsgs()
self.helpRequested.connect(self._showHelp)
self.setWindowTitle(self.tr(wizname))
def _createHelpMsgs(self):
raise NotImplementedError()
@pyqtSlot()
def _showHelp(self):
# get the help message for the current page
msg = self._helpMsgs[self.currentId()]
# # if same as last message, display alternate message
# if msg == self._lastHelpMsg:
# msg = self._helpMsgs[self.NUM_PAGES + 1]
doc_link = "<p>For detailed information about odMLtables refer to the " \
"<a href='http://pythonhosted.org/python-odmltables'>odMLtables " \
"documentation</a>.</p>"
msgBox = QMessageBox()
msgBox.setWindowTitle("Help")
msgBox.setTextFormat(Qt.RichText)
msgBox.setText(msg + doc_link)
msgBox.exec_()
# QMessageBox.information(self,
# self.tr(self.wizname),
# msg)
# self._lastHelpMsg = msg
def get_graphic_path():
if have_odmltables:
data_path = os.path.join(os.path.dirname(odmltables.__file__),
'gui',
'graphics')
return data_path
| {
"content_hash": "2b667b8db5b2d2dd8fee320781af0661",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 86,
"avg_line_length": 33.27710843373494,
"alnum_prop": 0.5745836350470673,
"repo_name": "INM-6/python-odmltables",
"id": "54b51b30bb070d1462b530e3aafb5daba4e65245",
"size": "2787",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "odmltables/gui/wizutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "26"
},
{
"name": "Jupyter Notebook",
"bytes": "120236"
},
{
"name": "Python",
"bytes": "338119"
},
{
"name": "Shell",
"bytes": "3498"
},
{
"name": "TeX",
"bytes": "12438"
},
{
"name": "XSLT",
"bytes": "18504"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from collections import defaultdict
import functools
import itertools
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.download import (url_to_path, unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.logging import indent_log
from pip.vcs import vcs
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
class DistAbstraction(object):
"""Abstracts out the wheel vs non-wheel prepare_files logic.
The requirements for anything installable are as follows:
- we must be able to determine the requirement name
(or we can't correctly handle the non-upgrade case).
- we must be able to generate a list of run-time dependencies
without installing any additional packages (or we would
have to either burn time by doing temporary isolated installs
or alternatively violate pips 'don't start installing unless
all requirements are available' rule - neither of which are
desirable).
- for packages with setup requirements, we must also be able
to determine their requirements without installing additional
packages (for the same reason as run-time dependencies)
- we must be able to create a Distribution object exposing the
above metadata.
"""
def __init__(self, req_to_install):
self.req_to_install = req_to_install
def dist(self, finder):
"""Return a setuptools Dist object."""
raise NotImplementedError(self.dist)
def prep_for_dist(self):
"""Ensure that we can get a Dist for this requirement."""
raise NotImplementedError(self.dist)
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install)
class IsWheel(DistAbstraction):
def dist(self, finder):
return list(pkg_resources.find_distributions(
self.req_to_install.source_dir))[0]
def prep_for_dist(self):
# FIXME:https://github.com/pypa/pip/issues/1112
pass
class IsSDist(DistAbstraction):
def dist(self, finder):
dist = self.req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
return dist
def prep_for_dist(self):
self.req_to_install.run_egg_info()
self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
def dist(self, finder):
return self.req_to_install.satisfied_by
def prep_for_dist(self):
pass
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False,
use_user_site=False, session=None, pycompile=True,
isolated=False, wheel_download_dir=None,
wheel_cache=None):
"""Create a RequirementSet.
:param wheel_download_dir: Where still-packed .whl files should be
written to. If None they are written to the download_dir parameter.
Separate to download_dir to permit only keeping wheel archives for
pip wheel.
:param download_dir: Where still packed archives should be written to.
If None they are not saved, and are deleted immediately after
unpacking.
:param wheel_cache: The pip wheel cache, for passing to
InstallRequirement.
"""
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
# XXX: download_dir and wheel_download_dir overlap semantically and may
# be combined if we're willing to have non-wheel archives present in
# the wheelhouse output by 'pip wheel'.
self.download_dir = download_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
self._wheel_cache = wheel_cache
# Maps from install_req -> dependencies_of_install_req
self._dependencies = defaultdict(list)
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
def add_requirement(self, install_req, parent_req_name=None):
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
if not install_req.match_markers():
logger.warning("Ignoring %s: markers %r don't match your "
"environment", install_req.name,
install_req.markers)
return []
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req]
else:
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
if (parent_req_name is None and existing_req and not
existing_req.constraint):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, existing_req, name))
if not existing_req:
# Add requirement
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
result = [install_req]
else:
if not existing_req.constraint:
# No need to scan, we've already encountered this for
# scanning.
result = []
elif not install_req.constraint:
if (install_req.link and not (existing_req.link and
install_req.link.path == existing_req.link.path)):
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
# And now we need to scan this.
result = [existing_req]
# Canonicalise to the already-added object for the backref
# check below.
install_req = existing_req
if parent_req_name:
parent_req = self.get_requirement(parent_req_name)
self._dependencies[parent_req].append(install_req)
return result
def has_requirement(self, project_name):
name = project_name.lower()
if (name in self.requirements and
not self.requirements[name].constraint or
name in self.requirement_aliases and
not self.requirements[self.requirement_aliases[name]].constraint):
return True
return False
@property
def has_requirements(self):
return list(req for req in self.requirements.values() if not
req.constraint) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = os.path.expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
if req.constraint:
continue
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def _walk_req_to_install(self, handler):
"""Call handler for all pending reqs.
:param handler: Handle a single requirement. Should take a requirement
to install. Can optionally return an iterable of additional
InstallRequirements to cover.
"""
# The list() here is to avoid potential mutate-while-iterating bugs.
discovered_reqs = []
reqs = itertools.chain(
list(self.unnamed_requirements), list(self.requirements.values()),
discovered_reqs)
for req_to_install in reqs:
more_reqs = handler(req_to_install)
if more_reqs:
discovered_reqs.extend(more_reqs)
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
# make the wheelhouse
if self.wheel_download_dir:
ensure_dir(self.wheel_download_dir)
self._walk_req_to_install(
functools.partial(self._prepare_file, finder))
def _check_skip_installed(self, req_to_install, finder):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
# Check whether to upgrade/reinstall this req or not.
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
skip_reason = 'satisfied (use --upgrade to upgrade)'
if self.upgrade:
best_installed = False
# For link based requirements we have to pull the
# tree down and inspect to assess the version #, so
# its handled way down.
if not (self.force_reinstall or req_to_install.link):
try:
finder.find_requirement(req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
skip_reason = 'up-to-date'
best_installed = True
except DistributionNotFound:
# No distribution found, so we squash the
# error - it will be raised later when we
# re-try later to do the install.
# Why don't we just raise here?
pass
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
return skip_reason
else:
return None
def _prepare_file(self, finder, req_to_install):
"""Prepare a single requirements files.
:return: A list of addition InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
else:
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req_to_install.satisfied_by is None
if not self.ignore_installed:
skip_reason = self._check_skip_installed(
req_to_install, finder)
if req_to_install.satisfied_by:
assert skip_reason is not None, (
'_check_skip_installed returned None but '
'req_to_install.satisfied_by is set to %r'
% (req_to_install.satisfied_by,))
logger.info(
'Requirement already %s: %s', skip_reason,
req_to_install)
else:
if (req_to_install.link and
req_to_install.link.scheme == 'file'):
path = url_to_path(req_to_install.link.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
if req_to_install.editable:
req_to_install.ensure_has_source_dir(self.src_dir)
req_to_install.update_editable(not self.is_download)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
req_to_install.archive(self.download_dir)
elif req_to_install.satisfied_by:
abstract_dist = Installed(req_to_install)
else:
# @@ if filesystem packages are not marked
# editable in a req, a non deterministic error
# occurs when the script attempts to unpack the
# build directory
req_to_install.ensure_has_source_dir(self.build_dir)
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
# FIXME: this won't upgrade when there's an existing
# package unpacked in `req_to_install.source_dir`
if os.path.exists(
os.path.join(req_to_install.source_dir, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, req_to_install.source_dir)
)
req_to_install.populate_link(finder, self.upgrade)
# We can't hit this spot and have populate_link return None.
# req_to_install.satisfied_by is None here (because we're
# guarded) and upgrade has no impact except when satisfied_by
# is not None.
# Then inside find_requirement existing_applicable -> False
# If no new versions are found, DistributionNotFound is raised,
# otherwise a result is guaranteed.
assert req_to_install.link
try:
download_dir = self.download_dir
# We always delete unpacked sdists after pip ran.
autodelete_unpacked = True
if req_to_install.link.is_wheel \
and self.wheel_download_dir:
# when doing 'pip wheel` we download wheels to a
# dedicated dir.
download_dir = self.wheel_download_dir
if req_to_install.link.is_wheel:
if download_dir:
# When downloading, we only unpack wheels to get
# metadata.
autodelete_unpacked = True
else:
# When installing a wheel, we use the unpacked
# wheel.
autodelete_unpacked = False
unpack_url(
req_to_install.link, req_to_install.source_dir,
download_dir, autodelete_unpacked,
session=self.session)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, req_to_install.link)
)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
# Make a .zip of the source_dir we already created.
if req_to_install.link.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
# req_to_install.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
# ###################### #
# # parse dependencies # #
# ###################### #
dist = abstract_dist.dist(finder)
more_reqs = []
def add_req(subreq):
sub_install_req = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
wheel_cache=self._wheel_cache,
)
more_reqs.extend(self.add_requirement(
sub_install_req, req_to_install.name))
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install, None)
if not self.ignore_dependencies:
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq)
# cleanup tmp src
self.reqs_to_cleanup.append(req_to_install)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
self.successfully_downloaded.append(req_to_install)
return more_reqs
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = self._to_install()
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with and not
requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with and
requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
| {
"content_hash": "ecdad30d1b386e2caeb089c33acf0013",
"timestamp": "",
"source": "github",
"line_count": 670,
"max_line_length": 79,
"avg_line_length": 41.832835820895525,
"alnum_prop": 0.5483445126302269,
"repo_name": "ChristopherHogan/pip",
"id": "58951adba967ebf668a39d8c13fcc09434332c98",
"size": "28028",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "pip/req/req_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "2212079"
},
{
"name": "Shell",
"bytes": "2475"
}
],
"symlink_target": ""
} |
import subprocess
import os
import sys
import time
import datetime
import re
import random
import multiprocessing
#from enum import Enum
import numpy as np
import matplotlib.pyplot as plt
import copy_reg
import types
from my_regex import *
import arch_handler as ah
###### Enums ######
e_Test_Type = ('normal',
'binary_search_norm_demand',
'binary_search_routability_metric'
)
#redifine the pickling function used by 'pickle' (in turn used by multiprocessing) s.t.
#multiprocessing will work with instancemethods (i.e. class functions).
#this is necessary for python2 compatibility. python3 solves all ills but burst.eecg
#doesn't support it fully yet
def _pickle_method(m):
if m.im_self is None:
return getattr, (m.im_class, m.im_func.func_name)
else:
return getattr, (m.im_self, m.im_func.func_name)
copy_reg.pickle(types.MethodType, _pickle_method)
###### Classes ######
#represents a suite of Wotan tests to be run
class Wotan_Test_Suite:
def __init__(self, wirelength, #wirelength to test (number)
switchblock, #which switchblock to test (wilton/universal/subset)
arch_name, #a string specifying the arch to use. should correspond to an entry in 'arch_dictionaries' variable
arch_dictionaries, #object of type 'Archs' that contains dictionaries of possible archs for use in Wotan and VPR tests
sweep_type, #what value will be swept for this test (fcin/fcout(
sweep_range, #what range should the value be swept over? (this should be a list/tuple)
output_regex_list, #list of regex lines that will be used to parse output (one output per regex)
output_label_list, #list of readable labels to associate with each regex above (for annotating outputs)
plot_index, #index into above two lists. which output should be plotted agains the variable swept in the arch file?
wotan_opts, #a string of wotan options to be used for this test suite
extra_string=''): #a short string that will be prefixed to the string descriptor of this suite. used to help name results file
self.wirelength = wirelength
if switchblock != 'wilton' and switchblock != 'universal' and switchblock != 'subset':
print('unrecognized switchblock: ' + switchblock)
sys.exit()
self.switchblock = switchblock
if sweep_type != 'fcin' and sweep_type != 'fcout':
print('unrecognized sweep type: ' + sweep_type)
sys.exit()
self.sweep_type = sweep_type
self.sweep_range = sweep_range
if len(output_regex_list) == 0 or len(output_regex_list) != len(output_label_list):
print('regex list should be of same length as the label list, and neither list should have a length of 0')
sys.exit()
self.output_regex_list = output_regex_list
self.output_label_list = output_label_list
if plot_index >= len(output_regex_list):
print('plot_index is outside the range of the regex list')
sys.exit()
self.plot_index = plot_index
self.wotan_opts = wotan_opts
self.extra_descriptor_str = extra_string
if not isinstance(arch_dictionaries, Archs):
print('expected arch_dictionaries to be of type Archs')
sys.exit()
self.arch_dictionaries = arch_dictionaries
self.arch_name = arch_name
#returns path to VPR architecture file used for Wotan tests
def get_wotan_arch_path(self):
return self.arch_dictionaries.wotan_archs[ self.arch_name ]
#returns path to VPR architecture file used for VPR tests
def get_vpr_arch_path(self):
return self.arch_dictionaries.vpr_archs[ self.arch_name ]
#returns a brief string specifying wirelength -- useful for annotating graphs and naming files
def wirelength_str(self):
result = 'len' + str(self.wirelength)
return result
#returns a brief string specifying input/output equivalence -- useful for annotating graphs and naming files
def equiv_str(self):
result = ''
if self.input_equiv and self.output_equiv:
result = 'inout-equiv'
elif self.input_equiv:
result = 'in-equiv'
elif self.output_equiv:
result = 'out-equiv'
else:
result = 'no-equiv'
return result
#returns a brief string specifying switchblock -- useful for annotating graphs and naming files
def switchblock_str(self):
result = ''
if self.switchblock == 'subset':
result = 'planar'
else:
result = self.switchblock
return result
#returns a brief string specifying the sweep type -- useful for annotating graphs and naming files
def sweep_type_str(self):
return self.sweep_type
#returns a brief string specifying the arch file that was used
def arch_name_str(self):
result = 'arch:' + self.arch_name
return result
#returns a string describing the entire test suite -- useful for naming files and stuff
def as_str(self, separator='_'):
test_str = ''
if self.extra_descriptor_str:
test_str += self.extra_descriptor_str + separator
test_str += self.wirelength_str() + separator + self.switchblock_str() + separator + \
self.sweep_type_str() + separator + self.arch_name_str()
return test_str
#return string to describe a specific attribute
def get_attr_as_string(self, attr):
return_str = ''
if attr == 'wirelength':
return_str = self.wirelength_str()
elif attr == 'input_equiv' or attr == 'output_equiv':
return_str = self.equiv_str()
elif attr == 'switchblock':
return_str = self.switchblock_str()
elif attr == 'sweep_type':
return_str = self.sweep_type_str()
elif attr == 'arch_name':
return_str = self.arch_name_str()
else:
print('unrecognized attribute: ' + attr)
sys.exit()
return return_str
#returns a string describing the specified list of attributes
def attributes_as_string(self, attribute_list, separator='_'):
return_str = ''
#return_str += self.wirelength_str()
return_str += self.get_attr_as_string(attribute_list[0])
for attr in attribute_list[1:]:
return_str += separator + self.get_attr_as_string(attr)
return return_str
def __str__(self):
return self.as_str()
def __repr__(self):
return self.as_str()
#a class used to run wotan tests.
#I have also modified it to be able to run VPR tests (hence why two different architectures are passed in to the constructor)
class Wotan_Tester:
#constructor
def __init__(self, vtr_path, #path to the base vtr folder
wotan_path, #path to the base wotan folder
test_type, #string specifying test type (holds one of the values in e_Test_Type)
test_suite_2dlist): #a list of lists. each sublist contains a set of test suites which should be plotted on the same graph
#initialize wotan-related stuff
self.wotan_path = wotan_path
#initialize vtr-related stuff
self.vtr_path = vtr_path
self.vpr_path = vtr_path + "/vpr"
print('\n')
print('Wotan Path: ' + self.wotan_path)
print('VPR Path: ' + self.vpr_path)
if test_type not in e_Test_Type:
print('unrecognized test type: ' + test_type)
sys.exit()
self.test_type = test_type
############ Command-Line Related ############
#parses the specified string and returns a list of arguments where each
#space-delimited value receives its own entry in the list
def get_argument_list(self, string):
result = string.split()
return result
#runs command with specified arguments and returns the result
#arguments is a list where each individual argument is in it's own entry
#i.e. the -l and -a in "ls -l -a" would each have their own entry in the list
def run_command(self, command, arguments):
result = subprocess.check_output([command] + arguments)
return result
############ VPR Related ############
#compiles VPR
def make_vpr(self):
os.chdir( self.vpr_path )
result = self.run_command("make", [])
return result
#runs VPR with specified arguments
def run_vpr(self, arguments):
arg_list = self.get_argument_list(arguments)
#switch to vpr directory
os.chdir( self.vpr_path )
output = str(self.run_command("./vpr", arg_list))
return output
#returns list of MCNC benchmarks
def get_mcnc_benchmarks(self):
#benchmark names
benchmarks = [
'alu4',
'apex2',
'apex4',
'bigkey',
'clma',
'des',
'diffeq',
'dsip',
'elliptic',
'ex1010',
'ex5p',
'frisc',
'misex3',
'pdc',
's298',
's38417',
's38584.1',
'seq',
'spla',
'tseng'
]
#add blif suffix
#benchmarks = [bm + '.pre-vpr.blif' for bm in benchmarks]
benchmarks = [bm + '.blif' for bm in benchmarks]
#add full path as prefix
#bm_path = self.vtr_path + '/vtr_flow/benchmarks/blif/wiremap6/'
bm_path = self.vtr_path + '/vtr_flow/benchmarks/blif/'
benchmarks = [bm_path + bm for bm in benchmarks]
return benchmarks
#returns a list of VTR benchmarks
def get_vtr_benchmarks(self, lut_size=6):
#list of benchmark names
benchmarks = [
'bgm',
'blob_merge',
'boundtop',
'mkDelayWorker32B',
'LU8PEEng',
#'mcml',
#'stereovision2',
#'LU32PEEng',
'mkSMAdapter4B',
'or1200',
'raygentop',
'sha',
'stereovision0',
'stereovision1'
]
suffix = '.blif'
bm_dir = '/vtr_flow/benchmarks/vtr_benchmarks_blif/'
if lut_size == 4:
suffix = '.pre-vpr.blif'
bm_dir = '/vtr_flow/benchmarks/4LUT_DSP_vtr_benchmarks_blif/'
#add blif suffix
benchmarks = [bm + suffix for bm in benchmarks]
#add full path as prefix
bm_path = self.vtr_path + bm_dir
benchmarks = [bm_path + bm for bm in benchmarks]
return benchmarks
#runs provided list of benchmarks and returns outputs (based on regex_list) averaged over the specified list of seeds.
#this function basically calls 'run_vpr_benchmarks' for each seed in the list
def run_vpr_benchmarks_multiple_seeds(self, benchmark_list,
regex_list, vpr_arch,
vpr_seed_list = [1], #by default run with single seed
num_threads = 1): #by default run with 1 thread
result_table = []
if num_threads > len(benchmark_list):
num_threads = len(benchmark_list)
#run vpr benchmarks for each specified seed
for seed in vpr_seed_list:
print('SEED ' + str(seed) + ' of ' + str(len(vpr_seed_list)))
seed_result = self.run_vpr_benchmarks(benchmark_list, regex_list, vpr_arch,
seed, num_threads = num_threads)
result_table += [seed_result]
#take average of all results
result_table = np.array(result_table)
avg_results = []
for column in result_table.T:
column_avg = sum(column) / float(len(column))
avg_results += [column_avg]
return avg_results
#runs provided list of benchmarks and returns geomean outputs based on the provided list of regular expressions
def run_vpr_benchmarks(self, benchmark_list,
regex_list, arch_path,
vpr_seed = 1, #default seed of 1
num_threads = 1): #number of concurrent VPR executables to run
#VPR should be run with the -nodisp option and some seed
vpr_base_opts = '-nodisp -timing_analysis off --seed ' + str(vpr_seed)
#make 2-d list into which results of each benchmark run will go
outputs = []
for tmp in regex_list:
#an entry for each regex
outputs += [[]]
#self.change_vpr_rr_struct_dump(self.vpr_path, enable=False)
self.make_vpr()
#create a temporary directory for each benchmark to store the vpr executable (otherwise many threads try to write to the same vpr output file)
temp_dir = self.vpr_path + '/script_temp'
#cleanup temp directory if it already exists
if os.path.isdir(temp_dir):
self.run_command('rm', ['-r', temp_dir])
try:
self.run_command('mkdir', [temp_dir])
except subprocess.CalledProcessError as err:
print(err.output)
raise
#multithread vpr runs
iterables = []
for bm in benchmark_list:
index = benchmark_list.index(bm)
bm_dir = temp_dir + '/bm' + str(index)
try:
self.run_command('mkdir', [bm_dir])
self.run_command('cp', [arch_path, bm_dir])
self.run_command('cp', [self.vpr_path + '/vpr', bm_dir])
except subprocess.CalledProcessError as err:
print(err.output)
raise
arch_name = (arch_path.split('/'))[-1]
if 'xml' not in arch_name:
raise 'WTF'
new_arch_path = bm_dir + '/' + arch_name
new_vpr_path = bm_dir + '/vpr'
iterables += [VPR_Benchmark_Info(new_vpr_path, new_arch_path, bm, vpr_base_opts, benchmark_list, regex_list)]
os.system("taskset -p 0xffffffff %d" % os.getpid())
mp_pool = multiprocessing.Pool(processes=num_threads)
try:
outputs = mp_pool.map(self.run_vpr_benchmark, iterables)
mp_pool.close()
mp_pool.join()
except KeyboardInterrupt:
print('Caught KeyboardInterrupt. Terminating threads and exiting.')
mp_pool.terminate()
mp_pool.join()
sys.exit()
outputs = np.array(outputs)
#return geomean for each column (?) in the outputs table
geomean_outputs = []
for regex in regex_list:
ind = regex_list.index(regex)
benchmarks_result_list = outputs[:, ind].tolist()
for out in benchmarks_result_list:
print( '\t\tbenchmark %d routed at W=%d' % (benchmarks_result_list.index(out), int(out)) )
sys.stdout.flush()
geomean_outputs += [ get_geomean(benchmarks_result_list) ]
return geomean_outputs
#runs specified vpr benchmark and returns regex'd outputs in a list
def run_vpr_benchmark(self, bm_info):
vpr_path = bm_info.vpr_path
arch_path = bm_info.arch_path
benchmark = bm_info.benchmark
vpr_base_opts = bm_info.vpr_base_opts
benchmark_list = bm_info.benchmark_list
regex_list = bm_info.regex_list
run_dir = ''
for token in (arch_path.split('/'))[:-1]:
run_dir += token + '/'
os.chdir( run_dir )
output_list = []
vpr_opts = arch_path + ' ' + benchmark + ' ' + vpr_base_opts
try:
arg_list = self.get_argument_list(vpr_opts)
vpr_out = str(self.run_command(vpr_path, arg_list))
#vpr_out = self.run_vpr(vpr_opts)
except KeyboardInterrupt:
#dealing with python 2.7 compatibility stupidness... i can't get multiprocessing to terminate on "ctrl-c"
#unless I write this try-except statement. and even then I have to bang on ctrl-c repeatedly to get the desired effect :(
print('worker received interrupt. exiting.')
return
#parse outputs according to user's regex list
for regex in regex_list:
#ind = regex_list.index(regex)
parsed = float( regex_last_token(vpr_out, regex) )
output_list += [parsed]
ind = benchmark_list.index(benchmark)
print('\t\tbenchmark: ' + str(ind) + ' done')
#print('\t\t\tvpr opts: ' + vpr_opts)
return output_list
############ Wotan Related ############
#compiles wotan
def make_wotan(self):
os.chdir( self.wotan_path )
result = self.run_command("make", [])
return result
#runs wotan with specified arguments
def run_wotan(self, arguments):
arg_list = self.get_argument_list(arguments)
#switch to wotan directory
os.chdir( self.wotan_path )
output = str(self.run_command("./wotan", arg_list))
return output
#performs binary search to adjust demand multiplier in wotan until the target metric is equal to the desired value within some tolerance.
#returns 3-tuple: (final target value, final demand multiplier, wotan output)
def search_for_wotan_demand_multiplier(self, wotan_opts, test_type,
target = None,
target_tolerance = None,
target_regex = None,
demand_mult_low = 0.0,
demand_mult_high = 10,
max_tries = 30):
if '-demand_multiplier' in wotan_opts:
print('-demand_multiplier option already included in wotan_opts -- can\'t do binary search for pin demand')
sys.exit()
#true if increasing, false if decreasing
monotonic_increasing = True
#what we're searching for in wotan output
if test_type == 'binary_search_norm_demand':
if not target_regex:
target_regex = '.*Normalized demand: (\d+\.\d+).*'
if not target:
target = 0.8
if not target_tolerance:
target_tolerance = 0.01
elif test_type == 'binary_search_routability_metric':
if not target_regex:
target_regex = '.*Routability metric: (\d+\.*\d*).*'
if not target:
target = 0.3
if not target_tolerance:
target_tolerance = 0.02
monotonic_increasing = False
else:
print('unexpected test_type passed-in to binary search: ' + test_type)
sys.exit()
current = 0
wotan_out = ''
#perform binary search
self.make_wotan()
try_num = 1
while abs(current - target) > target_tolerance:
if try_num > max_tries:
if current < target:
#the architecture is probably very unroutable and it simply can't get to the specified target value
print('\t\tarchitecture looks too unroutable; can\'t meet binary search target of ' + str(target) + '. Returning.')
break
else:
print('WARNING! Binary search has taken more than ' + str(max_tries) + ' tries to binary search for correct pin demand. using last value...')
break
#sys.exit()
#get next value of pin demand to try
demand_mult_current = (demand_mult_high + demand_mult_low) / 2
adjusted_wotan_opts = wotan_opts + ' -demand_multiplier ' + str(demand_mult_current)
#run wotan and get the value of the target metric
self.make_wotan()
wotan_out = self.run_wotan(adjusted_wotan_opts)
regex_val = regex_last_token(wotan_out, target_regex)
current = float( regex_val )
if monotonic_increasing:
if current < target:
demand_mult_low = demand_mult_current
else:
demand_mult_high = demand_mult_current
else:
if current > target:
demand_mult_low = demand_mult_current
else:
demand_mult_high = demand_mult_current
print( '\tat demand mult ' + str(demand_mult_current) + ' current val is ' + str(current) )
sys.stdout.flush()
if demand_mult_low > demand_mult_high:
print('low value > high value in binary search!')
sys.exit()
try_num += 1
return (current, demand_mult_current, wotan_out)
############ Test Suite Related ############
#returns a 2-tuple containing a metric (based on target_regex) for each of the specified architectures.
#pin demand is calculated based on the first architecture (and the target/target_tolerance values) using a binary search.
#metric values for the two architectures are returned based on the aforementioned pin demand
def wotan_arch_metrics_with_first_as_reference(self, arch_point1, arch_point2, target, target_tolerance, target_regex,
wotan_opts, vpr_opts_arch1, vpr_opts_arch2):
arch1_metric = None
arch2_metric = None
path_arch1 = arch_point1.get_wotan_arch_path()
path_arch2 = arch_point2.get_wotan_arch_path()
#get pin demand / routability based on first architecture
self.update_arch_based_on_arch_point(path_arch1, arch_point1)
self.run_vpr( vpr_opts_arch1 )
(arch1_metric, demand_mult, arch1_output) = self.search_for_wotan_demand_multiplier(wotan_opts = wotan_opts,
test_type = self.test_type,
target = target,
target_tolerance = target_tolerance,
target_regex = target_regex)
#now get the routability of the second architecture
self.update_arch_based_on_arch_point(path_arch2, arch_point2)
self.run_vpr( vpr_opts_arch2 )
arch2_output = self.run_wotan( wotan_opts + ' -demand_multiplier ' + str(demand_mult) )
arch2_metric = float( regex_last_token(arch2_output, target_regex) )
print (arch1_metric, arch2_metric)
return (arch1_metric, arch2_metric)
#evaluates routability of each architecture point in the specified list (optionally runs VPR on this list as well).
#results are written in table form to the specified file
# wotan results are sorted best to worst
# VPR results, if enabled, are sorter best to worst (in terms on channel width)
def evaluate_architecture_list(self, arch_list,
results_file,
wotan_opts,
vpr_arch_ordering = []): #if not [], wotan results will be compared against this ordering. otherwise vpr comparisons will be run too
print ("Evaluating architecture list...")
run_vpr_comparisons = False
if vpr_arch_ordering == []:
run_vpr_comparisons = True
#specifies how architectures should be evaluated.
#binary search over pin demand until target prob is hit with specified tolerance
target_prob = 0.5
target_tolerance = 0.0099
target_regex = '.*Routability metric: (\d+\.*\d*).*'
#a list of channel widths over which to evaluate w/ wotan (i.e. geomean)
chan_widths = [100]
vpr_evaluation_seeds = [1]
wotan_results = []
vpr_results = []
#for each architecture point:
#- evaluate with wotan
#- evaluate with VPR if enabled
for arch_point in arch_list:
arch_point_index = arch_list.index(arch_point)
print('Run ' + str(arch_point_index+1) + '/' + str(len(arch_list)) + ' arch is ' + arch_point.as_str() )
###### Make Wotan and VPR ######
self.make_wotan()
self.make_vpr()
#path to architecture
wotan_arch_path = get_path_to_arch( arch_point )
###### Evaluate architecture with Wotan ######
metric_value_list = []
for chanw in chan_widths:
print('W = ' + str(chanw))
#6LUT and 4LUT benchmarks are from different directories
benchmark = 'vtr_benchmarks_blif/sha.blif'
if arch_point.lut_size == 4:
benchmark = '4LUT_DSP_vtr_benchmarks_blif/sha.pre-vpr.blif'
vpr_opts = wotan_arch_path + ' ' + self.vtr_path + '/vtr_flow/benchmarks/' + benchmark + ' -nodisp -dump_rr_structs_file ./dumped_rr_structs.txt -pack -place -route_chan_width ' + str(chanw)
vpr_out = self.run_vpr( vpr_opts )
#run binary search to find pin demand at which the target_regex hits its target value
(target_val, demand_mult, wotan_out) = self.search_for_wotan_demand_multiplier(wotan_opts = wotan_opts,
test_type = self.test_type,
target = target_prob,
target_tolerance = target_tolerance,
target_regex = target_regex,
demand_mult_high = 200)
#get metric used for evaluating the architecture
metric_regex = '.*Demand multiplier: (\d*\.*\d+).*' #TODO: put this value into arch point info based on test suites? don't want to be hard-coding...
metric_label = 'Demand Multiplier'
metric_value_list += [float(regex_last_token(wotan_out, metric_regex))]
#add metric to list of wotan results
metric_value = get_geomean(metric_value_list)
print('geomean score: ' + str(metric_value))
wotan_result_entry = [arch_point_index, arch_point.as_str(), metric_value]
wotan_results += [wotan_result_entry]
###### Evaluate architecture with VPR ######
if run_vpr_comparisons:
print ("Evaluating with vpr now...")
#what regex / benchmarks to run?
vpr_regex_list = ['channel width factor of (\d+)']
benchmarks = self.get_vtr_benchmarks(lut_size = arch_point.lut_size)
#run VPR and get regex results
#vpr_arch_path = arch_point.get_vpr_arch_path()
#self.update_arch_based_on_arch_point(vpr_arch_path, arch_point)
#TODO: each thread should be executing in its own directory. also, update architecture here?
results = self.run_vpr_benchmarks_multiple_seeds(benchmarks, vpr_regex_list, wotan_arch_path,
vpr_seed_list = vpr_evaluation_seeds,
num_threads = 10)
#add VPR result to running list
vpr_result_entry = [arch_point_index, arch_point.as_str(), results[0]]
vpr_results += [vpr_result_entry]
#sort results -- descending for wotan, ascending for vpr
wotan_results.sort(key=lambda x: x[2], reverse=True)
vpr_results.sort(key=lambda x: x[2])
try:
#figure out how many pairwise comparisons of wotan agree with VPR
# --> compare every architecture result to every other architecture result
agree_cases = 0
agree_within_tolerance = 0
total_cases = 0
vpr_tolerance = 2
wotan_arch_ordering = [el[1:3] for el in wotan_results] #get arch string and score for each element in 'wotan_arch_ordering'
if run_vpr_comparisons:
vpr_arch_ordering = [el[1:3] for el in vpr_results]
[agree_cases, agree_within_tolerance, total_cases] = compare_wotan_vpr_arch_orderings(wotan_arch_ordering, vpr_arch_ordering, vpr_tolerance)
else:
#compare wotan results against passed-in list
if len(wotan_arch_ordering) == len(vpr_arch_ordering):
[agree_cases, agree_within_tolerance, total_cases] = compare_wotan_vpr_arch_orderings(wotan_arch_ordering, vpr_arch_ordering, vpr_tolerance)
except TypeError as e:
print('caught exception:')
print(e)
print('continuing anyway')
#print results to a file
with open(results_file, 'w+') as f:
for w_result in wotan_results:
result_index = wotan_results.index(w_result)
if run_vpr_comparisons:
v_result = vpr_results[result_index]
else:
v_result = []
for w_elem in w_result:
f.write(str(w_elem) + '\t')
f.write('\t')
for v_elem in v_result:
f.write(str(v_elem) + '\t')
f.write('\n')
f.write('\n')
f.write('Wotan and VPR agree in ' + str(agree_cases) + '/' + str(total_cases) + ' pairwise comparisons\n')
f.write(str(agree_within_tolerance) + '/' + str(total_cases) + ' cases agree within VPR minW tolerance of ' + str(vpr_tolerance))
#Contains info about an architecture data point. Basically mirrors
# the info contained in Wotan_Test_Suite, except for only one architecture point
class Arch_Point_Info:
def __init__(self, lut_size, #size of the LUT (i.e. K)
s_wirelength, #semi-global wirelength
g_wirelength, #global-layer wirelength; specify None if not used
switchblock_pattern, #wilton/universal/subset
wire_topology, #'single-wirelength', 'on-cb-off-cb', 'on-cb-off-sb', 'on-cb-off-cbsb', 'on-cbsb-off-cbsb', 'on-sb-off-sb'
fcin, #cb input flexibility
fcout, #cb output flexibility
arch_string = None): #optional string that describes this architecture
if lut_size not in [4, 6]:
raise BaseException, 'Unexpected LUT size: %d' % (lut_size)
if switchblock_pattern not in ['wilton', 'universal', 'subset']:
raise BaseException, 'Unexpected switch block pattern: %s' % (switchblock_pattern)
if wire_topology not in ['single-wirelength', 'on-cb-off-cb', 'on-cb-off-sb', 'on-cb-off-cbsb', 'on-cbsb-off-cbsb', 'on-sb-off-sb']:
raise BaseException, 'Unexpected wire topology: %s' % (wire_topology)
self.lut_size = lut_size
self.s_wirelength = s_wirelength
self.g_wirelength = g_wirelength
self.switchblock_pattern = switchblock_pattern
self.wire_topology = wire_topology
self.fcin = fcin
self.fcout = fcout
self.arch_string = arch_string
#overload constructor -- initialize based on a string. Expecting string to be in the format of this class' 'as_str' function
@classmethod
def from_str(cls, s):
#this should be a dictionary...
regex_list = {
's_wirelength' : '.*_s(\d+)_.*',
'g_wirelength' : '.*_g(\d+)_.*',
'K' : '.*k(\d)_.*',
'wire_topology' : '.*_topology-([-\w]+)_.*',
'fcin' : '.*fcin(\d+\.*\d*)',
'fcout' : '.*fcout(\d+\.*\d*)',
}
#get wirelength, fcin, fcout
tmp_dict = {}
for key in regex_list:
try:
tmp_dict[key] = regex_last_token(s, regex_list[key])
except RegexException as exception:
if key == 'g_wirelength':
#it's OK if global wirelength wasn't specified
tmp_dict[key] = None
continue
else:
raise
s_wirelength = int(tmp_dict['s_wirelength'])
g_wirelength = tmp_dict['g_wirelength']
if g_wirelength != None:
g_wirelength = int(g_wirelength)
lut_size = int(tmp_dict['K'])
wire_topology = tmp_dict['wire_topology']
fcin = float(tmp_dict['fcin'])
fcout = float(tmp_dict['fcout'])
#get switchblock
switchblock = None
if 'subset' in s:
switchblock = 'subset'
elif 'universal' in s:
switchblock = 'universal'
elif 'wilton' in s:
switchblock = 'wilton'
else:
print('could not find a switchblock specification in string:\n\t' + s)
sys.exit()
return cls(lut_size, s_wirelength, g_wirelength, switchblock, wire_topology, fcin, fcout, s)
#returns a string describing an object of this class
def as_str(self):
return self.arch_string
def __str__(self):
return self.arch_string
def __repr__(self):
return self.arch_string
#class used for passing info in multithreading VPR benchmark runs
class VPR_Benchmark_Info():
def __init__(self, vpr_path,
arch_path,
benchmark,
vpr_base_opts,
benchmark_list,
regex_list
):
self.vpr_path = vpr_path
self.arch_path = arch_path
self.benchmark = benchmark
self.vpr_base_opts = vpr_base_opts
self.benchmark_list = benchmark_list
self.regex_list = regex_list
############ Miscellaneous ############
#reads each line in a file into a list of strings
def read_file_into_string_list(file_path):
string_list = []
with open(file_path) as f:
string_list = f.readlines()
#remove leading/trailing whitespace from strings
for string in string_list:
ind = string_list.index(string)
string_list[ind] = string_list[ind].strip()
return string_list
#reads each line in a file into a 2d list of strings. each string is split into a list of strings with spaces/tabs as delimiters
def read_file_into_split_string_list(file_path):
split_string_list = []
with open(file_path) as f:
split_string_list = f.readlines()
#now remove leading/trailing whitespace and split strings
for string in split_string_list:
ind = split_string_list.index(string)
split_string_list[ind] = split_string_list[ind].strip().split()
return split_string_list
#returns date string in year/month/day/hour/minute. the various entries are separated by specified separator string
def get_date_str(separator='-'):
now = datetime.datetime.now()
date_str = str(now.year) + separator + str(now.month) + separator + str(now.day) + separator + str(now.hour) + separator + str(now.minute)
return date_str
#returns geometric mean of list
def get_geomean(my_list):
result = 1.0
for num in my_list:
result *= num
result **= (1.0/len(my_list))
return result
#returns (x,y) index of 'val' in 'my_list'
def index_2d(my_list, val):
result_x = None
result_y = None
for sublist in my_list:
if val in sublist:
result_x = my_list.index(sublist)
result_y = sublist.index(val)
break
return (result_x, result_y)
#returns the number of pairwise comparisons where wotan ordering agrees with vpr ordering.
#basically match every architecture against every other architecture for wotan, and then see if this pairwise odering
#agrees with VPR.
# - assumed that architectures are ordered best to worst. first entry is architecture name, second entry is architecture 'score' (min W for VPR)
def compare_wotan_vpr_arch_orderings(wotan_ordering, vpr_ordering,
vpr_tolerance=2): #Wotan predictions always treated as correct for architectures within specified VPR score tolerance
#make sure both ordered lists are the same size
if len(wotan_ordering) != len(vpr_ordering):
print('expected wotan and vpr ordered list to be the same size')
sys.exit()
total_cases = 0
agree_cases = 0
agree_within_tolerance = 0
i = 0
while i < len(wotan_ordering)-1:
j = i+1
while j < len(wotan_ordering):
arch_one = wotan_ordering[i][0]
arch_two = wotan_ordering[j][0]
#now get the index of these two arch points in the vpr ordered list. since the lists are sorted from best to worst,
#a lower index means a better architecture
vpr_ind_one, dummy = index_2d(vpr_ordering, arch_one)
vpr_ind_two, dummy = index_2d(vpr_ordering, arch_two)
vpr_score_one = float(vpr_ordering[ vpr_ind_one ][1])
vpr_score_two = float(vpr_ordering[ vpr_ind_two ][1])
if vpr_ind_one < vpr_ind_two:
agree_cases += 1
agree_within_tolerance += 1
elif abs(vpr_score_one - vpr_score_two) <= vpr_tolerance:
agree_within_tolerance += 1
else:
print('Disagreed with VPR ordering:\t' + vpr_ordering[vpr_ind_one][0] + ' (' + str(vpr_score_one) + ') VS ' + vpr_ordering[vpr_ind_two][0] + ' (' + str(vpr_score_two) + ')')
total_cases += 1
j += 1
i += 1
return (agree_cases, agree_within_tolerance, total_cases)
#returns a hard-coded list of Arch_Point_Info elements (use my_custom_arch_pair_list for pairwise comparisons)
def my_custom_archs_list():
arch_list = []
arch_strings = []
#Legend (corresponds to arch_handler.py):
#k<LUT_size> s<semi-global segment length> g<global segment length> <switchblock (universal/subset/wilton)>
# topology-<interconect topology> fcin<input Fc> fcout<output Fc>
#### 100 random 6LUT architectures ####
#arch_strings += ['k6_s4_g8_universal_topology-on-cb-off-cb_fcin0.2_fcout0.05']
#arch_strings += ['k6_s4_wilton_topology-single-wirelength_fcin0.05_fcout0.05']
#arch_strings += ['k6_s2_wilton_topology-single-wirelength_fcin0.05_fcout0.2']
#arch_strings += ['k6_s2_subset_topology-single-wirelength_fcin0.2_fcout0.1']
#arch_strings += ['k6_s4_g16_universal_topology-on-cb-off-cb_fcin0.1_fcout0.1']
#arch_strings += ['k6_s8_wilton_topology-single-wirelength_fcin0.1_fcout0.4']
#arch_strings += ['k6_s4_universal_topology-single-wirelength_fcin0.1_fcout0.05']
#arch_strings += ['k6_s4_universal_topology-single-wirelength_fcin0.6_fcout0.4']
#arch_strings += ['k6_s4_g8_subset_topology-on-sb-off-sb_fcin0.4_fcout0.05']
#arch_strings += ['k6_s4_g8_wilton_topology-on-sb-off-sb_fcin0.4_fcout0.6']
#arch_strings += ['k6_s4_wilton_topology-single-wirelength_fcin0.05_fcout0.4']
#arch_strings += ['k6_s4_g16_wilton_topology-on-cbsb-off-cbsb_fcin0.6_fcout0.6']
#arch_strings += ['k6_s1_universal_topology-single-wirelength_fcin0.1_fcout0.6']
#arch_strings += ['k6_s4_g8_universal_topology-on-cb-off-sb_fcin0.1_fcout0.05']
#arch_strings += ['k6_s4_wilton_topology-single-wirelength_fcin0.4_fcout0.1']
#arch_strings += ['k6_s16_subset_topology-single-wirelength_fcin0.1_fcout0.6']
#arch_strings += ['k6_s8_universal_topology-single-wirelength_fcin0.4_fcout0.1']
#arch_strings += ['k6_s4_g4_wilton_topology-on-cb-off-sb_fcin0.4_fcout0.6']
#arch_strings += ['k6_s4_g4_subset_topology-on-cbsb-off-cbsb_fcin0.1_fcout0.1']
#arch_strings += ['k6_s16_wilton_topology-single-wirelength_fcin0.1_fcout0.1']
#arch_strings += ['k6_s4_g8_universal_topology-on-cbsb-off-cbsb_fcin0.1_fcout0.6']
#arch_strings += ['k6_s8_universal_topology-single-wirelength_fcin0.6_fcout0.2']
#arch_strings += ['k6_s2_universal_topology-single-wirelength_fcin0.1_fcout0.2']
#arch_strings += ['k6_s8_wilton_topology-single-wirelength_fcin0.4_fcout0.1']
#arch_strings += ['k6_s2_universal_topology-single-wirelength_fcin0.6_fcout0.6']
#arch_strings += ['k6_s8_universal_topology-single-wirelength_fcin0.6_fcout0.1']
#arch_strings += ['k6_s1_subset_topology-single-wirelength_fcin0.2_fcout0.4']
#arch_strings += ['k6_s4_g16_subset_topology-on-cbsb-off-cbsb_fcin0.1_fcout0.05']
#arch_strings += ['k6_s4_g16_universal_topology-on-sb-off-sb_fcin0.05_fcout0.4']
#arch_strings += ['k6_s4_g8_wilton_topology-on-cb-off-sb_fcin0.2_fcout0.05']
#arch_strings += ['k6_s4_g4_universal_topology-on-cb-off-cb_fcin0.4_fcout0.05']
#arch_strings += ['k6_s4_wilton_topology-single-wirelength_fcin0.2_fcout0.2']
#arch_strings += ['k6_s1_subset_topology-single-wirelength_fcin0.2_fcout0.6']
#arch_strings += ['k6_s2_wilton_topology-single-wirelength_fcin0.2_fcout0.05']
#arch_strings += ['k6_s4_universal_topology-single-wirelength_fcin0.2_fcout0.6']
#arch_strings += ['k6_s2_subset_topology-single-wirelength_fcin0.05_fcout0.1']
#arch_strings += ['k6_s8_wilton_topology-single-wirelength_fcin0.05_fcout0.2']
#arch_strings += ['k6_s1_universal_topology-single-wirelength_fcin0.4_fcout0.6']
#arch_strings += ['k6_s4_g8_wilton_topology-on-cbsb-off-cbsb_fcin0.6_fcout0.4']
#arch_strings += ['k6_s4_g16_universal_topology-on-cbsb-off-cbsb_fcin0.1_fcout0.6']
#arch_strings += ['k6_s4_subset_topology-single-wirelength_fcin0.6_fcout0.6']
#arch_strings += ['k6_s4_g4_wilton_topology-on-cb-off-cb_fcin0.2_fcout0.6']
#arch_strings += ['k6_s8_subset_topology-single-wirelength_fcin0.05_fcout0.2']
#arch_strings += ['k6_s16_subset_topology-single-wirelength_fcin0.2_fcout0.2']
#arch_strings += ['k6_s16_wilton_topology-single-wirelength_fcin0.1_fcout0.6']
#arch_strings += ['k6_s2_universal_topology-single-wirelength_fcin0.05_fcout0.4']
#arch_strings += ['k6_s2_universal_topology-single-wirelength_fcin0.2_fcout0.4']
#arch_strings += ['k6_s4_g16_universal_topology-on-cb-off-cb_fcin0.1_fcout0.4']
#arch_strings += ['k6_s4_g4_universal_topology-on-cbsb-off-cbsb_fcin0.05_fcout0.6']
#arch_strings += ['k6_s1_subset_topology-single-wirelength_fcin0.4_fcout0.1']
#arch_strings += ['k6_s1_wilton_topology-single-wirelength_fcin0.05_fcout0.4']
#arch_strings += ['k6_s4_g8_subset_topology-on-cb-off-cbsb_fcin0.1_fcout0.2']
#arch_strings += ['k6_s4_g4_universal_topology-on-cb-off-cbsb_fcin0.4_fcout0.6']
#arch_strings += ['k6_s4_g16_universal_topology-on-cbsb-off-cbsb_fcin0.2_fcout0.2']
#arch_strings += ['k6_s4_g4_wilton_topology-on-cb-off-sb_fcin0.6_fcout0.2']
#arch_strings += ['k6_s4_wilton_topology-single-wirelength_fcin0.1_fcout0.6']
#arch_strings += ['k6_s4_universal_topology-single-wirelength_fcin0.6_fcout0.05']
#arch_strings += ['k6_s4_g4_universal_topology-on-cb-off-cb_fcin0.6_fcout0.2']
#arch_strings += ['k6_s4_g16_wilton_topology-on-sb-off-sb_fcin0.6_fcout0.05']
#arch_strings += ['k6_s4_g4_wilton_topology-on-sb-off-sb_fcin0.05_fcout0.05']
#arch_strings += ['k6_s4_g8_subset_topology-on-cb-off-cbsb_fcin0.2_fcout0.05']
#arch_strings += ['k6_s2_wilton_topology-single-wirelength_fcin0.4_fcout0.2']
#arch_strings += ['k6_s2_universal_topology-single-wirelength_fcin0.6_fcout0.05']
#arch_strings += ['k6_s4_subset_topology-single-wirelength_fcin0.6_fcout0.05']
#arch_strings += ['k6_s16_wilton_topology-single-wirelength_fcin0.4_fcout0.4']
#arch_strings += ['k6_s16_subset_topology-single-wirelength_fcin0.2_fcout0.4']
#arch_strings += ['k6_s4_g4_subset_topology-on-cb-off-sb_fcin0.2_fcout0.1']
#arch_strings += ['k6_s16_universal_topology-single-wirelength_fcin0.05_fcout0.05']
#arch_strings += ['k6_s8_wilton_topology-single-wirelength_fcin0.05_fcout0.4']
#arch_strings += ['k6_s4_g4_universal_topology-on-sb-off-sb_fcin0.4_fcout0.05']
#arch_strings += ['k6_s4_g16_subset_topology-on-cb-off-cb_fcin0.4_fcout0.05']
#arch_strings += ['k6_s4_g4_universal_topology-on-cb-off-cbsb_fcin0.1_fcout0.4']
#arch_strings += ['k6_s8_subset_topology-single-wirelength_fcin0.2_fcout0.05']
#arch_strings += ['k6_s4_g8_universal_topology-on-cb-off-cb_fcin0.6_fcout0.2']
#arch_strings += ['k6_s4_g16_wilton_topology-on-cb-off-cbsb_fcin0.05_fcout0.2']
#arch_strings += ['k6_s4_g8_subset_topology-on-sb-off-sb_fcin0.05_fcout0.6']
#arch_strings += ['k6_s8_wilton_topology-single-wirelength_fcin0.6_fcout0.2']
#arch_strings += ['k6_s4_g16_wilton_topology-on-cbsb-off-cbsb_fcin0.6_fcout0.2']
#arch_strings += ['k6_s2_universal_topology-single-wirelength_fcin0.1_fcout0.4']
#arch_strings += ['k6_s8_wilton_topology-single-wirelength_fcin0.05_fcout0.6']
#arch_strings += ['k6_s4_g8_subset_topology-on-cb-off-cbsb_fcin0.6_fcout0.05']
#arch_strings += ['k6_s4_g4_subset_topology-on-sb-off-sb_fcin0.4_fcout0.4']
#arch_strings += ['k6_s2_universal_topology-single-wirelength_fcin0.2_fcout0.6']
#arch_strings += ['k6_s2_wilton_topology-single-wirelength_fcin0.1_fcout0.6']
#arch_strings += ['k6_s2_subset_topology-single-wirelength_fcin0.1_fcout0.05']
#arch_strings += ['k6_s4_g16_universal_topology-on-cb-off-cbsb_fcin0.2_fcout0.2']
#arch_strings += ['k6_s4_g8_wilton_topology-on-cb-off-sb_fcin0.6_fcout0.1']
#arch_strings += ['k6_s4_g16_universal_topology-on-cbsb-off-cbsb_fcin0.05_fcout0.05']
#arch_strings += ['k6_s4_universal_topology-single-wirelength_fcin0.4_fcout0.2']
#arch_strings += ['k6_s1_wilton_topology-single-wirelength_fcin0.6_fcout0.05']
#arch_strings += ['k6_s2_wilton_topology-single-wirelength_fcin0.1_fcout0.05']
#arch_strings += ['k6_s1_subset_topology-single-wirelength_fcin0.05_fcout0.4']
#arch_strings += ['k6_s4_g8_universal_topology-on-cbsb-off-cbsb_fcin0.05_fcout0.05']
#arch_strings += ['k6_s4_subset_topology-single-wirelength_fcin0.4_fcout0.4']
#arch_strings += ['k6_s4_g16_subset_topology-on-cb-off-cbsb_fcin0.4_fcout0.1']
#arch_strings += ['k6_s16_universal_topology-single-wirelength_fcin0.1_fcout0.4']
#arch_strings += ['k6_s1_subset_topology-single-wirelength_fcin0.6_fcout0.4']
#arch_strings += ['k6_s2_universal_topology-single-wirelength_fcin0.6_fcout0.1']
#arch_strings += ['k6_s4_g8_subset_topology-on-sb-off-sb_fcin0.6_fcout0.05']
#arch_strings += ['k6_s1_wilton_topology-single-wirelength_fcin0.2_fcout0.6']
#### 100 random 4LUT architectures ####
# arch_strings += ['k4_s2_g16_wilton_topology-on-cbsb-off-cbsb_fcin0.2_fcout0.4']
# arch_strings += ['k4_s1_wilton_topology-single-wirelength_fcin0.3_fcout0.2']
# arch_strings += ['k4_s4_subset_topology-single-wirelength_fcin0.4_fcout0.2']
# arch_strings += ['k4_s1_universal_topology-single-wirelength_fcin0.4_fcout0.2']
# arch_strings += ['k4_s2_g8_wilton_topology-on-sb-off-sb_fcin0.1_fcout0.3']
# arch_strings += ['k4_s2_wilton_topology-single-wirelength_fcin0.2_fcout0.1']
# arch_strings += ['k4_s2_g16_subset_topology-on-cbsb-off-cbsb_fcin0.4_fcout0.4']
# arch_strings += ['k4_s2_universal_topology-single-wirelength_fcin0.1_fcout0.1']
# arch_strings += ['k4_s2_g16_subset_topology-on-sb-off-sb_fcin0.1_fcout0.1']
# arch_strings += ['k4_s2_g16_subset_topology-on-cb-off-cbsb_fcin0.2_fcout0.2']
# arch_strings += ['k4_s8_wilton_topology-single-wirelength_fcin0.1_fcout0.1']
# arch_strings += ['k4_s2_g16_universal_topology-on-sb-off-sb_fcin0.1_fcout0.3']
# arch_strings += ['k4_s2_g8_universal_topology-on-sb-off-sb_fcin0.3_fcout0.4']
# arch_strings += ['k4_s8_wilton_topology-single-wirelength_fcin0.6_fcout0.1']
# arch_strings += ['k4_s2_universal_topology-single-wirelength_fcin0.6_fcout0.4']
# arch_strings += ['k4_s2_g16_subset_topology-on-cb-off-cbsb_fcin0.4_fcout0.3']
# arch_strings += ['k4_s4_subset_topology-single-wirelength_fcin0.6_fcout0.6']
# arch_strings += ['k4_s4_universal_topology-single-wirelength_fcin0.6_fcout0.1']
# arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.6_fcout0.6']
# arch_strings += ['k4_s2_wilton_topology-single-wirelength_fcin0.6_fcout0.6']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.6_fcout0.4']
# arch_strings += ['k4_s1_wilton_topology-single-wirelength_fcin0.6_fcout0.3']
# arch_strings += ['k4_s2_g4_universal_topology-on-sb-off-sb_fcin0.6_fcout0.6']
# arch_strings += ['k4_s1_universal_topology-single-wirelength_fcin0.6_fcout0.1']
# arch_strings += ['k4_s2_g8_subset_topology-on-cb-off-cbsb_fcin0.1_fcout0.2']
# arch_strings += ['k4_s2_g8_subset_topology-on-cb-off-sb_fcin0.3_fcout0.6']
# arch_strings += ['k4_s2_g4_subset_topology-on-sb-off-sb_fcin0.3_fcout0.6']
# arch_strings += ['k4_s8_universal_topology-single-wirelength_fcin0.3_fcout0.2']
# arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.2_fcout0.1']
# arch_strings += ['k4_s2_g4_universal_topology-on-cbsb-off-cbsb_fcin0.1_fcout0.6']
# arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.6_fcout0.4']
# arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.2_fcout0.3']
# arch_strings += ['k4_s2_g16_subset_topology-on-cb-off-sb_fcin0.3_fcout0.2']
# arch_strings += ['k4_s2_g8_universal_topology-on-cbsb-off-cbsb_fcin0.4_fcout0.2']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.2_fcout0.1']
# arch_strings += ['k4_s2_g4_wilton_topology-on-cb-off-sb_fcin0.3_fcout0.6']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.3_fcout0.6']
# arch_strings += ['k4_s2_universal_topology-single-wirelength_fcin0.3_fcout0.2']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.6_fcout0.3']
# arch_strings += ['k4_s2_subset_topology-single-wirelength_fcin0.1_fcout0.6']
# arch_strings += ['k4_s2_g8_universal_topology-on-cb-off-cb_fcin0.3_fcout0.6']
# arch_strings += ['k4_s4_subset_topology-single-wirelength_fcin0.6_fcout0.3']
# arch_strings += ['k4_s1_wilton_topology-single-wirelength_fcin0.1_fcout0.4']
# arch_strings += ['k4_s8_universal_topology-single-wirelength_fcin0.3_fcout0.4']
# arch_strings += ['k4_s2_subset_topology-single-wirelength_fcin0.2_fcout0.3']
# arch_strings += ['k4_s1_universal_topology-single-wirelength_fcin0.1_fcout0.6']
# arch_strings += ['k4_s2_g4_wilton_topology-on-cb-off-sb_fcin0.6_fcout0.2']
# arch_strings += ['k4_s2_g4_subset_topology-on-sb-off-sb_fcin0.6_fcout0.3']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.4_fcout0.6']
# arch_strings += ['k4_s4_subset_topology-single-wirelength_fcin0.6_fcout0.4']
# arch_strings += ['k4_s2_g16_universal_topology-on-cbsb-off-cbsb_fcin0.3_fcout0.3']
# arch_strings += ['k4_s2_wilton_topology-single-wirelength_fcin0.3_fcout0.1']
# arch_strings += ['k4_s8_subset_topology-single-wirelength_fcin0.3_fcout0.6']
# arch_strings += ['k4_s2_g16_subset_topology-on-sb-off-sb_fcin0.4_fcout0.2']
# arch_strings += ['k4_s2_g16_universal_topology-on-cb-off-cb_fcin0.1_fcout0.2']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.2_fcout0.3']
# arch_strings += ['k4_s2_subset_topology-single-wirelength_fcin0.4_fcout0.2']
# arch_strings += ['k4_s4_universal_topology-single-wirelength_fcin0.4_fcout0.6']
# arch_strings += ['k4_s2_g8_subset_topology-on-cb-off-sb_fcin0.3_fcout0.1']
# arch_strings += ['k4_s2_g4_wilton_topology-on-cb-off-sb_fcin0.1_fcout0.6']
# arch_strings += ['k4_s2_g16_subset_topology-on-cb-off-cb_fcin0.3_fcout0.4']
# arch_strings += ['k4_s2_g8_universal_topology-on-cb-off-cb_fcin0.4_fcout0.2']
# arch_strings += ['k4_s8_wilton_topology-single-wirelength_fcin0.2_fcout0.3']
# arch_strings += ['k4_s2_universal_topology-single-wirelength_fcin0.1_fcout0.3']
# arch_strings += ['k4_s2_g16_universal_topology-on-cb-off-sb_fcin0.1_fcout0.3']
# arch_strings += ['k4_s2_g4_wilton_topology-on-cb-off-cb_fcin0.4_fcout0.6']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.3_fcout0.3']
# arch_strings += ['k4_s2_g4_universal_topology-on-cb-off-sb_fcin0.3_fcout0.3']
# arch_strings += ['k4_s2_g8_universal_topology-on-cb-off-sb_fcin0.6_fcout0.6']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.1_fcout0.6']
# arch_strings += ['k4_s2_g8_universal_topology-on-cbsb-off-cbsb_fcin0.4_fcout0.4']
# arch_strings += ['k4_s8_subset_topology-single-wirelength_fcin0.1_fcout0.2']
# arch_strings += ['k4_s8_subset_topology-single-wirelength_fcin0.6_fcout0.6']
# arch_strings += ['k4_s2_g4_subset_topology-on-sb-off-sb_fcin0.2_fcout0.3']
# arch_strings += ['k4_s2_g16_universal_topology-on-cb-off-sb_fcin0.3_fcout0.3']
# arch_strings += ['k4_s1_universal_topology-single-wirelength_fcin0.6_fcout0.4']
# arch_strings += ['k4_s2_g16_subset_topology-on-cbsb-off-cbsb_fcin0.6_fcout0.6']
# arch_strings += ['k4_s2_g8_wilton_topology-on-cb-off-cb_fcin0.1_fcout0.1']
# arch_strings += ['k4_s2_g8_subset_topology-on-cbsb-off-cbsb_fcin0.3_fcout0.3']
# arch_strings += ['k4_s8_wilton_topology-single-wirelength_fcin0.1_fcout0.4']
# arch_strings += ['k4_s2_g16_universal_topology-on-cb-off-cbsb_fcin0.2_fcout0.4']
# arch_strings += ['k4_s2_g4_subset_topology-on-cb-off-sb_fcin0.1_fcout0.4']
# arch_strings += ['k4_s2_g8_wilton_topology-on-cbsb-off-cbsb_fcin0.3_fcout0.1']
# arch_strings += ['k4_s8_universal_topology-single-wirelength_fcin0.6_fcout0.3']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.4_fcout0.4']
# arch_strings += ['k4_s2_g4_subset_topology-on-cb-off-cb_fcin0.3_fcout0.6']
# arch_strings += ['k4_s2_g16_universal_topology-on-sb-off-sb_fcin0.1_fcout0.1']
# arch_strings += ['k4_s2_g8_universal_topology-on-cbsb-off-cbsb_fcin0.3_fcout0.6']
# arch_strings += ['k4_s4_subset_topology-single-wirelength_fcin0.4_fcout0.6']
# arch_strings += ['k4_s2_g8_wilton_topology-on-cb-off-sb_fcin0.2_fcout0.1']
# arch_strings += ['k4_s2_wilton_topology-single-wirelength_fcin0.2_fcout0.6']
# arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.6_fcout0.3']
# arch_strings += ['k4_s2_wilton_topology-single-wirelength_fcin0.4_fcout0.4']
# arch_strings += ['k4_s1_universal_topology-single-wirelength_fcin0.4_fcout0.1']
# arch_strings += ['k4_s2_g16_wilton_topology-on-cb-off-sb_fcin0.1_fcout0.2']
# arch_strings += ['k4_s8_universal_topology-single-wirelength_fcin0.2_fcout0.1']
# arch_strings += ['k4_s2_g4_subset_topology-on-sb-off-sb_fcin0.3_fcout0.1']
# arch_strings += ['k4_s2_g8_universal_topology-on-cb-off-sb_fcin0.3_fcout0.1']
# arch_strings += ['k4_s4_wilton_topology-single-wirelength_fcin0.2_fcout0.2']
# arch_strings += ['k4_s8_wilton_topology-single-wirelength_fcin0.4_fcout0.2']
# arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.6_fcout0.6']
# arch_strings += ['k4_s1_wilton_topology-single-wirelength_fcin0.6_fcout0.6']
# arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.2_fcout0.4']
# arch_strings += ['k4_s1_wilton_topology-single-wirelength_fcin0.2_fcout0.4']
# arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.4_fcout0.2']
# arch_strings += ['k4_s1_wilton_topology-single-wirelength_fcin0.4_fcout0.2']
arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.3_fcout0.2']
# arch_strings += ['k4_s1_wilton_topology-single-wirelength_fcin0.3_fcout0.1']
# arch_strings += ['k4_s1_subset_topology-single-wirelength_fcin0.7_fcout0.2']
# arch_strings += ['k4_s1_wilton_topology-single-wirelength_fcin0.7_fcout0.2']
#build a list of arch points based on the arch strings
for arch_str in arch_strings:
arch_point = Arch_Point_Info.from_str(arch_str)
arch_list += [arch_point]
return arch_list
#just a wrapper for the function of the same name in arch_handler.py
def get_path_to_arch(arch_point):
arch_path = ''
sb_pattern = arch_point.switchblock_pattern
wire_topology = arch_point.wire_topology
wirelengths = {}
wirelengths['semi-global'] = arch_point.s_wirelength
if arch_point.g_wirelength != None:
wirelengths['global'] = arch_point.g_wirelength
global_via_repeat = 4
fc_in = arch_point.fcin
fc_out = arch_point.fcout
lut_size = str(arch_point.lut_size) + 'LUT'
arch_path = ah.get_path_to_arch(sb_pattern, wire_topology, wirelengths, global_via_repeat, fc_in, fc_out, lut_size)
return arch_path
| {
"content_hash": "f089d79741feb7354a41a96583cfe350",
"timestamp": "",
"source": "github",
"line_count": 1228,
"max_line_length": 194,
"avg_line_length": 40.97801302931596,
"alnum_prop": 0.6915403112020826,
"repo_name": "wotan-fpga/wotan",
"id": "c10c3a2be3b4cbabcd99fa6fb6122a6ed3ee2237",
"size": "50322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/wotan_tester.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4770"
},
{
"name": "C++",
"bytes": "551250"
},
{
"name": "Makefile",
"bytes": "4749"
},
{
"name": "Python",
"bytes": "124266"
},
{
"name": "Shell",
"bytes": "1069"
}
],
"symlink_target": ""
} |
"""Google Cloud Platform library - ml cell magic."""
from __future__ import absolute_import
from __future__ import unicode_literals
try:
import IPython
import IPython.core.display
import IPython.core.magic
except ImportError:
raise Exception('This module can only be loaded in ipython.')
import argparse
import collections
import json
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import shutil
import six
from skimage.segmentation import mark_boundaries
import subprocess
import tempfile
import textwrap
import tensorflow as tf
from tensorflow.python.lib.io import file_io
import urllib
import google.datalab
from google.datalab import Context
import google.datalab.ml as datalab_ml
import google.datalab.utils.commands
import google.datalab.contrib.mlworkbench._local_predict as _local_predict
import google.datalab.contrib.mlworkbench._shell_process as _shell_process
import google.datalab.contrib.mlworkbench._archive as _archive
import google.datalab.contrib.mlworkbench._prediction_explainer as _prediction_explainer
DEFAULT_PACKAGE_PATH = '/datalab/lib/pydatalab/solutionbox/ml_workbench/tensorflow/'
@IPython.core.magic.register_line_cell_magic
def ml(line, cell=None):
"""Implements the datalab cell magic for MLWorkbench operations.
Args:
line: the contents of the ml command line.
Returns:
The results of executing the cell.
"""
parser = google.datalab.utils.commands.CommandParser(
prog='%ml',
description=textwrap.dedent("""\
Execute MLWorkbench operations
Use "%ml <command> -h" for help on a specific command.
"""))
dataset_parser = parser.subcommand(
'dataset',
formatter_class=argparse.RawTextHelpFormatter,
help='Create or explore datasets.')
dataset_sub_commands = dataset_parser.add_subparsers(dest='command')
dataset_create_parser = dataset_sub_commands.add_parser(
'create', help='Create datasets', formatter_class=argparse.RawTextHelpFormatter,
epilog=textwrap.dedent("""\
Example usage:
%%ml dataset
name: mydata
format: csv
train: path/to/train.csv
eval: path/to/eval.csv
schema:
- name: news_label
type: STRING
- name: text
type: STRING"""))
dataset_create_parser.add_argument('--name', required=True,
help='the name of the dataset to define. ')
dataset_create_parser.add_argument('--format', required=True,
choices=['csv', 'bigquery', 'transformed'],
help='The format of the data.')
dataset_create_parser.add_argument('--train', required=True,
help='The path of the training file pattern if format ' +
'is csv or transformed, or table name if format ' +
'is bigquery.')
dataset_create_parser.add_argument('--eval', required=True,
help='The path of the eval file pattern if format ' +
'is csv or transformed, or table name if format ' +
'is bigquery.')
dataset_create_parser.add_cell_argument('schema',
help='yaml representation of CSV schema, or path to ' +
'schema file. Only needed if format is csv.')
dataset_create_parser.set_defaults(func=_dataset_create)
dataset_explore_parser = dataset_sub_commands.add_parser(
'explore', help='Explore training data.')
dataset_explore_parser.add_argument('--name', required=True,
help='The name of the dataset to explore.')
dataset_explore_parser.add_argument('--overview', action='store_true', default=False,
help='Plot overview of sampled data. Set "sample_size" ' +
'to change the default sample size.')
dataset_explore_parser.add_argument('--facets', action='store_true', default=False,
help='Plot facets view of sampled data. Set ' +
'"sample_size" to change the default sample size.')
dataset_explore_parser.add_argument('--sample_size', type=int, default=1000,
help='sample size for overview or facets view. Only ' +
'used if either --overview or --facets is set.')
dataset_explore_parser.set_defaults(func=_dataset_explore)
analyze_parser = parser.subcommand(
'analyze',
formatter_class=argparse.RawTextHelpFormatter,
help='Analyze training data and generate stats, such as min/max/mean '
'for numeric values, vocabulary for text columns.',
epilog=textwrap.dedent("""\
Example usage:
%%ml analyze [--cloud]
output: path/to/dir
data: $mydataset
features:
serialId:
transform: key
num1:
transform: scale
value: 1
num2:
transform: identity
text1:
transform: bag_of_words
Also supports in-notebook variables, such as:
%%ml analyze --output path/to/dir
training_data: $my_csv_dataset
features: $features_def"""))
analyze_parser.add_argument('--output', required=True,
help='path of output directory.')
analyze_parser.add_argument('--cloud', action='store_true', default=False,
help='whether to run analysis in cloud or local.')
analyze_parser.add_argument('--package', required=False,
help='A local or GCS tarball path to use as the source. '
'If not set, the default source package will be used.')
analyze_parser.add_cell_argument(
'data',
required=True,
help="""Training data. A dataset defined by "%%ml dataset".""")
analyze_parser.add_cell_argument(
'features',
required=True,
help=textwrap.dedent("""\
features config indicating how to transform data into features. The
list of supported transforms:
"transform: identity"
does nothing (for numerical columns).
"transform: scale
value: x"
scale a numerical column to [-a, a]. If value is missing, x
defaults to 1.
"transform: one_hot"
treats the string column as categorical and makes one-hot
encoding of it.
"transform: embedding
embedding_dim: d"
treats the string column as categorical and makes embeddings of
it with specified dimension size.
"transform: bag_of_words"
treats the string column as text and make bag of words
transform of it.
"transform: tfidf"
treats the string column as text and make TFIDF transform of it.
"transform: image_to_vec
checkpoint: gs://b/o"
from image gs url to embeddings. "checkpoint" is a inception v3
checkpoint. If absent, a default checkpoint is used.
"transform: target"
denotes the column is the target. If the schema type of this
column is string, a one_hot encoding is automatically applied.
If numerical, an identity transform is automatically applied.
"transform: key"
column contains metadata-like information and will be output
as-is in prediction."""))
analyze_parser.set_defaults(func=_analyze)
transform_parser = parser.subcommand(
'transform',
formatter_class=argparse.RawTextHelpFormatter,
help='Transform the data into tf.example which is more efficient in training.',
epilog=textwrap.dedent("""\
Example usage:
%%ml transform [--cloud] [--shuffle]
analysis: path/to/analysis_output_folder
output: path/to/dir
batch_size: 100
data: $mydataset
cloud:
num_workers: 3
worker_machine_type: n1-standard-1
project_id: my_project_id"""))
transform_parser.add_argument('--analysis', required=True,
help='path of analysis output directory.')
transform_parser.add_argument('--output', required=True,
help='path of output directory.')
transform_parser.add_argument('--cloud', action='store_true', default=False,
help='whether to run transform in cloud or local.')
transform_parser.add_argument('--shuffle', action='store_true', default=False,
help='whether to shuffle the training data in output.')
transform_parser.add_argument('--batch_size', type=int, default=100,
help='number of instances in a batch to process once. '
'Larger batch is more efficient but may consume more memory.')
transform_parser.add_argument('--package', required=False,
help='A local or GCS tarball path to use as the source. '
'If not set, the default source package will be used.')
transform_parser.add_cell_argument(
'data',
required=True,
help="""Training data. A dataset defined by "%%ml dataset".""")
transform_parser.add_cell_argument(
'cloud_config',
help=textwrap.dedent("""\
A dictionary of cloud config. All of them are optional.
num_workers: Dataflow number of workers. If not set, DataFlow
service will determine the number.
worker_machine_type: a machine name from
https://cloud.google.com/compute/docs/machine-types
If not given, the service uses the default machine type.
project_id: id of the project to use for DataFlow service. If not set,
Datalab's default project (set by %%datalab project set) is used.
job_name: Unique name for a Dataflow job to use. If not set, a
random name will be used."""))
transform_parser.set_defaults(func=_transform)
train_parser = parser.subcommand(
'train',
formatter_class=argparse.RawTextHelpFormatter,
help='Train a model.',
epilog=textwrap.dedent("""\
Example usage:
%%ml train [--cloud]
analysis: path/to/analysis_output
output: path/to/dir
data: $mydataset
model_args:
model: linear_regression
cloud_config:
region: us-central1"""))
train_parser.add_argument('--analysis', required=True,
help='path of analysis output directory.')
train_parser.add_argument('--output', required=True,
help='path of trained model directory.')
train_parser.add_argument('--cloud', action='store_true', default=False,
help='whether to run training in cloud or local.')
train_parser.add_argument('--notb', action='store_true', default=False,
help='If set, tensorboard is not automatically started.')
train_parser.add_argument('--package', required=False,
help='A local or GCS tarball path to use as the source. '
'If not set, the default source package will be used.')
train_parser.add_cell_argument(
'data',
required=True,
help="""Training data. A dataset defined by "%%ml dataset".""")
package_model_help = subprocess.Popen(
['python', '-m', 'trainer.task', '--datalab-help'],
cwd=DEFAULT_PACKAGE_PATH,
stdout=subprocess.PIPE).communicate()[0]
package_model_help = ('model_args: a dictionary of model specific args, including:\n\n' +
package_model_help.decode())
train_parser.add_cell_argument('model_args', help=package_model_help)
train_parser.add_cell_argument(
'cloud_config',
help=textwrap.dedent("""\
A dictionary of cloud training config, including:
job_id: the name of the job. If not provided, a default job name is created.
region: see {url}
runtime_version: see "region". Must be a string like '1.2'.
scale_tier: see "region".""".format(
url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/training')))
train_parser.set_defaults(func=_train)
predict_parser = parser.subcommand(
'predict',
formatter_class=argparse.RawTextHelpFormatter,
help='Predict with local or deployed models. (Good for small datasets).',
epilog=textwrap.dedent("""\
Example usage:
%%ml predict
headers: key,num
model: path/to/model
data:
- key1,value1
- key2,value2
Or, in another cell, define a list of dict:
my_data = [{'key': 1, 'num': 1.2}, {'key': 2, 'num': 2.8}]
Then:
%%ml predict
headers: key,num
model: path/to/model
data: $my_data"""))
predict_parser.add_argument('--model', required=True,
help='The model path.')
predict_parser.add_argument('--no_show_image', action='store_true', default=False,
help='If not set, add a column of images in output.')
predict_parser.add_cell_argument(
'data',
required=True,
help=textwrap.dedent("""\
Prediction data can be
1) CSV lines in the input cell in yaml format or
2) a local variable which is one of
a) list of dict
b) list of strings of csv lines
c) a Pandas DataFrame"""))
predict_parser.set_defaults(func=_predict)
batch_predict_parser = parser.subcommand(
'batch_predict',
formatter_class=argparse.RawTextHelpFormatter,
help='Batch prediction with local or deployed models. (Good for large datasets)',
epilog=textwrap.dedent("""\
Example usage:
%%ml batch_predict [--cloud]
model: path/to/model
output: path/to/output
format: csv
data:
csv: path/to/file_pattern"""))
batch_predict_parser.add_argument('--model', required=True,
help='The model path if not --cloud, or the id in '
'the form of model.version if --cloud.')
batch_predict_parser.add_argument('--output', required=True,
help='The path of output directory with prediction results. '
'If --cloud, it has to be GCS path.')
batch_predict_parser.add_argument('--format',
help='csv or json. For cloud run, '
'the only supported format is json.')
batch_predict_parser.add_argument('--batch_size', type=int, default=100,
help='number of instances in a batch to process once. '
'Larger batch is more efficient but may consume '
'more memory. Only used in local run.')
batch_predict_parser.add_argument('--cloud', action='store_true', default=False,
help='whether to run prediction in cloud or local.')
batch_predict_parser.add_cell_argument(
'data',
required=True,
help='Data to predict with. Only csv is supported.')
batch_predict_parser.add_cell_argument(
'cloud_config',
help=textwrap.dedent("""\
A dictionary of cloud batch prediction config.
job_id: the name of the job. If not provided, a default job name is created.
region: see {url}
max_worker_count: see reference in "region".""".format(
url='https://cloud.google.com/sdk/gcloud/reference/ml-engine/jobs/submit/prediction'))) # noqa
batch_predict_parser.set_defaults(func=_batch_predict)
explain_parser = parser.subcommand(
'explain',
formatter_class=argparse.RawTextHelpFormatter,
help='Explain a prediction with LIME tool.')
explain_parser.add_argument('--type', default='all', choices=['text', 'image', 'tabular', 'all'],
help='the type of column to explain.')
explain_parser.add_argument('--algorithm', choices=['lime', 'ig'], default='lime',
help='"lime" is the open sourced project for prediction explainer.' +
'"ig" means integrated gradients and currently only applies ' +
'to image.')
explain_parser.add_argument('--model', required=True,
help='path of the model directory used for prediction.')
explain_parser.add_argument('--labels', required=True,
help='comma separated labels to explain.')
explain_parser.add_argument('--column_name',
help='the name of the column to explain. Optional if text type ' +
'and there is only one text column, or image type and ' +
'there is only one image column.')
explain_parser.add_cell_argument('data', required=True,
help='Prediction Data. Can be a csv line, or a dict.')
explain_parser.add_cell_argument('training_data',
help='A csv or bigquery dataset defined by %%ml dataset. ' +
'Used by tabular explainer only to determine the ' +
'distribution of numeric and categorical values. ' +
'Suggest using original training dataset.')
# options specific for lime
explain_parser.add_argument('--num_features', type=int,
help='number of features to analyze. In text, it is number of ' +
'words. In image, it is number of areas. For lime only.')
explain_parser.add_argument('--num_samples', type=int,
help='size of the neighborhood to learn the linear model. ' +
'For lime only.')
explain_parser.add_argument('--hide_color', type=int, default=0,
help='the color to use for perturbed area. If -1, average of ' +
'each channel is used for each channel. For image only.')
explain_parser.add_argument('--include_negative', action='store_true', default=False,
help='whether to show only positive areas. For lime image only.')
explain_parser.add_argument('--overview', action='store_true', default=False,
help='whether to show overview instead of details view.' +
'For lime text and tabular only.')
explain_parser.add_argument('--batch_size', type=int, default=100,
help='size of batches passed to prediction. For lime only.')
# options specific for integrated gradients
explain_parser.add_argument('--num_gradients', type=int, default=50,
help='the number of scaled images to get gradients from. Larger ' +
'number usually produces better results but slower.')
explain_parser.add_argument('--percent_show', type=int, default=10,
help='the percentage of top impactful pixels to show.')
explain_parser.set_defaults(func=_explain)
tensorboard_parser = parser.subcommand(
'tensorboard',
formatter_class=argparse.RawTextHelpFormatter,
help='Start/stop/list TensorBoard instances.')
tensorboard_sub_commands = tensorboard_parser.add_subparsers(dest='command')
tensorboard_start_parser = tensorboard_sub_commands.add_parser(
'start', help='Start a tensorboard instance.')
tensorboard_start_parser.add_argument('--logdir', required=True,
help='The local or GCS logdir path.')
tensorboard_start_parser.set_defaults(func=_tensorboard_start)
tensorboard_stop_parser = tensorboard_sub_commands.add_parser(
'stop', help='Stop a tensorboard instance.')
tensorboard_stop_parser.add_argument('--pid', required=True, type=int,
help='The pid of the tensorboard instance.')
tensorboard_stop_parser.set_defaults(func=_tensorboard_stop)
tensorboard_list_parser = tensorboard_sub_commands.add_parser(
'list', help='List tensorboard instances.')
tensorboard_list_parser.set_defaults(func=_tensorboard_list)
evaluate_parser = parser.subcommand(
'evaluate',
formatter_class=argparse.RawTextHelpFormatter,
help='Analyze model evaluation results, such as confusion matrix, ROC, RMSE.')
evaluate_sub_commands = evaluate_parser.add_subparsers(dest='command')
def _add_data_params_for_evaluate(parser):
parser.add_argument('--csv', help='csv file path patterns.')
parser.add_argument('--headers',
help='csv file headers. Required if csv is specified and ' +
'predict_results_schema.json does not exist in the same directory.')
parser.add_argument('--bigquery',
help='can be bigquery table, query as a string, or ' +
'a pre-defined query (%%bq query --name).')
evaluate_cm_parser = evaluate_sub_commands.add_parser(
'confusion_matrix', help='Get confusion matrix from evaluation results.')
_add_data_params_for_evaluate(evaluate_cm_parser)
evaluate_cm_parser.add_argument('--plot', action='store_true', default=False,
help='Whether to plot confusion matrix as graph.')
evaluate_cm_parser.add_argument('--size', type=int, default=10,
help='The size of the confusion matrix.')
evaluate_cm_parser.set_defaults(func=_evaluate_cm)
evaluate_accuracy_parser = evaluate_sub_commands.add_parser(
'accuracy', help='Get accuracy results from classification evaluation results.')
_add_data_params_for_evaluate(evaluate_accuracy_parser)
evaluate_accuracy_parser.set_defaults(func=_evaluate_accuracy)
evaluate_pr_parser = evaluate_sub_commands.add_parser(
'precision_recall', help='Get precision recall metrics from evaluation results.')
_add_data_params_for_evaluate(evaluate_pr_parser)
evaluate_pr_parser.add_argument('--plot', action='store_true', default=False,
help='Whether to plot precision recall as graph.')
evaluate_pr_parser.add_argument('--num_thresholds', type=int, default=20,
help='Number of thresholds which determines how many ' +
'points in the graph.')
evaluate_pr_parser.add_argument('--target_class', required=True,
help='The target class to determine correctness of ' +
'a prediction.')
evaluate_pr_parser.add_argument('--probability_column',
help='The name of the column holding the probability ' +
'value of the target class. If absent, the value ' +
'of target class is used.')
evaluate_pr_parser.set_defaults(func=_evaluate_pr)
evaluate_roc_parser = evaluate_sub_commands.add_parser(
'roc', help='Get ROC metrics from evaluation results.')
_add_data_params_for_evaluate(evaluate_roc_parser)
evaluate_roc_parser.add_argument('--plot', action='store_true', default=False,
help='Whether to plot ROC as graph.')
evaluate_roc_parser.add_argument('--num_thresholds', type=int, default=20,
help='Number of thresholds which determines how many ' +
'points in the graph.')
evaluate_roc_parser.add_argument('--target_class', required=True,
help='The target class to determine correctness of ' +
'a prediction.')
evaluate_roc_parser.add_argument('--probability_column',
help='The name of the column holding the probability ' +
'value of the target class. If absent, the value ' +
'of target class is used.')
evaluate_roc_parser.set_defaults(func=_evaluate_roc)
evaluate_regression_parser = evaluate_sub_commands.add_parser(
'regression', help='Get regression metrics from evaluation results.')
_add_data_params_for_evaluate(evaluate_regression_parser)
evaluate_regression_parser.set_defaults(func=_evaluate_regression)
model_parser = parser.subcommand(
'model',
help='Models and versions management such as deployment, deletion, listing.')
model_sub_commands = model_parser.add_subparsers(dest='command')
model_list_parser = model_sub_commands.add_parser(
'list', help='List models and versions.')
model_list_parser.add_argument('--name',
help='If absent, list all models of specified or current ' +
'project. If provided, list all versions of the ' +
'model.')
model_list_parser.add_argument('--project',
help='The project to list model(s) or version(s). If absent, ' +
'use Datalab\'s default project.')
model_list_parser.set_defaults(func=_model_list)
model_delete_parser = model_sub_commands.add_parser(
'delete', help='Delete models or versions.')
model_delete_parser.add_argument('--name', required=True,
help='If no "." in the name, try deleting the specified ' +
'model. If "model.version" is provided, try deleting ' +
'the specified version.')
model_delete_parser.add_argument('--project',
help='The project to delete model or version. If absent, ' +
'use Datalab\'s default project.')
model_delete_parser.set_defaults(func=_model_delete)
model_deploy_parser = model_sub_commands.add_parser(
'deploy', help='Deploy a model version.')
model_deploy_parser.add_argument('--name', required=True,
help='Must be model.version to indicate the model ' +
'and version name to deploy.')
model_deploy_parser.add_argument('--path', required=True,
help='The GCS path of the model to be deployed.')
model_deploy_parser.add_argument('--runtime_version',
help='The TensorFlow version to use for this model. ' +
'For example, "1.2.1". If absent, the current ' +
'TensorFlow version installed in Datalab will be used.')
model_deploy_parser.add_argument('--project',
help='The project to deploy a model version. If absent, ' +
'use Datalab\'s default project.')
model_deploy_parser.set_defaults(func=_model_deploy)
return google.datalab.utils.commands.handle_magic_line(line, cell, parser)
DataSet = collections.namedtuple('DataSet', ['train', 'eval'])
def _abs_path(path):
"""Convert a non-GCS path to its absolute path.
path can contain special filepath characters like '..', '*' and '.'.
Example: If the current folder is /content/datalab/folder1 and path is
'../folder2/files*', then this function returns the string
'/content/datalab/folder2/files*'.
This function is needed if using _shell_process.run_and_monitor() as that
function runs a command in a different folder.
Args:
path: string.
"""
if path.startswith('gs://'):
return path
return os.path.abspath(path)
def _create_json_file(tmpdir, data, filename):
json_file = os.path.join(tmpdir, filename)
with file_io.FileIO(json_file, 'w') as f:
json.dump(data, f)
return json_file
def _show_job_link(job):
log_url_query_strings = {
'project': Context.default().project_id,
'resource': 'ml.googleapis.com/job_id/' + job.info['jobId']
}
log_url = 'https://console.developers.google.com/logs/viewer?' + \
urllib.urlencode(log_url_query_strings)
html = 'Job "%s" submitted.' % job.info['jobId']
html += '<p>Click <a href="%s" target="_blank">here</a> to view cloud log. <br/>' % log_url
IPython.display.display_html(html, raw=True)
def get_dataset_from_arg(dataset_arg):
if isinstance(dataset_arg, DataSet):
return dataset_arg
if isinstance(dataset_arg, six.string_types):
return google.datalab.utils.commands.notebook_environment()[dataset_arg]
raise ValueError('Invalid dataset reference "%s". ' % dataset_arg +
'Expect a dataset defined with "%%ml dataset create".')
def _analyze(args, cell):
# For now, always run python2. If needed we can run python3 when the current kernel
# is py3. Since now our transform cannot work on py3 anyway, I would rather run
# everything with python2.
cmd_args = ['python', 'analyze.py', '--output', _abs_path(args['output'])]
if args['cloud']:
cmd_args.append('--cloud')
training_data = get_dataset_from_arg(args['data'])
if args['cloud']:
tmpdir = os.path.join(args['output'], 'tmp')
else:
tmpdir = tempfile.mkdtemp()
try:
if isinstance(training_data.train, datalab_ml.CsvDataSet):
csv_data = training_data.train
schema_file = _create_json_file(tmpdir, csv_data.schema, 'schema.json')
for file_name in csv_data.input_files:
cmd_args.append('--csv=' + _abs_path(file_name))
cmd_args.extend(['--schema', schema_file])
elif isinstance(training_data.train, datalab_ml.BigQueryDataSet):
bq_data = training_data.train
cmd_args.extend(['--bigquery', bq_data.table])
else:
raise ValueError('Unexpected training data type. Only csv or bigquery are supported.')
features = args['features']
features_file = _create_json_file(tmpdir, features, 'features.json')
cmd_args.extend(['--features', features_file])
if args['package']:
code_path = os.path.join(tmpdir, 'package')
_archive.extract_archive(args['package'], code_path)
else:
code_path = DEFAULT_PACKAGE_PATH
_shell_process.run_and_monitor(cmd_args, os.getpid(), cwd=code_path)
finally:
file_io.delete_recursively(tmpdir)
def _transform(args, cell):
if args['cloud_config'] and not args['cloud']:
raise ValueError('"cloud_config" is provided but no "--cloud". '
'Do you want local run or cloud run?')
cmd_args = ['python', 'transform.py',
'--output', _abs_path(args['output']),
'--analysis', _abs_path(args['analysis'])]
if args['cloud']:
cmd_args.append('--cloud')
cmd_args.append('--async')
if args['shuffle']:
cmd_args.append('--shuffle')
if args['batch_size']:
cmd_args.extend(['--batch-size', str(args['batch_size'])])
cloud_config = args['cloud_config']
if cloud_config:
google.datalab.utils.commands.validate_config(
cloud_config,
required_keys=[],
optional_keys=['num_workers', 'worker_machine_type', 'project_id', 'job_name'])
if 'num_workers' in cloud_config:
cmd_args.extend(['--num-workers', str(cloud_config['num_workers'])])
if 'worker_machine_type' in cloud_config:
cmd_args.extend(['--worker-machine-type', cloud_config['worker_machine_type']])
if 'project_id' in cloud_config:
cmd_args.extend(['--project-id', cloud_config['project_id']])
if 'job_name' in cloud_config:
cmd_args.extend(['--job-name', cloud_config['job_name']])
if args['cloud'] and (not cloud_config or 'project_id' not in cloud_config):
cmd_args.extend(['--project-id', google.datalab.Context.default().project_id])
training_data = get_dataset_from_arg(args['data'])
data_names = ('train', 'eval')
for name in data_names:
cmd_args_copy = list(cmd_args)
if isinstance(getattr(training_data, name), datalab_ml.CsvDataSet):
for file_name in getattr(training_data, name).input_files:
cmd_args_copy.append('--csv=' + _abs_path(file_name))
elif isinstance(getattr(training_data, name), datalab_ml.BigQueryDataSet):
cmd_args_copy.extend(['--bigquery', getattr(training_data, name).table])
else:
raise ValueError('Unexpected training data type. Only csv or bigquery are supported.')
cmd_args_copy.extend(['--prefix', name])
try:
tmpdir = None
if args['package']:
tmpdir = tempfile.mkdtemp()
code_path = os.path.join(tmpdir, 'package')
_archive.extract_archive(args['package'], code_path)
else:
code_path = DEFAULT_PACKAGE_PATH
_shell_process.run_and_monitor(cmd_args_copy, os.getpid(), cwd=code_path)
finally:
if tmpdir:
shutil.rmtree(tmpdir)
def _train(args, cell):
if args['cloud_config'] and not args['cloud']:
raise ValueError('"cloud_config" is provided but no "--cloud". '
'Do you want local run or cloud run?')
job_args = ['--job-dir', _abs_path(args['output']),
'--analysis', _abs_path(args['analysis'])]
training_data = get_dataset_from_arg(args['data'])
data_names = ('train', 'eval')
for name in data_names:
if (isinstance(getattr(training_data, name), datalab_ml.CsvDataSet) or
isinstance(getattr(training_data, name), datalab_ml.TransformedDataSet)):
for file_name in getattr(training_data, name).input_files:
job_args.append('--%s=%s' % (name, _abs_path(file_name)))
else:
raise ValueError('Unexpected training data type. ' +
'Only csv and transformed type are supported.')
if isinstance(training_data.train, datalab_ml.CsvDataSet):
job_args.append('--transform')
# TODO(brandondutra) document that any model_args that are file paths must
# be given as an absolute path
if args['model_args']:
for k, v in six.iteritems(args['model_args']):
job_args.extend(['--' + k, str(v)])
try:
tmpdir = None
if args['package']:
tmpdir = tempfile.mkdtemp()
code_path = os.path.join(tmpdir, 'package')
_archive.extract_archive(args['package'], code_path)
else:
code_path = DEFAULT_PACKAGE_PATH
if args['cloud']:
cloud_config = args['cloud_config']
if not args['output'].startswith('gs://'):
raise ValueError('Cloud training requires a GCS (starting with "gs://") output.')
staging_tarball = os.path.join(args['output'], 'staging', 'trainer.tar.gz')
datalab_ml.package_and_copy(code_path,
os.path.join(code_path, 'setup.py'),
staging_tarball)
job_request = {
'package_uris': [staging_tarball],
'python_module': 'trainer.task',
'job_dir': args['output'],
'args': job_args,
}
job_request.update(cloud_config)
job_id = cloud_config.get('job_id', None)
job = datalab_ml.Job.submit_training(job_request, job_id)
_show_job_link(job)
if not args['notb']:
datalab_ml.TensorBoard.start(args['output'])
else:
cmd_args = ['python', '-m', 'trainer.task'] + job_args
if not args['notb']:
datalab_ml.TensorBoard.start(args['output'])
_shell_process.run_and_monitor(cmd_args, os.getpid(), cwd=code_path)
finally:
if tmpdir:
shutil.rmtree(tmpdir)
def _predict(args, cell):
schema, features = _local_predict.get_model_schema_and_features(args['model'])
headers = [x['name'] for x in schema]
img_cols = []
for k, v in six.iteritems(features):
if v['transform'] in ['image_to_vec']:
img_cols.append(v['source_column'])
data = args['data']
df = _local_predict.get_prediction_results(
args['model'], data, headers, img_cols=img_cols, cloud=False,
show_image=not args['no_show_image'])
def _show_img(img_bytes):
return '<img src="data:image/png;base64,' + img_bytes + '" />'
def _truncate_text(text):
return (text[:37] + '...') if isinstance(text, six.string_types) and len(text) > 40 else text
# Truncate text explicitly here because we will set display.max_colwidth to -1.
# This applies to images to but images will be overriden with "_show_img()" later.
formatters = {x: _truncate_text for x in df.columns if df[x].dtype == np.object}
if not args['no_show_image'] and img_cols:
formatters.update({x + '_image': _show_img for x in img_cols})
# Set display.max_colwidth to -1 so we can display images.
old_width = pd.get_option('display.max_colwidth')
pd.set_option('display.max_colwidth', -1)
try:
IPython.display.display(IPython.display.HTML(
df.to_html(formatters=formatters, escape=False, index=False)))
finally:
pd.set_option('display.max_colwidth', old_width)
def _batch_predict(args, cell):
if args['cloud_config'] and not args['cloud']:
raise ValueError('"cloud_config" is provided but no "--cloud". '
'Do you want local run or cloud run?')
if args['cloud']:
job_request = {
'data_format': 'TEXT',
'input_paths': file_io.get_matching_files(args['data']['csv']),
'output_path': args['output'],
}
if args['model'].startswith('gs://'):
job_request['uri'] = args['model']
else:
parts = args['model'].split('.')
if len(parts) != 2:
raise ValueError('Invalid model name for cloud prediction. Use "model.version".')
version_name = ('projects/%s/models/%s/versions/%s' %
(Context.default().project_id, parts[0], parts[1]))
job_request['version_name'] = version_name
cloud_config = args['cloud_config'] or {}
job_id = cloud_config.pop('job_id', None)
job_request.update(cloud_config)
job = datalab_ml.Job.submit_batch_prediction(job_request, job_id)
_show_job_link(job)
else:
print('local prediction...')
_local_predict.local_batch_predict(args['model'],
args['data']['csv'],
args['output'],
args['format'],
args['batch_size'])
print('done.')
# Helper classes for explainer. Each for is for a combination
# of algorithm (LIME, IG) and type (text, image, tabular)
# ===========================================================
class _TextLimeExplainerInstance(object):
def __init__(self, explainer, labels, args):
num_features = args['num_features'] if args['num_features'] else 10
num_samples = args['num_samples'] if args['num_samples'] else 5000
self._exp = explainer.explain_text(
labels, args['data'], column_name=args['column_name'],
num_features=num_features, num_samples=num_samples)
self._col_name = args['column_name'] if args['column_name'] else explainer._text_columns[0]
self._show_overview = args['overview']
def visualize(self, label_index):
if self._show_overview:
IPython.display.display(
IPython.display.HTML('<br/> Text Column "<b>%s</b>"<br/>' % self._col_name))
self._exp.show_in_notebook(labels=[label_index])
else:
fig = self._exp.as_pyplot_figure(label=label_index)
# Clear original title set by lime.
plt.title('')
fig.suptitle('Text Column "%s"' % self._col_name, fontsize=16)
plt.close(fig)
IPython.display.display(fig)
class _ImageLimeExplainerInstance(object):
def __init__(self, explainer, labels, args):
num_samples = args['num_samples'] if args['num_samples'] else 300
hide_color = None if args['hide_color'] == -1 else args['hide_color']
self._exp = explainer.explain_image(
labels, args['data'], column_name=args['column_name'],
num_samples=num_samples, batch_size=args['batch_size'], hide_color=hide_color)
self._labels = labels
self._positive_only = not args['include_negative']
self._num_features = args['num_features'] if args['num_features'] else 3
self._col_name = args['column_name'] if args['column_name'] else explainer._image_columns[0]
def visualize(self, label_index):
image, mask = self._exp.get_image_and_mask(
label_index,
positive_only=self._positive_only,
num_features=self._num_features, hide_rest=False)
fig = plt.figure()
fig.suptitle('Image Column "%s"' % self._col_name, fontsize=16)
plt.grid(False)
plt.imshow(mark_boundaries(image, mask))
plt.close(fig)
IPython.display.display(fig)
class _ImageIgExplainerInstance(object):
def __init__(self, explainer, labels, args):
self._raw_image, self._analysis_images = explainer.probe_image(
labels, args['data'], column_name=args['column_name'],
num_scaled_images=args['num_gradients'], top_percent=args['percent_show'])
self._labels = labels
self._col_name = args['column_name'] if args['column_name'] else explainer._image_columns[0]
def visualize(self, label_index):
# Show both resized raw image and analyzed image.
fig = plt.figure()
fig.suptitle('Image Column "%s"' % self._col_name, fontsize=16)
plt.grid(False)
plt.imshow(self._analysis_images[label_index])
plt.close(fig)
IPython.display.display(fig)
class _TabularLimeExplainerInstance(object):
def __init__(self, explainer, labels, args):
if not args['training_data']:
raise ValueError('tabular explanation requires training_data to determine ' +
'values distribution.')
training_data = get_dataset_from_arg(args['training_data'])
if (not isinstance(training_data.train, datalab_ml.CsvDataSet) and
not isinstance(training_data.train, datalab_ml.BigQueryDataSet)):
raise ValueError('Require csv or bigquery dataset.')
sample_size = min(training_data.train.size, 10000)
training_df = training_data.train.sample(sample_size)
num_features = args['num_features'] if args['num_features'] else 5
self._exp = explainer.explain_tabular(training_df, labels, args['data'],
num_features=num_features)
self._show_overview = args['overview']
def visualize(self, label_index):
if self._show_overview:
IPython.display.display(
IPython.display.HTML('<br/>All Categorical and Numeric Columns<br/>'))
self._exp.show_in_notebook(labels=[label_index])
else:
fig = self._exp.as_pyplot_figure(label=label_index)
# Clear original title set by lime.
plt.title('')
fig.suptitle(' All Categorical and Numeric Columns', fontsize=16)
plt.close(fig)
IPython.display.display(fig)
# End of Explainer Helper Classes
# ===================================================
def _explain(args, cell):
explainer = _prediction_explainer.PredictionExplainer(args['model'])
labels = args['labels'].split(',')
instances = []
if args['type'] == 'all':
if explainer._numeric_columns or explainer._categorical_columns:
instances.append(_TabularLimeExplainerInstance(explainer, labels, args))
for col_name in explainer._text_columns:
args['column_name'] = col_name
instances.append(_TextLimeExplainerInstance(explainer, labels, args))
for col_name in explainer._image_columns:
args['column_name'] = col_name
if args['algorithm'] == 'lime':
instances.append(_ImageLimeExplainerInstance(explainer, labels, args))
elif args['algorithm'] == 'ig':
instances.append(_ImageIgExplainerInstance(explainer, labels, args))
elif args['type'] == 'text':
instances.append(_TextLimeExplainerInstance(explainer, labels, args))
elif args['type'] == 'image' and args['algorithm'] == 'lime':
instances.append(_ImageLimeExplainerInstance(explainer, labels, args))
elif args['type'] == 'image' and args['algorithm'] == 'ig':
instances.append(_ImageIgExplainerInstance(explainer, labels, args))
elif args['type'] == 'tabular':
instances.append(_TabularLimeExplainerInstance(explainer, labels, args))
for i, label in enumerate(labels):
IPython.display.display(
IPython.display.HTML('<br/>Explaining features for label <b>"%s"</b><br/>' % label))
for instance in instances:
instance.visualize(i)
def _tensorboard_start(args, cell):
datalab_ml.TensorBoard.start(args['logdir'])
def _tensorboard_stop(args, cell):
datalab_ml.TensorBoard.stop(args['pid'])
def _tensorboard_list(args, cell):
return datalab_ml.TensorBoard.list()
def _get_evaluation_csv_schema(csv_file):
# ML Workbench produces predict_results_schema.json in local batch prediction.
schema_file = os.path.join(os.path.dirname(csv_file), 'predict_results_schema.json')
if not file_io.file_exists(schema_file):
raise ValueError('csv data requires headers.')
return schema_file
def _evaluate_cm(args, cell):
if args['csv']:
if args['headers']:
headers = args['headers'].split(',')
cm = datalab_ml.ConfusionMatrix.from_csv(args['csv'], headers=headers)
else:
schema_file = _get_evaluation_csv_schema(args['csv'])
cm = datalab_ml.ConfusionMatrix.from_csv(args['csv'], schema_file=schema_file)
elif args['bigquery']:
cm = datalab_ml.ConfusionMatrix.from_bigquery(args['bigquery'])
else:
raise ValueError('Either csv or bigquery is needed.')
if args['plot']:
return cm.plot(figsize=(args['size'], args['size']), rotation=90)
else:
return cm.to_dataframe()
def _create_metrics(args):
if args['csv']:
if args['headers']:
headers = args['headers'].split(',')
metrics = datalab_ml.Metrics.from_csv(args['csv'], headers=headers)
else:
schema_file = _get_evaluation_csv_schema(args['csv'])
metrics = datalab_ml.Metrics.from_csv(args['csv'], schema_file=schema_file)
elif args['bigquery']:
metrics = datalab_ml.Metrics.from_bigquery(args['bigquery'])
else:
raise ValueError('Either csv or bigquery is needed.')
return metrics
def _evaluate_accuracy(args, cell):
metrics = _create_metrics(args)
return metrics.accuracy()
def _evaluate_regression(args, cell):
metrics = _create_metrics(args)
metrics_dict = []
metrics_dict.append({
'metric': 'Root Mean Square Error',
'value': metrics.rmse()
})
metrics_dict.append({
'metric': 'Mean Absolute Error',
'value': metrics.mae()
})
metrics_dict.append({
'metric': '50 Percentile Absolute Error',
'value': metrics.percentile_nearest(50)
})
metrics_dict.append({
'metric': '90 Percentile Absolute Error',
'value': metrics.percentile_nearest(90)
})
metrics_dict.append({
'metric': '99 Percentile Absolute Error',
'value': metrics.percentile_nearest(99)
})
return pd.DataFrame(metrics_dict)
def _evaluate_pr(args, cell):
metrics = _create_metrics(args)
df = metrics.precision_recall(args['num_thresholds'], args['target_class'],
probability_column=args['probability_column'])
if args['plot']:
plt.plot(df['recall'], df['precision'],
label='Precision-Recall curve for class ' + args['target_class'])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall')
plt.legend(loc="lower left")
plt.show()
else:
return df
def _evaluate_roc(args, cell):
metrics = _create_metrics(args)
df = metrics.roc(args['num_thresholds'], args['target_class'],
probability_column=args['probability_column'])
if args['plot']:
plt.plot(df['fpr'], df['tpr'],
label='ROC curve for class ' + args['target_class'])
plt.xlabel('fpr')
plt.ylabel('tpr')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('ROC')
plt.legend(loc="lower left")
plt.show()
else:
return df
def _model_list(args, cell):
if args['name']:
# model name provided. List versions of that model.
versions = datalab_ml.ModelVersions(args['name'], project_id=args['project'])
versions = list(versions.get_iterator())
df = pd.DataFrame(versions)
df['name'] = df['name'].apply(lambda x: x.split('/')[-1])
df = df.replace(np.nan, '', regex=True)
return df
else:
# List all models.
models = list(datalab_ml.Models(project_id=args['project']).get_iterator())
if len(models) > 0:
df = pd.DataFrame(models)
df['name'] = df['name'].apply(lambda x: x.split('/')[-1])
df['defaultVersion'] = df['defaultVersion'].apply(lambda x: x['name'].split('/')[-1])
df = df.replace(np.nan, '', regex=True)
return df
else:
print('No models found.')
def _model_delete(args, cell):
parts = args['name'].split('.')
if len(parts) == 1:
models = datalab_ml.Models(project_id=args['project'])
models.delete(parts[0])
elif len(parts) == 2:
versions = datalab_ml.ModelVersions(parts[0], project_id=args['project'])
versions.delete(parts[1])
else:
raise ValueError('Too many "." in name. Use "model" or "model.version".')
def _model_deploy(args, cell):
parts = args['name'].split('.')
if len(parts) == 2:
model_name, version_name = parts[0], parts[1]
model_exists = False
try:
# If describe() works, the model already exists.
datalab_ml.Models(project_id=args['project']).get_model_details(model_name)
model_exists = True
except:
pass
if not model_exists:
datalab_ml.Models(project_id=args['project']).create(model_name)
versions = datalab_ml.ModelVersions(model_name, project_id=args['project'])
runtime_version = args['runtime_version']
if not runtime_version:
runtime_version = tf.__version__
versions.deploy(version_name, args['path'], runtime_version=runtime_version)
else:
raise ValueError('Name must be like "model.version".')
def _dataset_create(args, cell):
if args['format'] == 'csv':
if not args['schema']:
raise ValueError('schema is required if format is csv.')
schema, schema_file = None, None
if isinstance(args['schema'], six.string_types):
schema_file = args['schema']
elif isinstance(args['schema'], list):
schema = args['schema']
else:
raise ValueError('schema should either be a file path, or a dictionary.')
train_dataset = datalab_ml.CsvDataSet(args['train'], schema=schema, schema_file=schema_file)
eval_dataset = datalab_ml.CsvDataSet(args['eval'], schema=schema, schema_file=schema_file)
elif args['format'] == 'bigquery':
train_dataset = datalab_ml.BigQueryDataSet(table=args['train'])
eval_dataset = datalab_ml.BigQueryDataSet(table=args['eval'])
elif args['format'] == 'transformed':
train_dataset = datalab_ml.TransformedDataSet(args['train'])
eval_dataset = datalab_ml.TransformedDataSet(args['eval'])
else:
raise ValueError('Invalid data format.')
dataset = DataSet(train_dataset, eval_dataset)
google.datalab.utils.commands.notebook_environment()[args['name']] = dataset
def _dataset_explore(args, cell):
dataset = get_dataset_from_arg(args['name'])
print('train data instances: %d' % dataset.train.size)
print('eval data instances: %d' % dataset.eval.size)
if args['overview'] or args['facets']:
if isinstance(dataset.train, datalab_ml.TransformedDataSet):
raise ValueError('transformed data does not support overview or facets.')
print('Sampled %s instances for each.' % args['sample_size'])
sample_train_df = dataset.train.sample(args['sample_size'])
sample_eval_df = dataset.eval.sample(args['sample_size'])
if args['overview']:
overview = datalab_ml.FacetsOverview().plot({'train': sample_train_df,
'eval': sample_eval_df})
IPython.display.display(overview)
if args['facets']:
sample_train_df['_source'] = pd.Series(['train'] * len(sample_train_df),
index=sample_train_df.index)
sample_eval_df['_source'] = pd.Series(['eval'] * len(sample_eval_df),
index=sample_eval_df.index)
df_merged = pd.concat([sample_train_df, sample_eval_df])
diveview = datalab_ml.FacetsDiveview().plot(df_merged)
IPython.display.display(diveview)
| {
"content_hash": "f9954358d825afd6e8efb39b4fd0902e",
"timestamp": "",
"source": "github",
"line_count": 1231,
"max_line_length": 113,
"avg_line_length": 43.38017871649066,
"alnum_prop": 0.6086402876350631,
"repo_name": "yebrahim/pydatalab",
"id": "1df97d9e17b1e491a26484f379564e716472ed2c",
"size": "53995",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "google/datalab/contrib/mlworkbench/commands/_ml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7596"
},
{
"name": "Python",
"bytes": "2406009"
},
{
"name": "Shell",
"bytes": "4256"
},
{
"name": "TypeScript",
"bytes": "105309"
}
],
"symlink_target": ""
} |
from django import forms
from django.core.exceptions import ValidationError
from dist.models import Category
from netboot.forms import boolean_choices
class AddCategoryForm(forms.Form):
parent = forms.ChoiceField(label='Parent', required=False)
title = forms.CharField(max_length=100)
description = forms.CharField(label='Description (optional)', required=False, widget=forms.Textarea)
is_active = forms.ChoiceField(label='Is active?', choices=boolean_choices)
is_public = forms.ChoiceField(label='Is public?', choices=boolean_choices)
def __init__(self, parent_choices, **kwargs):
super(AddCategoryForm, self).__init__(**kwargs)
self.fields['parent'].choices = parent_choices
def clean_parent(self):
if self.cleaned_data.get('parent'):
try:
parent = Category.objects.get(id=int(self.cleaned_data['parent']))
except Category.DoesNotExist:
raise ValidationError('No such parent')
else:
return parent
else:
return None
def clean_is_active(self):
return self.cleaned_data['is_active'] == 'yes'
def clean_is_public(self):
return self.cleaned_data['is_public'] == 'yes'
| {
"content_hash": "42996a48b9ce176b5e027350ccbc5549",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 104,
"avg_line_length": 35.8,
"alnum_prop": 0.656025538707103,
"repo_name": "tfmt/netboot",
"id": "fafe2e780d4d3d486809f74231f898310ae761eb",
"size": "1253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dist/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "681"
},
{
"name": "HTML",
"bytes": "11585"
},
{
"name": "JavaScript",
"bytes": "176"
},
{
"name": "Python",
"bytes": "22175"
}
],
"symlink_target": ""
} |
__author__ = 'shafferm'
from run_sparcc import sparcc_correlation, sparcc_correlation_w_bootstraps
| {
"content_hash": "f8bbbddf428a9855cd409c514d50f0b3",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 74,
"avg_line_length": 33.333333333333336,
"alnum_prop": 0.78,
"repo_name": "shafferm/fast_sparCC",
"id": "b350b0f3d038718146bf8480f50e822a50002c19",
"size": "100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sparcc_fast/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20686"
}
],
"symlink_target": ""
} |
"""A swing up experiment in Cartpole."""
from typing import Optional
from bsuite.environments import base
from bsuite.environments import cartpole
from bsuite.experiments.cartpole_swingup import sweep
import dm_env
from dm_env import specs
import numpy as np
class CartpoleSwingup(base.Environment):
"""A difficult 'swing up' version of the classic Cart Pole task.
In this version of the problem the pole begins downwards, and the agent must
swing the pole up in order to see reward. Unlike the typical cartpole task
the agent must pay a cost for moving, which aggravates the explore-exploit
tradedoff. Algorithms without 'deep exploration' will simply remain still.
"""
def __init__(self,
height_threshold: float = 0.5,
theta_dot_threshold: float = 1.,
x_reward_threshold: float = 1.,
move_cost: float = 0.1,
x_threshold: float = 3.,
timescale: float = 0.01,
max_time: float = 10.,
init_range: float = 0.05,
seed: Optional[int] = None):
# Setup.
self._state = cartpole.CartpoleState(0, 0, 0, 0, 0)
super().__init__()
self._rng = np.random.RandomState(seed)
self._init_fn = lambda: self._rng.uniform(low=-init_range, high=init_range)
# Logging info
self._raw_return = 0.
self._total_upright = 0.
self._best_episode = 0.
self._episode_return = 0.
# Reward/episode logic
self._height_threshold = height_threshold
self._theta_dot_threshold = theta_dot_threshold
self._x_reward_threshold = x_reward_threshold
self._move_cost = move_cost
self._x_threshold = x_threshold
self._timescale = timescale
self._max_time = max_time
# Problem config
self._cartpole_config = cartpole.CartpoleConfig(
mass_cart=1.,
mass_pole=0.1,
length=0.5,
force_mag=10.,
gravity=9.8,
)
# Public attributes.
self.bsuite_num_episodes = sweep.NUM_EPISODES
def reset(self):
self._reset_next_step = False
self._state = cartpole.CartpoleState(
x=self._init_fn(),
x_dot=self._init_fn(),
theta=np.pi + self._init_fn(),
theta_dot=self._init_fn(),
time_elapsed=0.,
)
self._episode_return = 0.
return dm_env.restart(self.observation)
def step(self, action):
if self._reset_next_step:
return self.reset()
self._state = cartpole.step_cartpole(
action=action,
timescale=self._timescale,
state=self._state,
config=self._cartpole_config,
)
# Rewards only when the pole is central and balanced
is_upright = (np.cos(self._state.theta) > self._height_threshold
and np.abs(self._state.theta_dot) < self._theta_dot_threshold
and np.abs(self._state.x) < self._x_reward_threshold)
reward = -1. * np.abs(action - 1) * self._move_cost
if is_upright:
reward += 1.
self._total_upright += 1
self._raw_return += reward
self._episode_return += reward
is_end_of_episode = (self._state.time_elapsed > self._max_time
or np.abs(self._state.x) > self._x_threshold)
if is_end_of_episode:
self._best_episode = max(self._episode_return, self._best_episode)
self._reset_next_step = True
return dm_env.termination(reward=reward, observation=self.observation)
else: # continuing transition.
return dm_env.transition(reward=reward, observation=self.observation)
def _step(self, action: int) -> dm_env.TimeStep:
raise NotImplementedError('This environment implements its own auto-reset.')
def _reset(self) -> dm_env.TimeStep:
raise NotImplementedError('This environment implements its own auto-reset.')
def action_spec(self):
return specs.DiscreteArray(dtype=np.int, num_values=3, name='action')
def observation_spec(self):
return specs.Array(shape=(1, 8), dtype=np.float32, name='state')
@property
def observation(self) -> np.ndarray:
"""Approximately normalize output."""
obs = np.zeros((1, 8), dtype=np.float32)
obs[0, 0] = self._state.x / self._x_threshold
obs[0, 1] = self._state.x_dot / self._x_threshold
obs[0, 2] = np.sin(self._state.theta)
obs[0, 3] = np.cos(self._state.theta)
obs[0, 4] = self._state.theta_dot
obs[0, 5] = self._state.time_elapsed / self._max_time
obs[0, 6] = 1. if np.abs(self._state.x) < self._x_reward_threshold else -1.
theta_dot = self._state.theta_dot
obs[0, 7] = 1. if np.abs(theta_dot) < self._theta_dot_threshold else -1.
return obs
def bsuite_info(self):
return dict(raw_return=self._raw_return,
total_upright=self._total_upright,
best_episode=self._best_episode)
| {
"content_hash": "5295702777598302afbab03fc1f06aea",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 80,
"avg_line_length": 34.24285714285714,
"alnum_prop": 0.6320400500625782,
"repo_name": "deepmind/bsuite",
"id": "13c3b9e902fddc1921b69cc499d6a7625fcfcc25",
"size": "5524",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "bsuite/experiments/cartpole_swingup/cartpole_swingup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "106470"
},
{
"name": "Python",
"bytes": "448602"
},
{
"name": "Shell",
"bytes": "2425"
},
{
"name": "TeX",
"bytes": "233184"
}
],
"symlink_target": ""
} |
import itertools
import sys
from mox3 import mox
from neutronclient.neutron.v2_0 import port
from neutronclient import shell
from neutronclient.tests.unit import test_cli20
class CLITestV20PortJSON(test_cli20.CLITestV20Base):
def setUp(self):
super(CLITestV20PortJSON, self).setUp(plurals={'tags': 'tag'})
def test_create_port(self):
"""Create port: netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_extra_dhcp_opts_args(self):
"""Create port: netid --extra_dhcp_opt."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
extra_dhcp_opts = [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]
args = [netid]
for dhcp_opt in extra_dhcp_opts:
args += ['--extra-dhcp-opt',
('opt_name=%(opt_name)s,opt_value=%(opt_value)s' %
dhcp_opt)]
position_names = ['network_id', 'extra_dhcp_opts']
position_values = [netid, extra_dhcp_opts]
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_full(self):
"""Create port: --mac_address mac --device_id deviceid netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--mac_address', 'mac', '--device_id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
position_values = [netid, 'mac', 'deviceid']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
# Test dashed options
args = ['--mac-address', 'mac', '--device-id', 'deviceid', netid]
position_names = ['network_id', 'mac_address', 'device_id']
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_tenant(self):
"""Create port: --tenant_id tenantid netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--tenant_id', 'tenantid', netid, ]
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
# Test dashed options
args = ['--tenant-id', 'tenantid', netid, ]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tenant_id='tenantid')
def test_create_port_tags(self):
"""Create port: netid mac_address device_id --tags a b."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--tags', 'a', 'b']
position_names = ['network_id']
position_values = []
position_values.extend([netid])
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values,
tags=['a', 'b'])
def test_create_port_secgroup(self):
"""Create port: --security-group sg1_id netid."""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups(self):
"""Create port: <security_groups> netid
The <security_groups> are
--security-group sg1_id --security-group sg2_id
"""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg1_id', 'sg2_id']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroup_off(self):
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = ['--no-security-group', netid]
position_names = ['network_id', 'security_groups']
position_values = [netid, []]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_create_port_secgroups_list(self):
"""Create port: netid <security_groups>
The <security_groups> are
--security-groups list=true sg_id1 sg_id2
"""
resource = 'port'
cmd = port.CreatePort(test_cli20.MyApp(sys.stdout), None)
name = 'myname'
myid = 'myid'
netid = 'netid'
args = [netid, '--security-groups', 'list=true', 'sg_id1', 'sg_id2']
position_names = ['network_id', 'security_groups']
position_values = [netid, ['sg_id1', 'sg_id2']]
self._test_create_resource(resource, cmd, name, myid, args,
position_names, position_values)
def test_list_ports(self):
"""List ports: -D."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ports_pagination(self):
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ports_sort(self):
"""list ports: --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ports_limit(self):
"""list ports: -P."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_list_ports_tags(self):
"""List ports: -- --tags a b."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, tags=['a', 'b'])
def test_list_ports_detail_tags(self):
"""List ports: -D -- --tags a b."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, detail=True, tags=['a', 'b'])
def test_list_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = port.ListPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
fields_1=['a', 'b'], fields_2=['c', 'd'])
def _test_list_router_port(self, resources, cmd,
myid, detail=False, tags=(),
fields_1=(), fields_2=()):
self.mox.StubOutWithMock(cmd, "get_client")
self.mox.StubOutWithMock(self.client.httpclient, "request")
cmd.get_client().MultipleTimes().AndReturn(self.client)
reses = {resources: [{'id': 'myid1', },
{'id': 'myid2', }, ], }
resstr = self.client.serialize(reses)
# url method body
query = ""
args = detail and ['-D', ] or []
if fields_1:
for field in fields_1:
args.append('--fields')
args.append(field)
args.append(myid)
if tags:
args.append('--')
args.append("--tag")
for tag in tags:
args.append(tag)
if (not tags) and fields_2:
args.append('--')
if fields_2:
args.append("--fields")
for field in fields_2:
args.append(field)
for field in itertools.chain(fields_1, fields_2):
if query:
query += "&fields=" + field
else:
query = "fields=" + field
for tag in tags:
if query:
query += "&tag=" + tag
else:
query = "tag=" + tag
if detail:
query = query and query + '&verbose=True' or 'verbose=True'
query = query and query + '&device_id=%s' or 'device_id=%s'
path = getattr(self.client, resources + "_path")
self.client.httpclient.request(
test_cli20.MyUrlComparator(
test_cli20.end_url(path, query % myid),
self.client),
'GET',
body=None,
headers=mox.ContainsKeyValue('X-Auth-Token', test_cli20.TOKEN)
).AndReturn((test_cli20.MyResp(200), resstr))
self.mox.ReplayAll()
cmd_parser = cmd.get_parser("list_" + resources)
shell.run_command(cmd, cmd_parser, args)
self.mox.VerifyAll()
self.mox.UnsetStubs()
_str = self.fake_stdout.make_string()
self.assertIn('myid1', _str)
def test_list_router_ports(self):
"""List router ports: -D."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, True)
def test_list_router_ports_tags(self):
"""List router ports: -- --tags a b."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd,
self.test_id, tags=['a', 'b'])
def test_list_router_ports_detail_tags(self):
"""List router ports: -D -- --tags a b."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
detail=True, tags=['a', 'b'])
def test_list_router_ports_fields(self):
"""List ports: --fields a --fields b -- --fields c d."""
resources = "ports"
cmd = port.ListRouterPort(test_cli20.MyApp(sys.stdout), None)
self._test_list_router_port(resources, cmd, self.test_id,
fields_1=['a', 'b'],
fields_2=['c', 'd'])
def test_update_port(self):
"""Update port: myid --name myname --tags a b."""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'myname',
'--tags', 'a', 'b'],
{'name': 'myname', 'tags': ['a', 'b'], }
)
def test_update_port_secgroup(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id', myid]
updatefields = {'security_groups': ['sg1_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_secgroups(self):
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = ['--security-group', 'sg1_id',
'--security-group', 'sg2_id',
myid]
updatefields = {'security_groups': ['sg1_id', 'sg2_id']}
self._test_update_resource(resource, cmd, myid, args, updatefields)
def test_update_port_extra_dhcp_opts(self):
"""Update port: myid --extra_dhcp_opt."""
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=pxelinux.0",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=123.123.123.123",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=123.123.123.45"
]
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_delete_extra_dhcp_opts_from_port(self):
resource = 'port'
myid = 'myid'
args = [myid,
'--extra-dhcp-opt',
"opt_name=bootfile-name,opt_value=null",
'--extra-dhcp-opt',
"opt_name=tftp-server,opt_value=123.123.123.123",
'--extra-dhcp-opt',
"opt_name=server-ip-address,opt_value=123.123.123.45"
]
# the client code will change the null to None and send to server,
# where its interpreted as delete the DHCP option on the port.
updatedfields = {'extra_dhcp_opts': [{'opt_name': 'bootfile-name',
'opt_value': None},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.45'}]}
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, myid, args, updatedfields)
def test_update_port_security_group_off(self):
"""Update port: --no-security-groups myid."""
resource = 'port'
cmd = port.UpdatePort(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['--no-security-groups', 'myid'],
{'security_groups': []})
def test_show_port(self):
"""Show port: --fields id --fields name myid."""
resource = 'port'
cmd = port.ShowPort(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_delete_port(self):
"""Delete port: myid."""
resource = 'port'
cmd = port.DeletePort(test_cli20.MyApp(sys.stdout), None)
myid = 'myid'
args = [myid]
self._test_delete_resource(resource, cmd, myid, args)
class CLITestV20PortXML(CLITestV20PortJSON):
format = 'xml'
| {
"content_hash": "937a895cd11b40a99184b4249445dc02",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 79,
"avg_line_length": 41.61538461538461,
"alnum_prop": 0.5146979905789756,
"repo_name": "rackerlabs/python-neutronclient",
"id": "d7306d709ea56d4960d443534d0f05896c54381c",
"size": "17409",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutronclient/tests/unit/test_cli20_port.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from google.cloud import storage
import google.cloud.storage.acl
import pytest
import acl
# Typically we'd use a @example.com address, but GCS requires a real Google
# account.
TEST_EMAIL = '[email protected]'
@pytest.fixture
def test_bucket(cloud_config):
"""Yields a bucket that resets its acl after the test completes."""
bucket = storage.Client().bucket(cloud_config.storage_bucket)
acl = google.cloud.storage.acl.BucketACL(bucket)
object_default_acl = google.cloud.storage.acl.DefaultObjectACL(bucket)
acl.reload()
object_default_acl.reload()
yield bucket
acl.save()
object_default_acl.save()
@pytest.fixture
def test_blob(cloud_config):
"""Yields a blob that resets its acl after the test completes."""
bucket = storage.Client().bucket(cloud_config.storage_bucket)
blob = bucket.blob('storage_acl_test_sigil')
blob.upload_from_string('Hello, is it me you\'re looking for?')
acl = google.cloud.storage.acl.ObjectACL(blob)
acl.reload()
yield blob
acl.save()
def test_print_bucket_acl(cloud_config, capsys):
acl.print_bucket_acl(cloud_config.storage_bucket)
out, _ = capsys.readouterr()
assert out
def test_print_bucket_acl_for_user(test_bucket, cloud_config, capsys):
test_bucket.acl.user(TEST_EMAIL).grant_owner()
test_bucket.acl.save()
acl.print_bucket_acl_for_user(cloud_config.storage_bucket, TEST_EMAIL)
out, _ = capsys.readouterr()
assert 'OWNER' in out
def test_add_bucket_owner(test_bucket, cloud_config):
acl.add_bucket_owner(cloud_config.storage_bucket, TEST_EMAIL)
test_bucket.acl.reload()
assert 'OWNER' in test_bucket.acl.user(TEST_EMAIL).get_roles()
def test_remove_bucket_owner(test_bucket, cloud_config):
test_bucket.acl.user(TEST_EMAIL).grant_owner()
test_bucket.acl.save()
acl.remove_bucket_owner(cloud_config.storage_bucket, TEST_EMAIL)
test_bucket.acl.reload()
assert 'OWNER' not in test_bucket.acl.user(TEST_EMAIL).get_roles()
def test_add_bucket_default_owner(test_bucket, cloud_config):
acl.add_bucket_default_owner(cloud_config.storage_bucket, TEST_EMAIL)
test_bucket.default_object_acl.reload()
roles = test_bucket.default_object_acl.user(TEST_EMAIL).get_roles()
assert 'OWNER' in roles
def test_remove_bucket_default_owner(test_bucket, cloud_config):
test_bucket.acl.user(TEST_EMAIL).grant_owner()
test_bucket.acl.save()
acl.remove_bucket_default_owner(cloud_config.storage_bucket, TEST_EMAIL)
test_bucket.default_object_acl.reload()
roles = test_bucket.default_object_acl.user(TEST_EMAIL).get_roles()
assert 'OWNER' not in roles
def test_print_blob_acl(test_blob, cloud_config, capsys):
acl.print_blob_acl(cloud_config.storage_bucket, test_blob.name)
out, _ = capsys.readouterr()
assert out
def test_print_blob_acl_for_user(test_blob, cloud_config, capsys):
test_blob.acl.user(TEST_EMAIL).grant_owner()
test_blob.acl.save()
acl.print_blob_acl_for_user(
cloud_config.storage_bucket, test_blob.name, TEST_EMAIL)
out, _ = capsys.readouterr()
assert 'OWNER' in out
def test_add_blob_owner(test_blob, cloud_config):
acl.add_blob_owner(cloud_config.storage_bucket, test_blob.name, TEST_EMAIL)
test_blob.acl.reload()
assert 'OWNER' in test_blob.acl.user(TEST_EMAIL).get_roles()
def test_remove_blob_owner(test_blob, cloud_config):
test_blob.acl.user(TEST_EMAIL).grant_owner()
test_blob.acl.save()
acl.remove_blob_owner(
cloud_config.storage_bucket, test_blob.name, TEST_EMAIL)
test_blob.acl.reload()
assert 'OWNER' not in test_blob.acl.user(TEST_EMAIL).get_roles()
| {
"content_hash": "cb6c78c5ed7622552dd67d27df52f47a",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 30.429752066115704,
"alnum_prop": 0.7085822922324824,
"repo_name": "pongem/python-bot-project",
"id": "3197b4ea0f526558c67fcd8ff0a4496db8cdfab7",
"size": "4258",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "storage/cloud-client/acl_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "48744"
},
{
"name": "HTML",
"bytes": "91010"
},
{
"name": "JavaScript",
"bytes": "137355"
},
{
"name": "Makefile",
"bytes": "881"
},
{
"name": "Protocol Buffer",
"bytes": "8810"
},
{
"name": "Python",
"bytes": "5699896"
},
{
"name": "Shell",
"bytes": "8369"
}
],
"symlink_target": ""
} |
import httplib
import socket
import urlparse
import eventlet
import httplib2
from oslo.config import cfg
import webob
from quantum.agent.linux import daemon
from quantum.common import config
from quantum.common import utils
from quantum.openstack.common import log as logging
from quantum import wsgi
proxy_socket = cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location of Metadata Proxy UNIX domain '
'socket'))
cfg.CONF.register_opt(proxy_socket)
LOG = logging.getLogger(__name__)
class UnixDomainHTTPConnection(httplib.HTTPConnection):
"""Connection class for HTTP over UNIX domain socket."""
def __init__(self, host, port=None, strict=None, timeout=None,
proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if self.timeout:
self.sock.settimeout(self.timeout)
self.sock.connect(cfg.CONF.metadata_proxy_socket)
class NetworkMetadataProxyHandler(object):
"""Proxy AF_INET metadata request through Unix Domain socket.
The Unix domain socket allows the proxy access resource that are not
accessible within the isolated tenant context.
"""
def __init__(self, network_id=None, router_id=None):
self.network_id = network_id
self.router_id = router_id
if network_id is None and router_id is None:
msg = _('network_id and router_id are None. One must be provided.')
raise ValueError(msg)
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
LOG.debug(_("Request: %s"), req)
try:
return self._proxy_request(req.remote_addr,
req.method,
req.path_info,
req.query_string,
req.body)
except Exception:
LOG.exception(_("Unexpected error."))
msg = _('An unknown error has occurred. '
'Please try your request again.')
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
def _proxy_request(self, remote_address, method, path_info,
query_string, body):
headers = {
'X-Forwarded-For': remote_address,
}
if self.router_id:
headers['X-Quantum-Router-ID'] = self.router_id
else:
headers['X-Quantum-Network-ID'] = self.network_id
url = urlparse.urlunsplit((
'http',
'169.254.169.254', # a dummy value to make the request proper
path_info,
query_string,
''))
h = httplib2.Http()
resp, content = h.request(
url,
method=method,
headers=headers,
body=body,
connection_type=UnixDomainHTTPConnection)
if resp.status == 200:
LOG.debug(resp)
LOG.debug(content)
return content
elif resp.status == 404:
return webob.exc.HTTPNotFound()
elif resp.status == 409:
return webob.exc.HTTPConflict()
elif resp.status == 500:
msg = _(
'Remote metadata server experienced an internal server error.'
)
LOG.debug(msg)
return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
else:
raise Exception(_('Unexpected response code: %s') % resp.status)
class ProxyDaemon(daemon.Daemon):
def __init__(self, pidfile, port, network_id=None, router_id=None):
super(ProxyDaemon, self).__init__(pidfile)
self.network_id = network_id
self.router_id = router_id
self.port = port
def run(self):
handler = NetworkMetadataProxyHandler(
self.network_id,
self.router_id)
proxy = wsgi.Server('quantum-network-metadata-proxy')
proxy.start(handler, self.port)
proxy.wait()
def main():
eventlet.monkey_patch()
opts = [
cfg.StrOpt('network_id'),
cfg.StrOpt('router_id'),
cfg.StrOpt('pid_file'),
cfg.BoolOpt('daemonize', default=True),
cfg.IntOpt('metadata_port',
default=9697,
help=_("TCP Port to listen for metadata server "
"requests.")),
]
cfg.CONF.register_cli_opts(opts)
# Don't get the default configuration file
cfg.CONF(project='quantum', default_config_files=[])
config.setup_logging(cfg.CONF)
utils.log_opt_values(LOG)
proxy = ProxyDaemon(cfg.CONF.pid_file,
cfg.CONF.metadata_port,
network_id=cfg.CONF.network_id,
router_id=cfg.CONF.router_id)
if cfg.CONF.daemonize:
proxy.start()
else:
proxy.run()
| {
"content_hash": "900c25743db59cd93099ff0b70dac09b",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 32.76923076923077,
"alnum_prop": 0.57179186228482,
"repo_name": "yamt/neutron",
"id": "f6282e8b4284a207effa2c471a15eff75ab077df",
"size": "5822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/agent/metadata/namespace_proxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "4078056"
},
{
"name": "Shell",
"bytes": "10023"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from django.template import Library
from django.conf import settings
from django.contrib.sites.models import Site
register = Library()
CSS_URL = getattr(settings, 'BOOTSTRAP_CSS_URL', '')
JS_URL = getattr(settings, 'BOOTSTRAP_JS_URL', '')
@register.inclusion_tag('bootstrap/styles.inc.html')
def bootstrap_styles():
"""
Includes the Bootstrap styles from Bower
"""
return {
'css_url': CSS_URL and (settings.MEDIA_URL + CSS_URL) or ''
}
@register.inclusion_tag('bootstrap/scripts.inc.html')
def bootstrap_scripts():
"""
Includes the jQuery and Bootstrap scripts from Bower
"""
return {
'js_url': JS_URL and (settings.MEDIA_URL + JS_URL) or ''
}
@register.inclusion_tag('bootstrap/navbar.inc.html', takes_context = True)
def bootstrap_navbar(context):
"""
Renders the navbar template (see :ref:`Navigation templates <templates_navigation>`) for more information
"""
context.update(
{
'INVERSE': getattr(settings, 'BOOTSTRAP_NAVBAR_INVERSE', False),
'FIXED_TOP': getattr(settings, 'BOOTSTRAP_NAVBAR_FIXED_TOP', False),
'FIXED_BOTTOM': getattr(settings, 'BOOTSTRAP_NAVBAR_FIXED_BOTTOM', False)
}
)
return context
@register.inclusion_tag('bootstrap/footer.inc.html', takes_context = True)
def bootstrap_footer(context):
"""
Renders the navbar template (see :ref:`Miscellaneous templates <templates_misc>`) for more information
"""
return {
'request': context.get('request'),
'SITE': Site.objects.get_current()
}
@register.simple_tag(takes_context = True)
def bootstrap_title(context, separator = ' | '):
"""
Renders the value of the ``title_parts`` context variable (a tuple) into a string, separated by
a delimiter
:param separator: The delimiter to use (defaults to a beam character (|))
"""
title_parts = context.get('title_parts')
site = Site.objects.get_current()
if title_parts:
return u''.join(
separator.join(
[unicode(u) for u in list(title_parts) + [site.name]]
)
)
else:
return site.name
@register.inclusion_tag('bootstrap/breadcrumb.inc.html', takes_context = True)
def breadcrumb_trail(context):
"""
Renders a two-tuple of page URLs and titles (see the
:ref:`breadcrumb_trail <context_breadcrumb_trail>` context variable for more information)
"""
return {
'breadcrumb_trail': context.get('breadcrumb_trail')
}
@register.simple_tag(takes_context = True)
def html_attrs(context):
"""
Adds a ``no-js`` class to the ``<html>`` tag
todo: Make this much more flexible
"""
request = context.get('request')
tags = [
('class', 'no-js')
]
return ' '.join(
['%s="%s"' % t for t in tags]
)
@register.inclusion_tag('bootstrap/typekit.inc.html')
def typekit():
return {
'key': getattr(settings, 'TYPEKIT_KEY', '')
} | {
"content_hash": "0bbcc060162bf645fc9b09d43b39f282",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 109,
"avg_line_length": 27.94495412844037,
"alnum_prop": 0.6214707813525936,
"repo_name": "iamsteadman/bambu-bootstrap",
"id": "43f407e887bfdbf82c933f7b3395764c90731a93",
"size": "3046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bambu_bootstrap/templatetags/bootstrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16424"
},
{
"name": "JavaScript",
"bytes": "54587"
},
{
"name": "Python",
"bytes": "41162"
}
],
"symlink_target": ""
} |
from envisage.ui.tasks.task_extension import TaskExtension
from pyface.tasks.action.schema_addition import SchemaAddition
# ============= standard library imports ========================
from traits.api import List
from pychron.envisage.tasks.base_task_plugin import BaseTaskPlugin
from pychron.geochron.geochron_service import GeochronService
from pychron.geochron.tasks.actions import UploadAction
from pychron.geochron.tasks.node import GeochronNode
from pychron.geochron.tasks.preferences import GeochronPreferencesPane
from pychron.pipeline.nodes import NodeFactory
GEOCHRON = """
required:
- pychron.geochron.geochron_service.GeochronService
nodes:
- klass: UnknownNode
- klass: GeochronNode
"""
class GeochronPlugin(BaseTaskPlugin):
id = 'pychron.geochron.plugin'
node_factories = List(contributes_to='pychron.pipeline.node_factories')
predefined_templates = List(contributes_to='pychron.pipeline.predefined_templates')
def _node_factories_default(self):
def geochron_factory():
node = GeochronNode()
service = self.application.get_service('pychron.geochron.geochron_service.GeochronService')
node.trait_set(service=service)
return node
return [NodeFactory('GeochronNode', geochron_factory), ]
def _predefined_templates_default(self):
return [('Share', (('Geochron', GEOCHRON),))]
def _help_tips_default(self):
return ['More information about Geochron is located at http://geochron.org/']
def _service_offers_default(self):
so1 = self.service_offer_factory(factory=GeochronService,
protocol=GeochronService)
return [so1]
def _preferences_panes_default(self):
return [GeochronPreferencesPane]
def _task_extensions_default(self):
actions = [SchemaAddition(factory=UploadAction,
path='MenuBar/data.menu')]
ts = [TaskExtension(actions=actions)]
return ts
# ============= EOF =============================================
| {
"content_hash": "01a3bbef1ef1ac3d87d57e643b78fb02",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 103,
"avg_line_length": 35.87931034482759,
"alnum_prop": 0.6727534839019702,
"repo_name": "UManPychron/pychron",
"id": "15fee3f9f4e0833fd6d6b3b52ebc7d1aa0d28372",
"size": "2876",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pychron/geochron/tasks/geochron_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "279"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "40346"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10234954"
},
{
"name": "Shell",
"bytes": "10753"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('../python-mbus')
import pytest
import os
from mbus import MBus
# test if servial evice is connected
def nohardware():
try:
fd = os.open(config.getini(serialdevice), os.O_RDONLY)
except OSError:
return True
os.close(fd)
return False
def test_empty_init():
with pytest.raises(BaseException):
foo = MBus.MBus()
def test_invalid_argument():
with pytest.raises(TypeError):
foo = MBus.MBus(foo='bar')
@pytest.mark.serial
def test_device_null():
with pytest.raises(TypeError):
foo = MBus.MBus(device='/dev/null')
@pytest.mark.serial
def test_device_nonexistent():
with pytest.raises(FileNotFoundError):
foo = MBus.MBus(device='/dev/idonotexist')
@pytest.mark.serial
def test_device_serial(pytestconfig):
if '/dev/adjustme' == pytestconfig.getini('serialdevice'):
pytest.skip("serial device not configured")
with pytest.raises(TypeError):
foo = MBus.MBus(device=pytestconfig.getini('serialdevice'))
# device=None, host=None, port=8888
@pytest.mark.serial
def test_device_and_host():
with pytest.raises(BaseException):
foo = MBus.MBus(device='/dev/null', host='127.0.0.1')
def test_port():
foo = MBus.MBus(host="127.0.0.1", port=1234)
def test_port_too_low():
with pytest.raises(ValueError):
MBus.MBus(host="127.0.0.1", port=-1)
def test_port_too_high():
with pytest.raises(ValueError):
MBus.MBus(host="127.0.0.1", port=77777)
def test_port_float():
with pytest.raises(TypeError):
MBus.MBus(host="127.0.0.1", port=2.3)
def test_port_string():
with pytest.raises(TypeError):
MBus.MBus(host="127.0.0.1", port="123")
def test_libpath_empty():
with pytest.raises(OSError):
foo = MBus.MBus(libpath='')
def test_libpath_shared_object_only():
with pytest.raises(OSError):
foo = MBus.MBus(libpath='libmbus.so')
def test_libpath_shared_object_path():
foo = MBus.MBus(libpath="/usr/local/lib/libmbus.so", host="127.0.0.1")
| {
"content_hash": "017ac493961afc74ec42cb64649046e3",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 74,
"avg_line_length": 22.866666666666667,
"alnum_prop": 0.6579203109815355,
"repo_name": "Cougar/python-mbus",
"id": "95ab46f70e22f817155614cc6a54ea27f8523d54",
"size": "2058",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_MBus_init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "54756"
}
],
"symlink_target": ""
} |
import os.path, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
import unittest
import itertools
from pipeline_base import PipelineBlock, Zip
from pipeline_hof import Filter, Map, Fold
from pipeline_parallel import FilterParallel, FoldParallel, MapParallel, BeginParallel, EndParallel
from pipeline_extra import TrainScikitModel
import numpy as np
class Square(PipelineBlock):
def run(self, input_data):
return input_data ** 2
class AddOne(PipelineBlock):
def run(self, input_data):
return input_data + 1
class SwapInputs(PipelineBlock):
def run(self, input_data):
a,b = input_data
return b,a
def square(x):
return x ** 2
def add_one(x):
return x+1
def is_even(x):
return x%2==0
def is_odd(x):
return x%2==1
def add_one_batch(batch):
return [x+1 for x in batch]
def infinite_generator():
while True:
yield 1
def infinite_generator_2():
while True:
yield 2
def add(x1,x2):
return x1+x2
class SyntaxTest(unittest.TestCase):
def test_pipe(self):
assert (2 | Square()) == 4
def test_shift(self):
f = Square() >> Square() >> AddOne()
assert f(2) == 17
def test_pipe_and_shift(self):
assert (2 | Square() >> Square() >> AddOne()) == 17
def test_multiple_input(self):
assert ((1,2) | SwapInputs()) == (2,1)
class BaseComponentTest(unittest.TestCase):
def test_zip(self):
input_1 = itertools.islice(infinite_generator(),0,1000)
input_2 = itertools.islice(infinite_generator_2(),0,1000)
output = (input_1,input_2) | Zip()
assert next(output) == (1,2)
class HigherOrderFunctionTest(unittest.TestCase):
def test_map(self):
assert list([1,2] | Map(square) >> Map(square) >> Map(add_one)) == [2,17]
def test_reuse(self):
square_map = Map(square) >> Map(square)
assert list([1,2] | square_map >> Map(add_one)) == [2,17]
assert list([1,2] | square_map >> Map(add_one)) == [2,17]
def test_imap(self):
assert list([1,2] | BeginParallel(1) >> MapParallel(square) >> MapParallel(square) >> MapParallel(add_one) >> EndParallel()) == [2,17]
def test_map_infinite(self):
data = infinite_generator()
output_stream = data | Map(square) >> Map(square)
assert next(output_stream) == 1
def test_filter(self):
data = range(0,10)
output = data | Filter(is_even)
assert list(output) == [0,2,4,6,8]
def test_filter_parallel(self):
data = [0,1,2,3,4,5,6,7,8,9]
output = data | BeginParallel(1) >> FilterParallel(is_even) >> EndParallel()
assert list(output) == [0,2,4,6,8]
def test_filter_infinite(self):
data = infinite_generator()
is_odd = lambda x: x%2==1
output = data | Filter(is_odd)
assert next(output) == 1
def test_filter_parallel_infinite(self):
data = infinite_generator()
output = data | BeginParallel(1) >> FilterParallel(is_odd) >> EndParallel()
assert next(output) == 1
def test_fold(self):
data = [0,1,2,3]
output = data | Fold(add)
assert output == 6
def test_fold_parallel(self):
data = [0,1,2,3]
output = data | BeginParallel(1) >> FoldParallel(add)
assert output == 6
def parse_row( element):
return np.array([int(element['COL_1']), int(element['COL_2'])])
def sum_vectors(input_1, input_2):
return input_1 + input_2
class UseCaseTest(unittest.TestCase):
def test_end_to_end(self):
import csv
input_file = csv.DictReader(open('test_data.csv'))
run_data_pipeline = Map(parse_row) >> Map(sum_vectors)
run_data_pipeline(input_file) == [4, 6]
def can_import_scikit():
try:
import sklearn
return True
except:
return False
@unittest.skipUnless(can_import_scikit(), 'no scikit learn, skipping test')
class SciKitLearnTest(unittest.TestCase):
def test_model_training(self):
from sklearn.ensemble import RandomForestClassifier
X = [[0,0],[1,1],[0,0],[1,1],[0,0],[1,1],[0,0],[1,1],[0,0],[1,1]]
y = [0,1,0,1,0,1,0,1,0,1]
model = RandomForestClassifier()
list(zip(X,y) | TrainScikitModel(model, batch_size=2))
assert list(model.predict([[0,0], [1,1]])) == [0,1] | {
"content_hash": "5105c3a6495de18ab5f8792f84425dd3",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 143,
"avg_line_length": 29.8843537414966,
"alnum_prop": 0.6057363988162987,
"repo_name": "lmc2179/chevrons",
"id": "bd6ce876bccf0b32249060061ae0b4cff962b056",
"size": "4393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chevrons/tests/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15568"
}
],
"symlink_target": ""
} |
from os.path import dirname
from ipkg.build import Formula, File
class c(Formula):
name = 'c'
version = '1.0'
sources = File(dirname(__file__) + '/../../sources/c-1.0.tar.gz')
platform = 'any'
dependencies = ('d', 'e')
def install(self):
pass
| {
"content_hash": "34624461163706bb8d22e880ba73fa93",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 69,
"avg_line_length": 17.5625,
"alnum_prop": 0.5693950177935944,
"repo_name": "pmuller/ipkg",
"id": "3c7b50d6af0599a0558804b05aae31402f54e987",
"size": "281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/data/formulas/c/c-1.0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139473"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Eventos
from multimedia.models import *
class FotosAdmin(generic.GenericTabularInline):
model = Fotos
extra = 1
class AdjuntosAdmin(generic.GenericTabularInline):
model = Adjuntos
extra = 1
class EventosAdmin(admin.ModelAdmin):
def queryset(self, request):
if request.user.is_superuser:
return self.model.objects.all()
elif request.user.is_staff:
return self.model.objects.filter(autor=request.user)
def save_model(self, request, obj, form, change):
instance = form.save(commit=False)
instance.autor = request.user
instance.save()
return instance
exclude = ['autor']
inlines = [FotosAdmin, AdjuntosAdmin]
list_display = ['titulo','fecha_inicio','fecha_finalizacion',]
search_fields = ['titulo',]
date_hierarchy = 'fecha_inicio'
admin.site.register(Eventos, EventosAdmin) | {
"content_hash": "5445c661e902a7bd83362796e47478e5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 66,
"avg_line_length": 29.53125,
"alnum_prop": 0.6835978835978836,
"repo_name": "CARocha/cesesma",
"id": "35810942c491e247288db6f2aea1ab643ef1ca63",
"size": "945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventos/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160779"
},
{
"name": "JavaScript",
"bytes": "190816"
},
{
"name": "Python",
"bytes": "57699"
}
],
"symlink_target": ""
} |
import requests.packages.urllib3
import serial
import json
from firebase import firebase
requests.packages.urllib3.disable_warnings()
arduinoData = serial.Serial('com31', 9600)
firebase = firebase.FirebaseApplication(
'https://___YOUR_PROJECT_NAME____.firebaseio.com/')
while 1:
myData = (arduinoData.readline().strip())
myData = (myData.decode('utf-8'))
myData = float(myData)
result = firebase.put('MainNode/Leaf', 'temp', myData)
print 'Data : ', result
| {
"content_hash": "1f44e6af31ddf538668e1f6ea0f93ac6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 58,
"avg_line_length": 27.944444444444443,
"alnum_prop": 0.68389662027833,
"repo_name": "Logan1x/Python-Scripts",
"id": "3a8b55963b22f1905e6d76c3d30c7ed7eea5396f",
"size": "503",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/Pyfirebase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64348"
}
],
"symlink_target": ""
} |
from PyQt5.QtCore import *
from Business import *
from Data.Components import Components
__author__ = 'mamj'
col_header = ["Components", "Type"]
types = ['Lines', 'Areas']
class ComponentsModel(QAbstractTableModel):
def __init__(self, doc):
QAbstractItemModel.__init__(self)
self._components = doc.get_components()
self._doc = doc
self._components.add_change_handler(self.on_components_changed)
def rowCount(self, model_index=None, *args, **kwargs):
return len(self._components.get_components())
def columnCount(self, model_index=None, *args, **kwargs):
return len(col_header)
def data(self, model_index: QModelIndex, int_role=None):
col = model_index.column()
row = model_index.row()
data = None
if int_role == Qt.DisplayRole:
component_item = self._components.get_components()[row]
if col == 0:
data = component_item.name
if col == 1:
data = types[component_item.type - 1]
elif int_role == Qt.EditRole:
component_item = self._components.get_components()[row]
if col == 0:
data = component_item.name
elif col == 1:
return types
return data
def setData(self, model_index: QModelIndex, value: QVariant, int_role=None):
col = model_index.column()
row = model_index.row()
component_item = self._components.get_components()[row]
if col == 0:
component_item.name = value
return True
elif col == 1:
component_item.type = value + 1
return False
def removeRow(self, row, QModelIndex_parent=None, *args, **kwargs):
component = self._components.get_components()[row]
remove_components(self._doc, [component])
def remove_rows(self, rows):
components = set()
for row in rows:
components.add(self._components.get_components()[row])
remove_components(self._doc, components)
def on_components_changed(self, event: ChangeEvent):
if type(event.sender) is Components:
if event.type == event.BeforeObjectAdded:
lenght = len(self._components.get_components())
self.beginInsertRows(QModelIndex(), lenght, lenght)
if event.type == event.ObjectAdded:
self.endInsertRows()
if event.type == event.BeforeObjectRemoved:
if event.object in self._components.get_components():
row = self._components.get_components().index(event.object)
self.beginRemoveRows(QModelIndex(), row, row)
if event.type == event.ObjectRemoved:
if event.object in self._components.get_components():
self.endRemoveRows()
if type(event.object) is Component:
if event.type == event.ValueChanged:
comp = event.sender
row = self._components.get_components.index(comp)
left = self.createIndex(row, 0)
right = self.createIndex(row, 3)
self.dataChanged.emit(left, right)
def flags(self, model_index: QModelIndex):
default_flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
return default_flags
def headerData(self, p_int, orientation, int_role=None):
if int_role == Qt.DisplayRole:
if orientation == Qt.Vertical:
return p_int
else:
return col_header[p_int]
else:
return
def get_components_object(self):
return self._components
def get_component(self, row):
return self._components.get_components()[row]
def get_index_from_edge(self, component):
row = self._components.get_components().index(component)
return self.index(row, 0)
def get_options(self, index):
return types
| {
"content_hash": "845fd0e74f26100148eb061717e88a7b",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 77,
"avg_line_length": 30.527272727272727,
"alnum_prop": 0.7004169148302561,
"repo_name": "pracedru/PracedruDesign",
"id": "c1ad567affa430bd14fa6e55a5ec3607692a0d71",
"size": "3358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GUI/Models/ComponentsModel.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4428"
},
{
"name": "CSS",
"bytes": "3334"
},
{
"name": "Python",
"bytes": "637422"
}
],
"symlink_target": ""
} |
import os
import unittest
import time
import jwt
import datetime
from collections import namedtuple
from hashlib import md5
from elasticsearch import Elasticsearch, NotFoundError
from tests.module.blueprints.config import LOCAL_ELASTICSEARCH
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
from importlib import import_module, reload
models = import_module('conductor.blueprints.user.models')
USERS_INDEX_NAME = os.environ.get('OS_ES_USERS_INDEX_NAME', 'test_users')
class UserAdminTest(unittest.TestCase):
USERID = 'uusseerriidd'
NAME = 'nnaammee'
EMAIL = 'eemmaaiill'
AVATAR_URL = 'aavvaattaarr__uurrll'
# Actions
def setUp(self):
self.ctrl = import_module('conductor.blueprints.user.controllers')
self.private_key = self.ctrl.PRIVATE_KEY
reload(self.ctrl)
# Clean index
self.es = Elasticsearch(hosts=[LOCAL_ELASTICSEARCH])
try:
self.es.indices.delete(index=USERS_INDEX_NAME)
except NotFoundError:
pass
self.es.indices.create(USERS_INDEX_NAME)
time.sleep(1)
def test___create_user___success(self):
user = models.create_or_get_user(self.USERID, self.NAME, self.EMAIL,
self.AVATAR_URL)
self.assertEquals(user['id'], self.USERID)
self.assertEquals(user['name'], self.NAME)
self.assertEquals(user['email'], self.EMAIL)
self.assertEquals(user['avatar_url'], self.AVATAR_URL)
def test___create__existing_user___success(self):
models.create_or_get_user(self.USERID, self.NAME, self.EMAIL,
self.AVATAR_URL)
user = models.create_or_get_user(self.USERID, self.NAME, self.EMAIL,
self.AVATAR_URL)
self.assertEquals(user['id'], self.USERID)
self.assertEquals(user['name'], self.NAME)
self.assertEquals(user['email'], self.EMAIL)
self.assertEquals(user['avatar_url'], self.AVATAR_URL)
def test___get__existing_user___success(self):
models.create_or_get_user(self.USERID, self.NAME, self.EMAIL,
self.AVATAR_URL)
hash = models.hash_email(self.EMAIL)
user = models.get_user(hash)
self.assertEquals(user['id'], self.USERID)
self.assertEquals(user['name'], self.NAME)
self.assertEquals(user['email'], self.EMAIL)
self.assertEquals(user['avatar_url'], self.AVATAR_URL)
def test___get__nonexisting_user___success(self):
hash = models.hash_email(self.EMAIL)
user = models.get_user(hash)
self.assertIs(user, None)
def test___save__existing_user___success(self):
user2 = models.create_or_get_user(self.USERID, self.NAME, self.EMAIL,
self.AVATAR_URL)
user2['email'] += 'X'
models. save_user(user2)
hash = models.hash_email(self.EMAIL)
user = models.get_user(hash)
self.assertEquals(user['id'], self.USERID)
self.assertEquals(user['name'], self.NAME)
self.assertEquals(user['email'], self.EMAIL+'X')
self.assertEquals(user['avatar_url'], self.AVATAR_URL)
def test___update___no_jwt(self):
ret = self.ctrl.update(None, 'new_username')
self.assertFalse(ret.get('success'))
self.assertEquals(ret.get('error'), 'No token')
def test___update___bad_jwt(self):
ret = self.ctrl.update('bla', 'new_username')
self.assertFalse(ret.get('success'))
self.assertEquals(ret.get('error'), 'Not authenticated')
def test___update___no_such_user(self):
hash = models.hash_email(self.EMAIL+'X')
token = {
'userid': hash,
'exp': (datetime.datetime.utcnow() +
datetime.timedelta(days=14))
}
client_token = jwt.encode(token, self.private_key)
ret = self.ctrl.update(client_token, 'new_username')
self.assertFalse(ret.get('success'))
self.assertEquals(ret.get('error'), 'Unknown User')
def test___update___new_user(self):
models.create_or_get_user(self.USERID, self.NAME, self.EMAIL,
self.AVATAR_URL)
hash = models.hash_email(self.EMAIL)
token = {
'userid': hash,
'exp': (datetime.datetime.utcnow() +
datetime.timedelta(days=14))
}
client_token = jwt.encode(token, self.private_key)
ret = self.ctrl.update(client_token, 'new_username')
self.assertTrue(ret.get('success'))
self.assertEquals(ret.get('error'), None)
user = models.get_user(hash)
self.assertEquals(user['username'], 'new_username')
def test___update___double_update(self):
models.create_or_get_user(self.USERID, self.NAME, self.EMAIL,
self.AVATAR_URL)
hash = models.hash_email(self.EMAIL)
token = {
'userid': hash,
'exp': (datetime.datetime.utcnow() +
datetime.timedelta(days=14))
}
client_token = jwt.encode(token, self.private_key)
ret = self.ctrl.update(client_token, 'new_username')
self.assertTrue(ret.get('success'))
self.assertEquals(ret.get('error'), None)
ret = self.ctrl.update(client_token, 'new_username_@')
self.assertFalse(ret.get('success'))
self.assertEquals(ret.get('error'),
'Cannot modify username, already set')
user = models.get_user(hash)
self.assertEquals(user['username'], 'new_username')
class AuthenticationTest(unittest.TestCase):
USERID = 'userid'
IDHASH = md5(USERID.encode('utf8')).hexdigest()
# Actions
def setUp(self):
self.ctrl = import_module('conductor.blueprints.user.controllers')
self.private_key = self.ctrl.PRIVATE_KEY
reload(self.ctrl)
# Cleanup
self.addCleanup(patch.stopall)
self.goog_provider = \
namedtuple("resp", ['headers'])({'Location': 'google'})
self.oauth_response = {
'access_token': 'access_token'
}
self.ctrl._google_remote_app = Mock(
return_value=namedtuple('_google_remote_app',
['authorize',
'authorized_response', 'name'])
(authorize=lambda **kwargs: self.goog_provider,
authorized_response=lambda **kwargs: self.oauth_response,
name='google')
)
self.ctrl.get_user = Mock(
return_value=namedtuple('User',
['name', 'email', 'avatar_url'])
('moshe', '[email protected]', 'http://google.com')
)
self.ctrl._get_user_profile = Mock(
return_value={
'id': 'userid',
'idhash': self.IDHASH,
'name': 'Moshe',
'email': '[email protected]',
'picture': 'http://moshe.com/picture'
}
)
# Tests
def test___check___no_jwt(self):
ret = self.ctrl.authenticate(None, 'next', 'callback')
self.assertFalse(ret.get('authenticated'))
self.assertIsNotNone(ret.get('providers', {}).get('google'))
def test___check___bad_jwt(self):
ret = self.ctrl.authenticate('bla', 'next', 'callback')
self.assertFalse(ret.get('authenticated'))
self.assertIsNotNone(ret.get('providers', {}).get('google'))
def test___check___good_jwt_no_such_user(self):
self.ctrl.get_user = Mock(
return_value=None
)
token = {
'userid': self.IDHASH,
'exp': (datetime.datetime.utcnow() +
datetime.timedelta(days=14))
}
client_token = jwt.encode(token, self.private_key)
ret = self.ctrl.authenticate(client_token, 'next', 'callback')
self.assertFalse(ret.get('authenticated'))
self.assertIsNotNone(ret.get('providers', {}).get('google'))
def test___check___expired_jwt(self):
token = {
'userid': self.IDHASH,
'exp': (datetime.datetime.utcnow() -
datetime.timedelta(days=1))
}
client_token = jwt.encode(token, self.private_key)
ret = self.ctrl.authenticate(client_token, 'next', 'callback')
self.assertFalse(ret.get('authenticated'))
self.assertIsNotNone(ret.get('providers', {}).get('google'))
def test___check___good_jwt(self):
token = {
'userid': self.IDHASH,
'exp': (datetime.datetime.utcnow() +
datetime.timedelta(days=14))
}
client_token = jwt.encode(token, self.private_key)
ret = self.ctrl.authenticate(client_token, 'next', 'callback')
self.assertTrue(ret.get('authenticated'))
self.assertIsNotNone(ret.get('profile'))
self.assertEquals(ret['profile'].email, '[email protected]')
self.assertEquals(ret['profile'].avatar_url, 'http://google.com')
self.assertEquals(ret['profile'].name, 'moshe')
def test___callback___good_response(self):
token = {
'next': 'http://next.com/',
'provider': 'dummy',
'exp': (datetime.datetime.utcnow() +
datetime.timedelta(days=14))
}
state = jwt.encode(token, self.private_key)
ret = self.ctrl.oauth_callback(state, 'callback')
self.assertTrue('jwt' in ret)
def test___callback___good_response_double(self):
token = {
'next': 'http://next.com/',
'provider': 'dummy',
'exp': (datetime.datetime.utcnow() +
datetime.timedelta(days=14))
}
state = jwt.encode(token, self.private_key)
ret = self.ctrl.oauth_callback(state, 'callback')
self.assertTrue('jwt' in ret)
ret = self.ctrl.oauth_callback(state, 'callback')
self.assertTrue('jwt' in ret)
def test___callback___bad_response(self):
self.oauth_response = None
token = {
'next': 'http://next.com/',
'provider': 'dummy',
'exp': (datetime.datetime.utcnow() +
datetime.timedelta(days=14))
}
state = jwt.encode(token, self.private_key)
ret = self.ctrl.oauth_callback(state, 'callback')
self.assertFalse('jwt' in ret)
def test___callback___bad_state(self):
ret = self.ctrl.oauth_callback("das", 'callback')
self.assertFalse('jwt' in ret)
| {
"content_hash": "1fc3909cfa5eede594a0fe40a0ed5a02",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 77,
"avg_line_length": 37.575438596491225,
"alnum_prop": 0.5726024838920534,
"repo_name": "openspending/os-authz-service",
"id": "eee158687da25043d2df5ea2391a0dd19ecad930",
"size": "10709",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/module/blueprints/user/test_user_controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1415"
},
{
"name": "Python",
"bytes": "43730"
},
{
"name": "Shell",
"bytes": "344"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from stackdio.core.utils import cached_url
from stackdio.ui import views
from stackdio.ui.views import accounts
from stackdio.ui.views import blueprints
from stackdio.ui.views import environments
from stackdio.ui.views import formulas
from stackdio.ui.views import images
from stackdio.ui.views import snapshots
from stackdio.ui.views import stacks
from stackdio.ui.views import users
auth_login_kwargs = {
'template_name': 'stackdio/login.html',
'extra_context': {'hide_navbar': True},
}
auth_reset_confirm_kwargs = {
'post_reset_redirect': 'ui:password_reset_complete',
'template_name': 'stackdio/auth/password_reset_confirm.html',
'extra_context': {'hide_navbar': True},
}
auth_reset_complete_kwargs = {
'template_name': 'stackdio/auth/password_reset_complete.html',
'extra_context': {'hide_navbar': True},
}
urlpatterns = (
cached_url(r'^$',
views.RootView.as_view(),
name='index'),
cached_url(r'^login/$',
auth_views.login,
auth_login_kwargs,
name='login',
user_sensitive=False),
url(r'^logout/$',
auth_views.logout_then_login,
name='logout'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
auth_views.password_reset_confirm,
auth_reset_confirm_kwargs,
name='password_reset_confirm'),
url(r'^reset/done/$',
auth_views.password_reset_complete,
auth_reset_complete_kwargs,
name='password_reset_complete'),
cached_url(r'^js/main.js$',
views.AppMainView.as_view(),
name='js-main',
user_sensitive=False),
cached_url('^user/$',
users.UserProfileView.as_view(),
name='user-profile',
timeout=10),
cached_url('^user/password/$',
users.UserPasswordChangeView.as_view(),
name='user-password-change',
timeout=10),
cached_url(r'^users/$',
users.UserListView.as_view(),
name='user-list',
timeout=30),
cached_url(r'^users/create/$',
users.UserCreateView.as_view(),
name='user-create'),
cached_url(r'^users/permissions/$',
users.UserModelPermissionsView.as_view(),
name='user-model-permissions'),
cached_url(r'^groups/$',
users.GroupListView.as_view(),
name='group-list',
timeout=30),
cached_url(r'^groups/create/$',
users.GroupCreateView.as_view(),
name='group-create'),
cached_url(r'^groups/permissions/$',
users.GroupModelPermissionsView.as_view(),
name='group-model-permissions'),
cached_url(r'^groups/(?P<name>[\w.@+-]+)/$',
users.GroupDetailView.as_view(),
name='group-detail',
timeout=30),
cached_url(r'^groups/(?P<name>[\w.@+-]+)/members/$',
users.GroupMembersView.as_view(),
name='group-members'),
cached_url(r'^groups/(?P<name>[\w.@+-]+)/permissions/$',
users.GroupObjectPermissionsView.as_view(),
name='group-object-permissions'),
cached_url(r'^stacks/$',
stacks.StackListView.as_view(),
name='stack-list',
timeout=30),
cached_url(r'^stacks/create/$',
stacks.StackCreateView.as_view(),
name='stack-create'),
cached_url(r'^stacks/permissions/$',
stacks.StackModelPermissionsView.as_view(),
name='stack-model-permissions'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/$',
stacks.StackDetailView.as_view(),
name='stack-detail',
timeout=30),
cached_url(r'^stacks/(?P<pk>[0-9]+)/properties/$',
stacks.StackPropertiesView.as_view(),
name='stack-properties'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/labels/$',
stacks.StackLabelsView.as_view(),
name='stack-labels'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/hosts/$',
stacks.StackHostsView.as_view(),
name='stack-hosts'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/components/$',
stacks.StackComponentsView.as_view(),
name='stack-components'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/volumes/$',
stacks.StackVolumesView.as_view(),
name='stack-volumes'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/commands/$',
stacks.StackCommandsView.as_view(),
name='stack-commands'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/commands/(?P<command_pk>[0-9]+)/$',
stacks.StackCommandDetailView.as_view(),
name='stack-command-detail'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/access_rules/$',
stacks.StackAccessRulesView.as_view(),
name='stack-access-rules'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/formula_versions/$',
stacks.StackFormulaVersionsView.as_view(),
name='stack-formula-versions'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/permissions/$',
stacks.StackObjectPermissionsView.as_view(),
name='stack-object-permissions'),
cached_url(r'^stacks/(?P<pk>[0-9]+)/logs/$',
stacks.StackLogsView.as_view(),
name='stack-logs'),
cached_url(r'^environments/$',
environments.EnvironmentListView.as_view(),
name='environment-list',
timeout=30),
cached_url(r'^environments/create/$',
environments.EnvironmentCreateView.as_view(),
name='environment-create'),
cached_url(r'^environments/permissions/$',
environments.EnvironmentModelPermissionsView.as_view(),
name='environment-model-permissions'),
cached_url(r'^environments/(?P<name>[a-z0-9\-_]+)/$',
environments.EnvironmentDetailView.as_view(),
name='environment-detail',
timeout=30),
cached_url(r'^environments/(?P<name>[a-z0-9\-_]+)/properties/$',
environments.EnvironmentPropertiesView.as_view(),
name='environment-properties'),
cached_url(r'^environments/(?P<name>[a-z0-9\-_]+)/hosts/$',
environments.EnvironmentHostsView.as_view(),
name='environment-hosts'),
cached_url(r'^environments/(?P<name>[a-z0-9\-_]+)/labels/$',
environments.EnvironmentLabelsView.as_view(),
name='environment-labels'),
cached_url(r'^environments/(?P<name>[a-z0-9\-_]+)/formula_versions/$',
environments.EnvironmentFormulaVersionsView.as_view(),
name='environment-formula-versions'),
cached_url(r'^environments/(?P<name>[a-z0-9\-_]+)/components/$',
environments.EnvironmentComponentsView.as_view(),
name='environment-components'),
cached_url(r'^environments/(?P<name>[a-z0-9\-_]+)/logs/$',
environments.EnvironmentLogsView.as_view(),
name='environment-logs'),
cached_url(r'^environments/(?P<name>[a-z0-9\-_]+)/permissions/$',
environments.EnvironmentObjectPermissionsView.as_view(),
name='environment-object-permissions'),
cached_url(r'^blueprints/$',
blueprints.BlueprintListView.as_view(),
name='blueprint-list',
timeout=30),
cached_url(r'^blueprints/permissions/$',
blueprints.BlueprintModelPermissionsView.as_view(),
name='blueprint-model-permissions'),
cached_url(r'^blueprints/(?P<pk>[0-9]+)/$',
blueprints.BlueprintDetailView.as_view(),
name='blueprint-detail',
timeout=30),
cached_url(r'^blueprints/(?P<pk>[0-9]+)/properties/$',
blueprints.BlueprintPropertiesView.as_view(),
name='blueprint-properties'),
cached_url(r'^blueprints/(?P<pk>[0-9]+)/labels/$',
blueprints.BlueprintLabelsView.as_view(),
name='blueprint-labels'),
cached_url(r'^blueprints/(?P<pk>[0-9]+)/host_definitions/$',
blueprints.BlueprintHostDefinitionsView.as_view(),
name='blueprint-host-definitions'),
cached_url(r'^blueprints/(?P<pk>[0-9]+)/formula_versions/$',
blueprints.BlueprintFormulaVersionsView.as_view(),
name='blueprint-formula-versions'),
cached_url(r'^blueprints/(?P<pk>[0-9]+)/permissions/$',
blueprints.BlueprintObjectPermissionsView.as_view(),
name='blueprint-object-permissions'),
cached_url(r'^formulas/$',
formulas.FormulaListView.as_view(),
name='formula-list',
timeout=30),
cached_url(r'^formulas/import/$',
formulas.FormulaImportView.as_view(),
name='formula-import'),
cached_url(r'^formulas/permissions/$',
formulas.FormulaModelPermissionsView.as_view(),
name='formula-model-permissions'),
cached_url(r'^formulas/(?P<pk>[0-9]+)/$',
formulas.FormulaDetailView.as_view(),
name='formula-detail',
timeout=30),
cached_url(r'^formulas/(?P<pk>[0-9]+)/properties/$',
formulas.FormulaPropertiesView.as_view(),
name='formula-properties'),
cached_url(r'^formulas/(?P<pk>[0-9]+)/permissions/$',
formulas.FormulaObjectPermissionsView.as_view(),
name='formula-object-permissions'),
cached_url(r'^snapshots/$',
snapshots.SnapshotListView.as_view(),
name='snapshot-list',
timeout=30),
cached_url(r'^snapshots/create/$',
snapshots.SnapshotCreateView.as_view(),
name='snapshot-create'),
cached_url(r'^snapshots/permissions/$',
snapshots.SnapshotModelPermissionsView.as_view(),
name='snapshot-model-permissions'),
cached_url(r'^snapshots/(?P<pk>[0-9]+)/$',
snapshots.SnapshotDetailView.as_view(),
name='snapshot-detail',
timeout=30),
cached_url(r'^snapshots/(?P<pk>[0-9]+)/permissions/$',
snapshots.SnapshotObjectPermissionsView.as_view(),
name='snapshot-object-permissions'),
cached_url(r'^accounts/$',
accounts.AccountListView.as_view(),
name='cloud-account-list',
timeout=30),
cached_url(r'^accounts/create/$',
accounts.AccountCreateView.as_view(),
name='cloud-account-create'),
cached_url(r'^accounts/permissions/$',
accounts.AccountModelPermissionsView.as_view(),
name='cloud-account-model-permissions'),
cached_url(r'^accounts/(?P<pk>[0-9]+)/$',
accounts.AccountDetailView.as_view(),
name='cloud-account-detail',
timeout=30),
cached_url(r'^accounts/(?P<pk>[0-9]+)/permissions/$',
accounts.AccountObjectPermissionsView.as_view(),
name='cloud-account-object-permissions'),
cached_url(r'^accounts/(?P<pk>[0-9]+)/images/$',
accounts.AccountImagesView.as_view(),
name='cloud-account-images'),
cached_url(r'^accounts/(?P<pk>[0-9]+)/security_groups/$',
accounts.AccountSecurityGroupsView.as_view(),
name='cloud-account-security-groups'),
cached_url(r'^images/$',
images.ImageListView.as_view(),
name='cloud-image-list',
timeout=30),
cached_url(r'^images/create/$',
images.ImageCreateView.as_view(),
name='cloud-image-create'),
cached_url(r'^images/permissions/$',
images.ImageModelPermissionsView.as_view(),
name='cloud-image-model-permissions'),
cached_url(r'^images/(?P<pk>[0-9]+)/$',
images.ImageDetailView.as_view(),
name='cloud-image-detail',
timeout=30),
cached_url(r'^images/(?P<pk>[0-9]+)/permissions/$',
images.ImageObjectPermissionsView.as_view(),
name='cloud-image-object-permissions'),
)
| {
"content_hash": "57df3d3f0d7b8f948a4954135d8b0a5a",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 95,
"avg_line_length": 35.72159090909091,
"alnum_prop": 0.5627485287100366,
"repo_name": "stackdio/stackdio",
"id": "de94dab7f41e1860decfcc82c7cce49585937785",
"size": "13185",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stackdio/ui/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6462"
},
{
"name": "HTML",
"bytes": "200474"
},
{
"name": "JavaScript",
"bytes": "365621"
},
{
"name": "Makefile",
"bytes": "567"
},
{
"name": "Python",
"bytes": "1034237"
},
{
"name": "SaltStack",
"bytes": "4594"
},
{
"name": "Scheme",
"bytes": "2371"
},
{
"name": "Shell",
"bytes": "6131"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from flask import render_template
from flask import Flask
from flask import request, redirect, url_for
from flask import Response
from flask import send_from_directory
import CIV_template
targetlet = ['a','c','d','e','s','A','C','D','E','S']
tempdata =''
def markup_file(filename):
newtext = ""
fin = open("source_texts/"+filename,'r')
line = fin.readline()
while line.find('Text:') != 0: # could get doc, headline, source info here
line = fin.readline()
line = fin.readline()
ka = 0
while len(line) > 0: # add the markup
for let in targetlet:
tarlet = ' '+let
letclass = '<span=~="mk' + let.lower() + '">'
while True:
if tarlet in line:
inda = line.find(tarlet)
indb = line.find(' ',inda+1)
line = line[:inda+1] + letclass + line[inda+1:indb] + "</span>" + line[indb:] # use telltail to avoid matching ' c'
# print(line)
ka += 1
# if ka > 14: break
else: break
line = line.replace('n=~="','n class="')
newtext += line
line = fin.readline()
# print("Marked text:\n",newtext)
return newtext
def save_to_tempdata():
global tempdata
# print('STT1',CIV_template.savelist)
for avar in CIV_template.savelist:
# print('STT2:',avar)
if avar in CIV_template.specialvarlist:
tempdata += CIV_template.specialvar(avar) + '\t'
elif avar in CIV_template.constvardict.keys():
tempdata += CIV_template.constvardict[avar] + '\t'
elif avar in request.form:
tempdata += request.form[avar]+'\t'
else:
tempdata += CIV_template.unchecked[avar] + '\t'
tempdata = tempdata[:-1] + '\n'
print('STT3:\n',tempdata)
def create_header():
global tempdata
tempdata = ''
for avar in CIV_template.savelist: # write the header
tempdata += avar+'\t'
tempdata = tempdata[:-1] + '\n'
app = Flask(__name__)
app.config['DEBUG'] = True
basic_mode = True # normally this will get reset later
create_header() # this also gets overwritten in normal operation
@app.route('/')
def homepage():
# CIV_template.imalive() # debug
return render_template('index.html')
@app.route('/operating')
def operate():
return render_template('operating.html')
@app.route('/features')
def features():
return render_template('features.html')
@app.route('/file_select')
def file_select():
""" case file selection for the text-extraction demo """
CIV_template.savelist = ['Aword','Cword','Dword','Eword','Sword']
create_header()
return render_template('file_select.html')
@app.route('/continue_coding')
def continue_coding():
if basic_mode:
return render_template('basic_form.html', form_content = thetemplate)
else:
return render_template('file_select.html')
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, CIVET can find nothing at this URL.', 404
@app.route('/goto_edit', methods=['POST'])
def goto_edit():
global curfile
if request.form['inputfile'] != "":
curfile = request.form['inputfile']
else:
curfile = request.form['choosefile']
return redirect("/extract/"+curfile)
@app.route('/extract/<name>', methods=['POST', 'GET'])
def extractform(name=None):
global basic_mode
marked = markup_file(name)
basic_mode = False
return render_template('extractor.flask.html', markedtext=marked)
@app.route('/save_entry', methods=['POST'])
def save_entry():
""" save data then return to text extraction form """
global curfile
save_to_tempdata()
return redirect("/extract/"+curfile)
@app.route('/save_basic', methods=['POST'])
def save_basic():
""" save data then return to template-based form """
global thetemplate
save_to_tempdata()
return render_template('basic_form.html', form_content = thetemplate)
@app.route('/get_new_case', methods=['POST'])
def get_new_case():
save_to_tempdata()
return render_template('file_select.html')
@app.route('/display_data', methods=['POST'])
def display_data():
save_to_tempdata()
return render_template('download_data.html',filename=CIV_template.defaultfilename)
@app.route('/download_data', methods=['POST'])
def download_data():
curfilename = request.form['filename']
if curfilename[-4:] != ".csv":
curfilename += '.csv'
return Response(tempdata,
mimetype='application/json',
headers={'Content-Disposition':'attachment;filename=%s' % curfilename})
@app.route('/reset_data')
def reset_data():
create_header()
if basic_mode:
return render_template('basic_form.html', form_content = thetemplate)
else:
return render_template('file_select.html')
@app.route('/select_template')
def select_template():
return render_template('template_select.html')
@app.route('/use_demotemplate',methods=['POST'])
def use_demotemplate():
fin = open('static/CIVET.demo.template.txt','r')
for ka in range(8):
line = fin.readline()
print(line)
fin.close()
return render_template('template_select.html')
@app.route('/read_template',methods=['POST'])
def read_template():
""" main routine for setting up a template: reads a file, checks for errors, and then either renders the form or
lists the errors """
global thetemplate, basic_mode
CIV_template.init_template()
if 'codername' in request.form:
# print('RTcn: codername',request.form['codername'] )
CIV_template.codername = request.form['codername']
"""print('RT keys',request.form.keys() )
print('RT file keys',request.files.keys() )
print('RT2*:',request.files['template_name'].filename) """
if len(request.files['template_name'].filename) > 0:
# print('RT2:',request.files['template_name'].filename)
st = request.files.get('template_name')
else:
# print('RT: Use demo')
st = open('static/CIVET.demo.template.txt','r')
thecontent = ''
commln = CIV_template.get_commlines(st)
while len(commln) > 0:
thecontent += CIV_template.do_command(commln)
commln = CIV_template.get_commlines(st)
# print('thecontent:',thecontent)
if len(CIV_template.savelist) == 0:
thecontent += '~Error~<p>A "save:" command is required in the template<br>\n'
else:
misslist = []
for ele in CIV_template.savelist:
if ele not in CIV_template.varlist:
misslist.append(ele)
if len(misslist) > 0:
thecontent += '~Error~<p>The following variables are in the "save:" command but were not declared in a data field<br>' + str(misslist) + '\n'
if '~Error~' in thecontent:
errortext = ''
indx = thecontent.find('~Error~')
while indx >= 0:
indy = thecontent.find('\n',indx)
errortext += thecontent[indx+7:indy+1]
indx = thecontent.find('~Error~',indy)
return render_template('template_error.html', form_content = errortext)
else:
thetemplate = thecontent
create_header()
basic_mode = True
return render_template('basic_form.html', form_content = thecontent)
@app.route('/download_pdfdocs')
def download_pdfdocs():
return send_from_directory('static','CIVET.Documentation.pdf', as_attachment=True)
@app.route('/download_demotemplate')
def download_demotemplate():
return send_from_directory('static','CIVET.demo.template.txt', as_attachment=True)
| {
"content_hash": "417c087cb6e6132e14e8c660480b5622",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 153,
"avg_line_length": 34.004366812227076,
"alnum_prop": 0.6166688069860023,
"repo_name": "philip-schrodt/CIVET-Flask",
"id": "702c4515d761bbf2b586278e8dc3b4830a254ba8",
"size": "8713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "368"
},
{
"name": "HTML",
"bytes": "20222"
},
{
"name": "Python",
"bytes": "20183"
}
],
"symlink_target": ""
} |
"""A tiny Python program to check that Python is working.
Try running this program from the command line like this:
python hello.py
python hello.py Alice
That should print:
Hello World -or- Hello Alice
Try changing the 'Hello' to 'Howdy' and run again.
Once you have that working, you're ready for class -- you can edit
and run Python code; now you just need to learn Python!
"""
import sys
#Define a main() function that prints a little greeting.
def main():
# Get the name from the command line, using 'World' as a fallback.
if len(sys.argv) >= 2:
name = sys.argv[1]
else:
name = 'World'
print 'Hello There', name
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
| {
"content_hash": "bcf7c6e56bcdaa29de7595284685d52c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 29.68,
"alnum_prop": 0.7021563342318059,
"repo_name": "chipoglesby/google-python-exercises",
"id": "a54b9f0f7b066e8e1bf79ad3368070e13e9c5963",
"size": "973",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hello.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "50662"
}
],
"symlink_target": ""
} |
"""The aws_lambda component."""
| {
"content_hash": "63ea1670b88aafbba7a41f1a30f1a089",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.65625,
"repo_name": "jamespcole/home-assistant",
"id": "f6d86d02e93f31a24517f6ccfb9f94899662bd68",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/aws_lambda/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
import pkg_resources
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django PostcodeLookup'
project = u'Zeep'
copyright = u'2016, <a href="https://www.mvantellingen.nl/">Michael van Tellingen</a>'
author = u'Michael van Tellingen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = '0.4.0'
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
'github_user': 'labd',
'github_banner': True,
'github_repo': 'django-postcode-lookup',
'travis_button': True,
'codecov_button': True,
'analytics_id': '',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Django PostcodeLookup v0.4.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'*': [
'sidebar-intro.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoPostcodeLookupdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoPostcodeLookup.tex', u'Django PostcodeLookup Documentation',
u'Michael van Tellingen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'wsgibasicauth', u'Django PostcodeLookup Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoPostcodeLookup', u'Django PostcodeLookup Documentation',
author, 'DjangoPostcodeLookup', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| {
"content_hash": "4896490955d6046ba8c91def0bf61c92",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 86,
"avg_line_length": 28.72072072072072,
"alnum_prop": 0.6884148891677122,
"repo_name": "LabD/django-postcode-lookup",
"id": "5c4d6c3f0de7b835fb68e5dd4104f1f6295063b9",
"size": "10306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "320"
},
{
"name": "Python",
"bytes": "20573"
}
],
"symlink_target": ""
} |
print "Hello World"
print "Something else"
print "This is the change"
print "This is the last thing"
| {
"content_hash": "ba61cc71d24f4f196d3ab2f7f9525846",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 30,
"avg_line_length": 25.25,
"alnum_prop": 0.7524752475247525,
"repo_name": "SameGordon/pynet_test",
"id": "2a9421ab6fd9ae273f9fc01fedcf5b82a813a2c7",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "138"
}
],
"symlink_target": ""
} |
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
| {
"content_hash": "68395f7de65f1d10266aaebc56d9f41a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 79,
"avg_line_length": 23.9375,
"alnum_prop": 0.6762402088772846,
"repo_name": "ngageoint/gamification-server",
"id": "9f828095bceca240c2415b842194960200f5383a",
"size": "408",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "gamification/badges/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "40767"
},
{
"name": "HTML",
"bytes": "42147"
},
{
"name": "JavaScript",
"bytes": "241536"
},
{
"name": "Python",
"bytes": "235107"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
import sqlalchemy as sa
from sqlalchemy.orm import exc as sa_exc
from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
LOG = log.getLogger(__name__)
TYPE_VXLAN = 'vxlan'
VXLAN_UDP_PORT = 4789
MAX_VXLAN_VNI = 16777215
vxlan_opts = [
cfg.ListOpt('vni_ranges',
default=[],
help=_("Comma-separated list of <vni_min>:<vni_max> tuples "
"enumerating ranges of VXLAN VNI IDs that are "
"available for tenant network allocation")),
cfg.StrOpt('vxlan_group', default=None,
help=_("Multicast group for VXLAN. If unset, disables VXLAN "
"multicast mode.")),
]
cfg.CONF.register_opts(vxlan_opts, "ml2_type_vxlan")
class VxlanAllocation(model_base.BASEV2):
__tablename__ = 'ml2_vxlan_allocations'
vxlan_vni = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False)
class VxlanEndpoints(model_base.BASEV2):
"""Represents tunnel endpoint in RPC mode."""
__tablename__ = 'ml2_vxlan_endpoints'
ip_address = sa.Column(sa.String(64), primary_key=True)
udp_port = sa.Column(sa.Integer, primary_key=True, nullable=False,
autoincrement=False)
def __repr__(self):
return "<VxlanTunnelEndpoint(%s)>" % self.ip_address
class VxlanTypeDriver(type_tunnel.TunnelTypeDriver):
def get_type(self):
return TYPE_VXLAN
def initialize(self):
self.vxlan_vni_ranges = []
self._parse_tunnel_ranges(
cfg.CONF.ml2_type_vxlan.vni_ranges,
self.vxlan_vni_ranges,
TYPE_VXLAN
)
self._sync_vxlan_allocations()
def reserve_provider_segment(self, session, segment):
segmentation_id = segment.get(api.SEGMENTATION_ID)
with session.begin(subtransactions=True):
try:
alloc = (session.query(VxlanAllocation).
filter_by(vxlan_vni=segmentation_id).
with_lockmode('update').
one())
if alloc.allocated:
raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
LOG.debug(_("Reserving specific vxlan tunnel %s from pool"),
segmentation_id)
alloc.allocated = True
except sa_exc.NoResultFound:
LOG.debug(_("Reserving specific vxlan tunnel %s outside pool"),
segmentation_id)
alloc = VxlanAllocation(vxlan_vni=segmentation_id)
alloc.allocated = True
session.add(alloc)
def allocate_tenant_segment(self, session):
with session.begin(subtransactions=True):
alloc = (session.query(VxlanAllocation).
filter_by(allocated=False).
with_lockmode('update').
first())
if alloc:
LOG.debug(_("Allocating vxlan tunnel vni %(vxlan_vni)s"),
{'vxlan_vni': alloc.vxlan_vni})
alloc.allocated = True
return {api.NETWORK_TYPE: TYPE_VXLAN,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: alloc.vxlan_vni}
def release_segment(self, session, segment):
vxlan_vni = segment[api.SEGMENTATION_ID]
with session.begin(subtransactions=True):
try:
alloc = (session.query(VxlanAllocation).
filter_by(vxlan_vni=vxlan_vni).
with_lockmode('update').
one())
alloc.allocated = False
for low, high in self.vxlan_vni_ranges:
if low <= vxlan_vni <= high:
LOG.debug(_("Releasing vxlan tunnel %s to pool"),
vxlan_vni)
break
else:
session.delete(alloc)
LOG.debug(_("Releasing vxlan tunnel %s outside pool"),
vxlan_vni)
except sa_exc.NoResultFound:
LOG.warning(_("vxlan_vni %s not found"), vxlan_vni)
def _sync_vxlan_allocations(self):
"""
Synchronize vxlan_allocations table with configured tunnel ranges.
"""
# determine current configured allocatable vnis
vxlan_vnis = set()
for tun_min, tun_max in self.vxlan_vni_ranges:
if tun_max + 1 - tun_min > MAX_VXLAN_VNI:
LOG.error(_("Skipping unreasonable VXLAN VNI range "
"%(tun_min)s:%(tun_max)s"),
{'tun_min': tun_min, 'tun_max': tun_max})
else:
vxlan_vnis |= set(xrange(tun_min, tun_max + 1))
session = db_api.get_session()
with session.begin(subtransactions=True):
# remove from table unallocated tunnels not currently allocatable
allocs = session.query(VxlanAllocation)
for alloc in allocs:
try:
# see if tunnel is allocatable
vxlan_vnis.remove(alloc.vxlan_vni)
except KeyError:
# it's not allocatable, so check if its allocated
if not alloc.allocated:
# it's not, so remove it from table
LOG.debug(_("Removing tunnel %s from pool"),
alloc.vxlan_vni)
session.delete(alloc)
# add missing allocatable tunnels to table
for vxlan_vni in sorted(vxlan_vnis):
alloc = VxlanAllocation(vxlan_vni=vxlan_vni)
session.add(alloc)
def get_vxlan_allocation(self, session, vxlan_vni):
with session.begin(subtransactions=True):
return session.query(VxlanAllocation).filter_by(
vxlan_vni=vxlan_vni).first()
def get_endpoints(self):
"""Get every vxlan endpoints from database."""
LOG.debug(_("get_vxlan_endpoints() called"))
session = db_api.get_session()
with session.begin(subtransactions=True):
vxlan_endpoints = session.query(VxlanEndpoints)
return [{'ip_address': vxlan_endpoint.ip_address,
'udp_port': vxlan_endpoint.udp_port}
for vxlan_endpoint in vxlan_endpoints]
def add_endpoint(self, ip, udp_port=VXLAN_UDP_PORT):
LOG.debug(_("add_vxlan_endpoint() called for ip %s"), ip)
session = db_api.get_session()
with session.begin(subtransactions=True):
try:
vxlan_endpoint = (session.query(VxlanEndpoints).
filter_by(ip_address=ip).
with_lockmode('update').one())
except sa_exc.NoResultFound:
vxlan_endpoint = VxlanEndpoints(ip_address=ip,
udp_port=udp_port)
session.add(vxlan_endpoint)
return vxlan_endpoint
| {
"content_hash": "58b47d574ab91d91271f5fb6b80b4aef",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 79,
"avg_line_length": 39.839572192513366,
"alnum_prop": 0.5471140939597315,
"repo_name": "rickerc/neutron_audit",
"id": "5366215f5323ecf65687e5792ac8f3798a292ebd",
"size": "8135",
"binary": false,
"copies": "1",
"ref": "refs/heads/cis-havana-staging",
"path": "neutron/plugins/ml2/drivers/type_vxlan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "7052151"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import os
import ujson
from optparse import make_option
from django.test import Client
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = """
Create webhook message based on given fixture
Example:
./manage.py send_webhook_fixture_message \
--fixture=zerver/fixtures/integration/fixture.json \
'--url=/api/v1/external/integration?stream=stream_name&api_key=api_key'
"""
option_list = BaseCommand.option_list + (
make_option('-f', '--fixture',
dest='fixture',
type='str',
help='The path to the fixture you\'d like to send into Zulip'),
make_option('-u', '--url',
dest='url',
type='str',
help='The url on your Zulip server that you want to post the fixture to'),
)
def handle(self, **options):
if options['fixture'] is None or options['url'] is None:
self.print_help('python manage.py', 'send_webhook_fixture_message')
exit(1)
full_fixture_path = os.path.join(settings.DEPLOY_ROOT, options['fixture'])
if not self._does_fixture_path_exist(full_fixture_path):
print('Fixture {} does not exist'.format(options['fixture']))
exit(1)
json = self._get_fixture_as_json(full_fixture_path)
client = Client()
client.post(options['url'], json, content_type="application/json")
def _does_fixture_path_exist(self, fixture_path):
return os.path.exists(fixture_path)
def _get_fixture_as_json(self, fixture_path):
return ujson.dumps(ujson.loads(open(fixture_path).read()))
| {
"content_hash": "2e1b788c99f36b57e381f69aed687ab0",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 94,
"avg_line_length": 33.886792452830186,
"alnum_prop": 0.623608017817372,
"repo_name": "peiwei/zulip",
"id": "b0d41798951c103b3137822c1a82ed511686560c",
"size": "1796",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zerver/management/commands/send_webhook_fixture_message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "183830"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5516"
},
{
"name": "HTML",
"bytes": "397966"
},
{
"name": "JavaScript",
"bytes": "1588795"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "96085"
},
{
"name": "Python",
"bytes": "2010761"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "33341"
}
],
"symlink_target": ""
} |
import httplib
import re
import sys
import getopt
# Pages to include in distribution
wiki_pages = [
"CamelCase",
"InterMapTxt",
"InterTrac",
"InterWiki",
"PageTemplates",
"RecentChanges",
"TitleIndex",
"TracAccessibility",
"TracAdmin",
"TracBackup",
"TracBrowser",
"TracCgi",
"TracChangeset",
"TracEnvironment",
"TracFastCgi",
"TracFineGrainedPermissions",
"TracGuide",
"TracImport",
"TracIni",
"TracInstall",
"TracInterfaceCustomization",
"TracLinks",
"TracLogging",
"TracModPython",
"TracModWSGI",
"TracNavigation",
"TracNotification",
"TracPermissions",
"TracPlugins",
"TracQuery",
"TracReports",
"TracRepositoryAdmin",
"TracRevisionLog",
"TracRoadmap",
"TracRss",
"TracSearch",
"TracStandalone",
"TracSupport",
"TracSyntaxColoring",
"TracTickets",
"TracTicketsCustomFields",
"TracTimeline",
"TracUnicode",
"TracUpgrade",
"TracWiki",
"TracWorkflow",
"WikiDeletePage",
"WikiFormatting",
"WikiHtml",
"WikiMacros",
"WikiNewPage",
"WikiPageNames",
"WikiProcessors",
"WikiRestructuredText",
"WikiRestructuredTextLinks"
]
def get_page_from_file(prefix, pname):
d = ''
try:
f = open(pname ,'r')
d = f.read()
f.close()
except:
print "Missing page: %s" % pname
return d
def get_page_from_web(prefix, pname):
host = "trac.edgewall.org"
rfile = "/wiki/%s%s?format=txt" % (prefix, pname)
c = httplib.HTTPConnection(host)
c.request("GET", rfile)
print "Getting", rfile
r = c.getresponse()
d = r.read()
if r.status == 200 and d:
f = open(pname, 'w+')
f.write(d)
f.close()
else:
print "Missing or empty page"
c.close()
return d
def check_links(data):
def get_refs(t, refs=[]):
r = "(?P<wikilink>(^|(?<=[^A-Za-z]))[!]?[A-Z][a-z/]+(?:[A-Z][a-z/]+)+)"
m = re.search (r, t)
if not m:
refs.sort()
result = []
orf = None
for rf in refs:
if rf != orf:
result.append(rf)
orf = rf
return result
refs.append(m.group())
return get_refs( t[m.end():], refs)
for p in data.keys():
links = get_refs(data[p], [])
for l in links:
if l not in data.keys():
print "Broken link: %s -> %s" % (p, l)
if __name__ == '__main__':
try:
opts, args = getopt.getopt(sys.argv[1:], "dCp:")
except getopt.GetoptError:
# print help information and exit:
print "%s [-d] [-C] [-p prefix] [PAGE ...]" % sys.argv[0]
print "\t-d -- Download pages from the main project wiki."
print "\t-C -- Don't try to check links (it's broken anyway)"
print "\t-p prefix -- When downloading, prepend 'prefix/' to the page."
sys.exit()
get_page = get_page_from_file
prefix = None
check = True
for o,a in opts:
if o == '-d':
get_page = get_page_from_web
elif o == '-p':
prefix = a+'/'
elif o == '-C':
check = False
data = {}
for p in args or wiki_pages:
data[p] = get_page(prefix, p)
if check:
check_links(data)
| {
"content_hash": "ed462ea0104e5aa5dd5175e0123978a0",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 23.347826086956523,
"alnum_prop": 0.5561762880198634,
"repo_name": "dokipen/trac",
"id": "8047729e8991f3ac752758bd843c415938d01127",
"size": "3477",
"binary": false,
"copies": "1",
"ref": "refs/heads/announcer",
"path": "contrib/checkwiki.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C#",
"bytes": "11612"
},
{
"name": "JavaScript",
"bytes": "45742"
},
{
"name": "Python",
"bytes": "2183584"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division, absolute_import, print_function
import os
from asn1crypto import core, ocsp, x509, algos
from . import errors
from ._types import str_cls, type_name
from .version import __version__
from ._urllib import Request, urlopen, URLError
def fetch(cert, issuer, hash_algo='sha1', nonce=True, user_agent=None, timeout=10):
"""
Fetches an OCSP response for a certificate
:param cert:
An asn1cyrpto.x509.Certificate object to get an OCSP reponse for
:param issuer:
An asn1crypto.x509.Certificate object that is the issuer of cert
:param hash_algo:
A unicode string of "sha1" or "sha256"
:param nonce:
A boolean - if the nonce extension should be used to prevent replay
attacks
:param user_agent:
The HTTP user agent to use when requesting the OCSP response. If None,
a default is used in the format "certvalidation 1.0.0".
:param timeout:
The number of seconds after which an HTTP request should timeout
:raises:
urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
socket.error - when a socket error occurs
:return:
An asn1crypto.ocsp.OCSPResponse object
"""
if not isinstance(cert, x509.Certificate):
raise TypeError('cert must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(cert))
if not isinstance(issuer, x509.Certificate):
raise TypeError('issuer must be an instance of asn1crypto.x509.Certificate, not %s' % type_name(issuer))
if hash_algo not in set(['sha1', 'sha256']):
raise ValueError('hash_algo must be one of "sha1", "sha256", not %s' % repr(hash_algo))
if not isinstance(nonce, bool):
raise TypeError('nonce must be a bool, not %s' % type_name(nonce))
if user_agent is None:
user_agent = 'certvalidator %s' % __version__
elif not isinstance(user_agent, str_cls):
raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))
cert_id = ocsp.CertId({
'hash_algorithm': algos.DigestAlgorithm({'algorithm': hash_algo}),
'issuer_name_hash': getattr(cert.issuer, hash_algo),
'issuer_key_hash': getattr(issuer.public_key, hash_algo),
'serial_number': cert.serial_number,
})
request = ocsp.Request({
'req_cert': cert_id,
})
tbs_request = ocsp.TBSRequest({
'request_list': ocsp.Requests([request]),
})
if nonce:
nonce_extension = ocsp.TBSRequestExtension({
'extn_id': 'nonce',
'critical': False,
'extn_value': core.OctetString(core.OctetString(os.urandom(16)).dump())
})
tbs_request['request_extensions'] = ocsp.TBSRequestExtensions([nonce_extension])
ocsp_request = ocsp.OCSPRequest({
'tbs_request': tbs_request,
})
last_e = None
for ocsp_url in cert.ocsp_urls:
try:
request = Request(ocsp_url)
request.add_header('Accept', 'application/ocsp-response')
request.add_header('Content-Type', 'application/ocsp-request')
request.add_header('User-Agent', user_agent)
response = urlopen(request, ocsp_request.dump(), timeout)
ocsp_response = ocsp.OCSPResponse.load(response.read())
request_nonce = ocsp_request.nonce_value
if ocsp_response['response_status'].native == 'unauthorized':
raise errors.OCSPNoMatchesError(
'Unable to verify OCSP response since the responder returned unauthorized'
)
response_nonce = ocsp_response.nonce_value
if request_nonce and response_nonce and request_nonce.native != response_nonce.native:
raise errors.OCSPValidationError(
'Unable to verify OCSP response since the request and response nonces do not match'
)
return ocsp_response
except (URLError) as e:
last_e = e
raise last_e
| {
"content_hash": "8f3f2ee1699c939a1b34fae2a9b2b319",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 112,
"avg_line_length": 36.357142857142854,
"alnum_prop": 0.6362966601178782,
"repo_name": "kak-bo-che/certvalidator",
"id": "f3e839b8704215e6b03de37354d5602c24fa65c3",
"size": "4088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "certvalidator/ocsp_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "336883"
}
],
"symlink_target": ""
} |
"""
Modulo de configuraciones globales.
"""
log = None
| {
"content_hash": "16cc59ebe6ac52ead95cfe3be3e83b1d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 35,
"avg_line_length": 9.5,
"alnum_prop": 0.6666666666666666,
"repo_name": "klashxx/PyConES",
"id": "b1a429f96625ef995ac2cb7ac29cd85fe90b4ddc",
"size": "81",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rspace/rspace/conf/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5278"
},
{
"name": "Python",
"bytes": "32415"
},
{
"name": "Shell",
"bytes": "7223"
}
],
"symlink_target": ""
} |
'''custom_grouping_helper.py'''
from collections import namedtuple
class CustomGroupingHelper:
"""Helper class for managing custom grouping"""
def __init__(self):
# map <stream_id -> list(targets)>
self.targets = {}
def add(self, stream_id, task_ids, grouping, source_comp_name):
"""Adds the target component
:type stream_id: str
:param stream_id: stream id into which tuples are emitted
:type task_ids: list of str
:param task_ids: list of task ids to which tuples are emitted
:type grouping: ICustomStreamGrouping object
:param grouping: custom grouping to use
:type source_comp_name: str
:param source_comp_name: source component name
"""
if stream_id not in self.targets:
self.targets[stream_id] = []
self.targets[stream_id].append(Target(task_ids, grouping, source_comp_name))
def prepare(self, context):
"""Prepares the custom grouping for this component"""
for stream_id, targets in list(self.targets.items()):
for target in targets:
target.prepare(context, stream_id)
def choose_tasks(self, stream_id, values):
"""Choose tasks for a given stream_id and values and Returns a list of target tasks"""
if stream_id not in self.targets:
return []
ret = []
for target in self.targets[stream_id]:
ret.extend(target.choose_tasks(values))
return ret
class Target(namedtuple('Target', 'task_ids, grouping, source_comp_name')):
"""Named tuple class for Target"""
__slots__ = ()
def prepare(self, context, stream_id):
"""Invoke prepare() of this custom grouping"""
self.grouping.prepare(context, self.source_comp_name, stream_id, self.task_ids)
def choose_tasks(self, values):
"""Invoke choose_tasks() of this custom grouping"""
ret = self.grouping.choose_tasks(values)
if not isinstance(ret, list):
raise TypeError("Returned object after custom grouping's choose_tasks() "\
f"needs to be a list, given: {str(type(ret))}")
for i in ret:
if not isinstance(i, int):
raise TypeError("Returned object after custom grouping's choose_tasks() "\
f"contained non-integer: {str(i)}")
if i not in self.task_ids:
raise ValueError("Returned object after custom grouping's choose_tasks() contained "\
f"a task id that is not registered: {i}")
return ret
| {
"content_hash": "9bdf52c31ad84c460858ca6c3b6d6051",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 93,
"avg_line_length": 38.903225806451616,
"alnum_prop": 0.6600331674958541,
"repo_name": "twitter/heron",
"id": "59f832fb2e936835dba628ca443706f7e0d87321",
"size": "3262",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heron/instance/src/python/utils/misc/custom_grouping_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11709"
},
{
"name": "C++",
"bytes": "1623082"
},
{
"name": "CSS",
"bytes": "109554"
},
{
"name": "HCL",
"bytes": "2115"
},
{
"name": "HTML",
"bytes": "156820"
},
{
"name": "Java",
"bytes": "4466689"
},
{
"name": "JavaScript",
"bytes": "1111202"
},
{
"name": "M4",
"bytes": "17941"
},
{
"name": "Makefile",
"bytes": "781"
},
{
"name": "Objective-C",
"bytes": "1929"
},
{
"name": "Python",
"bytes": "1537910"
},
{
"name": "Ruby",
"bytes": "1930"
},
{
"name": "Scala",
"bytes": "72781"
},
{
"name": "Shell",
"bytes": "166876"
},
{
"name": "Smarty",
"bytes": "528"
},
{
"name": "Thrift",
"bytes": "915"
}
],
"symlink_target": ""
} |
def test_sv_tokenizer_handles_long_text(sv_tokenizer):
text = """Det var så härligt ute på landet. Det var sommar, majsen var gul, havren grön,
höet var uppställt i stackar nere vid den gröna ängen, och där gick storken på sina långa,
röda ben och snackade engelska, för det språket hade han lärt sig av sin mor.
Runt om åkrar och äng låg den stora skogen, och mitt i skogen fanns djupa sjöar; jo, det var verkligen trevligt ute på landet!"""
tokens = sv_tokenizer(text)
assert len(tokens) == 86
def test_sv_tokenizer_handles_trailing_dot_for_i_in_sentence(sv_tokenizer):
text = "Provar att tokenisera en mening med ord i."
tokens = sv_tokenizer(text)
assert len(tokens) == 9
| {
"content_hash": "0088f687223e14f6856c8ac672606fda",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 129,
"avg_line_length": 50.214285714285715,
"alnum_prop": 0.7311522048364154,
"repo_name": "honnibal/spaCy",
"id": "1e26c45bc61ad588edfdbfe49d222eaca5091b9f",
"size": "723",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spacy/tests/lang/sv/test_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "729544"
},
{
"name": "HTML",
"bytes": "26303"
},
{
"name": "JavaScript",
"bytes": "234039"
},
{
"name": "Jinja",
"bytes": "10482"
},
{
"name": "Makefile",
"bytes": "1576"
},
{
"name": "Python",
"bytes": "3361067"
},
{
"name": "Sass",
"bytes": "56639"
},
{
"name": "Shell",
"bytes": "984"
}
],
"symlink_target": ""
} |
from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.preconditions['python_version']
)
python_package.build()
python_package.verify()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage(
version=self.settings["postgres_version"],
)
postgres_package.build()
postgres_package.verify()
redis_package = hitchredis.RedisPackage(version="2.8.4")
redis_package.build()
redis_package.verify()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=5.0,
)
postgres_user = hitchpostgres.PostgresUser("brainiac_repo", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("brainiac_repo", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
version=str(self.settings.get("django_version")),
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("quiet", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Configure selenium driver
self.driver = self.services['Firefox'].driver
self.driver.set_window_size(self.settings['window_size']['height'], self.settings['window_size']['width'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
def click(self, on):
"""Click on HTML id."""
self.driver.find_element_by_id(on).click()
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
def click_submit(self):
"""Click on a submit button if it exists."""
self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()
| {
"content_hash": "35423132b1bb0bfa7caad673c5a05caf",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 114,
"avg_line_length": 34.391891891891895,
"alnum_prop": 0.611984282907662,
"repo_name": "jondelmil/brainiac",
"id": "6e30dc4eb66c690c23059e136d631763af2e3a71",
"size": "5090",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/engine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1646"
},
{
"name": "HTML",
"bytes": "20095"
},
{
"name": "JavaScript",
"bytes": "3150"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "37842"
},
{
"name": "Shell",
"bytes": "4542"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.