code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 2 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int32 2 to 1.05M
import pandas as pd
import pickle
from urllib.request import urlopen
from bs4 import BeautifulSoup
from collections import OrderedDict
import logging
import datetime
log = logging.getLogger(__name__)
# Make a publicly available filename output format
OUTPUT_FILENAME_PREFIX_FORMAT = "{date}_{ticker}"
OUTPUT_FILENAME_SUFFIX_FORMAT = "_exp{expiration}.{extension}"
OUTPUT_FILENAME_FORMAT = OUTPUT_FILENAME_PREFIX_FORMAT + "_" + OUTPUT_FILENAME_SUFFIX_FORMAT
# Make a string clean for use as a filename
clean_filename = lambda s: "".join([c for c in s if c.isalpha() or c.isdigit() or c==' ']).rstrip()
# Common helper for creating a data header
data_header = lambda prefix, header: prefix + "_" + header
def secure_filename(ticker, expiration, extension="csv"):
""" Create a usable filename to write an options data file
Args:
ticker (str): ticker symbol of the option data
expiration (str): expiration string
extension (str): filename extension
Returns:
str: filename to be used to save options data
"""
today = datetime.date.today()
clean_exp = clean_filename(expiration).replace(" ", "-")
return OUTPUT_FILENAME_FORMAT.format(
date=today, ticker=ticker, expiration=clean_exp, extension=extension)
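# Illustrative example (hypothetical ticker and expiration; the date portion depends on the day
# the script runs, and the double underscore comes from joining the prefix and suffix formats above):
#   secure_filename("SPX", "December 21, 2018")
#   -> "2018-12-01_SPX__expDecember-21-2018.csv"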
def load_symbol(ticker):
""" Loads the options chain for the index with the given symbol
Args:
ticker (str): ticker symbol of the option data
Returns:
BeautifulSoup: soup object containing options table
"""
url = "http://www.marketwatch.com/investing/index/{}/options".format(ticker.lower())
log.info("Loading webpage: {}".format(url))
with urlopen(url) as urlobj:
soup = BeautifulSoup(urlobj.read(), "html.parser")
return soup
def _checkItemWasFound(item_to_check, item_name, parent_name="webpage"):
""" Checks if item_to_check is None, and if it is then throws an Exception. """
if item_to_check is None:
raise Exception("Failed to find item '{}' in '{}'".format(item_name, parent_name))
else:
log.debug("Found item '{}' in '{}'".format(item_name, parent_name))
def parse_options(soup, symbol):
""" Parses the given marketwatch soup for an options table. Saves the extracted options table
to a file.
Args:
soup (BeautifulSoup): soup object containing data table
symbol (str): ticker symbol to use for labeling
"""
# Helper lambda functions
text_clean = lambda s: s.strip().replace(",","")
unpack_cols = lambda cols: [ text_clean(td.text) for td in cols ]
# First find the options table to parse
options = soup.find('div', {'id':'options'})
_checkItemWasFound(options, 'options_table')
# Find the first header row in the table
header_rows = options.select('tr.chainrow.understated')
header_row = header_rows[0] if header_rows else None # TODO: Iterate over all headers to find all tables!
_checkItemWasFound(header_row, 'header_row', 'options_table')
# Parse the header row into fields
headers = unpack_cols(header_row.findAll('td'))
header_half_len = int(len(headers)/2)
strike_header = headers[header_half_len]
main_headers = headers[:header_half_len]
option_order = ["call", "put"]
data_headers = lambda option_type: [data_header(option_type, header) for header in main_headers]
log.info("Found main headers: {}".format(main_headers))
# Now process all of the rows and extract the pricing information from them
rows = options.findAll('tr', {'class': 'chainrow'})
log.debug("Found {} rows in options table.".format(len(rows)))
calls_are_itm = True
current_expiration = None
data_columns = [header for option_type in option_order for header in data_headers(option_type)]
current_table = None
for row in rows:
if "heading" in row["class"] and "Expires" in row.text:
# Extract the expiration date
this_expiration = row.text.strip().replace("Expires ", "")
if current_expiration is not None and this_expiration != current_expiration:
# We need to close the old file and start a new one
out_file = secure_filename(symbol, current_expiration)
current_table.to_csv(out_file)
log.info("Finshed expiration '{}'; Saved to: {}".format(
current_expiration, out_file))
log.debug("Starting new expiration: {}".format(this_expiration))
current_expiration = this_expiration
current_table = pd.DataFrame(columns=(data_columns))
elif "aright" in row['class']:
# this is a row containing option data. get the strike column first.
strike_col = text_clean(row.find('td', {'class': 'strike-col'}).text)
log.debug("Processing row with strike: {}".format(strike_col))
results = OrderedDict()
# add a prefix as a call or put depending on whether we know (by tracking our progress)
# whether calls are itm yet or not
extract_order = ["inthemoney", ""] if calls_are_itm else ["", "inthemoney"]
for option_type, extract in zip(option_order, extract_order):
cols = unpack_cols(row.findAll('td', {'class': extract}))
if len(cols) < len(main_headers):
cols += [None] * (len(main_headers) - len(cols))
cols = cols[:len(main_headers)]
entry = [(header, col) for header, col in zip(data_headers(option_type), cols)]
results.update(entry)
# add this row to the running table
strike_value = float(strike_col.replace(",", ""))
log.debug("Processed option row for strike: {}".format(strike_value))
current_table.loc[strike_value] = results
elif "stockprice" in row['class']:
# We have reached the stock price in the table, so we know calls are no longer itm
# (and we skip this row)
log.debug("Processed current stock price.")
calls_are_itm = False
else:
# We don't know or care how to process this row.
pass
# After the row loop finishes, write out the final expiration table, which the loop above never
# flushes (it only saves a table when it encounters the next expiration heading).
if current_table is not None and current_expiration is not None:
out_file = secure_filename(symbol, current_expiration)
current_table.to_csv(out_file)
log.info("Finished expiration '{}'; Saved to: {}".format(current_expiration, out_file))
if __name__ == "__main__":
import os
import argparse
parser = argparse.ArgumentParser(
description="Parses a MarketWatch options chain into a CSV made by pandas.")
parser.add_argument("--symbol", default="spx", help="Symbol to look up.")
parser.add_argument("--out", default=os.getcwd(),
help="Output directory. Default is current dir.")
parser.add_argument("--info", action="store_true", help="Print additional information")
parser.add_argument("--verbose", action="store_true", help="Print debug information")
args=parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
elif args.info:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
# first let's get this output file straight
if args.out is None:
args.out = ".".join([args.symbol, "csv"])
soup = load_symbol(args.symbol)
parse_options(soup, args.symbol) | ktarrant/options_csv | options_csv.py | Python | mit | 7,143 |
# Copyright (c) 2012 The Khronos Group Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and/or associated documentation files (the "Materials"), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
# See Core.Logic.FJudgementContext for the information
# of the 'context' parameter.
# This sample judging object does the following:
#
# JudgeBaseline: just verifies that the standard steps did not crash.
# JudgeSuperior: also verifies that the validation steps are not in error.
# JudgeExemplary: same as intermediate badge.
# We import an assistant script that includes the common verification
# methods. The assistant buffers its checks, so that running them again
# does not incur an unnecessary performance hit.
from StandardDataSets.scripts import JudgeAssistant
# Please feed your node list here:
tagLst = [['library_effects', 'effect', 'profile_COMMON', 'technique', 'phong', 'transparent', 'color'], ['library_effects', 'effect', 'profile_COMMON', 'newparam', 'float4'], ['library_effects', 'effect', 'newparam', 'float4']]
attrName = ''
attrVal = ''
dataToCheck = ''
class SimpleJudgingObject:
def __init__(self, _tagLst, _attrName, _attrVal, _data):
self.tagList = _tagLst
self.attrName = _attrName
self.attrVal = _attrVal
self.dataToCheck = _data
self.status_baseline = False
self.status_superior = False
self.status_exemplary = False
self.__assistant = JudgeAssistant.JudgeAssistant()
def JudgeBaseline(self, context):
# No step should crash
self.__assistant.CheckCrashes(context)
# Import/export/validate must exist and pass, while Render must only exist.
self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"])
self.status_baseline = self.__assistant.GetResults()
return self.status_baseline
# To pass intermediate you need to pass basic; this object could also include additional
# tests specific to the intermediate badge.
def JudgeSuperior(self, context):
# if baseline fails, no point in further checking
if (self.status_baseline == False):
self.status_superior = self.status_baseline
return self.status_superior
# Compare the rendered images between import and export
# Then compare images against alpha 0 reference test for equivalence
# Then compare images against color black test for non-equivalence
# Last, check for preservation of element data
if ( self.__assistant.CompareRenderedImages(context) ):
if ( self.__assistant.CompareImagesAgainst(context, "_ref_phong_transparent_rgbzero_alpha1", None, None, 5, True, True) ):
if ( self.__assistant.CompareImagesAgainst(context, "_ref_phong_transparent_rgbzero_white", None, None, 5, True, False) ):
self.__assistant.ElementDataPreservedIn(context, self.tagList, "float")
self.status_superior = self.__assistant.DeferJudgement(context)
return self.status_superior
# To pass advanced you need to pass intermediate; this object could also include additional
# tests specific to the advanced badge.
def JudgeExemplary(self, context):
self.status_exemplary = self.status_superior
return self.status_exemplary
# This is where all the work occurs: "judgingObject" is an absolutely necessary token.
# The dynamic loader looks very specifically for a class instance named "judgingObject".
#
judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
| KhronosGroup/COLLADA-CTS | StandardDataSets/collada/library_effects/effect/profile_COMMON/technique/phong/transparent/effect_phong_transparent_rgb_zero/effect_phong_transparent_rgb_zero.py | Python | mit | 4,635 |
import asyncio
import unittest
import random
from gremlinpy import Gremlin
from . import ConnectionTestCases, EntityTestCases, MapperTestCases
from gizmo import Mapper, Request, Collection, Vertex, Edge
from gizmo.mapper import EntityMapper
class BaseTests(unittest.TestCase):
def setUp(self):
self.request = Request('localhost', port=9192)
self.gremlin = Gremlin('gizmo_testing')
self.mapper = Mapper(self.request, self.gremlin)
self.ioloop = asyncio.get_event_loop()
super(BaseTests, self).setUp()
def tearDown(self):
super(BaseTests, self).tearDown()
async def purge(self):
script = "%s.V().map{it.get().remove()}" % self.gremlin.gv
res = await self.mapper.query(script=script)
return res
class ConnectionTests(BaseTests, ConnectionTestCases):
pass
class EntityTests(EntityTestCases, BaseTests):
pass
class MapperTests(MapperTestCases, BaseTests):
pass
class CollectionTests(BaseTests):
pass
class TraversalTests(BaseTests):
pass
if __name__ == '__main__':
unittest.main()
| emehrkay/Gizmo | gizmo/test/integration/titan.py | Python | mit | 1,103 |
# t1- TAS
import micropython
micropython.alloc_emergency_exception_buf(100)
import machine as m
from time import sleep_us
from t1 import T1
import unittest
class TestT1(unittest.TestCase):
def testInit(self):
tt = T1()
self.assertEqual(tt.counter, 0)
self.assertIsInstance(tt.led, m.Pin)
self.assertIsInstance(tt.sense, m.Pin)
def testCounter(self):
# requires a jumper from pin 4 to pin 5
tt = T1()
signal_gen = m.Pin(5, m.Pin.OUT, 0)
self.assertEqual(tt.counter, 0)
#until start()ed, counter does not advance
signal_gen(1)
self.assertEqual(tt.counter, 0)
signal_gen(0)
self.assertEqual(tt.counter, 0)
#once start()ed, counter advances
tt.start()
self.assertEqual(tt.counter, 0)
signal_gen(1) # on rising edge
sleep_us(1000)
self.assertEqual(tt.counter, 1)
signal_gen(0) # but not on falling edge
sleep_us(1000)
self.assertEqual(tt.counter, 1)
# Again by one
signal_gen(1) # on rising edge
sleep_us(1000)
self.assertEqual(tt.counter, 2)
signal_gen(0) # but not on falling edge
sleep_us(1000)
self.assertEqual(tt.counter, 2)
# one each time
c = tt.counter + 1
for i in range(100):
signal_gen(1)
signal_gen(0)
sleep_us(1000)
self.assertEqual(tt.counter, i+c)
| pramasoul/ESP-geiger | test_t1.py | Python | mit | 1,517 |
"""
Imports from the utils.multiclass module of Scikit-learn.
"""
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
# License: BSD 3 clause
from __future__ import division
from collections import Sequence
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from ..six import string_types
import numpy as np
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
Return ``True``, if ``y`` is in a multilabel format, else ```False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.unique(y.data).size == 1 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or
hasattr(y, '__array__')) and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and
isinstance(y[0], Sequence) and not
isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
| flennerhag/mlens | mlens/externals/sklearn/type_of_target.py | Python | mit | 5,606 |
# -*- coding: utf-8 -*-
'''
Unit tests for selecting the data masking method.
'''
import numpy as np
import pandas as pd
import unittest
from . import mask_the_info
class TestMaskingMethodSelection(unittest.TestCase):
'''
Test the function selects the masking method to be used
'''
def test_improper_method(self):
'''
Test providing an improper method
'''
df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
self.result = mask_the_info.masking_method_selection(df, [0],
'wrongMethod',
False,
'filename')
self.assertFalse(self.result)
def test_proper_method(self):
'''
Test providing a proper method
'''
df = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))
self.result = mask_the_info.masking_method_selection(df, [0],
'replace',
False,
'filename')
self.assertTrue(isinstance(self.result, pd.DataFrame))
if __name__ == '__main__':
unittest.main()
| GeorgeManakanatas/PPDM | data_masking_methods/test_mask_the_info.py | Python | mit | 1,315 |
from distutils.core import setup
setup(
name='django-model-monitor',
version='0.0.1',
packages=['modelmonitor'],
package_dir={'': 'src'},
url='https://github.com/OmegaDroid/django-model-monitor',
license='MIT',
author='Daniel Bate',
author_email='',
description='App to monitor changes in models',
requires=["django"],
)
| OmegaDroid/django-model-monitor | setup.py | Python | mit | 362 |
import csv
def load_csv_data(file_path, data_type=float, max_rows=None):
"""
Example:
>>> csvio.load_csv_data("train_input.csv")
# Loads all train_input and returns a list of lists
>>> csvio.load_csv_data("train_output.csv" data_type=int)
# Reads csv numbers as integers instead of floats
# Also, there is only one column (excluding id column), it flattens
[[2], [3], [4]] into [2, 3, 4]
>>> csvio.load_csv_data("train_input.csv", max_rows=1000)
# Only loads 1000 rows. Great for quick testing
"""
data = []
with open(file_path) as f:
reader = csv.reader(f)
reader.next() # Getting rid of the headers
for i, row in enumerate(reader, 1):
if max_rows is not None and i > max_rows:
break
row_sans_id = row[1:]
converted_row = [data_type(x) for x in row_sans_id]
if len(converted_row) == 1:
# We want [4, 5, 6] instead of [[4], [5], [6]]
data.append(converted_row[0])
else:
data.append(converted_row)
return data
def write_csv_output(predictions, file_path):
"""
Example:
>>> csvio.write_csv_output(clf.predict(X_test), "deep_learning_submission.csv")
"""
out_file = open(file_path, "wb")
writer = csv.writer(out_file, delimiter=',')
writer.writerow(['Id', 'Prediction']) # header
for row in enumerate(predictions, 1):
writer.writerow(row)
out_file.close()
| deepanjanroy/aml3 | csvio.py | Python | mit | 1,512 |
from __future__ import absolute_import
import os
import sys
from subprocess import check_call, CalledProcessError
from google.protobuf.descriptor import FieldDescriptor
if sys.version_info[0] >= 3:
long = int
unicode = str
def compile_proto_file(input_files, output_path, include_path):
"""
Compile a .proto file using protoc.
The compiled files are stored in the given output path.
Returns the list of compiled filenames.
"""
# Init the output dir.
output_path = os.path.expanduser(output_path)
if not os.path.isdir(output_path):
os.makedirs(output_path)
# Assemble the include path.
include_path = ':'.join(os.path.expanduser(p) for p in include_path)
compiled_names = []
for filename in input_files:
filename = os.path.expanduser(filename)
# Check if the file exists.
if not os.path.isfile(filename):
raise ValueError("file {} does not exist".format(filename))
# Assemble compiled filename to return to caller.
basename = os.path.basename(filename)
name, ext = os.path.splitext(basename)
compiled_name = os.path.join(output_path, name + "_pb2.py")
compiled_names.append(compiled_name)
# Check if the file was already compiled.
if os.path.isfile(compiled_name):
if os.path.getmtime(compiled_name) >= os.path.getmtime(filename):
continue
# Compile.
dirname = os.path.dirname(filename)
include = dirname + ':' + include_path
command = ["protoc", "--python_out", output_path, "-I", include]
try:
check_call(command + [filename])
except OSError:
sys.stderr.write("The program 'protoc' is required but not installed")
raise
except CalledProcessError:
raise
return compiled_names
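# Illustrative usage (hypothetical paths; assumes the .proto file exists and `protoc` is installed):
#   compiled = compile_proto_file(["~/protos/telemetry.proto"], "~/build/pb", ["~/protos"])
#   # -> e.g. [".../build/pb/telemetry_pb2.py"]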
###############################################################################
# Protobuf to dict conversion
###############################################################################
DECODE_FN_MAP = {
FieldDescriptor.TYPE_DOUBLE: float,
FieldDescriptor.TYPE_FLOAT: float,
FieldDescriptor.TYPE_INT32: int,
FieldDescriptor.TYPE_INT64: long,
FieldDescriptor.TYPE_UINT32: int,
FieldDescriptor.TYPE_UINT64: long,
FieldDescriptor.TYPE_SINT32: int,
FieldDescriptor.TYPE_SINT64: long,
FieldDescriptor.TYPE_FIXED32: int,
FieldDescriptor.TYPE_FIXED64: long,
FieldDescriptor.TYPE_SFIXED32: int,
FieldDescriptor.TYPE_SFIXED64: long,
FieldDescriptor.TYPE_BOOL: bool,
FieldDescriptor.TYPE_STRING: unicode,
FieldDescriptor.TYPE_BYTES: lambda b: bytes_to_string(b),
FieldDescriptor.TYPE_ENUM: int,
}
def field_type_to_fn(msg, field):
if field.type == FieldDescriptor.TYPE_MESSAGE:
# For embedded messages recursively call this function. If it is
# a repeated field return a list
result = lambda msg: proto_to_dict(msg)
elif field.type in DECODE_FN_MAP:
result = DECODE_FN_MAP[field.type]
else:
raise TypeError("Field %s.%s has unrecognised type id %d" % (
msg.__class__.__name__, field.name, field.type))
return result
def proto_to_dict(msg):
result_dict = {}
extensions = {}
for field, value in msg.ListFields():
conversion_fn = field_type_to_fn(msg, field)
# Skip extensions
if not field.is_extension:
# Repeated fields result in an array, otherwise just call the
# conversion function to store the value
if field.label == FieldDescriptor.LABEL_REPEATED:
result_dict[field.name] = [conversion_fn(v) for v in value]
else:
result_dict[field.name] = conversion_fn(value)
return result_dict
| knipknap/telemetric | telemetric/protoutil.py | Python | mit | 3,836 |
from twisted.cred import portal
from twisted.conch import manhole_ssh
from twisted.conch.checkers import SSHPublicKeyDatabase
from carapace.util import ssh as util
from myriad.game.shell import servershell
def getShellFactory(game, **namespace):
realm = servershell.TerminalRealm(game, namespace)
sshPortal = portal.Portal(realm)
factory = manhole_ssh.ConchFactory(sshPortal)
factory.privateKeys = {'ssh-rsa': util.getPrivKey()}
factory.publicKeys = {'ssh-rsa': util.getPubKey()}
factory.portal.registerChecker(SSHPublicKeyDatabase())
return factory
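# Typical Twisted usage would hand this factory to a listening port, e.g. (hypothetical port,
# with `game` supplied by the caller):
#   from twisted.internet import reactor
#   reactor.listenTCP(2222, getShellFactory(game))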
| oubiwann/myriad-worlds | myriad/game/shell/service.py | Python | mit | 582 |
import datetime as dt
import lucy.core
import uuid
def _get_table(what):
return getattr(lucy.core.db, what)
class LucyObject(dict):
_type = None
def __init__(self, **kwargs):
self['_type'] = self._type
for k, v in kwargs.items():
self[k] = v
def _gen_uuid(self):
return str(uuid.uuid1())
def save(self):
if self._type is None:
raise ValueError("You done goofed, sucka")
uuid = self.get('_id')
self['updated_at'] = dt.datetime.utcnow()
if uuid is None:
uuid = self['_id'] = self._gen_uuid()
self['created_at'] = dt.datetime.utcnow()
_get_table(self._type).save(self)
return uuid
def delete(self):
table = _get_table(self._type)
table.remove({"_id": self['_id']})
return self['_id']
@classmethod
def load(cls, what):
table = _get_table(cls._type)
obj = table.find_one({"_id": what})
if obj is None:
raise KeyError("No such object: `%s' found." % (what))
return cls.from_dict(obj)
@classmethod
def query(cls, what, sort=None, sort_order=1,
limit=None, page_count=None, page=0):
table = _get_table(cls._type)
pointer = table.find(what)
if sort:
pointer = pointer.sort(sort, sort_order)
if limit:
pointer = pointer.limit(limit)
if page and page_count:
offset = int(page) * int(page_count)
pointer = pointer.skip(offset)
if page_count:
pointer = pointer.limit(page_count)
for x in pointer:
yield cls(**x)
@classmethod
def from_dict(cls, what):
klass = cls(**what)
return klass
@classmethod
def single(cls, query):
os = cls.query(query)
try:
return next(os)
except StopIteration:
raise KeyError("Error. No such thing")
| paultag/lucy | lucy/models/__init__.py | Python | mit | 1,973 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20150426_1817'),
]
operations = [
migrations.AddField(
model_name='blog',
name='comments_enabled',
field=models.BooleanField(default=True),
),
]
| codefisher/djangopress | djangopress/blog/migrations/0005_blog_comments_enabled.py | Python | mit | 407 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClusterUpdateParameters(Model):
"""Parameters supplied to the Update operation.
:param tags: The user specified tags associated with the Cluster.
:type tags: dict[str, str]
:param scale_settings: Desired scale for the cluster.
:type scale_settings: ~azure.mgmt.batchai.models.ScaleSettings
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'scale_settings': {'key': 'properties.scaleSettings', 'type': 'ScaleSettings'},
}
def __init__(self, **kwargs):
super(ClusterUpdateParameters, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.scale_settings = kwargs.get('scale_settings', None)
| lmazuel/azure-sdk-for-python | azure-mgmt-batchai/azure/mgmt/batchai/models/cluster_update_parameters.py | Python | mit | 1,217 |
__author__ = 'Ashwin'
__email__ = '[email protected]'
"""
Basic LDA module that is used in the project.
"""
import re
from gensim import corpora, models
import operator
class LDAVisualModel:
def __init__(self, word_corpus):
"""
The LDAVisualModel requires list of word lists from the
document corpus. Each list of words represents a document.
:param word_corpus: [[<words>],[],[]]
"""
self.id2word = corpora.Dictionary(word_corpus)
self.mm = []
self.lda = None
def create_word_corpus(self, word_corpus, store_corpus=False, store_loc='dicts/corpus.mm'):
"""
:param word_corpus: word_corpus: [[<words>],[],[]]
:param store_corpus: boolean to store the serialized corpus or not.
:param store_loc: Defines the location where the file is to be stored.
"""
for text in word_corpus:
self.mm.append(self.id2word.doc2bow(text))
if store_corpus:
corpora.MmCorpus.serialize(store_loc, self.mm)
def train_lda(self, num_top=2, update_t=1, chunks=10000, num_pass=1):
"""
:param num_top: The number of topics for which LDA trains.
:param update_t: How often the model is updated (passed to gensim as update_every).
:param chunks: Number of documents per training chunk (passed to gensim as chunksize).
:param num_pass: The number of passes that LDA executes on the data.
"""
self.lda = models.LdaModel(corpus=self.mm, id2word=self.id2word, num_topics=num_top,
update_every=update_t, chunksize=chunks, passes=num_pass)
def get_lda_corpus(self, num_of_topics=10, num_of_words=10):
"""
Get the trained topics as lists of (probability, word) pairs parsed from the model.
"""
topics = []
if self.lda:
for topic in self.lda.print_topics(num_of_topics, num_of_words):
regex = re.findall(r'(0\.[0-9]*)\*([0-9a-z]*)', topic, re.M | re.I)
topics.append(regex)
return topics
def generate_doc_topic(self):
# Find the number of topics.
num_topics = self.lda.num_topics
# Build the topic - document matrix.
doc_top = []
for idx, doc in enumerate(self.lda[self.mm]):
doc_top.append([0] * num_topics)
for topic in doc:
doc_top[idx][topic[0]] = topic[1]
return doc_top
def generate_doc_topic_rank(self):
# Find the number of topics.
num_topics = self.lda.num_topics
doc_top_rank = []
# Build the topic - document matrix.
for idx, doc in enumerate(self.lda[self.mm]):
top_prob = [0] * num_topics
top_rank = [0] * num_topics
# This constructs the topic probability list.
for topic in doc:
top_prob[topic[0]] = topic[1]
# Construct the ranks.
prob_rank = sorted(top_prob, reverse=True)
top_sort = sorted(range(len(top_prob)), key=lambda k: top_prob[k],
reverse=True)
# Create a new list with the ranks.
for rank, topic in enumerate(top_sort):
if prob_rank[rank] > 0:
top_rank[topic] = rank
else:
top_rank[topic] = num_topics - 1
doc_top_rank.append(top_rank)
return doc_top_rank
@staticmethod
def gen_doc_top_words(topics, doc_top):
# This maintains the top words list for each document.
doc_to_word = []
# Check the probability of the topic and the word
# distribution in it.
for doc in doc_top:
tmp_word_prob = {}
for idx, top_prob in enumerate(doc):
if top_prob > 0:
for word in topics[idx]:
if word[1] not in tmp_word_prob:
tmp_word_prob[word[1]] = float(word[0])*top_prob
else:
tmp_word_prob[word[1]] += float(word[0])*top_prob
# Sort the dictionary
sorted_word_prob = sorted(tmp_word_prob.items(), key=operator.itemgetter(1), reverse=True)
doc_to_word.append(sorted_word_prob)
return doc_to_word
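# Illustrative end-to-end usage (hypothetical toy corpus; assumes gensim is installed):
#   docs = [["cat", "dog", "fish"], ["dog", "bone"], ["fish", "river"]]
#   lda_vis = LDAVisualModel(docs)
#   lda_vis.create_word_corpus(docs)
#   lda_vis.train_lda(num_top=2, num_pass=5)
#   topics = lda_vis.get_lda_corpus(num_of_topics=2, num_of_words=3)
#   doc_top = lda_vis.generate_doc_topic()
#   top_words = LDAVisualModel.gen_doc_top_words(topics, doc_top)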
| codehacken/LDAExplore | processdata/lda.py | Python | mit | 4,195 |
#
# Python Design Patterns: Builder
# Author: Jakub Vojvoda [github.com/JakubVojvoda]
# 2016
#
# Source code is licensed under MIT License
# (for more details see LICENSE)
#
import sys
#
# Product
# the final object that will be created using Builder
#
class Product:
def __init__(self):
self._partA = ""
self._partB = ""
self._partC = ""
def makeA(self, part):
self._partA = part
def makeB(self, part):
self._partB = part
def makeC(self, part):
self._partC = part
def get(self):
return self._partA +" "+ self._partB +" "+ self._partC
#
# Builder
# abstract interface for creating products
#
class Builder:
def __init__(self):
self._product = Product()
def get(self):
return self._product
def buildPartA(self):
pass
def buildPartB(self):
pass
def buildPartC(self):
pass
#
# Concrete Builders
# create real products and stores them in the composite structure
#
class ConcreteBuilderX(Builder):
def __init__(self):
Builder.__init__(self)
def buildPartA(self):
self._product.makeA("A-X")
def buildPartB(self):
self._product.makeB("B-X")
def buildPartC(self):
self._product.makeC("C-X")
class ConcreteBuilderY(Builder):
def __init__(self):
Builder.__init__(self)
def buildPartA(self):
self._product.makeA("A-Y")
def buildPartB(self):
self._product.makeB("B-Y")
def buildPartC(self):
self._product.makeC("C-Y")
#
# Director
# responsible for managing the correct sequence of object creation
#
class Director:
def __init__(self):
self._builder = None
def set(self, builder):
self._builder = builder
def get(self):
return self._builder.get()
def construct(self):
self._builder.buildPartA()
self._builder.buildPartB()
self._builder.buildPartC()
if __name__ == "__main__":
builderX = ConcreteBuilderX()
builderY = ConcreteBuilderY()
director = Director()
director.set(builderX)
director.construct()
product1 = director.get()
print("1st product parts: " + product1.get())
director.set(builderY)
director.construct()
product2 = director.get()
print("2nd product parts: " + product2.get())
| JakubVojvoda/design-patterns-python | builder/Builder.py | Python | mit | 2,322 |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import dateparser
STYLES_DICT = {
'black': '',
'green': 'primary',
'red': 'danger'
}
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
def create_blocks(text: str, entitlement: str, options: list, reply: str) -> list:
value = json.dumps({
'entitlement': entitlement,
'reply': reply
})
blocks: list = [{
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': text
}
}]
elements = []
for option in options:
element = {
'type': 'button',
'text': {
'type': 'plain_text',
'emoji': True,
'text': option['text']
},
'value': value
}
if 'style' in option:
element['style'] = option['style']
elements.append(element)
if elements:
actions = {
'type': 'actions',
'elements': elements
}
blocks.append(actions)
return blocks
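# Illustrative result (hypothetical text and options; the shape follows the code above):
#   create_blocks("Approve?", "guid@123", [{'text': 'Yes', 'style': 'primary'}, {'text': 'No'}], "Thanks")
#   returns a section block carrying the question text, followed by an actions block with one
#   button element per option, each button's value holding the entitlement and reply as JSON.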
def main():
res = demisto.executeCommand('addEntitlement', {'persistent': demisto.get(demisto.args(), 'persistent'),
'replyEntriesTag': demisto.get(demisto.args(), 'replyEntriesTag')})
if isError(res[0]):
demisto.results(res)
sys.exit(0)
entitlement = demisto.get(res[0], 'Contents')
option1 = demisto.get(demisto.args(), 'option1')
option2 = demisto.get(demisto.args(), 'option2')
extra_options = argToList(demisto.args().get('additionalOptions', ''))
reply = demisto.get(demisto.args(), 'reply')
response_type = demisto.get(demisto.args(), 'responseType')
lifetime = demisto.get(demisto.args(), 'lifetime')
slack_instance = demisto.get(demisto.args(), 'slackInstance')
slack_version = demisto.args().get('slackVersion', 'SlackV3')
try:
expiry = datetime.strftime(dateparser.parse('in ' + lifetime, settings={'TIMEZONE': 'UTC'}),
DATE_FORMAT)
except Exception:
expiry = datetime.strftime(dateparser.parse('in 1 day', settings={'TIMEZONE': 'UTC'}),
DATE_FORMAT)
default_response = demisto.get(demisto.args(), 'defaultResponse')
entitlement_string = entitlement + '@' + demisto.investigation()['id']
if demisto.get(demisto.args(), 'task'):
entitlement_string += '|' + demisto.get(demisto.args(), 'task')
args = {
'ignoreAddURL': 'true',
'using-brand': slack_version
}
if slack_instance:
args.update({
'using': slack_instance
})
user_options = [option1, option2]
options = []
if extra_options:
user_options += extra_options
if response_type == 'thread':
for option in user_options:
options.append(option.split('#')[0])
string_options = ' or '.join(list(map(lambda o: '`{}`'.format(o), options)))
message = '{} - Please reply to this thread with {}.'.format(demisto.args()['message'], string_options)
args['message'] = json.dumps({
'message': message,
'entitlement': entitlement_string,
'reply': reply,
'expiry': expiry,
'default_response': default_response
})
else:
for option in user_options:
option = option.split('#')
button = {
'text': option[0]
}
if len(option) > 1:
style = STYLES_DICT.get(option[1])
if style:
button['style'] = style
options.append(button)
blocks = json.dumps(create_blocks(demisto.args()['message'], entitlement_string, options, reply))
args['blocks'] = json.dumps({
'blocks': blocks,
'entitlement': entitlement_string,
'reply': reply,
'expiry': expiry,
'default_response': default_response
})
args['message'] = demisto.args()['message']
to = demisto.get(demisto.args(), 'user')
channel = demisto.get(demisto.args(), 'channel')
if to:
args['to'] = to
elif channel:
args['channel'] = channel
else:
return_error('Either a user or a channel must be provided.')
try:
demisto.results(demisto.executeCommand('send-notification', args))
except ValueError as e:
if 'Unsupported Command' in str(e):
return_error('The command is unsupported by this script. If you have SlackV2 enabled, '
'please use SlackAsk instead.')
else:
return_error('An error has occurred while executing the send-notification command',
error=e)
if __name__ in ('__builtin__', 'builtins', '__main__'):
main()
| VirusTotal/content | Packs/Slack/Scripts/SlackAskV2/SlackAskV2.py | Python | mit | 4,905 |
'''
Created on 24 Jan 2017
@author: muth
'''
import os
import RPi.GPIO as GPIO
import threading
from Adafruit_Thermal import *
from time import sleep
from PIL import Image
from PIL import ImageOps
from PIL import ImageEnhance
from PIL import ImageDraw
from PIL import ImageFont
from smemlcd import SMemLCD
from picamera import PiCamera
from io import BytesIO
# Constants
S_WIDTH = 400
S_HEIGHT = 240
S_SIZE = (S_WIDTH, S_HEIGHT)
P_WIDTH = 640
P_HEIGHT = 384
P_SIZE = (P_WIDTH, P_HEIGHT)
F_WIDTH = 1280
F_HEIGHT = 768
F_SIZE = (F_WIDTH, F_HEIGHT)
SHOT_PIN = 16
PRINT_PIN = 15
NEXT_PIN = 13
PREV_PIN = 11
HALT_PIN = 31
# Thread using the image full resolution
class CameraThread(threading.Thread):
takeAshot = False
exitNOshot = False
stream2 = BytesIO()
def __init__(self):
threading.Thread.__init__(self)
self.event = threading.Event()
def run(self):
global currentFileNumber
for bug in camera.capture_continuous(self.stream2, format='jpeg', use_video_port=True, splitter_port=0):
self.stream2.seek(0) # "Rewind" the stream to the beginning so we can read its content
# print('Capture thread: ', self.takeAshot)
if self.takeAshot:
image = Image.open(self.stream2)
# Increment file number
i = 1
while os.path.exists("pz%05d.jpg" % i):
i += 1
currentFileNumber = i
#
# Save last to a jpeg file
saveImageToFile(image, "pz%05d.jpg" % currentFileNumber)
self.takeAshot = False
break
if self.exitNOshot:
break
# Variables
currentFileNumber = -1
# GPIO setup
GPIO.setmode(GPIO.BOARD)
GPIO.setup(SHOT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(PRINT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(NEXT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(PREV_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(HALT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# add edge detection on a channel
GPIO.add_event_detect(SHOT_PIN, GPIO.FALLING, bouncetime=250)
GPIO.add_event_detect(PRINT_PIN, GPIO.FALLING, bouncetime=250)
GPIO.add_event_detect(NEXT_PIN, GPIO.FALLING, bouncetime=250)
GPIO.add_event_detect(PREV_PIN, GPIO.FALLING, bouncetime=250)
# Create Sharp mempry LCD
lcd = SMemLCD('/dev/spidev0.0')
# Create Printer
printer = Adafruit_Thermal("/dev/ttyAMA0", 115200, timeout=0, rtscts=True)
# Create camera and in-memory stream
stream = BytesIO()
camera = PiCamera()
camera.rotation = 180
camera.resolution = (F_WIDTH, F_HEIGHT)
camera.framerate = 8
camera.contrast = 50
camera.start_preview()
sleep(1)
#Printer resolution camera Thread to minimize shot delay
liveViewThread = CameraThread()
def haltSystem(channel):
print 'Halt...'
os.system("sudo halt")
GPIO.add_event_detect(HALT_PIN, GPIO.FALLING, callback = haltSystem, bouncetime = 2000)
def displayImageFileOnLCD(filename):
print 'displays ', filename
title = 'Review Mode'
# resize/dither to screen resolution and send to LCD
image = Image.open(filename)
im_width, im_height = image.size
if im_width < im_height:
image = image.rotate(90)
image.thumbnail(S_SIZE, Image.ANTIALIAS)
image_sized = Image.new('RGB', S_SIZE, (0, 0, 0))
image_sized.paste(image,((S_SIZE[0] - image.size[0]) / 2, (S_SIZE[1] - image.size[1]) / 2))
# draw filename
draw = ImageDraw.Draw(image_sized)
font = ImageFont.truetype('arial.ttf', 18)
draw.rectangle([(0, 0), (115, 22)], fill=(255,255,255), outline=(0,0,0))
draw.text((2, 2), title, fill='black', font=font)
draw.rectangle([(279, 217), (399, 239)], fill=(255,255,255), outline=(0,0,0))
draw.text((290, 218), filename, fill='black', font=font)
# display on LCD
image_sized = ImageOps.invert(image_sized)
image_sized = image_sized.convert('1') # convert image to black and white
lcd.write(image_sized.tobytes())
def printImageFile(filename):
print 'prints ', filename
# resize to printer resolution and send to printer
image = Image.open(filename)
im_width, im_height = image.size
if im_width > im_height:
image = image.rotate(90)
image.thumbnail((P_HEIGHT, P_WIDTH), Image.ANTIALIAS)
printer.printImage(image, False)
printer.feed(3)
def saveImageToFile(image, filename):
print 'saves image ', filename
# save full image
image.save(filename)
#Main loop
while True:
# Restart shooting thread
if not liveViewThread.isAlive():
liveViewThread = CameraThread()
liveViewThread.start()
# View Loop
stream.seek(0)
for foo in camera.capture_continuous(stream, format='jpeg', use_video_port=True, resize=(S_WIDTH, S_HEIGHT), splitter_port=1):
t1 = time.time()
stream.seek(0) # "Rewind" the stream to the beginning so we can read its content
image_source = Image.open(stream)
imageInverted = ImageOps.invert(image_source)
# convert image to black or white and send to LCD
lcd.write(imageInverted.convert('1').tobytes())
stream.seek(0)
# print('Live view : capture and display time: %f' % (time.time() - t1))
if GPIO.event_detected(SHOT_PIN):
liveViewThread.takeAshot = True
break
if GPIO.event_detected(PRINT_PIN):
liveViewThread.exitNOshot = True
break
# Wait until the picture is taken
liveViewThread.join(5)
# Set current file number if not set yet
if currentFileNumber == -1 :
i = 0
while True:
if os.path.exists("pz%05d.jpg" % (i+1)):
i += 1
else :
break
currentFileNumber = i
# Display current image
displayImageFileOnLCD("pz%05d.jpg" % currentFileNumber)
# Review Loop
while True:
sleep(0.25)
if GPIO.event_detected(NEXT_PIN):
# Increment current file name and display it
if os.path.exists("pz%05d.jpg" % (currentFileNumber+1)):
currentFileNumber += 1
displayImageFileOnLCD("pz%05d.jpg" % currentFileNumber)
if GPIO.event_detected(PREV_PIN):
# Decrement current file name and display it
if os.path.exists("pz%05d.jpg" % (currentFileNumber-1)):
currentFileNumber -= 1
displayImageFileOnLCD("pz%05d.jpg" % currentFileNumber)
if GPIO.event_detected(PRINT_PIN):
# Print current file
printImageFile("pz%05d.jpg" % currentFileNumber)
if GPIO.event_detected(SHOT_PIN):
# Exit review
break
print("Main loop has exited")
| pierre-muth/polapi-zero | dev/polapizero_04.py | Python | mit | 7,080 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class PriceSheetResult(Resource):
"""An pricesheet resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar tags: Resource tags.
:vartype tags: dict[str, str]
:ivar pricesheets: Price sheet
:vartype pricesheets:
list[~azure.mgmt.consumption.models.PriceSheetProperties]
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'tags': {'readonly': True},
'pricesheets': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'pricesheets': {'key': 'properties.pricesheets', 'type': '[PriceSheetProperties]'},
'next_link': {'key': 'properties.nextLink', 'type': 'str'},
}
def __init__(self):
super(PriceSheetResult, self).__init__()
self.pricesheets = None
self.next_link = None
| lmazuel/azure-sdk-for-python | azure-mgmt-consumption/azure/mgmt/consumption/models/price_sheet_result.py | Python | mit | 1,885 |
from setuptools import setup, find_packages
setup(
name='opencontracts',
version='0.1',
description="Contract viewing news application",
long_description='',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
keywords='sql data procurement',
author='Friedrich Lindenberg',
author_email='[email protected]',
url='http://pudo.org',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages=[],
include_package_data=False,
zip_safe=False,
install_requires=[
],
tests_require=[],
entry_points=\
""" """,
)
| pudo-attic/opencontracts.eu | setup.py | Python | mit | 771 |
# -*- coding: utf-8 -*-
"""
Classes for controling machine learning processes
"""
import numpy as np
import math
import matplotlib.pyplot as plt
import csv
class TrainingPlot:
"""
Creating live plot during training
REUIRES notebook backend: %matplotlib notebook
@TODO Migrate to Tensorboard
"""
train_loss = []
train_acc = []
valid_acc = []
test_iter = 0
loss_iter = 0
interval = 0
ax1 = None
ax2 = None
fig = None
def __init__(self, steps, test_itr, loss_itr):
self.test_iter = test_itr
self.loss_iter = loss_itr
self.interval = steps
self.fig, self.ax1 = plt.subplots()
self.ax2 = self.ax1.twinx()
self.ax1.set_autoscaley_on(True)
plt.ion()
self._update_plot()
# Description
self.ax1.set_xlabel('Iteration')
self.ax1.set_ylabel('Train Loss')
self.ax2.set_ylabel('Valid. Accuracy')
# Axes limits
self.ax1.set_ylim([0,10])
def _update_plot(self):
self.fig.canvas.draw()
def update_loss(self, loss_train, index):
self.train_loss.append(loss_train)
if len(self.train_loss) == 1:
self.ax1.set_ylim([0, min(10, math.ceil(loss_train))])
self.ax1.plot(self.loss_iter * np.arange(len(self.train_loss)),
self.train_loss, 'b', linewidth=1.0)
self._update_plot()
def update_acc(self, acc_val, acc_train, index):
self.valid_acc.append(acc_val)
self.train_acc.append(acc_train)
self.ax2.plot(self.test_iter * np.arange(len(self.valid_acc)),
self.valid_acc, 'r', linewidth=1.0)
self.ax2.plot(self.test_iter * np.arange(len(self.train_acc)),
self.train_acc, 'g', linewidth=1.0)
self.ax2.set_title('Valid. Accuracy: {:.4f}'.format(self.valid_acc[-1]))
self._update_plot()
class DataSet:
"""Class for training data and feeding train function."""
images = None
labels = None
length = 0
index = 0
def __init__(self, img, lbl):
self.images = img
self.labels = lbl
self.length = len(img)
self.index = 0
def next_batch(self, batch_size):
"""Return the next batch from the data set."""
start = self.index
self.index += batch_size
if self.index > self.length:
# Shuffle the data
perm = np.arange(self.length)
np.random.shuffle(perm)
self.images = self.images[perm]
self.labels = self.labels[perm]
# Start next epoch
start = 0
self.index = batch_size
end = self.index
return self.images[start:end], self.labels[start:end]
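# Illustrative usage (hypothetical toy arrays):
#   data = DataSet(np.zeros((10, 784)), np.arange(10))
#   batch_images, batch_labels = data.next_batch(4)  # cycles and reshuffles after each epoch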
| Breta01/handwriting-ocr | src/ocr/mlhelpers.py | Python | mit | 2,767 |
import cx_Oracle
import time
def callback(message):
print "Message type:", message.type
print "Message database name:", message.dbname
print "Message tables:"
for table in message.tables:
print "--> Table Name:", table.name
print "--> Table Operation:", table.operation
if table.rows is not None:
print "--> Table Rows:"
for row in table.rows:
print "--> --> Row RowId:", row.rowid
print "--> --> Row Operation:", row.operation
print "-" * 60
print "=" * 60
connection = cx_Oracle.Connection("scott/tiger", events = True)
sub = connection.subscribe(callback = callback, timeout = 1800, rowids = True)
print "Subscription:", sub
print "--> Connection:", sub.connection
print "--> Callback:", sub.callback
print "--> Namespace:", sub.namespace
print "--> Protocol:", sub.protocol
print "--> Timeout:", sub.timeout
print "--> Operations:", sub.operations
print "--> Rowids?:", sub.rowids
sub.registerquery("select * from TestExecuteMany")
while True:
print "Waiting for notifications...."
time.sleep(5000)
| marhar/sqlminus | examples/change-notify/oracle-changenotify.py | Python | mit | 1,135 |
#!/usr/bin/python
'''This is the job management script.
It is fairly universal code for all different types of simulations.'''
import os
import time
import subprocess
import logging
import inlist
PROCLIST = []
logging.basicConfig(filename="./project.log",
level=logging.INFO,
format="\n[job.daemon][%(asctime)s][%(levelname)s]:\n%(message)s",
datefmt='%y/%m/%d %H:%M:%S')
INFILEPATH = os.path.abspath("./infile")
OUTFILEPATH = os.path.abspath("./outfile")
class JobAtom():
'''atom class of all jobs'''
def __init__(self, pid, bundle):
self.pid = pid
if type(bundle.execute) is str:
self.execute = bundle.execute
elif type(bundle.execute) is list:
self.execute = " ".join(bundle.execute)
else:
print "Jobs.execute should be a list or str!"
self.is_cluster = bundle.is_cluster
self.auto_run = bundle.auto_run
self.keep_cpu_busy = bundle.keep_cpu_busy
self.name = bundle.name
self.input_str = bundle.to_string(pid)
return
def get_job_name(self):
'''get the name of JobAtom object'''
return "Job({}).{}".format(self.name, self.pid)
def construct_job_queue(to_do):
'''construct JobAtom queue from Job class '''
logging.info("Constructing the job queue...")
job_queue = []
pid = 0
if os.path.exists(INFILEPATH):
filelist = [int(elem.split('_')[-1]) for elem in os.listdir(INFILEPATH)]
filelist.sort()
if len(filelist)!=0:
pid = filelist[-1]
# each bundle is an instance of the Job class
for bundle in [elem for elem in to_do if elem.keep_cpu_busy == False]:
# run the jobs that don't use much CPU first
for _ in range(0, bundle.duplicate):
pid += 1
job_queue.append(JobAtom(pid, bundle))
for bundle in [elem for elem in to_do if elem.keep_cpu_busy == True]:
# then run the jobs that do keep the CPU busy
for _ in range(0, bundle.duplicate):
pid += 1
job_queue.append(JobAtom(pid, bundle))
logging.info("Constructed the job queue!")
return job_queue
def check_status():
''' Check the status of submitted jobs;
if a job is done, remove it from PROCLIST so a new job can be submitted.'''
for elemp in PROCLIST:
if elemp[0].poll() is not None:
PROCLIST.remove(elemp)
logging.info(elemp[1].get_job_name()+" is ended!")
print elemp[1].get_job_name()+" is ended..."
return
def submit_job(job_atom):
'''submit a job to cluster or your own computer'''
#topstr = "top -p"
if(os.path.exists(INFILEPATH) is not True):
os.system("mkdir "+INFILEPATH)
if(os.path.exists(OUTFILEPATH) is not True):
os.system("mkdir "+OUTFILEPATH)
homedir = os.getcwd()
jobname = homedir.split("/")[-1]+"."+job_atom.name
infile = INFILEPATH+"/_in_{}_{}".format(job_atom.name, job_atom.pid)
outfile = OUTFILEPATH+"/out_{}_{}.txt".format(
job_atom.name, job_atom.pid)
jobfile = os.path.abspath("./_job_{}_{}.sh".format(
job_atom.name, job_atom.pid))
#write input file into ./infile folder
f_job = open(infile,"w")
f_job.write(job_atom.input_str)
f_job.close()
f_allinput = open(os.path.abspath("./all_input.log"),"a")
f_allinput.write("Job ID: {}, Job name: {}\n".format(
job_atom.pid, job_atom.name))
f_allinput.write(job_atom.input_str)
f_allinput.close()
if job_atom.is_cluster:
fjob = open(jobfile,"w")
fjob.write("#!/bin/sh\n"+"#PBS -N "+jobname+"\n")
fjob.write("#PBS -o "+homedir+"/Output\n")
fjob.write("#PBS -e "+homedir+"/Error\n")
fjob.write("cd "+homedir+"\n")
fjob.write("echo "+infile+" | "+job_atom.execute)
fjob.close()
if job_atom.auto_run:
os.system("qsub "+jobfile)
os.system("rm "+jobfile)
logging.info(job_atom.get_job_name()+" submitted!")
else:
print "You have to run "+job_atom.get_job_name()+" by yourself!"
else:
if job_atom.auto_run:
shellstr = "echo "+infile+" | "+job_atom.execute+" >> "+outfile
proc = subprocess.Popen(shellstr, shell=True)
if job_atom.keep_cpu_busy:
PROCLIST.append((proc, job_atom))
#topstr = topstr+str(proc.pid)+","
#f_temp = open("./tools/mytop.sh","w")
#f_temp.write(topstr[:-1])
#f_temp.close()
#os.system("chmod +x ./tools/mytop.sh")
#if job_atom.keep_cpu_busy is False:
#f_temp = open("./tools/kill_loop.sh","a")
#f_temp.write("kill -9 "+str(proc.pid))
#f_temp.close()
#os.system("chmod +x ./tools/kill_loop.sh")
logging.info(job_atom.get_job_name()+" is started...")
logging.info("input:\n"+job_atom.input_str)
logging.info("PID:{}\n".format(proc.pid))
print job_atom.get_job_name()+" is started..."
else:
print "You have to run "+job_atom.get_job_name()+" by yourself!"
return
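# The `inlist` module imported above is project-specific; based on how it is used in this file,
# it is expected to provide roughly the following (illustrative values only):
#   CPU = 4             # max number of concurrent local processes
#   SLEEP = 60          # seconds between status checks
#   TO_DO = [job, ...]  # Job-like objects with .name, .execute, .duplicate, .is_cluster,
#                       # .auto_run, .keep_cpu_busy and .to_string(pid)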
if __name__ == "__main__":
logging.info("Jobs manage daemon is started...")
JOBQUEUE = construct_job_queue(inlist.TO_DO)
#print [e.keep_cpu_busy for e in JOBQUEUE]
i = 0
for ATOM in JOBQUEUE:
while ATOM.is_cluster is False and len(PROCLIST)>=inlist.CPU:
check_status()
time.sleep(inlist.SLEEP)
submit_job(ATOM)
check_status()
while len(PROCLIST)!=0:
time.sleep(inlist.SLEEP)
check_status()
logging.info("Jobs manage daemon is ended...")
| kunyuan/job_manage | job_manager.py | Python | mit | 5,695 |
# -*- coding: utf-8 -*-
"""User models."""
import datetime as dt
from flask_login import UserMixin
from members.database import Column, Model, SurrogatePK, db, reference_col, relationship
from members.extensions import bcrypt
class Role(SurrogatePK, Model):
"""A role for a user."""
__tablename__ = 'roles'
name = Column(db.String(80), unique=True, nullable=False)
user_id = reference_col('users', nullable=True)
user = relationship('User', backref='roles')
def __init__(self, name, **kwargs):
"""Create instance."""
db.Model.__init__(self, name=name, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
return '<Role({name})>'.format(name=self.name)
class User(UserMixin, SurrogatePK, Model):
"""A user of the app."""
__tablename__ = 'users'
username = Column(db.String(80), unique=True, nullable=False)
email = Column(db.String(80), unique=True, nullable=False)
#: The hashed password
password = Column(db.String(128), nullable=True)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
first_name = Column(db.String(30), nullable=True)
last_name = Column(db.String(30), nullable=True)
active = Column(db.Boolean(), default=False)
is_admin = Column(db.Boolean(), default=False)
def __init__(self, username, email, password=None, **kwargs):
"""Create instance."""
db.Model.__init__(self, username=username, email=email, **kwargs)
if password:
self.set_password(password)
else:
self.password = None
def set_password(self, password):
"""Set password."""
self.password = bcrypt.generate_password_hash(password)
def check_password(self, value):
"""Check password."""
return bcrypt.check_password_hash(self.password, value)
@property
def full_name(self):
"""Full user name."""
return '{0} {1}'.format(self.first_name, self.last_name)
def __repr__(self):
"""Represent instance as a unique string."""
return '<User({username!r})>'.format(username=self.username)
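# Illustrative usage (hypothetical credentials; requires an app context with the db and bcrypt
# extensions initialised):
#   user = User(username='demo', email='demo@example.com', password='s3cret')
#   user.check_password('s3cret')  # -> True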
| iandees/membership | members/user/models.py | Python | mit | 2,166 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from config import DefaultConfig
from flask import Flask
from flask import jsonify
from flask import render_template
from flask import request
from helpers import load_module_instances
import json
import logging
import logging.config
import os
def get_app(config=None, **kwargs):
"""Creates a Flask application"""
app = Flask(__name__, **kwargs)
configure_app(app, config)
configure_extensions(app)
configure_blueprints(app)
configure_logging(app)
configure_error_handlers(app)
return app
def configure_app(app, config):
app.config.from_object(DefaultConfig)
if config is not None:
app.config.from_object(config)
if 'CONFIG_ENVVAR' in app.config:
app.config.from_envvar(app.config['CONFIG_ENVVAR'])
if 'TEMPLATE_DIR' in app.config:
app.template_folder = app.config['TEMPLATE_DIR']
if 'STATIC_DIR' in app.config:
app.static_folder = app.config['STATIC_DIR']
def configure_blueprints(app):
blueprints = app.config['BLUEPRINTS'] if 'BLUEPRINTS' in app.config else []
for blueprint in blueprints:
app.register_blueprint(blueprint)
def configure_extensions(app):
for ext in load_module_instances('extensions'):
if getattr(ext, 'init_app', False):
ext.init_app(app)
def configure_error_handlers(app):
@app.errorhandler(401)
def unauthorized(error):
if request.is_xhr:
return jsonify(error="Unauthorized")
return render_template("errors/unauthorized.html", error=error), 401
@app.errorhandler(404)
def not_found(error):
if request.is_xhr:
return jsonify(error="Page not found")
return render_template("errors/not_found.html", error=error), 404
@app.errorhandler(500)
def internal_server_error(error):
if request.is_xhr:
return jsonify(error="An error has occurred")
return render_template("errors/internal_server_error.html", error=error), 500
def configure_logging(app):
log_ini = os.path.join(app.root_path, app.config['LOG_INI'])
if os.path.exists(log_ini):
with open(log_ini, 'rt') as f:
log_config = json.load(f)
logging.config.dictConfig(log_config)
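# A minimal sketch of the JSON structure expected in the LOG_INI file consumed by
# configure_logging() above (assumed shape for logging.config.dictConfig; the
# project's real config file may differ):
_EXAMPLE_LOG_CONFIG = {
    "version": 1,
    "handlers": {"console": {"class": "logging.StreamHandler"}},
    "root": {"level": "INFO", "handlers": ["console"]},
}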
# vim: filetype=python
| ryankanno/flask-skeleton | flask_skeleton/app.py | Python | mit | 2,313 |
"""
The logic and workings behind the stow and unstow sub-commands
"""
from collections import Counter
import pathlib
from dploy import actions
from dploy import utils
from dploy import error
from dploy import main
from dploy import ignore
# pylint: disable=too-few-public-methods
class AbstractBaseStow(main.AbstractBaseSubCommand):
"""
Abstract Base class that contains the shared logic for all of the stow
commands
"""
# pylint: disable=too-many-arguments
def __init__(self, subcmd, source, dest, is_silent, is_dry_run, ignore_patterns):
self.is_unfolding = False
super().__init__(subcmd, source, dest, is_silent, is_dry_run, ignore_patterns)
def _is_valid_input(self, sources, dest):
"""
Check to see if the input is valid
"""
return StowInput(self.errors, self.subcmd).is_valid(sources, dest)
def get_directory_contents(self, directory):
"""
Get the contents of a directory while handling errors that may occur
"""
contents = []
try:
contents = utils.get_directory_contents(directory)
except PermissionError:
self.errors.add(error.PermissionDenied(self.subcmd, directory))
except FileNotFoundError:
self.errors.add(error.NoSuchFileOrDirectory(self.subcmd, directory))
except NotADirectoryError:
self.errors.add(error.NoSuchDirectory(self.subcmd, directory))
return contents
def _are_same_file(self, source, dest):
"""
Abstract method that handles the case when the source and dest are the
same file when collecting actions
"""
pass
def _are_directories(self, source, dest):
"""
        Abstract method that handles the case when the source and dest are
        both directories when collecting actions
"""
pass
def _are_other(self, source, dest):
"""
        Abstract method that handles all other cases, i.e. what to do when no
        other particular condition is true
"""
pass
def _collect_actions_existing_dest(self, source, dest):
"""
_collect_actions() helper to collect required actions to perform a stow
command when the destination already exists
"""
if utils.is_same_file(dest, source):
if dest.is_symlink() or self.is_unfolding:
self._are_same_file(source, dest)
else:
self.errors.add(error.SourceIsSameAsDest(self.subcmd, dest.parent))
        elif dest.is_dir() and source.is_dir():
self._are_directories(source, dest)
else:
self.errors.add(error.ConflictsWithExistingFile(self.subcmd, source, dest))
def _collect_actions(self, source, dest):
"""
Concrete method to collect required actions to perform a stow
sub-command
"""
if self.ignore.should_ignore(source):
self.ignore.ignore(source)
return
if not StowInput(self.errors, self.subcmd).is_valid_collection_input(
source, dest
):
return
sources = self.get_directory_contents(source)
for subsources in sources:
if self.ignore.should_ignore(subsources):
self.ignore.ignore(subsources)
continue
dest_path = dest / pathlib.Path(subsources.name)
does_dest_path_exist = False
try:
does_dest_path_exist = dest_path.exists()
except PermissionError:
self.errors.add(error.PermissionDenied(self.subcmd, dest_path))
return
if does_dest_path_exist:
self._collect_actions_existing_dest(subsources, dest_path)
elif dest_path.is_symlink():
self.errors.add(
error.ConflictsWithExistingLink(self.subcmd, subsources, dest_path)
)
elif not dest_path.parent.exists() and not self.is_unfolding:
self.errors.add(error.NoSuchDirectory(self.subcmd, dest_path.parent))
else:
self._are_other(subsources, dest_path)
# pylint: disable=too-few-public-methods
class Stow(AbstractBaseStow):
"""
Concrete class implementation of the stow sub-command
"""
# pylint: disable=too-many-arguments
def __init__(
self, source, dest, is_silent=True, is_dry_run=False, ignore_patterns=None
):
super().__init__("stow", source, dest, is_silent, is_dry_run, ignore_patterns)
def _unfold(self, source, dest):
"""
        Unfold a destination directory
"""
self.is_unfolding = True
self.actions.add(actions.UnLink(self.subcmd, dest))
self.actions.add(actions.MakeDirectory(self.subcmd, dest))
self._collect_actions(source, dest)
self.is_unfolding = False
def _handle_duplicate_actions(self):
"""
check for symbolic link actions that would cause conflicting symbolic
links to the same destination. Also check for actions that conflict but
are candidates for unfolding instead.
"""
has_conflicts = False
dupes = self.actions.get_duplicates()
if len(dupes) == 0:
return
for indices in dupes:
first_action = self.actions.actions[indices[0]]
remaining_actions = [self.actions.actions[i] for i in indices[1:]]
if first_action.source.is_dir():
self._unfold(first_action.source, first_action.dest)
for action in remaining_actions:
self.is_unfolding = True
self._collect_actions(action.source, action.dest)
self.is_unfolding = False
else:
duplicate_action_sources = [
str(self.actions.actions[i].source) for i in indices
]
self.errors.add(
error.ConflictsWithAnotherSource(
self.subcmd, duplicate_action_sources
)
)
has_conflicts = True
if has_conflicts:
return
# remove duplicates
for indices in dupes:
for index in reversed(indices[1:]):
del self.actions.actions[index]
self._handle_duplicate_actions()
def _check_for_other_actions(self):
self._handle_duplicate_actions()
def _are_same_file(self, source, dest):
"""
what to do if source and dest are the same files
"""
if self.is_unfolding:
self.actions.add(actions.SymbolicLink(self.subcmd, source, dest))
else:
self.actions.add(actions.AlreadyLinked(self.subcmd, source, dest))
def _are_directories(self, source, dest):
if dest.is_symlink():
self._unfold(dest.resolve(), dest)
self._collect_actions(source, dest)
def _are_other(self, source, dest):
self.actions.add(actions.SymbolicLink(self.subcmd, source, dest))
# pylint: disable=too-few-public-methods
class UnStow(AbstractBaseStow):
"""
Concrete class implementation of the unstow sub-command
"""
# pylint: disable=too-many-arguments
def __init__(
self, source, dest, is_silent=True, is_dry_run=False, ignore_patterns=None
):
super().__init__("unstow", source, dest, is_silent, is_dry_run, ignore_patterns)
def _are_same_file(self, source, dest):
"""
what to do if source and dest are the same files
"""
self.actions.add(actions.UnLink(self.subcmd, dest))
def _are_directories(self, source, dest):
self._collect_actions(source, dest)
def _are_other(self, source, dest):
self.actions.add(actions.AlreadyUnlinked(self.subcmd, source, dest))
def _check_for_other_actions(self):
self._collect_folding_actions()
def _collect_folding_actions(self):
"""
find candidates for folding i.e. when a directory contains symlinks to
files that all share the same parent directory
"""
for parent in self.actions.get_unlink_target_parents():
items = utils.get_directory_contents(parent)
other_links_parents = []
other_links = []
source_parent = None
is_normal_files_detected = False
for item in items:
if item not in self.actions.get_unlink_targets():
does_item_exist = False
try:
does_item_exist = item.exists()
except PermissionError:
self.errors.add(error.PermissionDenied(self.subcmd, item))
return
if does_item_exist and item.is_symlink():
source_parent = item.resolve().parent
other_links_parents.append(item.resolve().parent)
other_links.append(item)
else:
is_normal_files_detected = True
break
if not is_normal_files_detected:
other_links_parent_count = len(Counter(other_links_parents))
if other_links_parent_count == 1:
assert source_parent is not None
if utils.is_same_files(
utils.get_directory_contents(source_parent), other_links
):
self._fold(source_parent, parent)
elif other_links_parent_count == 0 and not utils.is_same_file(
parent, self.dest_input
):
self.actions.add(actions.RemoveDirectory(self.subcmd, parent))
def _fold(self, source, dest):
"""
add the required actions for folding
"""
self._collect_actions(source, dest)
self.actions.add(actions.RemoveDirectory(self.subcmd, dest))
self.actions.add(actions.SymbolicLink(self.subcmd, source, dest))
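    # Folding illustration (hypothetical paths, not taken from this package): if
    # dest/pkg ends up containing only symlinks a -> source/pkg/a and b -> source/pkg/b,
    # _collect_folding_actions() calls _fold(source/pkg, dest/pkg), which removes the
    # directory and replaces it with a single symlink dest/pkg -> source/pkg.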
class StowInput(main.Input):
"""
    Input validator for the stow sub-commands
"""
def _is_valid_dest(self, dest):
"""
        Check if the dest argument is valid
"""
result = True
if not dest.is_dir():
self.errors.add(error.NoSuchDirectoryToSubcmdInto(self.subcmd, dest))
result = False
else:
if not utils.is_directory_writable(dest):
self.errors.add(
error.InsufficientPermissionsToSubcmdTo(self.subcmd, dest)
)
result = False
if not utils.is_directory_readable(dest):
self.errors.add(
error.InsufficientPermissionsToSubcmdTo(self.subcmd, dest)
)
result = False
if not utils.is_directory_executable(dest):
self.errors.add(
error.InsufficientPermissionsToSubcmdTo(self.subcmd, dest)
)
result = False
return result
def _is_valid_source(self, source):
"""
Check if the source argument is valid
"""
result = True
if not source.is_dir():
self.errors.add(error.NoSuchDirectory(self.subcmd, source))
result = False
else:
if not utils.is_directory_readable(source):
self.errors.add(
error.InsufficientPermissionsToSubcmdFrom(self.subcmd, source)
)
result = False
if not utils.is_directory_executable(source):
self.errors.add(
error.InsufficientPermissionsToSubcmdFrom(self.subcmd, source)
)
result = False
return result
def is_valid_collection_input(self, source, dest):
"""
Helper to validate the source and dest parameters passed to
_collect_actions()
"""
result = True
if not self._is_valid_source(source):
result = False
if dest.exists():
if not self._is_valid_dest(dest):
result = False
return result
# pylint: disable=too-few-public-methods
class Clean(main.AbstractBaseSubCommand):
"""
    Concrete class implementation of the clean sub-command
"""
# pylint: disable=too-many-arguments
def __init__(self, source, dest, is_silent, is_dry_run, ignore_patterns):
self.source = [pathlib.Path(s) for s in source]
self.dest = pathlib.Path(dest)
self.ignore_patterns = ignore_patterns
super().__init__("clean", source, dest, is_silent, is_dry_run, ignore_patterns)
def _is_valid_input(self, sources, dest):
"""
Check to see if the input is valid
"""
return StowInput(self.errors, self.subcmd).is_valid(sources, dest)
def get_directory_contents(self, directory):
"""
Get the contents of a directory while handling errors that may occur
"""
contents = []
try:
contents = utils.get_directory_contents(directory)
except PermissionError:
self.errors.add(error.PermissionDenied(self.subcmd, directory))
except FileNotFoundError:
self.errors.add(error.NoSuchFileOrDirectory(self.subcmd, directory))
except NotADirectoryError:
self.errors.add(error.NoSuchDirectory(self.subcmd, directory))
return contents
def _collect_clean_actions(self, source, source_names, dest):
subdests = utils.get_directory_contents(dest)
for subdest in subdests:
if subdest.is_symlink():
link_target = utils.readlink(subdest, absolute_target=True)
if not link_target.exists() and not source_names.isdisjoint(
set(link_target.parents)
):
self.actions.add(actions.UnLink(self.subcmd, subdest))
elif subdest.is_dir():
self._collect_clean_actions(source, source_names, subdest)
def _check_for_other_actions(self):
"""
        Concrete method to collect the actions required to perform the clean
        sub-command
"""
valid_files = []
for a_file in self.source:
self.ignore = ignore.Ignore(self.ignore_patterns, a_file)
if self.ignore.should_ignore(a_file):
self.ignore.ignore(a_file)
continue
else:
valid_files.append(a_file)
if not StowInput(self.errors, self.subcmd).is_valid_collection_input(
a_file, self.dest
):
return
# NOTE: an option to make clean more aggressive is to change f.name to
        # f.parent; this could be a good --option
files_names = [utils.get_absolute_path(f.name) for f in valid_files]
files_names_set = set(files_names)
self._collect_clean_actions(valid_files, files_names_set, self.dest)
| arecarn/dploy | dploy/stowcmd.py | Python | mit | 15,248 |
from lxml import etree
from dataset.data import Data
from strategy.parser.default_parser import DefaultParser
import re
class PostParser(DefaultParser):
def __init__(self):
self.super = super(PostParser, self)
self.super.__init__()
def get_url_value(self):
        v = re.search(r'\d*\.html$', self._url).group()
return int(re.search(r'^\d*', v).group())
def clear_space(self, content):
# r, n = re.subn(r'>\\n\s*<', '><', content)
r = re.compile(r'\n\s*')
return r.sub('', content)
def _get_new_data(self, dom):
new_datas = []
try:
nodes = dom.xpath("//article//h1//a/text()")
if len(nodes) < 1:
return None
author = nodes[0].encode('utf8')
nodes = dom.xpath("//article//h1/text()")
            if len(nodes) < 2:
return None
title = nodes[1].encode('utf8')
content = ''
nodes = dom.xpath("//article//section")
for node in nodes:
content = content + etree.tostring(node, encoding='utf8')
data = Data('post')
data.set('title', title)
data.set('author', self.clear_space(author))
data.set('content', self.clear_space(content))
data.set('id', self.get_url_value())
new_datas.append(data)
except Exception as e:
print(str(e))
return new_datas
def _get_next_page_url(self, dom):
return None
| yfsoftcom/crawler | strategy/ssq_wilead_com/post_parser.py | Python | mit | 1,529 |
from flask import Blueprint, request, flash, render_template, redirect
from flask.ext.login import login_required, current_user
from mongoengine import DoesNotExist
from datetime import datetime
from recover.EmailClient import email_patient_invite
from recover.fitbit import Fitbit
from recover.forms.AddPatientForm import AddPatientForm
from recover.models import PatientInvite, Patient, PatientConfig
from recover.patient_data import PatientData
patient_add = Blueprint('patient_add', __name__)
@patient_add.route('/dashboard/add', methods=['GET', 'POST'])
@login_required
def add_patient():
"""
Allows a physician to enter in patient information and for an
invitational email to be sent to the patient. The PatientInvite
is then stored so as to not spam the patient and for logging purposes.
"""
form = AddPatientForm(request.form)
if request.method == 'POST':
try:
# First, ensure this physician is not already monitoring this patient
if current_user.patients.objects(email=form.email.data).count() > 0:
flash("Error: You are already monitoring this patient. Please specify a new patient.", 'warning')
return render_template('add-patient.html', form=form)
except AttributeError:
pass # Patients table is empty, so no need to check
if form.validate():
# Don't allow physician to send duplicate invite requests to a new patient
if PatientInvite.objects(email=form.email.data, inviting_physician=current_user.to_dbref()).count() > 0:
flash("Warning: You have already invited this patient to join.", 'warning')
return redirect('/dashboard')
full_name = "{} {}".format(form.first_name.data, form.last_name.data)
# Generate a PatientInvite object and send an invite email to given patient
invite = PatientInvite(inviting_physician=current_user.to_dbref(), accepted=False, email=form.email.data,
first_name=form.first_name.data, last_name=form.last_name.data)
invite.save()
email_sent = email_patient_invite(email=form.email.data, name=full_name,
invite_id=str(invite.id), physician_name=current_user.full_name)
if email_sent:
success_msg = "{name} has been emailed an invitation, and will appear" \
" on your Dashboard after granting access.".format(name=full_name)
flash(success_msg, 'success')
else:
flash('We were unable to send the patient invitation. Please ensure the address provided is correct.',
'warning')
return redirect('/dashboard')
else:
flash("Invalid input: please see the suggestions below.", 'warning')
return render_template('add-patient.html', form=form)
@patient_add.route('/authorize', methods=['GET'])
def authorize_new_patient():
"""
This is called once a patient clicks the confirmation link in their email.
Redirect a new patient to Fitbit to authorize our app via OAuth 2 Authorization Grant Flow, and
then receives access token (for making API calls on the user's behalf) as well as a
refresh token (for obtaining a new access token when the access token expires).
"""
access_code = request.args.get('code')
invite_id = request.args.get('state')
api = Fitbit()
# Require users to be invited by a physician. Only occurs when they receive an email w/ invite_id (aka "state").
if invite_id is None:
flash("Error: an authorization token is required. Please use the confirmation link that was emailed to you.",
'warning')
return redirect('/')
if access_code is None:
auth_url = api.get_authorization_uri(invite_id)
return redirect(auth_url)
try:
token = api.get_access_token(access_code)
except Exception as e:
flash(e.message, 'warning')
return redirect('/')
try:
response, token = api.api_call(token, '/1/user/-/profile.json')
except Exception as e:
flash(e.message, 'warning')
return redirect('/')
# fullname = response['user']['fullName'] # Using name entered by Physician on invite instead.
fitbit_id = response['user']['encodedId']
try:
invite = PatientInvite.objects.get(id=invite_id)
if not invite.accepted:
invite.accepted = True
PatientInvite.delete(invite)
try:
existing_patient = Patient.objects.get(slug=fitbit_id)
existing_patient.token = token['access_token']
existing_patient.refresh = token['refresh_token']
existing_patient.save()
new_patient = existing_patient
except DoesNotExist:
new_patient = Patient(slug=fitbit_id, first_name=invite.first_name, last_name=invite.last_name,
email=invite.email, token=token['access_token'], refresh=token['refresh_token'],
date_joined=datetime.now(), health_data_per_day=[], date_last_data_fetch='')
new_patient.save()
# By default, get 5 days worth of data for the brand new patient
new_patient_data = PatientData(new_patient)
PatientData.get_heart_rate_data_for_x_days(new_patient_data, 5)
PatientData.get_activity_data_for_x_days(new_patient_data, 5)
# Now save this patient to the inviting physician's list of patients.
inviting_physician = invite.inviting_physician
inviting_physician.patients.append(new_patient)
inviting_physician.save()
# Now attach a generic config to Patient for the Physician to edit later
min_hr_default = {'value': 50, 'window': 15} # BPS / minute
max_hr_default = {'value': 110, 'window': 15} # BPS / minute
min_steps_default = {'value': 500, 'window': 60 * 12} # steps / 12 hr
max_steps_default = {'value': 5000, 'window': 60} # steps / 1 hr
config = PatientConfig(minHR=min_hr_default, maxHR=max_hr_default, minSteps=min_steps_default,
maxSteps=max_steps_default, patient=new_patient)
inviting_physician.patient_config.append(config)
inviting_physician.save()
return redirect('/patient-registered?name=' + invite.first_name)
else:
flash("It appears you've already confirmed this account.", 'warning')
return redirect('/')
except DoesNotExist as e:
flash(e.__str__(), 'warning')
return redirect('/')
@patient_add.route('/patient-registered')
def thanks():
"""
Page to offer gratitude to a patient who just confirmed their account.
"""
fname = request.args.get('name')
return render_template('patient-registered.html', name=fname)
| SLU-Capstone/Recover | recover/views/new_patient.py | Python | mit | 7,097 |
#!/usr/bin/env python2
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2016 The Electrum developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import TYPE_CHECKING, Dict, List, Union, Tuple, Sequence, Optional, Type, Iterable, Any
from functools import partial
from electrum.plugin import (BasePlugin, hook, Device, DeviceMgr, DeviceInfo,
assert_runs_in_hwd_thread, runs_in_hwd_thread)
from electrum.i18n import _
from electrum.bitcoin import is_address, opcodes
from electrum.util import bfh, versiontuple, UserFacingException
from electrum.transaction import TxOutput, Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum.bip32 import BIP32Node
from electrum.storage import get_derivation_used_for_hw_device_encryption
from electrum.keystore import Xpub, Hardware_KeyStore
if TYPE_CHECKING:
import threading
from electrum.wallet import Abstract_Wallet
from electrum.base_wizard import BaseWizard
class HW_PluginBase(BasePlugin):
keystore_class: Type['Hardware_KeyStore']
libraries_available: bool
# define supported library versions: minimum_library <= x < maximum_library
minimum_library = (0,)
maximum_library = (float('inf'),)
DEVICE_IDS: Iterable[Any]
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.device = self.keystore_class.device
self.keystore_class.plugin = self
self._ignore_outdated_fw = False
def is_enabled(self):
return True
def device_manager(self) -> 'DeviceMgr':
return self.parent.device_manager
def create_device_from_hid_enumeration(self, d: dict, *, product_key) -> Optional['Device']:
# Older versions of hid don't provide interface_number
interface_number = d.get('interface_number', -1)
usage_page = d['usage_page']
id_ = d['serial_number']
if len(id_) == 0:
id_ = str(d['path'])
id_ += str(interface_number) + str(usage_page)
device = Device(path=d['path'],
interface_number=interface_number,
id_=id_,
product_key=product_key,
usage_page=usage_page,
transport_ui_string='hid')
return device
@hook
def close_wallet(self, wallet: 'Abstract_Wallet'):
for keystore in wallet.get_keystores():
if isinstance(keystore, self.keystore_class):
self.device_manager().unpair_xpub(keystore.xpub)
if keystore.thread:
keystore.thread.stop()
def scan_and_create_client_for_device(self, *, device_id: str, wizard: 'BaseWizard') -> 'HardwareClientBase':
devmgr = self.device_manager()
client = wizard.run_task_without_blocking_gui(
task=partial(devmgr.client_by_id, device_id))
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
client.handler = self.create_handler(wizard)
return client
def setup_device(self, device_info: DeviceInfo, wizard: 'BaseWizard', purpose) -> 'HardwareClientBase':
"""Called when creating a new wallet or when using the device to decrypt
an existing wallet. Select the device to use. If the device is
uninitialized, go through the initialization process.
Runs in GUI thread.
"""
raise NotImplementedError()
def get_client(self, keystore: 'Hardware_KeyStore', force_pair: bool = True, *,
devices: Sequence['Device'] = None,
allow_user_interaction: bool = True) -> Optional['HardwareClientBase']:
devmgr = self.device_manager()
handler = keystore.handler
client = devmgr.client_for_keystore(self, handler, keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
return client
def show_address(self, wallet: 'Abstract_Wallet', address, keystore: 'Hardware_KeyStore' = None):
pass # implemented in child classes
def show_address_helper(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not is_address(address):
keystore.handler.show_error(_('Invalid Bitcoin Address'))
return False
if not wallet.is_mine(address):
keystore.handler.show_error(_('Address not in wallet.'))
return False
if type(keystore) != self.keystore_class:
return False
return True
def get_library_version(self) -> str:
"""Returns the version of the 3rd party python library
for the hw wallet. For example '0.9.0'
Returns 'unknown' if library is found but cannot determine version.
Raises 'ImportError' if library is not found.
Raises 'LibraryFoundButUnusable' if found but there was some problem (includes version num).
"""
raise NotImplementedError()
def check_libraries_available(self) -> bool:
def version_str(t):
return ".".join(str(i) for i in t)
try:
# this might raise ImportError or LibraryFoundButUnusable
library_version = self.get_library_version()
# if no exception so far, we might still raise LibraryFoundButUnusable
if (library_version == 'unknown'
or versiontuple(library_version) < self.minimum_library
or versiontuple(library_version) >= self.maximum_library):
raise LibraryFoundButUnusable(library_version=library_version)
except ImportError:
return False
except LibraryFoundButUnusable as e:
library_version = e.library_version
self.libraries_available_message = (
_("Library version for '{}' is incompatible.").format(self.name)
+ '\nInstalled: {}, Needed: {} <= x < {}'
.format(library_version, version_str(self.minimum_library), version_str(self.maximum_library)))
self.logger.warning(self.libraries_available_message)
return False
return True
def get_library_not_available_message(self) -> str:
if hasattr(self, 'libraries_available_message'):
message = self.libraries_available_message
else:
message = _("Missing libraries for {}.").format(self.name)
message += '\n' + _("Make sure you install it with python3")
return message
def set_ignore_outdated_fw(self):
self._ignore_outdated_fw = True
def is_outdated_fw_ignored(self) -> bool:
return self._ignore_outdated_fw
def create_client(self, device: 'Device',
handler: Optional['HardwareHandlerBase']) -> Optional['HardwareClientBase']:
raise NotImplementedError()
def get_xpub(self, device_id: str, derivation: str, xtype, wizard: 'BaseWizard') -> str:
raise NotImplementedError()
def create_handler(self, window) -> 'HardwareHandlerBase':
# note: in Qt GUI, 'window' is either an ElectrumWindow or an InstallWizard
raise NotImplementedError()
def can_recognize_device(self, device: Device) -> bool:
"""Whether the plugin thinks it can handle the given device.
Used for filtering all connected hardware devices to only those by this vendor.
"""
return device.product_key in self.DEVICE_IDS
class HardwareClientBase:
handler = None # type: Optional['HardwareHandlerBase']
def __init__(self, *, plugin: 'HW_PluginBase'):
assert_runs_in_hwd_thread()
self.plugin = plugin
def device_manager(self) -> 'DeviceMgr':
return self.plugin.device_manager()
def is_pairable(self) -> bool:
raise NotImplementedError()
def close(self):
raise NotImplementedError()
def timeout(self, cutoff) -> None:
pass
def is_initialized(self) -> bool:
"""True if initialized, False if wiped."""
raise NotImplementedError()
def label(self) -> Optional[str]:
"""The name given by the user to the device.
Note: labels are shown to the user to help distinguish their devices,
and they are also used as a fallback to distinguish devices programmatically.
So ideally, different devices would have different labels.
"""
# When returning a constant here (i.e. not implementing the method in the way
# it is supposed to work), make sure the return value is in electrum.plugin.PLACEHOLDER_HW_CLIENT_LABELS
return " "
def get_soft_device_id(self) -> Optional[str]:
"""An id-like string that is used to distinguish devices programmatically.
This is a long term id for the device, that does not change between reconnects.
This method should not prompt the user, i.e. no user interaction, as it is used
during USB device enumeration (called for each unpaired device).
Stored in the wallet file.
"""
# This functionality is optional. If not implemented just return None:
return None
def has_usable_connection_with_device(self) -> bool:
raise NotImplementedError()
def get_xpub(self, bip32_path: str, xtype) -> str:
raise NotImplementedError()
@runs_in_hwd_thread
def request_root_fingerprint_from_device(self) -> str:
# digitalbitbox (at least) does not reveal xpubs corresponding to unhardened paths
# so ask for a direct child, and read out fingerprint from that:
child_of_root_xpub = self.get_xpub("m/0'", xtype='standard')
root_fingerprint = BIP32Node.from_xkey(child_of_root_xpub).fingerprint.hex().lower()
return root_fingerprint
@runs_in_hwd_thread
def get_password_for_storage_encryption(self) -> str:
# note: using a different password based on hw device type is highly undesirable! see #5993
derivation = get_derivation_used_for_hw_device_encryption()
xpub = self.get_xpub(derivation, "standard")
password = Xpub.get_pubkey_from_xpub(xpub, ()).hex()
return password
def device_model_name(self) -> Optional[str]:
"""Return the name of the model of this device, which might be displayed in the UI.
E.g. for Trezor, "Trezor One" or "Trezor T".
"""
return None
def manipulate_keystore_dict_during_wizard_setup(self, d: dict) -> None:
"""Called during wallet creation in the wizard, before the keystore
is constructed for the first time. 'd' is the dict that will be
passed to the keystore constructor.
"""
pass
class HardwareHandlerBase:
"""An interface between the GUI and the device handling logic for handling I/O."""
win = None
device: str
def get_wallet(self) -> Optional['Abstract_Wallet']:
if self.win is not None:
if hasattr(self.win, 'wallet'):
return self.win.wallet
def get_gui_thread(self) -> Optional['threading.Thread']:
if self.win is not None:
if hasattr(self.win, 'gui_thread'):
return self.win.gui_thread
def update_status(self, paired: bool) -> None:
pass
def query_choice(self, msg: str, labels: Sequence[str]) -> Optional[int]:
raise NotImplementedError()
def yes_no_question(self, msg: str) -> bool:
raise NotImplementedError()
def show_message(self, msg: str, on_cancel=None) -> None:
raise NotImplementedError()
def show_error(self, msg: str, blocking: bool = False) -> None:
raise NotImplementedError()
def finished(self) -> None:
pass
def get_word(self, msg: str) -> str:
raise NotImplementedError()
def get_passphrase(self, msg: str, confirm: bool) -> Optional[str]:
raise NotImplementedError()
def get_pin(self, msg: str, *, show_strength: bool = True) -> str:
raise NotImplementedError()
def is_any_tx_output_on_change_branch(tx: PartialTransaction) -> bool:
return any([txout.is_change for txout in tx.outputs()])
def trezor_validate_op_return_output_and_get_data(output: TxOutput) -> bytes:
validate_op_return_output(output)
script = output.scriptpubkey
if not (script[0] == opcodes.OP_RETURN and
script[1] == len(script) - 2 and script[1] <= 75):
raise UserFacingException(_("Only OP_RETURN scripts, with one constant push, are supported."))
return script[2:]
def validate_op_return_output(output: TxOutput, *, max_size: int = None) -> None:
script = output.scriptpubkey
if script[0] != opcodes.OP_RETURN:
raise UserFacingException(_("Only OP_RETURN scripts are supported."))
if max_size is not None and len(script) > max_size:
raise UserFacingException(_("OP_RETURN payload too large." + "\n"
+ f"(scriptpubkey size {len(script)} > {max_size})"))
if output.value != 0:
raise UserFacingException(_("Amount for OP_RETURN output must be zero."))
def get_xpubs_and_der_suffixes_from_txinout(tx: PartialTransaction,
txinout: Union[PartialTxInput, PartialTxOutput]) \
-> List[Tuple[str, List[int]]]:
xfp_to_xpub_map = {xfp: bip32node for bip32node, (xfp, path)
in tx.xpubs.items()} # type: Dict[bytes, BIP32Node]
xfps = [txinout.bip32_paths[pubkey][0] for pubkey in txinout.pubkeys]
try:
xpubs = [xfp_to_xpub_map[xfp] for xfp in xfps]
except KeyError as e:
raise Exception(f"Partial transaction is missing global xpub for "
f"fingerprint ({str(e)}) in input/output") from e
xpubs_and_deriv_suffixes = []
for bip32node, pubkey in zip(xpubs, txinout.pubkeys):
xfp, path = txinout.bip32_paths[pubkey]
der_suffix = list(path)[bip32node.depth:]
xpubs_and_deriv_suffixes.append((bip32node.to_xpub(), der_suffix))
return xpubs_and_deriv_suffixes
def only_hook_if_libraries_available(func):
# note: this decorator must wrap @hook, not the other way around,
# as 'hook' uses the name of the function it wraps
def wrapper(self: 'HW_PluginBase', *args, **kwargs):
if not self.libraries_available: return None
return func(self, *args, **kwargs)
return wrapper
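# Illustrative stacking order (a sketch inferred from the note above, not code taken
# from this file): in a concrete plugin the decorator goes on the outside, e.g.
#
#   @only_hook_if_libraries_available
#   @hook
#   def load_wallet(self, wallet, window):
#       ...
#
# so that @hook still sees the original function and registers it under its real name.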
class LibraryFoundButUnusable(Exception):
def __init__(self, library_version='unknown'):
self.library_version = library_version
class OutdatedHwFirmwareException(UserFacingException):
def text_ignore_old_fw_and_continue(self) -> str:
suffix = (_("The firmware of your hardware device is too old. "
"If possible, you should upgrade it. "
"You can ignore this error and try to continue, however things are likely to break.") + "\n\n" +
_("Ignore and continue?"))
if str(self):
return str(self) + "\n\n" + suffix
else:
return suffix
| spesmilo/electrum | electrum/plugins/hw_wallet/plugin.py | Python | mit | 16,377 |
from app import Handler
from entities.post import Post
from handlers.auth import Auth
class IndexHandler(Handler):
def get(self):
offset = 0
if self.request.get("page"):
offset = int(self.request.get("page")) - 1
current_page = offset + 1
next_page = current_page + 1
previous_page = current_page - 1
current_user = self.current_user
posts_query = Post.list(10, str(offset*10))
# iterates over posts
posts = []
        for post in posts_query:
            if Auth.is_logged_in(self.request):
                post.liked = False
                for like in post.post_like:
                    if like.author.key().id() == current_user.key().id():
                        post.liked = True
                        break
            posts.append(post)
self.render("index.html",
posts=posts,
page=current_page,
next_page=next_page,
previous_page=previous_page)
| diegopettengill/multiuserblog | handlers/main.py | Python | mit | 1,045 |
# -*- coding: utf-8 -*-
from .models import Author
def get_or_create_author(id, name, screen_name):
authors = Author.objects.filter(id=id)
if not authors.exists():
author = Author.objects.create(id=id, name=name, screen_name=screen_name)
else:
author = authors.first()
return author
| kk6/onedraw | onedraw/authors/api.py | Python | mit | 317 |
"version"
__version__ = '0.2.0'
| abe-winter/pg13-py | pg13/version.py | Python | mit | 32 |
"""
Tests of slot related functions.
"""
import time
import pkg_resources
from nose.tools import eq_ as eq
from nose.tools import with_setup
from pageobjects.slot import find_slot_figure
from pageobjects.util import NotifierPage
from util import main, setup_server, teardown_server, generate, \
startup, closeout
@with_setup(setup_server, teardown_server)
def test_generator():
for _test, browser in generate(__name__):
yield _test, browser
def _test_list_slot(browser):
project_dict, workspace_page = startup(browser)
# replace the 'top' assembly driver with a DOEdriver
# (this additionally verifies that an issue with DOEdriver slots is fixed)
workspace_page.add_library_item_to_dataflow('openmdao.main.assembly.Assembly', 'top')
workspace_page.replace_driver('top', 'DOEdriver')
# open the object editor dialog for the driver
driver = workspace_page.get_dataflow_figure('driver', 'top')
editor = driver.editor_page(False)
editor.move(-200, 200)
editor.show_slots()
# get the generator slot figure
generator_slot = find_slot_figure(workspace_page, 'DOEgenerator',
prefix='top.driver')
# check that slot is not filled
eq(False, generator_slot.filled,
"generator slot is showing as filled when it should not be")
# drop a FullFactorial onto the generator slot
workspace_page.fill_slot_from_library(generator_slot, 'FullFactorial')
# refresh and check that slot is now filled
time.sleep(1.0)
generator_slot = find_slot_figure(workspace_page, 'DOEgenerator',
prefix='top.driver')
eq(True, generator_slot.filled,
"FullFactorial did not drop into generator slot")
editor.close()
# open the object editor dialog for the assembly
assembly = workspace_page.get_dataflow_figure('top', '')
editor = assembly.editor_page(False)
editor.move(-200, 200)
editor.show_slots()
# get the recorders slot figure
recorders_slot = find_slot_figure(workspace_page, 'recorders', prefix='top')
# check that slot is not filled
eq(False, recorders_slot.filled,
"recorders slot is showing as filled when it should not be")
# set center pane to workflow to make sure workflow doesn't steal drops
workspace_page('workflow_tab').click()
# drop a DumpCaseRecorder onto the recorders slot
recorders_slot = find_slot_figure(workspace_page, 'recorders', prefix='top')
workspace_page.fill_slot_from_library(recorders_slot, 'DumpCaseRecorder')
# refresh and check that there is now a DumpCaseRecorder in the first slot
time.sleep(1.0) # give it a second to update the figure
recorders_slot = find_slot_figure(workspace_page, 'recorders[0]',
prefix='top')
eq(True, recorders_slot.filled,
"DumpCaseRecorder did not drop into recorders slot")
klass = recorders_slot.root.find_elements_by_css_selector('text#klass')
eq(klass[0].text, 'DumpCaseRecorder',
"Filled slot element should show the correct type (DumpCaseRecorder)")
# check that there is still an unfilled slot in the list
recorders_slot = find_slot_figure(workspace_page, 'recorders', prefix='top')
eq(False, recorders_slot.filled,
"recorders slot is not showing an unfilled slot")
klass = recorders_slot.root.find_elements_by_css_selector('text#klass')
eq(klass[0].text, 'ICaseRecorder',
"Unfilled slot element should show the correct klass (ICaseRecorder)")
# drop another CaseRecorder onto the recorders slot
workspace_page.fill_slot_from_library(recorders_slot, 'CSVCaseRecorder')
time.sleep(1.0) # give it a second to update the figure
recorders_slot = find_slot_figure(workspace_page, 'recorders[1]',
prefix='top')
eq(True, recorders_slot.filled,
"CSVCaseRecorder did not drop into recorders slot")
# check that there is still an unfilled slot in the list
recorders_slot = find_slot_figure(workspace_page, 'recorders', prefix='top')
eq(False, recorders_slot.filled,
"recorders slot is not showing an unfilled slot")
klass = recorders_slot.root.find_elements_by_css_selector('text#klass')
eq(klass[0].text, 'ICaseRecorder',
"Unfilled slot element should show the correct klass (ICaseRecorder)")
# remove the DumpCaseRecorder from the first slot in the list
recorders_slot = find_slot_figure(workspace_page, 'recorders[0]',
prefix='top')
recorders_slot.remove()
# check that the CSVCaseRecorder is now in the first filled slot
time.sleep(1.0) # give it a second to update the figure
recorders_slot = find_slot_figure(workspace_page, 'recorders[0]',
prefix='top')
eq(True, recorders_slot.filled,
"CSVCaseRecorder did not drop into recorders slot")
klass = recorders_slot.root.find_elements_by_css_selector('text#klass')
eq(klass[0].text, 'CSVCaseRecorder',
"Filled slot element should show the correct klass (CSVCaseRecorder)")
# Clean up.
editor.close()
closeout(project_dict, workspace_page)
def _test_slot_subclass(browser):
# test that a slot will accept subclasses
project_dict, workspace_page = startup(browser)
file_path = pkg_resources.resource_filename('openmdao.gui.test.functional',
'files/slot_test.py')
workspace_page.add_file(file_path)
name = workspace_page.put_element_on_grid("AutoAssemb")
aa = workspace_page.get_dataflow_figure(name)
editor = aa.editor_page(double_click=False)
editor.move(-200, 200)
inputs = editor.get_inputs()
expected = [
['', 'input', '0', '', ''],
['', 'directory', '', '',
'If non-blank, the directory to execute in.'],
['', 'excludes', '[]', '',
'Patterns for variables to exclude from the recorders'
' (only valid at top level).'],
['', 'force_fd', 'False', '',
'If True, always finite difference this component.'],
['', 'includes', "['*']", '',
'Patterns for variables to include in the recorders'
' (only valid at top level).'],
['', 'missing_deriv_policy', 'assume_zero', '',
'Determines behavior when some analytical derivatives are provided but'
' some are missing'],
]
for i, row in enumerate(inputs.value):
eq(row, expected[i])
inputs[0][2] = "10"
aa.run()
message = NotifierPage.wait(workspace_page)
eq(message, 'Run complete: success')
outputs = editor.get_outputs()
expected = [
['', 'output', '80', '', ''],
['', 'derivative_exec_count', '0', '',
"Number of times this Component's derivative function has been executed."],
['', 'exec_count', '1', '',
'Number of times this Component has been executed.'],
['', 'itername', '', '', 'Iteration coordinates.'],
]
for i, row in enumerate(outputs.value):
eq(row, expected[i])
editor.show_slots()
recorders_slot = find_slot_figure(workspace_page, 'd2', prefix=name)
workspace_page.fill_slot_from_library(recorders_slot, 'Dummy2')
aa.run()
message = NotifierPage.wait(workspace_page)
eq(message, 'Run complete: success')
outputs = editor.get_outputs()
expected = [
['', 'output', '160', '', ''],
['', 'derivative_exec_count', '0', '',
"Number of times this Component's derivative function has been executed."],
['', 'exec_count', '2', '',
'Number of times this Component has been executed.'],
['', 'itername', '', '', 'Iteration coordinates.'],
]
for i, row in enumerate(outputs.value):
eq(row, expected[i])
# Clean up.
closeout(project_dict, workspace_page)
def _test_dict_slot(browser):
project_dict, workspace_page = startup(browser)
workspace_page.add_library_item_to_dataflow('openmdao.main.assembly.Assembly', 'top')
workspace_page.show_dataflow('top')
# load in some files needed for the tests
file1_path = pkg_resources.resource_filename('openmdao.examples.simple',
'paraboloid.py')
workspace_page.add_file(file1_path)
file2_path = pkg_resources.resource_filename('openmdao.examples.enginedesign',
'transmission.py')
workspace_page.add_file(file2_path)
vt_comp_path = pkg_resources.resource_filename('openmdao.gui.test.functional',
'files/simple_vartree_component.py')
workspace_page.add_file(vt_comp_path)
workspace_page.show_dataflow('top')
args = ["('ratio1', 'ratio2')", "('torque_ratio', 'RPM')"]
workspace_page.add_library_item_to_dataflow(
'openmdao.lib.components.metamodel.MetaModel', 'mm', args=args)
mm_figure = workspace_page.get_dataflow_figure('mm', 'top')
mm_editor = mm_figure.editor_page()
mm_editor.show_slots()
mm_editor.move(-500, 0) # need clear LOS to the library
# see what happens when you change the model
# model_slot = find_slot_figure(workspace_page, 'model', prefix='top.mm')
# workspace_page.fill_slot_from_library(model_slot, 'Transmission')
# There should two surrogates slots
time.sleep(1.0) # give it a bit to update the figure
surrogates = browser.find_elements_by_xpath(
"//div[starts-with( @id,'SlotFigure-top-mm-surrogates')]")
eq(2, len(surrogates),
"There should be two surrogates in the surrogates dict but "
"%d surrogate(s) are being displayed" % len(surrogates))
# They should all be empty: RPM and torque_ratio
for surrogate in surrogates:
eq(False, ("filled" in surrogate.get_attribute('class')),
"Surrogate should not be filled")
# Fill the torque_ratio surrogate slot with FloatKrigingSurrogate
surrogate_slot = find_slot_figure(workspace_page, 'torque_ratio',
prefix='top.mm.surrogates')
workspace_page.fill_slot_from_library(surrogate_slot, 'KrigingSurrogate')
# One should be filled now
time.sleep(2) # give it a bit to update the figure
num_surrogates_filled = 0
surrogates = browser.find_elements_by_xpath(
"//div[starts-with( @id,'SlotFigure-top-mm-surrogates')]")
for surrogate in surrogates:
if "filled" in surrogate.get_attribute('class'):
num_surrogates_filled += 1
eq(1, num_surrogates_filled,
"Exactly one surrogate slot should be filled but "
"%d are filled" % num_surrogates_filled)
# Fill the RPM surrogate slot with FloatKrigingSurrogate
surrogate_slot = find_slot_figure(workspace_page, 'RPM',
prefix='top.mm.surrogates')
workspace_page.fill_slot_from_library(surrogate_slot,
'FloatKrigingSurrogate')
# Two should be filled now
time.sleep(2) # give it a bit to update the figure
num_surrogates_filled = 0
surrogates = browser.find_elements_by_xpath(
"//div[starts-with( @id,'SlotFigure-top-mm-surrogates')]")
for surrogate in surrogates:
if "filled" in surrogate.get_attribute('class'):
num_surrogates_filled += 1
eq(2, num_surrogates_filled,
"Exactly two surrogate slot should be filled but "
"%d are filled" % num_surrogates_filled)
# Vartrees currently not supported in the new Metamodel -- KTM
## Test with components that have variable trees
## test vartree with metamodel
# model_slot = find_slot_figure(workspace_page, 'model', prefix='top.mm')
# workspace_page.fill_slot_from_library(model_slot, 'InandOutTree')
## There should 3 surrogates slots
# time.sleep(2) # give it a bit to update the figure
# surrogates = browser.find_elements_by_xpath("//div[starts-with( @id,'SlotFigure-top-mm-surrogates')]")
# eq(3, len(surrogates),
# "There should be three surrogates in the surrogates dict but %d surrogate(s) are being displayed" % len(surrogates))
## They should all be empty
# for surrogate in surrogates:
# eq(False, ("filled" in surrogate.get_attribute('class')), "Surrogate should not be filled")
## Fill the outs.x surrogate slot with FloatKrigingSurrogate
# surrogate_slot = find_slot_figure(workspace_page, 'outs.x', prefix='top.mm.surrogates')
# workspace_page.fill_slot_from_library(surrogate_slot, 'FloatKrigingSurrogate')
## One should be filled now
# time.sleep(2) # give it a bit to update the figure
# num_surrogates_filled = 0
# surrogates = browser.find_elements_by_xpath("//div[starts-with( @id,'SlotFigure-top-mm-surrogates')]")
# for surrogate in surrogates:
# if "filled" in surrogate.get_attribute('class'):
# num_surrogates_filled += 1
# eq(1, num_surrogates_filled,
# "Exactly one surrogate slot should be filled but %d are filled" % num_surrogates_filled)
## Fill the zzz surrogate slot with KrigingSurrogate
# surrogate_slot = find_slot_figure(workspace_page, 'zzz', prefix='top.mm.surrogates')
# workspace_page.fill_slot_from_library(surrogate_slot, 'KrigingSurrogate')
## Two should be filled now
# time.sleep(2) # give it a bit to update the figure
# num_surrogates_filled = 0
# surrogates = browser.find_elements_by_xpath("//div[starts-with( @id,'SlotFigure-top-mm-surrogates')]")
# for surrogate in surrogates:
# if "filled" in surrogate.get_attribute('class'):
# num_surrogates_filled += 1
# eq(2, num_surrogates_filled,
# "Exactly two surrogate slot should be filled but %d are filled" % num_surrogates_filled)
## Fill the outs.y surrogate slot with ResponseSurface
# surrogate_slot = find_slot_figure(workspace_page, 'outs.y', prefix='top.mm.surrogates')
# workspace_page.fill_slot_from_library(surrogate_slot, 'ResponseSurface', [1, 1])
## Three should be filled now
# time.sleep(2) # give it a bit to update the figure
# num_surrogates_filled = 0
# surrogates = browser.find_elements_by_xpath("//div[starts-with( @id,'SlotFigure-top-mm-surrogates')]")
# for surrogate in surrogates:
# if "filled" in surrogate.get_attribute('class'):
# num_surrogates_filled += 1
# eq(3, num_surrogates_filled,
# "Exactly three surrogate slots should be filled but %d are filled" % num_surrogates_filled)
# Clean up.
closeout(project_dict, workspace_page)
if __name__ == '__main__':
main()
| DailyActie/Surrogate-Model | 01-codes/OpenMDAO-Framework-dev/openmdao.gui/src/openmdao/gui/test/functional/test_slots.py | Python | mit | 14,693 |
#!/usr/bin/env python
# encoding: utf-8
"""
x_models.py
Functions to check if the X linked models are followed.
Created by Måns Magnusson on 2013-02-12.
Copyright (c) 2013 __MoonsoInc__. All rights reserved.
"""
from __future__ import print_function
import logging
logger = logging.getLogger(__name__)
def check_X_recessive(variant, family, strict=False):
"""
    Check if the variant follows the X-linked recessive (XR) pattern of
inheritance in this family.
A variant is following the XR pattern if:
Healthy:
- Can not be homozygote alternative
- If no call we can not exclude XR
- Males can not have variant at all. This is added since sometimes males
get called as heterozygotes but this should not be possible since
they only have one copy of the X chromosome.
if strict:
- Have to be homozygote reference(if male) or heterozygote(if female).
- No call will return False
Affected:
- Have to be homozygote alternative(or heterozygote if male).
        - If no call we can not exclude XR
if strict:
- Have to be homozygote alternative(or heterozygote if male)
- No call will return false
No affection status:
We can not tell if variant follows the model or not.
Args:
variant: variant dictionary.
family: A Family object with the individuals
        strict: A boolean that tells if strict analysis should be performed.
Return:
        bool: depending on if the model is followed in these individuals
"""
for individual in family.individuals:
# Get the genotype for this variant for this individual
individual_genotype = variant['genotypes'][individual]
if strict:
if not individual_genotype.genotyped:
return False
# The case where the individual is healthy
if family.individuals[individual].healthy:
# If individual is healthy and homozygote alternative
# the variant can not be deleterious:
if individual_genotype.genotyped:
if individual_genotype.homo_alt:
return False
# If individual is male it can not have the variant at all
if family.individuals[individual].sex == 1:
if individual_genotype.has_variant:
return False
# The case when the individual is sick
elif family.individuals[individual].affected:
#If the individual is sick and homozygote ref it can not be x-recessive
if individual_genotype.genotyped:
if individual_genotype.homo_ref:
return False
            # Women have to be hom alt to be sick (almost always carriers)
elif family.individuals[individual].sex == 2:
if not individual_genotype.homo_alt:
return False
return True
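# Worked illustration (hypothetical trio, not data from this package): for an XR
# variant, a healthy father must carry no alternative allele at all (he has a single
# X), a healthy mother may be a heterozygous carrier, and an affected daughter must
# be homozygous alternative; any other fully genotyped combination makes
# check_X_recessive() return False.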
def check_X_dominant(variant, family, strict=False):
"""
    Check if the variant follows the X-linked dominant (XD) pattern of
inheritance in this family.
A variant is following the XD pattern if:
Healthy:
- Can not be homozygote alternative
- Healthy females can be heterozygotes. This is possible since there
are several documented diseases where only one allele at a time is
expressed during development.
        - If no call we can not exclude XD
if strict:
- Have to be homozygote reference (or heterozygote womens).
- No call will return False
Affected:
- Have to be heterozygote.
        - If no call we can not exclude XD
if strict:
- Have to be heterozygote or homozygote(for males)
- No call will return false
No affection status:
We can not tell if variant follows the model or not.
Args:
variant: variant dictionary.
family: A family object with the individuals
        strict: A boolean that tells if strict analysis should be performed.
Return:
        bool: depending on if the model is followed in these individuals
"""
for individual in family.individuals:
# Get the genotype for this variant for this individual
individual_genotype = variant['genotypes'][individual]
if strict:
if not individual_genotype.genotyped:
return False
# The case where the individual is healthy
if family.individuals[individual].healthy:
# Healthy womans can be carriers but not homozygote:
if individual_genotype.genotyped:
if family.individuals[individual].sex == 2:
if individual_genotype.homo_alt:
return False
# Males can not carry the variant:
elif family.individuals[individual].sex == 1:
if individual_genotype.has_variant:
return False
# The case when the individual is sick
elif family.individuals[individual].affected:
# If the individual is sick and homozygote ref it
# can not be x-linked-dominant
if individual_genotype.genotyped:
if individual_genotype.homo_ref:
return False
return True
| moonso/genmod | genmod/annotate_models/models/x_models.py | Python | mit | 5,496 |
from django.shortcuts import render
# Create your views here.
# coding:utf-8
import json
import os
import time
from django.conf import settings
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
@csrf_exempt
def ueditor_index(request):
"""
    UEditor entry point; dispatches config and upload actions
:param request:
:return:
"""
action = request.GET.get('action', '')
if action == 'config':
response_dict = settings.UEDITER_SETTING
response_json = json.dumps(response_dict, ensure_ascii=False)
return HttpResponse(response_json, content_type="application/javascript")
elif action == 'uploadfile':
return HttpResponse(ueditor_FileUp(request))
elif action == 'uploadimage':
return HttpResponse(ueditor_ImgUp(request))
else:
return HttpResponseBadRequest()
def format_file_name(name):
'''
    Remove URL keyword characters from the name
'''
URL_KEY_WORDS = ['#', '?', '/', '&', '.', '%']
for key in URL_KEY_WORDS:
name_list = name.split(key)
name = ''.join(name_list)
return name
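# Example (illustrative): format_file_name('report.2024?final') -> 'report2024final';
# every character listed in URL_KEY_WORDS is stripped from the name.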
def my_upload_file(file_obj, file_type='pic'):
"""
    Upload a file
:param file_obj:
:param file_type:
:return:
"""
if file_obj:
filename = file_obj.name
# filename = file_obj.name.decode('utf-8', 'ignore')
filename_list = filename.split('.')
        file_postfix = filename_list[-1]  # file extension (suffix)
# if file_postfix in ['txt', 'sql']:
filename_list_clean = filename_list[:-1]
file_name = ''.join(filename_list_clean) + str(int(time.time() * 1000))
file_name = format_file_name(file_name)
# else:
# file_name = str(uuid.uuid1())
sub_folder = time.strftime("%Y%m")
upload_folder = os.path.join(settings.MEDIA_ROOT, 'upload', sub_folder)
if not os.path.exists(upload_folder):
os.makedirs(upload_folder)
absolute_path = os.path.join(upload_folder, file_name) + '.%s' % file_postfix
if file_postfix.lower() in (
"sql", "jpg", "jpeg", "bmp", "gif", "png", "xls", "xlsx", "rar", "doc", "docx", "zip", "pdf", "txt", "swf",
"wmv"):
destination = open(absolute_path, 'wb+')
for chunk in file_obj.chunks():
destination.write(chunk)
destination.close()
            # if file_type == 'pic':  # not cropping images for now
# if file_postfix.lower() in ('jpg', 'jpeg', 'bmp', 'gif', 'png'):
# im = Image.open(absolute_path)
# im.thumbnail((720, 720))
# im.save(absolute_path)
real_url = os.path.join('/media/', 'upload', sub_folder, file_name) + '.%s' % file_postfix
response_dict = {'original': filename, 'url': real_url, 'title': 'source_file_tile', 'state': 'SUCCESS',
'msg': ''}
else:
response_dict = {'original': filename, 'url': '', 'title': 'source_file_tile', 'state': 'FAIL',
'msg': 'invalid file format'}
else:
response_dict = {'original': '', 'url': '', 'title': 'source_file_tile', 'state': 'FAIL',
'msg': 'invalid file obj'}
return json.dumps(response_dict)
@csrf_exempt
def ueditor_ImgUp(request):
"""
    Upload an image
:param request:
:return:
"""
fileObj = request.FILES.get('upfile', None)
response = HttpResponse()
my_response = my_upload_file(fileObj, 'pic')
response.write(my_response)
return response
@csrf_exempt
def ueditor_FileUp(request):
""" 上传文件 """
fileObj = request.FILES.get('upfile', None)
response = HttpResponse()
my_response = my_upload_file(fileObj, 'file')
response.write(my_response)
return response
| blackholll/loonblog | apps/ueditor/views.py | Python | mit | 3,838 |
from functools import partial
from mongoengine.queryset.queryset import QuerySet
__all__ = ("queryset_manager", "QuerySetManager")
class QuerySetManager:
"""
The default QuerySet Manager.
Custom QuerySet Manager functions can extend this class and users can
add extra queryset functionality. Any custom manager methods must accept a
:class:`~mongoengine.Document` class as its first argument, and a
:class:`~mongoengine.queryset.QuerySet` as its second argument.
The method function should return a :class:`~mongoengine.queryset.QuerySet`
, probably the same one that was passed in, but modified in some way.
"""
get_queryset = None
default = QuerySet
def __init__(self, queryset_func=None):
if queryset_func:
self.get_queryset = queryset_func
def __get__(self, instance, owner):
"""Descriptor for instantiating a new QuerySet object when
Document.objects is accessed.
"""
if instance is not None:
# Document object being used rather than a document class
return self
# owner is the document that contains the QuerySetManager
queryset_class = owner._meta.get("queryset_class", self.default)
queryset = queryset_class(owner, owner._get_collection())
if self.get_queryset:
arg_count = self.get_queryset.__code__.co_argcount
if arg_count == 1:
queryset = self.get_queryset(queryset)
elif arg_count == 2:
queryset = self.get_queryset(owner, queryset)
else:
queryset = partial(self.get_queryset, owner, queryset)
return queryset
def queryset_manager(func):
"""Decorator that allows you to define custom QuerySet managers on
:class:`~mongoengine.Document` classes. The manager must be a function that
accepts a :class:`~mongoengine.Document` class as its first argument, and a
:class:`~mongoengine.queryset.QuerySet` as its second argument. The method
function should return a :class:`~mongoengine.queryset.QuerySet`, probably
the same one that was passed in, but modified in some way.
"""
return QuerySetManager(func)
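# Hedged usage sketch (illustrative only; BlogPost and BooleanField are hypothetical
# names, not part of this module):
#
#   class BlogPost(Document):
#       published = BooleanField(default=False)
#
#       @queryset_manager
#       def live_posts(doc_cls, queryset):
#           # doc_cls is the owning Document class, queryset its default QuerySet
#           return queryset.filter(published=True)
#
#   BlogPost.live_posts  # -> QuerySet limited to published posts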
| MongoEngine/mongoengine | mongoengine/queryset/manager.py | Python | mit | 2,222 |
from tastypie.api import Api
from django.conf.urls import patterns, include, url
'''
v1_api = Api(api_name='v1')
v1_api.register(DeviceResource())
'''
from machine.views import ControllerListView
urlpatterns = patterns('',
# ...more URLconf bits here...
# Then add:
# url(r'^api/', include(v1_api.urls)),
# url(r'^add_machine/', add_machine, name='add-machine'),
# url(r'^add_machine_through_phone/', add_machine_through_phone, name='add-machine-through-phone'),
url(r'^get/(?P<data_id>\d+)/command/$', 'machine.views.commandData'),
url(r'^get/(?P<data_id>\d+)/command/submit/$', 'machine.views.submitData'),
url(r'^get/(?P<data_id>\d+)/master_command/$', 'machine.views.masterCommandData'),
url(r'^get/(?P<data_id>\d+)/master_command/submit/$', 'machine.views.masterSubmitData'),
url(r'^get/(?P<data_id>\d+)/user_interface/$', 'machine.views.userInterface'),
url(r'^get/(?P<data_id>\d+)/user_interface/tank-setting/$', 'machine.views.userInterfaceTankSetting'),
url(r'^get/(?P<data_id>\d+)/user_interface/controller-setting/$', 'machine.views.userInterfaceControllerSetting'),
url(r'^get/(?P<data_id>\d+)/user_interface/display-setting/$', 'machine.views.userInterfaceDisplaySetting'),
url(r'^get/(?P<data_id>\d+)/user_interface/timer-setting/$', 'machine.views.userInterfaceTimerSetting'),
url(r'^get/(?P<data_id>\d+)/user_interface/submit/$', 'machine.views.submitDataUIHome'),
url(r'^get/(?P<data_id>\d+)/user_interface/tank-setting/submit/$', 'machine.views.submitDataUITank'),
url(r'^get/(?P<data_id>\d+)/user_interface/controller-setting/submit/$', 'machine.views.submitDataUIController'),
url(r'^get/(?P<data_id>\d+)/user_interface/display-setting/submit/$', 'machine.views.submitDataUIDisplay'),
url(r'^get/(?P<data_id>\d+)/user_interface/timer-setting/submit/$', 'machine.views.submitDataUITimer'),
url(r'^get/(?P<data_id>\d+)/$', 'machine.views.deviceData'),
url(r'^get/(?P<data_id>\d+)/value/$', 'machine.views.changeViews'),
url(r'^toggle/$', 'machine.views.list_change', name='device-list'),
url(r'^$', ControllerListView.as_view(), name='device-list'),
# url(r'^send_data_to_server/$', 'machine.views.collect_data_from_device'),
url(r'^start_server/$', 'machine.views.activate_server'),
url(r'^log/$', 'machine.views.generate_log'),
# url(r'^send_data_to_device/$', 'machine.views.prepare_string_to_activate_command'),
) | nikaashpuri/aquabrim_project | machine/urls.py | Python | mit | 2,401 |
#!/usr/bin/env python3
import logging
import os
import json
from gi.repository import Gtk, Gst, GLib
from lib.config import Config
from lib.uibuilder import UiBuilder
import lib.connection as Connection
# time interval to re-fetch queue timings
TIMER_RESOLUTION = 1.0
class QueuesWindowController():
    def __init__(self, uibuilder):
self.log = logging.getLogger('QueuesWindowController')
# get related widgets
self.win = uibuilder.get_check_widget('queue_win')
self.store = uibuilder.get_check_widget('queue_store')
self.scroll = uibuilder.get_check_widget('queue_scroll')
# remember row iterators
self.iterators = None
# listen for queue_report from voctocore
Connection.on('queue_report', self.on_queue_report)
def on_queue_report(self, *report):
        # read string report into dictionary
report = json.loads("".join(report))
# check if this is the initial report
if not self.iterators:
# append report as rows to treeview store and remember row iterators
self.iterators = dict()
for queue, time in report.items():
self.iterators[queue] = self.store.append((queue, time / Gst.SECOND))
else:
# just update values of second column
for queue, time in report.items():
self.store.set_value(self.iterators[queue], 1, time / Gst.SECOND)
    def show(self, visible=True):
# check if widget is getting visible
if visible:
# request queue timing report from voctocore
Connection.send('report_queues')
# schedule repetition
GLib.timeout_add(TIMER_RESOLUTION * 1000, self.do_timeout)
# do the boring stuff
self.win.show()
else:
self.win.hide()
def do_timeout(self):
# re-request queue report
Connection.send('report_queues')
# repeat if widget is visible
return self.win.is_visible()
| voc/voctomix | voctogui/lib/queues.py | Python | mit | 2,030 |
'''
Using decorators with syntactic sugar!!
'''
def decorator(fnc_arg):
def wrapper_fnc():
# perform some operations before calling fnc_arg
fnc_arg()
# perform other operations after calling fnc_arg
return wrapper_fnc
def fnc_arg():
    # original function
pass
# using decorators
decorated_fnc = decorator(fnc_arg)
decorated_fnc()
# OR
@decorator
def other_fnc():
# some other fnc
pass
other_fnc() # this call is equivalent to decorated version of other_fnc
'''
Working with arguments
'''
def sleep_decorator(function):
"""
Limits how fast the function is
called.
"""
def wrapper(*args, **kwargs):
# sleep(2)
print(args)
print(len(args))
print(args[1])
return function(*args, **kwargs)
return wrapper
@sleep_decorator
def print_number(num, num2):
return num
print(print_number(222, 888))
# for num in range(1, 6):
# print(print_number(num)) | smenon8/AlgDataStruct_practice | practice_problems/Decorators.py | Python | mit | 945 |
from flask import render_template, request, flash, redirect, url_for, make_response, session, g
from wwag import app, database
from datetime import datetime
@app.before_request
def before_request():
if '/static/' in request.path:
return
if session.get('user_type') == "Player":
player = database.execute("SELECT * FROM Player WHERE PlayerID = %s;", (session.get('user_id'),)).fetchone()
g.current_player = player
if player and player['Type'] == "S":
g.current_staff = player
elif session.get('user_type') == "Viewer":
viewer = fetch_viewer(session.get('user_id'))
if viewer:
if viewer['ViewerType'] == "P" and viewer['RenewalDate'] < datetime.today().date():
database.execute("DELETE FROM PremiumViewer WHERE ViewerID = %s;", (viewer['ViewerID'],))
database.execute("UPDATE Viewer SET ViewerType = 'R' WHERE ViewerID = %s;", (viewer['ViewerID'],))
database.commit()
viewer = fetch_viewer(session.get('user_id'))
g.current_viewer = viewer
g.open_order = open_order()
def fetch_viewer(viewer_id):
return database.execute("SELECT * FROM Viewer LEFT JOIN PremiumViewer ON Viewer.ViewerID = PremiumViewer.ViewerID LEFT JOIN CrowdFundingViewer on Viewer.ViewerID = CrowdFundingViewer.ViewerID WHERE Viewer.ViewerID = %s;", (viewer_id,)).fetchone()
def open_order():
viewer_id = g.current_viewer['ViewerID']
open_order = database.execute("SELECT * FROM ViewerOrder WHERE ViewerID = %s AND ViewedStatus = 'Open';", (viewer_id,)).fetchone()
if open_order:
return open_order
else:
lastrowid = database.execute("INSERT INTO ViewerOrder (OrderDate, ViewedStatus, ViewerID) VALUES (%s, %s, %s);", (datetime.now().date(), "Open", viewer_id)).lastrowid
database.commit()
return database.execute("SELECT * FROM ViewerOrder WHERE ViewerID = %s AND ViewedStatus = 'Open';", (viewer_id,)).fetchone()
import wwag.views.index
import wwag.views.instance_runs
import wwag.views.videos
import wwag.views.games
import wwag.views.users
import wwag.views.viewers
import wwag.views.utilities
import wwag.views.players
import wwag.views.orders
import wwag.views.venues
import wwag.views.equipment
| zhoutong/wwag | wwag/views/__init__.py | Python | mit | 2,184 |
#!/usr/bin/env python3
import machine
import utime
def default_remote_id():
"""generate remote id based on machine.unique_id()"""
import uhashlib
from ustruct import unpack
# we compute a hash of board's unique_id, since a boards manufactured
# closely together probably share prefix or suffix, and I don't know
# which one. we want to avoid accidental remote_id clashes
unique_hash = uhashlib.sha256(machine.unique_id()).digest()
# and a 4 byte prefix of a sha256 hash is more than enough
uint32 = unpack("I", unique_hash[:4])[0]
# let's mask it to 26 bits
uint26 = uint32 & (2**26 - 1)
return uint26
def payload(remote_id, group, toggle, chan, unit):
"""generate binary payload from given values"""
return (remote_id << 6) | (group << 5) | (toggle << 4) | (chan << 2) | unit
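# Worked example of the packing above (illustrative values only): with
# remote_id=0b10, group=1 (DEVICE), toggle=0 (ON), chan=0b00 (ANSLUT) and
# unit=0b01, the fields concatenate as 0b10_1_0_00_01 == 161 -- the remote id
# is shifted into the high bits and the unit fills the two lowest bits.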
class RFTimings:
# timings
T = 250 # base delay of 250 us/microseconds
ONE = T # ^_
ZERO = 5 * T # ^_____
START = 10 * T # ^__________
STOP = 40 * T # ^________________________________________
RETRIES = 5 # number of times, each message is sent
class Esp8266Timings(RFTimings):
T = 210
ONE = 130
RETRIES = 7
class RFSocket:
"""
Control popular 433MHz RF sockets.
>>> p = Pin('X1', Pin.OUT_PP)
>>> r = RFSocket(p, RFSocket.ANSLUT)
>>> r.on(1) # or 2 or 3
>>> r.off(1)
>>> r.group_on()
>>> r.group_off()
By default each micropython board will have a unique remote_id
generated from machine.unique_id().
"""
# group values
GROUP = 0 # control a whole group
DEVICE = 1 # control a single device
# toggle values
ON = 0 # [sic!] turn ON a DEVICE or GROUP
OFF = 1 # [sic!] turn OFF a DEVICE or GROUP
# chann values
ANSLUT = 0b00
NEXA = 0b11
# unit values
UNITS = {
ANSLUT: {1: 0b00, 2: 0b01, 3: 0b10},
NEXA: {1: 0b11, 2: 0b10, 3: 0b01},
}
def __init__(self, pin, chann=ANSLUT, remote_id=0, timings=RFTimings):
self._pin = pin
self._chann = chann
self._remote_id = (remote_id & (2**26 - 1)) or default_remote_id()
self._timings = timings
self._status = [False, False, False]
def group_on(self):
"""Turn on all the devices."""
bits = payload(self._remote_id, self.GROUP, self.ON, self._chann, 0)
self._send(bits)
for i in range(3):
self._status[i] = True
def group_off(self):
"""Turn off all the devices."""
bits = payload(self._remote_id, self.GROUP, self.OFF, self._chann, 0)
self._send(bits)
for i in range(3):
self._status[i] = False
def on(self, unit):
bits = payload(self._remote_id, self.DEVICE, self.ON, self._chann, self.UNITS[self._chann][unit])
self._send(bits)
self._status[unit - 1] = True
def off(self, unit):
bits = payload(self._remote_id, self.DEVICE, self.OFF, self._chann, self.UNITS[self._chann][unit])
self._send(bits)
self._status[unit - 1] = False
def status(self):
return tuple(self._status)
@staticmethod
def _phys(t, m, high, low, udelay=utime.sleep_us):
"""
Send one physical 'bit' of information, either ONE, ZERO, START or STOP.
Using high, low and udelay locals for performance and better timing.
"""
high()
udelay(t)
low()
udelay(m)
def _send(self, msg):
"""Send msg to the transmitter, repeat it appropriate number of times."""
for _ in range(self._timings.RETRIES):
self._send_one(msg)
def _send_one(self, msg):
"""Send a single 32bit message."""
# bring some of the stuff as local variables, this greately
# improves/stabilizes message signal timings
t, one, zero, start, stop = self._timings.T, self._timings.ONE, self._timings.ZERO, self._timings.START, self._timings.STOP
high = self._pin.high
low = self._pin.low
_phys = self._phys
mask = 1 << 31
_phys(t, start, high, low)
for _ in range(32):
if mask & msg:
# logical one is encoded as physical ONE followed by physical ZERO
_phys(t, one, high, low)
_phys(t, zero, high, low)
else:
# logical zero is encoded as physical ZERO followed by physical ONE
_phys(t, zero, high, low)
_phys(t, one, high, low)
msg = msg << 1 # next bit
_phys(t, stop, high, low)
| wuub/micropython-rfsocket | rfsocket.py | Python | mit | 4,613 |
from . import abstractarchiver
class TARArchiver(abstractarchiver.AbstractArchiver):
def get_file_extensions(self):
return ['tar']
def list_contents(self, opts):
self.run_cmd(opts, ['tar', '--list', '--verbose', '--file', opts.archive])
def extract_archive(self, opts):
self.run_cmd(opts, ['tar', '--extract', '--verbose', '--file', opts.archive])
def create_archive(self, opts):
cmd = ['tar', '--create', '--verbose', '--file', opts.dest]
cmd.extend(opts.files)
self.run_cmd(opts, cmd)
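# Illustrative sketch (not part of the original module): the CLI front-end is
# assumed to build an options object exposing .archive, .dest and .files as
# used above, so manual usage would look roughly like:
#
#     archiver = TARArchiver()
#     archiver.create_archive(opts)   # tar --create --verbose --file <opts.dest> <opts.files...>
#     archiver.list_contents(opts)    # tar --list --verbose --file <opts.archive>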
| eviljoe/junk-n-stuff | src/unitelib/archivers/tar.py | Python | mit | 577 |
for n in range(2, 10):
for x in range(2, n):
if n % x == 0:
print(n, 'equals', x, '*', n//x)
break
else:
        # the loop finished without finding a factor
print(n, 'is a prime number')
| darkless456/Python | 求质数.py | Python | mit | 228 |
# cryptopals challenge 3
import array
import re
#most common english letters
letter_points = {
'e' : 10,
't' : 10,
'a' : 10,
'o' : 10,
'i' : 10,
'n' : 10,
's' : 5,
'h' : 5,
'r' : 5,
'd' : 5,
'l' : 5,
'u' : 5
}
def decode(orig, char):
hex_data = orig.decode('hex')
byte_arr = array.array('b', hex_data)
result = [b ^ char for b in byte_arr]
return "".join("{:c}".format(b) for b in result)
def score(string):
score = 0
for x in string:
if x in letter_points:
score += letter_points[x]
return score
def get_decrypted(orig):
most_likely_mess = ""
max_score = 0
most_likely_key = ""
for i in range(256):
string = decode(orig, i)
curr_score = score(string)
if curr_score > max_score:
max_score = curr_score
most_likely_mess = string
most_likely_key = i
print (most_likely_mess)
    print(chr(most_likely_key))
encoded = '1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736'
get_decrypted(encoded)
| muursh/cryptopals | challenge3.py | Python | mit | 1,107 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import pytest
from azure.eventhub import EventHubSharedKeyCredential
from azure.eventhub import EventHubConsumerClient
from azure.eventhub.exceptions import AuthenticationError, ConnectError, EventHubError
@pytest.mark.liveTest
def test_get_properties(live_eventhub):
client = EventHubConsumerClient(live_eventhub['hostname'], live_eventhub['event_hub'], '$default',
EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']))
with client:
properties = client.get_eventhub_properties()
assert properties['eventhub_name'] == live_eventhub['event_hub'] and properties['partition_ids'] == ['0', '1']
@pytest.mark.liveTest
def test_get_properties_with_auth_error_sync(live_eventhub):
client = EventHubConsumerClient(live_eventhub['hostname'], live_eventhub['event_hub'], '$default',
EventHubSharedKeyCredential(live_eventhub['key_name'], "AaBbCcDdEeFf="))
with client:
with pytest.raises(AuthenticationError) as e:
client.get_eventhub_properties()
client = EventHubConsumerClient(live_eventhub['hostname'], live_eventhub['event_hub'], '$default',
EventHubSharedKeyCredential("invalid", live_eventhub['access_key'])
)
with client:
with pytest.raises(AuthenticationError) as e:
client.get_eventhub_properties()
@pytest.mark.liveTest
def test_get_properties_with_connect_error(live_eventhub):
client = EventHubConsumerClient(live_eventhub['hostname'], "invalid", '$default',
EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key'])
)
with client:
with pytest.raises(ConnectError) as e:
client.get_eventhub_properties()
client = EventHubConsumerClient("invalid.servicebus.windows.net", live_eventhub['event_hub'], '$default',
EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key'])
)
with client:
with pytest.raises(EventHubError) as e: # This can be either ConnectError or ConnectionLostError
client.get_eventhub_properties()
@pytest.mark.liveTest
def test_get_partition_ids(live_eventhub):
client = EventHubConsumerClient(live_eventhub['hostname'], live_eventhub['event_hub'], '$default',
EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']))
with client:
partition_ids = client.get_partition_ids()
assert partition_ids == ['0', '1']
@pytest.mark.liveTest
def test_get_partition_properties(live_eventhub):
client = EventHubConsumerClient(live_eventhub['hostname'], live_eventhub['event_hub'], '$default',
EventHubSharedKeyCredential(live_eventhub['key_name'], live_eventhub['access_key']))
with client:
properties = client.get_partition_properties('0')
assert properties['eventhub_name'] == live_eventhub['event_hub'] \
and properties['id'] == '0' \
and 'beginning_sequence_number' in properties \
and 'last_enqueued_sequence_number' in properties \
and 'last_enqueued_offset' in properties \
and 'last_enqueued_time_utc' in properties \
and 'is_empty' in properties
| Azure/azure-sdk-for-python | sdk/eventhub/azure-eventhub/tests/livetest/synctests/test_properties.py | Python | mit | 3,596 |
import json
from models import *
ANGKATAN = ["2010", "2011", "2012", "2013"]
PRODI = ["Matematika", "Statistika", "Fisika", "Geofisika", "Kimia",
"Elektronika+dan+Instrumentasi", "Ilmu+Komputer"]
def seed_mahasiswa():
for prodi in PRODI:
for angkatan in ANGKATAN:
file_path = ('data/{prodi}/{angkatan}.json'
.format(angkatan=angkatan, prodi=prodi)
.replace('+', ' '))
with open(file_path, 'r') as fp:
mahasiswa = json.load(fp)
for entry in mahasiswa:
nama = entry['nama'].rstrip()
niu = entry['niu'].rstrip()
Mahasiswa.create(nama=nama,
niu=niu,
angkatan=angkatan,
prodi=prodi,
visited=0)
if __name__ == "__main__":
Mahasiswa.create_table()
Kelas.create_table()
KelasMahasiswa.create_table()
seed_mahasiswa()
print "SUCCESS" | bahrunnur/InfoKelas | scripts/db_provision.py | Python | mit | 1,093 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-19 12:21
from __future__ import unicode_literals
import importlib
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
########################################################################################################################
## For each fieldtranslation we have to assign its module.
## Note that this is only safe because no two translatable models currently
## share the same name across different modules; we resolve that ambiguity here
## before such a clash can occur.
def assign_module(apps, schema_editor):
# Get the current model
FieldTranslation = apps.get_model('modeltranslation', 'FieldTranslation')
## For each fieldtranslation we have to assign its module.
field_translations = FieldTranslation.objects.all()
for field_translation in field_translations:
# For each module with translatable models, we load it
# and test if the model is inside this module
        # It could be done more efficiently, but not many developers currently use this application, so
# we don't have to be extremely careful about performance.
for module_path in settings.TRANSLATABLE_MODEL_MODULES:
module = importlib.import_module(module_path)
if hasattr(module, field_translation.model):
field_translation.module = module_path
field_translation.save()
class Migration(migrations.Migration):
dependencies = [
('modeltranslation', '0005_auto_20160107_1058'),
]
operations = [
migrations.AlterModelOptions(
name='fieldtranslation',
options={'verbose_name': 'model object field translation', 'verbose_name_plural': 'model object field translations'},
),
migrations.AddField(
model_name='fieldtranslation',
name='module',
field=models.CharField(default='', help_text='Module name that contains the model whose field is translated', max_length=128, verbose_name='Module name'),
preserve_default=False,
),
migrations.AlterField(
model_name='fieldtranslation',
name='context',
field=models.TextField(default=None, help_text='Help context that will be helpful for translators.', null=True, verbose_name='Context'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='creation_datetime',
field=models.DateTimeField(verbose_name='Creation date and time of this translation'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='creator_user',
field=models.ForeignKey(default=None, help_text='User that created last translation version', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='model_translation', to=settings.AUTH_USER_MODEL, verbose_name='User translator'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='field',
field=models.CharField(help_text='Name of the field that is translated', max_length=128, verbose_name='Object field'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='is_fuzzy',
field=models.BooleanField(default=False, help_text='This translation needs some reviewing.', verbose_name='\xbfNeeds reviewing?'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='lang',
field=models.CharField(choices=[('es', 'Espa\xf1ol'), ('en', 'English'), ('de', 'Deutsch')], help_text='Language ISO code of this translation', max_length=16, verbose_name='Language Id'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='last_update_datetime',
field=models.DateTimeField(verbose_name='Last update date and time of this translation'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='model',
field=models.CharField(help_text='Model name whose field is translated', max_length=128, verbose_name='Model name'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='object_id',
field=models.PositiveIntegerField(default=1, help_text='Object id whose field is translated', verbose_name='Object id'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='source_md5',
field=models.CharField(help_text='MD5 checksum of source text', max_length=128, verbose_name='MD5 source text'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='source_text',
field=models.TextField(help_text='Source text in default language', verbose_name='Source text'),
),
migrations.AlterField(
model_name='fieldtranslation',
name='translation',
field=models.TextField(default=None, help_text='Translation showed to users in website when showing it in choosed language.', null=True, verbose_name='Translation'),
),
migrations.RunPython(assign_module)
]
| intelligenia/modeltranslation | modeltranslation/migrations/0006_auto_20160119_1321.py | Python | mit | 5,355 |
"""
Landmark
:Authors:
Berend Klein Haneveld
"""
from vtk import vtkProp3DFollower
from vtk import vtkTransform
from core.vtkDrawing import CreateSphere
from core.vtkDrawing import ColorActor
from core.vtkDrawing import CreateCircle
class Landmark(object):
"""
Landmark is a container for vtkProps for easier
management of resources.
"""
def __init__(self, index, renderer, overlay, flag="Fixed"):
super(Landmark, self).__init__()
self.renderer = renderer
self.overlay = overlay
self.flag = flag
self.colorActive = [0.5, 1.0, 0.5]
self.colorInactive = [0.8, 0.8, 0.8]
self._position = [0.0, 0.0, 0.0] # coordinates in volume
self._scale = 1.0
self.transform = vtkTransform()
self.active = True
self.id = index
self.landmark = CreateSphere(0.1, [1, 1, 0.6])
self.landmarkIndicator = CreateCircle(1.2)
self.landmarkIndicator.GetProperty().SetLineWidth(2)
self.landmarkIndicator.GetProperty().SetOpacity(0.7)
self.landmarkFollower = vtkProp3DFollower()
self.landmarkFollower.SetProp3D(self.landmarkIndicator)
self.landmarkFollower.SetCamera(self.renderer.GetActiveCamera())
self.renderer.AddViewProp(self.landmark)
self.overlay.AddViewProp(self.landmarkFollower)
def cleanUp(self):
self.renderer.RemoveViewProp(self.landmark)
self.overlay.RemoveViewProp(self.landmarkFollower)
@property
def position(self):
return self._position
@position.setter
def position(self, position):
"""
Position should be in local coordinates.
"""
self._position = position
self.update()
@property
def scale(self):
return self._scale
@scale.setter
def scale(self, value):
self._scale = value
self.landmark.SetScale(value)
self.landmarkIndicator.SetScale(value)
def update(self):
# Update color for landmark and landmarkIndicator
if self.active:
color = self.colorActive
opacity = 0.7
else:
color = self.colorInactive
opacity = 0.4
ColorActor(self.landmark, color)
ColorActor(self.landmarkIndicator, color, opacity)
# Update position of landmark and landmarkFollower with the latest transform
location = list(self.transform.TransformPoint(self._position[0], self._position[1], self._position[2]))
self.landmark.SetPosition(location[0], location[1], location[2])
self.landmarkFollower.SetPosition(location[0], location[1], location[2])
| berendkleinhaneveld/Registrationshop | ui/transformations/Landmark.py | Python | mit | 2,337 |
friends = ['Tom', 'Dick', 'Harry']
for friend in friends:
print 'Hello', friend
print 'Out of FOR loop.'
| rahulbohra/Python-Basic | 27_for_loop.py | Python | mit | 110 |
# -*- coding: utf-8 -*-
from dp_tornado.engine.controller import Controller
class MysqlController(Controller):
def get(self):
self.model.tests.model_test.db_test.mysql.test()
self.finish('done')
| why2pac/dp-tornado | example/controller/tests/model/db/mysql.py | Python | mit | 219 |
import exifread
import os
import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
def processFolder(directoryInput, directoryOutput, fileName, directoryInput2="none"):
# count the pictures per hour
clearCounts()
file_object = open("%s/%s.txt" % (directoryOutput, fileName), 'w')
for filename in os.listdir(directoryInput):
if filename.endswith('.JPG'):
with open("%s/%s" % (directoryInput, filename), 'rb') as image: # file path and name
exif = exifread.process_file(image)
# print(exif)
dt = str(exif['EXIF DateTimeOriginal']) # might be different
# rename files to a specific format
# print(dt)
# segment string dt into date and time
day, dtime = dt.split(" ", 1)
# print(dtime)
# segment time into hour, minute, second
hour, minute, second = dtime.split(":", 2)
# print(hour)
options[hour]()
file_object.write('%s\t' % dt)
continue
else:
continue
file_object.close()
hourarray1,sum1 = countImages()
if directoryInput2 != "none":
clearCounts()
file_object = open("%s/%s.txt" % (directoryOutput, fileName+str(2)), 'w')
for filename in os.listdir(directoryInput2):
if filename.endswith('.JPG'):
with open("%s/%s" % (directoryInput2, filename), 'rb') as image: # file path and name
exif = exifread.process_file(image)
# print(exif)
dt = str(exif['EXIF DateTimeOriginal']) # might be different
# rename files to a specific format
# print(dt)
# segment string dt into date and time
day, dtime = dt.split(" ", 1)
# print(dtime)
# segment time into hour, minute, second
hour, minute, second = dtime.split(":", 2)
# print(hour)
options[hour]()
file_object.write('%s\t' % dt)
continue
else:
continue
file_object.close()
hourarray2, sum2 = countImages()
doubleHistogram(hourarray1, sum1, hourarray2, sum2)
if desiredChart == "0":
histogram(hourarray1,sum1)
elif desiredChart == "2":
barchart(hourarray1, sum1)
clearCounts()
file_object = open("%s/%s.txt" % (directoryOutput, fileName), 'w')
for filename in os.listdir(directoryInput):
if filename.endswith('.JPG'):
with open("%s/%s" % (directoryInput, filename), 'rb') as image: # file path and name
exif = exifread.process_file(image)
# print(exif)
dt = str(exif['EXIF DateTimeOriginal']) # might be different
# rename files to a specific format
# print(dt)
# segment string dt into date and time
day, dtime = dt.split(" ", 1)
# print(dtime)
# segment time into hour, minute, second
hour, minute, second = dtime.split(":", 2)
# print(hour)
options[hour]()
file_object.write('%s\t' % dt)
continue
else:
continue
file_object.close()
def processDirectory(directoryInput, directoryOutput, fileName, directoryInput2="none"):
# count the pictures per hour
clearCounts()
file_object = open("%s/%s.txt" % (directoryOutput, fileName), 'w')
for folder in os.listdir(directoryInput):
#find way to enter check if folder then enter
ndirect = directoryInput+"/"+folder
if os.path.isdir(ndirect):
print(ndirect)
for filename in os.listdir(ndirect):
if filename.endswith('.JPG'):
with open("%s/%s" % (ndirect, filename), 'rb') as image: # file path and name
exif = exifread.process_file(image)
"""if ndirect == "/Users/awhite/Desktop/Baited/PM_7":
print(exif)
print(filename)"""
dt = str(exif['EXIF DateTimeOriginal']) # might be different
# rename files to a specific format
# print(dt)
# segment string dt into date and time
day, dtime = dt.split(" ", 1)
# print(dtime)
# segment time into hour, minute, second
hour, minute, second = dtime.split(":", 2)
# print(hour)
options[hour]()
file_object.write('%s\t' % dt)
continue
else:
continue
file_object.close()
hourarray1,sum1 = countImages()
if directoryInput2 != "none":
clearCounts()
file_object = open("%s/%s.txt" % (directoryOutput, fileName+str(2)), 'w')
for folder in os.listdir(directoryInput):
# find way to enter check if folder then enter
ndirect = directoryInput + "/" + folder
if os.path.isdir(ndirect):
for filename in os.listdir(ndirect):
if filename.endswith('.JPG'):
with open("%s/%s" % (ndirect, filename), 'rb') as image: # file path and name
exif = exifread.process_file(image)
# print(exif)
dt = str(exif['EXIF DateTimeOriginal']) # might be different
# rename files to a specific format
# print(dt)
# segment string dt into date and time
day, dtime = dt.split(" ", 1)
# print(dtime)
# segment time into hour, minute, second
hour, minute, second = dtime.split(":", 2)
# print(hour)
options[hour]()
file_object.write('%s\t' % dt)
continue
else:
continue
file_object.close()
hourarray2, sum2 = countImages()
doubleHistogram(hourarray1, sum1, hourarray2, sum2)
if desiredChart == "0":
histogram(hourarray1,sum1)
elif desiredChart == "2":
barchart(hourarray1, sum1)
clearCounts()
file_object = open("%s/%s.txt" % (directoryOutput, fileName), 'w')
for filename in os.listdir(directoryInput):
if filename.endswith('.JPG'):
with open("%s/%s" % (directoryInput, filename), 'rb') as image: # file path and name
exif = exifread.process_file(image)
# print(exif)
dt = str(exif['EXIF DateTimeOriginal']) # might be different
# rename files to a specific format
# print(dt)
# segment string dt into date and time
day, dtime = dt.split(" ", 1)
# print(dtime)
# segment time into hour, minute, second
hour, minute, second = dtime.split(":", 2)
# print(hour)
options[hour]()
file_object.write('%s\t' % dt)
continue
else:
continue
file_object.close()
#sums the total images and counts the images per hour and puts that into an array
def countImages():
hourarray = []
for i in range(24):
if (i >= 10):
hourcount = 'hour' + str(i) + 'count'
else:
hourcount = 'hour' + str(0) + str(i) + 'count'
        value = eval(hourcount)  # evaluate the variable name to get the number of images taken in that hour
hourarray.append(value)
sum = 0
for j in range(24):
print("hour:", j, " count: ", hourarray[j])
sum += hourarray[j]
print("Total sightings:", sum)
return hourarray,sum
def histogram(hourarray,sum):
#pseudo histogram of the sightings
normhourarray = []
if sum != 0:
for k in range(24):
normhourarray.append(hourarray[k]/sum)
else:
return
N = 24
ind = np.arange(N) # the x locations for the groups
width = 1 # the width of the bars
f=plt.figure(1)
plt.bar(ind, normhourarray, width, color='r')
plt.xlabel('Hour')
plt.ylabel('Percentage of Sightings')
plt.title('Percentage of Deer Sightings per Hour at Unbaited Sites:')
plt.axis([0, 24, 0, .2])
plt.xticks(np.arange(0, 24, 1.0))
plt.grid(True)
f.show()
showfigure()
def doubleHistogram(hourarray1,sum1,hourarray2,sum2):
#pseudo histogram of the sightings
normhourarray1 = []
normhourarray2 = []
if sum1 != 0:
if sum2 != 0:
for k in range(24):
normhourarray1.append(hourarray1[k]/sum1)
normhourarray2.append(hourarray2[k]/sum2)
else:
return
else:
return
N = 24
ind = np.arange(N) # the x locations for the groups
width = 1 # the width of the bars
f=plt.figure(1)
plt.bar(ind, normhourarray1, width, color='r')
plt.bar(ind, normhourarray2, width, color='b')
plt.xlabel('Hour')
plt.ylabel('Percentage of Sightings')
plt.title('Percentage of Deer Sightings per Hour:')
plt.axis([0, 24, 0, .2])
plt.xticks(np.arange(0, 24, 1.0))
plt.grid(True)
f.show()
showfigure()
def barchart(hourarray, sum):
# barchart of sightings
N = 24
ind = np.arange(N) # the x locations for the groups
width = 1 # the width of the bars
g = plt.figure(2)
plt.bar(ind, hourarray, width, color='r')
plt.xlabel('Hour')
plt.ylabel('Sightings')
plt.title('Barchart of Deer Sightings:')
plt.axis([0, 24, 0, 60])
plt.xticks(np.arange(0, 24, 1.0))
plt.grid(True)
g.show()
showfigure()
def showfigure():
input()
#case counting function definitions
def hour00():
global hour00count
hour00count+=1
def hour01():
global hour01count
hour01count+=1
def hour02():
global hour02count
hour02count+=1
def hour03():
global hour03count
hour03count+=1
def hour04():
global hour04count
hour04count+=1
def hour05():
global hour05count
hour05count+=1
def hour06():
global hour06count
hour06count+=1
def hour07():
global hour07count
hour07count+=1
def hour08():
global hour08count
hour08count+=1
def hour09():
global hour09count
hour09count+=1
def hour10():
global hour10count
hour10count+=1
def hour11():
global hour11count
hour11count+=1
def hour12():
global hour12count
hour12count+=1
def hour13():
global hour13count
hour13count+=1
def hour14():
global hour14count
hour14count+=1
def hour15():
global hour15count
hour15count+=1
def hour16():
global hour16count
hour16count+=1
def hour17():
global hour17count
hour17count+=1
def hour18():
global hour18count
hour18count+=1
def hour19():
global hour19count
hour19count+=1
def hour20():
global hour20count
hour20count+=1
def hour21():
global hour21count
hour21count+=1
def hour22():
global hour22count
hour22count+=1
def hour23():
global hour23count
hour23count+=1
#clean variables
def clearCounts():
global hour00count
global hour01count
global hour02count
global hour03count
global hour04count
global hour05count
global hour06count
global hour07count
global hour08count
global hour09count
global hour10count
global hour11count
global hour12count
global hour13count
global hour14count
global hour15count
global hour16count
global hour17count
global hour18count
global hour19count
global hour20count
global hour21count
global hour22count
global hour23count
hour00count = 0
hour01count = 0
hour02count = 0
hour03count = 0
hour04count = 0
hour05count = 0
hour06count = 0
hour07count = 0
hour08count = 0
hour09count = 0
hour10count = 0
hour11count = 0
hour12count = 0
hour13count = 0
hour14count = 0
hour15count = 0
hour16count = 0
hour17count = 0
hour18count = 0
hour19count = 0
hour20count = 0
hour21count = 0
hour22count = 0
hour23count = 0
#initialize counts
hour00count = 0
hour01count = 0
hour02count = 0
hour03count = 0
hour04count = 0
hour05count = 0
hour06count = 0
hour07count = 0
hour08count = 0
hour09count = 0
hour10count = 0
hour11count = 0
hour12count = 0
hour13count = 0
hour14count = 0
hour15count = 0
hour16count = 0
hour17count = 0
hour18count = 0
hour19count = 0
hour20count = 0
hour21count = 0
hour22count = 0
hour23count = 0
#dictionary of functions, which acts as a case switch
options = { '00' : hour00,
'01' : hour01,
'02' : hour02,
'03' : hour03,
'04' : hour04,
'05' : hour05,
'06' : hour06,
'07' : hour07,
'08' : hour08,
'09' : hour09,
'10' : hour10,
'11' : hour11,
'12' : hour12,
'13' : hour13,
'14' : hour14,
'15' : hour15,
'16' : hour16,
'17' : hour17,
'18' : hour18,
'19' : hour19,
'20' : hour20,
'21' : hour21,
'22' : hour22,
'23' : hour23,
}
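# Illustrative note (added for clarity): options[hour]() dispatches on the
# two-digit hour string parsed from the EXIF timestamp, e.g. options["07"]()
# calls hour07 and increments hour07count -- a dict of functions standing in
# for a switch statement.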
#interface
directOrFile =input("Enter '0' if you want to process a folder with images or '1' if you want to process a folder of folders containing images: ")
directoryInput = input("Example filepath '/Users/awhite/Desktop/Research/Silman_Lab_Fall_16/PM_12'\nEnter the file path of the folder of images you want to process: ")
directoryOutput = input("Enter the destination file path of the output file: ")
fileName = input("Enter the desired name for the output file: ")
desiredChart =input("Type a '0' for a histogram of the data, '1' for a comparative histogram or a '2' for a barchart:")
#do you want raw output?
#fix these if statements for correct directory
if directoryInput == "default":
directoryInput = "/Users/awhite/Desktop/Research/Silman_Lab_Fall_16/PM_12"
default1= "/Users/awhite/Desktop/Baited"
if directoryOutput == "default":
directoryOutput = "/Users/awhite/Desktop"
if fileName == "default":
fileName = "output"
#processes folder
if directOrFile == "0":
if desiredChart == "1":
directoryInput2 = input(
"Example filepath '/Users/awhite/Desktop/Research/Silman_Lab_Fall_16/PM_12'\nEnter the file path of the second folder of images you want to process: ")
if directoryInput2 == "default":
directoryInput2 = "/Users/awhite/Desktop/Unbaited/PM_11"
processFolder(directoryInput, directoryOutput, fileName, directoryInput2)
elif desiredChart == "0":
processFolder(directoryInput, directoryOutput, fileName) # add how you want to process file
elif desiredChart == "2":
processFolder(directoryInput, directoryOutput, fileName) # add how you want to process file
elif desiredChart == "3":
        processFolder("/Users/awhite/Desktop/Baited", directoryOutput, fileName)
#process folder of folders
elif directOrFile == "1":
if directoryInput == "/Users/awhite/Desktop/Research/Silman_Lab_Fall_16/PM_12":
directoryInput = "/Users/awhite/Desktop/Unbaited"
if desiredChart == "1":
directoryInput2 = input(
"Example filepath '/Users/awhite/Desktop/Research/Silman_Lab_Fall_16/PM_12'\nEnter the file path of the second folder of images you want to process: ")
if directoryInput2 == "default":
directoryInput2 = "/Users/awhite/Desktop/Baited"
processDirectory(directoryInput, directoryOutput, fileName, directoryInput2)
elif desiredChart == "0":
processDirectory(directoryInput, directoryOutput, fileName) # add how you want to process file
elif desiredChart == "2":
processDirectory(directoryInput, directoryOutput, fileName) # add how you want to process file
| aleckwhite/EXIFExport | ShutterCountJPG.py | Python | mit | 16,497 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str,
server_name: str,
database_name: str,
workload_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups/{workloadGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
"workloadGroupName": _SERIALIZER.url("workload_group_name", workload_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request_initial(
resource_group_name: str,
server_name: str,
database_name: str,
workload_group_name: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups/{workloadGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
"workloadGroupName": _SERIALIZER.url("workload_group_name", workload_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
resource_group_name: str,
server_name: str,
database_name: str,
workload_group_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-11-01-preview"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups/{workloadGroupName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
"workloadGroupName": _SERIALIZER.url("workload_group_name", workload_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
**kwargs
)
def build_list_by_database_request(
resource_group_name: str,
server_name: str,
database_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2020-11-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serverName": _SERIALIZER.url("server_name", server_name, 'str'),
"databaseName": _SERIALIZER.url("database_name", database_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class WorkloadGroupsOperations(object):
"""WorkloadGroupsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.sql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
server_name: str,
database_name: str,
workload_group_name: str,
**kwargs: Any
) -> "_models.WorkloadGroup":
"""Gets a workload group.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:param workload_group_name: The name of the workload group.
:type workload_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: WorkloadGroup, or the result of cls(response)
:rtype: ~azure.mgmt.sql.models.WorkloadGroup
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkloadGroup"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
workload_group_name=workload_group_name,
subscription_id=self._config.subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('WorkloadGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups/{workloadGroupName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
server_name: str,
database_name: str,
workload_group_name: str,
parameters: "_models.WorkloadGroup",
**kwargs: Any
) -> Optional["_models.WorkloadGroup"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.WorkloadGroup"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'WorkloadGroup')
request = build_create_or_update_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
workload_group_name=workload_group_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('WorkloadGroup', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('WorkloadGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups/{workloadGroupName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
resource_group_name: str,
server_name: str,
database_name: str,
workload_group_name: str,
parameters: "_models.WorkloadGroup",
**kwargs: Any
) -> LROPoller["_models.WorkloadGroup"]:
"""Creates or updates a workload group.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:param workload_group_name: The name of the workload group.
:type workload_group_name: str
:param parameters: The requested workload group state.
:type parameters: ~azure.mgmt.sql.models.WorkloadGroup
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either WorkloadGroup or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.sql.models.WorkloadGroup]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkloadGroup"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
workload_group_name=workload_group_name,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('WorkloadGroup', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups/{workloadGroupName}'} # type: ignore
def _delete_initial(
self,
resource_group_name: str,
server_name: str,
database_name: str,
workload_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
workload_group_name=workload_group_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups/{workloadGroupName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
resource_group_name: str,
server_name: str,
database_name: str,
workload_group_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes a workload group.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:param workload_group_name: The name of the workload group to delete.
:type workload_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
workload_group_name=workload_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups/{workloadGroupName}'} # type: ignore
@distributed_trace
def list_by_database(
self,
resource_group_name: str,
server_name: str,
database_name: str,
**kwargs: Any
) -> Iterable["_models.WorkloadGroupListResult"]:
"""Gets the list of workload groups.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param database_name: The name of the database.
:type database_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either WorkloadGroupListResult or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.sql.models.WorkloadGroupListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.WorkloadGroupListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_database_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
template_url=self.list_by_database.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_database_request(
resource_group_name=resource_group_name,
server_name=server_name,
database_name=database_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("WorkloadGroupListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_database.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/workloadGroups'} # type: ignore
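# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client code above): it shows
# how the long-running delete and the paged listing are typically driven.
# The ``client`` argument is assumed to be a SqlManagementClient-style object
# exposing these operations as ``client.workload_groups``; the resource names
# are illustrative placeholders, not values taken from this module.
# ---------------------------------------------------------------------------
def _workload_groups_usage_sketch(client):
    """Illustrative only; all resource names below are assumptions."""
    # begin_delete returns an LROPoller; result() blocks until the service
    # reports the delete operation as finished.
    poller = client.workload_groups.begin_delete(
        resource_group_name="example-rg",
        server_name="example-server",
        database_name="example-db",
        workload_group_name="wlm_workloadgroup",
    )
    poller.result()
    # list_by_database returns an ItemPaged iterator; additional pages are
    # fetched transparently as the loop advances.
    for group in client.workload_groups.list_by_database(
        resource_group_name="example-rg",
        server_name="example-server",
        database_name="example-db",
    ):
        print(group.name)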
| Azure/azure-sdk-for-python | sdk/sql/azure-mgmt-sql/azure/mgmt/sql/operations/_workload_groups_operations.py | Python | mit | 25,692 |
"""Test calendar rrules."""
import datetime
from django.test import TestCase
from dateutil.rrule import DAILY
from dateutil.rrule import MO
from dateutil.rrule import MONTHLY
from dateutil.rrule import TH
from dateutil.rrule import TU
from dateutil.rrule import WEEKLY
from dateutil.rrule import YEARLY
from dateutil.rrule import rrule
from icalendar.prop import vRecur
import pytz
import recurrence
from django_ical import utils
class BuildRruleTest(TestCase):
"""Test building an Rrule for icalendar."""
def test_every_day(self):
"""Repeat every day."""
vrecurr = vRecur(utils.build_rrule(freq="DAILY"))
assert vrecurr["FREQ"] == "DAILY"
assert vrecurr.to_ical().decode() == "FREQ=DAILY"
assert len(vrecurr.keys()) == 1
def test_daily_byhour(self):
"""Repeat every day at 10, 12 and 17."""
vrecurr = utils.build_rrule(freq="DAILY", byhour=[10, 12, 17])
assert vrecurr["FREQ"] == "DAILY"
assert vrecurr["BYHOUR"] == [10, 12, 17]
vRecur(vrecurr).to_ical().decode() == "FREQ=DAILY;BYHOUR=10,12,17"
assert len(vrecurr.keys()) == 2
def test_daily_byhour_once(self):
"""Repeat every day at 10."""
vrecurr = utils.build_rrule(freq="DAILY", byhour=10)
assert vrecurr["FREQ"] == "DAILY"
assert vrecurr["BYHOUR"] == 10
vRecur(vrecurr).to_ical().decode() == "FREQ=DAILY;BYHOUR=10"
assert len(vrecurr.keys()) == 2
def test_every_week(self):
"""Repeat every week."""
vrecurr = utils.build_rrule(freq="WEEKLY")
assert vrecurr["FREQ"] == "WEEKLY"
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY"
assert len(vrecurr.keys()) == 1
def test_ever_hour(self):
"""Repeat every hour."""
vrecurr = utils.build_rrule(freq="HOURLY")
assert vrecurr["FREQ"] == "HOURLY"
vRecur(vrecurr).to_ical().decode() == "FREQ=HOURLY"
assert len(vrecurr.keys()) == 1
def test_ever_4_hours(self):
"""Repeat every 4 hours."""
vrecurr = utils.build_rrule(interval=4, freq="HOURLY")
assert vrecurr["FREQ"] == "HOURLY"
assert vrecurr["INTERVAL"] == 4
vRecur(vrecurr).to_ical().decode() == "FREQ=HOURLY;INTERVAL=4"
assert len(vrecurr.keys()) == 2
def test_weekly_tue(self):
"""Repeat every week on Tuesday."""
vrecurr = utils.build_rrule(freq="WEEKLY", byday="TU")
assert vrecurr["FREQ"] == "WEEKLY"
assert vrecurr["BYDAY"] == "TU"
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;BYDAY=TU"
assert len(vrecurr.keys()) == 2
def test_weekly_mo_wed(self):
"""Repeat every week on Monday, Wednesday."""
vrecurr = utils.build_rrule(freq="WEEKLY", byday=["MO", "WE"])
assert vrecurr["FREQ"] == "WEEKLY"
assert vrecurr["BYDAY"] == ["MO", "WE"]
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;BYDAY=MO,WE"
assert len(vrecurr.keys()) == 2
def test_every_weekday(self):
"""Repeat every weekday."""
vrecurr = utils.build_rrule(freq="WEEKLY", byday=["MO", "TU", "WE", "TH", "FR"])
assert vrecurr["FREQ"] == "WEEKLY"
assert vrecurr["BYDAY"] == ["MO", "TU", "WE", "TH", "FR"]
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR"
assert len(vrecurr.keys()) == 2
def test_every_2_weeks(self):
"""Repeat every 2 weeks."""
vrecurr = utils.build_rrule(interval=2, freq="WEEKLY")
assert vrecurr["FREQ"] == "WEEKLY"
assert vrecurr["INTERVAL"] == 2
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;INTERVAL=2"
assert len(vrecurr.keys()) == 2
def test_every_month(self):
"""Repeat every month."""
vrecurr = utils.build_rrule(freq="MONTHLY")
assert vrecurr["FREQ"] == "MONTHLY"
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY"
assert len(vrecurr.keys()) == 1
def test_every_6_months(self):
"""Repeat very 6 months."""
vrecurr = utils.build_rrule(interval=6, freq="MONTHLY")
assert vrecurr["FREQ"] == "MONTHLY"
assert vrecurr["INTERVAL"] == 6
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;INTERVAL=6"
assert len(vrecurr.keys()) == 2
def test_every_year(self):
"""Repeat every year."""
vrecurr = utils.build_rrule(freq="YEARLY")
assert vrecurr["FREQ"] == "YEARLY"
vRecur(vrecurr).to_ical().decode() == "FREQ=YEARLY"
assert len(vrecurr.keys()) == 1
def test_every_month_on_the_4th(self):
"""Repeat every month on the 4th."""
vrecurr = utils.build_rrule(freq="MONTHLY", bymonthday=4)
assert vrecurr["FREQ"] == "MONTHLY"
assert vrecurr["BYMONTHDAY"] == 4
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYMONTHDAY=4"
assert len(vrecurr.keys()) == 2
def test_every_month_on_the_4th_last(self):
"""Repeat every month on the 4th last."""
vrecurr = utils.build_rrule(freq="MONTHLY", bymonthday=-4)
assert vrecurr["FREQ"] == "MONTHLY"
assert vrecurr["BYMONTHDAY"] == -4
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYMONTHDAY=-4"
assert len(vrecurr.keys()) == 2
def test_ever_month_3rd_tu(self):
"""Repeat every month on the 3rd Tuesday."""
vrecurr = utils.build_rrule(freq="MONTHLY", byday="+3TU")
assert vrecurr["FREQ"] == "MONTHLY"
assert vrecurr["BYDAY"] == "+3TU"
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYDAY=+3TU"
assert len(vrecurr.keys()) == 2
def test_ever_month_3rd_last_tu(self):
"""Repeat every month on the 3rd last Tuesday."""
vrecurr = utils.build_rrule(freq="MONTHLY", byday="-3TU")
assert vrecurr["FREQ"] == "MONTHLY"
assert vrecurr["BYDAY"] == "-3TU"
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYDAY=-3TU"
assert len(vrecurr.keys()) == 2
def test_ever_month_last_mo(self):
"""Repeat every month on the last Monday."""
vrecurr = utils.build_rrule(freq="MONTHLY", byday="-1MO")
assert vrecurr["FREQ"] == "MONTHLY"
assert vrecurr["BYDAY"] == "-1MO"
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYDAY=-1MO"
assert len(vrecurr.keys()) == 2
def test_every_week_until_jan_2007(self):
"""Repeat every week until January 1, 2007."""
utc = pytz.UTC
jan2007 = datetime.datetime(2007, 1, 1, 0, 0, tzinfo=utc)
vrecurr = utils.build_rrule(freq="WEEKLY", until=jan2007)
assert vrecurr["FREQ"] == "WEEKLY"
assert vrecurr["UNTIL"] == jan2007
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;UNTIL=20070101T000000Z"
assert len(vrecurr.keys()) == 2
def test_every_week_20_times(self):
"""Repeat every week for 20 times."""
vrecurr = utils.build_rrule(freq="WEEKLY", count=20)
assert vrecurr["FREQ"] == "WEEKLY"
assert vrecurr["COUNT"] == 20
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;COUNT=20"
assert len(vrecurr.keys()) == 2
def test_every_month_last_working_day(self):
"""Repeat the last working day of each month."""
vrecurr = utils.build_rrule(
freq="MONTHLY", byday=["MO", "TU", "WE", "TH", "FR"], bysetpos=-1
)
assert vrecurr["FREQ"] == "MONTHLY"
assert vrecurr["BYDAY"] == ["MO", "TU", "WE", "TH", "FR"]
assert vrecurr["BYSETPOS"] == -1
vRecur(
vrecurr
).to_ical().decode() == "FREQ=MONTHLY;BYDAY=MO,TU,WE,TH,FR;BYSETPOS=-1"
assert len(vrecurr.keys()) == 3
def test_ever_month_last_day(self):
"""Repeat the last day of each month."""
vrecurr = utils.build_rrule(freq="MONTHLY", bymonthday=-1)
assert vrecurr["FREQ"] == "MONTHLY"
assert vrecurr["BYMONTHDAY"] == -1
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYMONTHDAY=-1"
assert len(vrecurr.keys()) == 2
def test_every_day_in_jan(self):
"""Repeat every day in January"""
vrecurr = utils.build_rrule(
freq="YEARLY", bymonth=1, byday=["MO", "TU", "WE", "TH", "FR", "SA", "SU"]
)
assert vrecurr["FREQ"] == "YEARLY"
assert vrecurr["BYMONTH"] == 1
assert vrecurr["BYDAY"] == ["MO", "TU", "WE", "TH", "FR", "SA", "SU"]
vRecur(
vrecurr
).to_ical().decode() == "FREQ=YEARLY;BYDAY=MO,TU,WE,TH,FR,SA,SU;BYMONTH=1"
assert len(vrecurr.keys()) == 3
def test_every_2nd_15th_of_month(self):
"""Repeat monthly on the 2nd and 15th of the month."""
vrecurr = utils.build_rrule(freq="MONTHLY", bymonthday=[4, 15])
assert vrecurr["FREQ"] == "MONTHLY"
assert vrecurr["BYMONTHDAY"] == [4, 15]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYMONTHDAY=4,15"
assert len(vrecurr.keys()) == 2
def test_every_fr_13th(self):
"""Repeat every Friday the 13th."""
vrecurr = utils.build_rrule(freq="YEARLY", bymonthday=13, byday="FR")
assert vrecurr["FREQ"] == "YEARLY"
assert vrecurr["BYMONTHDAY"] == 13
assert vrecurr["BYDAY"] == "FR"
vRecur(vrecurr).to_ical().decode() == "FREQ=YEARLY;BYDAY=FR;BYMONTHDAY=13"
assert len(vrecurr.keys()) == 3
class FromTextTests(TestCase):
"""Test build a vRecur dictionary from an RRULE string."""
def test_every_day(self):
"""Repeat every day."""
vrecurr = utils.build_rrule_from_text("FREQ=DAILY")
assert vrecurr["FREQ"] == ["DAILY"]
vRecur(vrecurr).to_ical().decode() == "FREQ=DAILY"
assert len(vrecurr.keys()) == 1
def test_daily_byhour(self):
"""Repeat every day at 10, 12 and 17."""
vrecurr = utils.build_rrule_from_text("FREQ=DAILY;BYHOUR=10,12,17")
assert vrecurr["FREQ"] == ["DAILY"]
assert vrecurr["BYHOUR"] == [10, 12, 17]
vRecur(vrecurr).to_ical().decode() == "FREQ=DAILY;BYHOUR=10,12,17"
assert len(vrecurr.keys()) == 2
def test_every_week(self):
"""Repeat every week."""
vrecurr = utils.build_rrule_from_text("FREQ=WEEKLY")
assert vrecurr["FREQ"] == ["WEEKLY"]
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY"
assert len(vrecurr.keys()) == 1
def test_ever_hour(self):
"""Repeat every hour."""
vrecurr = utils.build_rrule_from_text("FREQ=HOURLY")
assert vrecurr["FREQ"] == ["HOURLY"]
vRecur(vrecurr).to_ical().decode() == "FREQ=HOURLY"
assert len(vrecurr.keys()) == 1
def test_ever_4_hours(self):
"""Repeat every 4 hours."""
vrecurr = utils.build_rrule_from_text("INTERVAL=4;FREQ=HOURLY")
assert vrecurr["FREQ"] == ["HOURLY"]
assert vrecurr["INTERVAL"] == [4]
vRecur(vrecurr).to_ical().decode() == "FREQ=HOURLY;INTERVAL=4"
assert len(vrecurr.keys()) == 2
def test_weekly_tue(self):
"""Repeat every week on Tuesday."""
vrecurr = utils.build_rrule_from_text("FREQ=WEEKLY;BYDAY=TU")
assert vrecurr["FREQ"] == ["WEEKLY"]
assert vrecurr["BYDAY"] == ["TU"]
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;BYDAY=TU"
assert len(vrecurr.keys()) == 2
def test_weekly_mo_wed(self):
"""Repeat every week on Monday, Wednesday."""
vrecurr = utils.build_rrule_from_text("FREQ=WEEKLY;BYDAY=MO,WE")
assert vrecurr["FREQ"] == ["WEEKLY"]
assert vrecurr["BYDAY"] == ["MO", "WE"]
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;BYDAY=MO,WE"
assert len(vrecurr.keys()) == 2
def test_every_weekday(self):
"""Repeat every weekday."""
vrecurr = utils.build_rrule_from_text("FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR")
assert vrecurr["FREQ"] == ["WEEKLY"]
assert vrecurr["BYDAY"] == ["MO", "TU", "WE", "TH", "FR"]
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR"
assert len(vrecurr.keys()) == 2
def test_every_2_weeks(self):
"""Repeat every 2 weeks."""
vrecurr = utils.build_rrule_from_text("INTERVAL=2;FREQ=WEEKLY")
assert vrecurr["FREQ"] == ["WEEKLY"]
assert vrecurr["INTERVAL"] == [2]
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;INTERVAL=2"
assert len(vrecurr.keys()) == 2
def test_every_month(self):
"""Repeat every month."""
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY")
assert vrecurr["FREQ"] == ["MONTHLY"]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY"
assert len(vrecurr.keys()) == 1
def test_every_6_months(self):
"""Repeat very 6 months."""
vrecurr = utils.build_rrule_from_text("INTERVAL=6;FREQ=MONTHLY")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["INTERVAL"] == [6]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;INTERVAL=6"
assert len(vrecurr.keys()) == 2
def test_every_year(self):
"""Repeat every year."""
vrecurr = utils.build_rrule_from_text("FREQ=YEARLY")
assert vrecurr["FREQ"] == ["YEARLY"]
vRecur(vrecurr).to_ical().decode() == "FREQ=YEARLY"
assert len(vrecurr.keys()) == 1
def test_every_month_on_the_4th(self):
"""Repeat every month on the 4th."""
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY;BYMONTHDAY=4")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYMONTHDAY"] == [4]
assert len(vrecurr.keys()) == 2
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY;BYMONTHDAY=+4")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYMONTHDAY"] == [4]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYMONTHDAY=4"
assert len(vrecurr.keys()) == 2
def test_every_month_on_the_4th_last(self):
"""Repeat every month on the 4th last."""
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY;BYMONTHDAY=-4")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYMONTHDAY"] == [-4]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYMONTHDAY=-4"
assert len(vrecurr.keys()) == 2
def test_ever_month_3rd_tu(self):
"""Repeat every month on the 3rd Tuesday."""
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY;BYDAY=+3TU")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYDAY"] == ["+3TU"]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYDAY=+3TU"
assert len(vrecurr.keys()) == 2
def test_ever_month_3rd_last_tu(self):
"""Repeat every month on the 3rd last Tuesday."""
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY;BYDAY=-3TU")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYDAY"] == ["-3TU"]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYDAY=-3TU"
assert len(vrecurr.keys()) == 2
def test_ever_month_last_mo(self):
"""Repeat every month on the last Monday."""
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY;BYDAY=-1MO")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYDAY"] == ["-1MO"]
assert len(vrecurr.keys()) == 2
def test_ever_month_second_last_fr(self):
"""Repeat every month on the 2nd last Friday."""
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY;BYDAY=-2FR")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYDAY"] == ["-2FR"]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYDAY=-2FR"
assert len(vrecurr.keys()) == 2
def test_every_week_until_jan_2007(self):
"""Repeat every week until January 1, 2007."""
utc = pytz.UTC
vrecurr = utils.build_rrule_from_text("FREQ=WEEKLY;UNTIL=20070101T000000Z")
assert vrecurr["FREQ"] == ["WEEKLY"]
assert vrecurr["UNTIL"] == [datetime.datetime(2007, 1, 1, 0, 0, tzinfo=utc)]
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;UNTIL=20070101T000000Z"
assert len(vrecurr.keys()) == 2
def test_every_week_20_times(self):
"""Repeat every week for 20 times."""
vrecurr = utils.build_rrule_from_text("FREQ=WEEKLY;COUNT=20")
assert vrecurr["FREQ"] == ["WEEKLY"]
assert vrecurr["COUNT"] == [20]
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;COUNT=20"
assert len(vrecurr.keys()) == 2
def test_every_month_last_working_day(self):
"""Repeat the last working day of each month."""
vrecurr = utils.build_rrule_from_text(
"FREQ=MONTHLY;BYDAY=MO,TU,WE,TH,FR;BYSETPOS=-1;"
)
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYDAY"] == ["MO", "TU", "WE", "TH", "FR"]
assert vrecurr["BYSETPOS"] == [-1]
vRecur(
vrecurr
).to_ical().decode() == "FREQ=MONTHLY;BYDAY=MO,TU,WE,TH,FR;BYSETPOS=-1"
assert len(vrecurr.keys()) == 3
def test_ever_month_last_day(self):
"""Repeat the last day of each month."""
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY;BYMONTHDAY=-1")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYMONTHDAY"] == [-1]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYMONTHDAY=-1"
assert len(vrecurr.keys()) == 2
def test_every_day_in_jan(self):
"""Repeat every day in January"""
vrecurr = utils.build_rrule_from_text(
"FREQ=YEARLY;BYMONTH=1;BYDAY=MO,TU,WE,TH,FR,SA,SU;"
)
assert vrecurr["FREQ"] == ["YEARLY"]
assert vrecurr["BYMONTH"] == [1]
assert vrecurr["BYDAY"] == ["MO", "TU", "WE", "TH", "FR", "SA", "SU"]
vRecur(
vrecurr
).to_ical().decode() == "FREQ=YEARLY;BYDAY=MO,TU,WE,TH,FR,SA,SU;BYMONTH=1"
assert len(vrecurr.keys()) == 3
def test_every_2nd_15th_of_month(self):
"""Repeat monthly on the 2nd and 15th of the month."""
vrecurr = utils.build_rrule_from_text("FREQ=MONTHLY;BYMONTHDAY=4,15")
assert vrecurr["FREQ"] == ["MONTHLY"]
assert vrecurr["BYMONTHDAY"] == [4, 15]
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYMONTHDAY=4,15"
assert len(vrecurr.keys()) == 2
def test_every_fr_13th(self):
"""Repeat every Friday the 13th."""
vrecurr = utils.build_rrule_from_text("FREQ=YEARLY;BYMONTHDAY=13;BYDAY=FR")
assert vrecurr["FREQ"] == ["YEARLY"]
assert vrecurr["BYMONTHDAY"] == [13]
assert vrecurr["BYDAY"] == ["FR"]
vRecur(vrecurr).to_ical().decode() == "FREQ=YEARLY;BYDAY=FR;BYMONTHDAY=13"
assert len(vrecurr.keys()) == 3
class FromDateutilRruleTests(TestCase):
"""Build an ical string from a dateutil rrule."""
def test_weekly_by_month_year_day(self):
rule = rrule(
WEEKLY,
bymonth=(1, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(
vrecurr
).to_ical().decode() == "FREQ=WEEKLY;BYYEARDAY=1,100,200,365;BYMONTH=1,7"
def test_weekly_by_month_nweekday(self):
rule = rrule(
WEEKLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(
vrecurr
).to_ical().decode() == "FREQ=WEEKLY;COUNT=3;BYDAY=TU,TH;BYMONTH=1,3"
def test_weekly_by_monthday(self):
rule = rrule(
WEEKLY,
count=3,
bymonthday=(1, 3),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;COUNT=3;BYMONTHDAY=1,3"
def test_weekly_by_weekday(self):
rule = rrule(
WEEKLY,
count=3,
byweekday=(TU, TH),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY;COUNT=3;BYDAY=TU,TH"
def test_daily_by_month_nweekday(self):
rule = rrule(
DAILY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(
vrecurr
).to_ical().decode() == "FREQ=DAILY;COUNT=3;BYDAY=TU,TH;BYMONTH=1,3"
def test_yearly_month_nweekday(self):
rule = rrule(
YEARLY,
count=3,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(
vrecurr
).to_ical().decode() == "FREQ=YEARLY;COUNT=3;BYDAY=+1TU,-1TH;BYMONTH=1,3"
def test_yearly_month_yearday(self):
rule = rrule(
YEARLY,
count=4,
bymonth=(4, 7),
byyearday=(1, 100, 200, 365),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
assert (
vRecur(vrecurr).to_ical().decode()
== "FREQ=YEARLY;COUNT=4;BYYEARDAY=1,100,200,365;BYMONTH=4,7"
)
def test_yearly_weekno_weekday(self):
rule = rrule(
YEARLY,
count=3,
byweekno=1,
byweekday=MO,
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(vrecurr).to_ical().decode() == "FREQ=YEARLY;COUNT=3;BYDAY=MO;BYWEEKNO=1"
def test_yearly_setpos(self):
rule = rrule(
YEARLY,
count=3,
bymonthday=15,
byhour=(6, 18),
bysetpos=(3, -3),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
assert (
vRecur(vrecurr).to_ical().decode()
== "FREQ=YEARLY;COUNT=3;BYHOUR=6,18;BYMONTHDAY=15;BYSETPOS=3,-3"
)
def test_monthly_month_monthday(self):
rule = rrule(
MONTHLY,
count=3,
bymonth=(1, 3),
bymonthday=(5, 7),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(
vrecurr
).to_ical().decode() == "FREQ=MONTHLY;COUNT=3;BYMONTHDAY=5,7;BYMONTH=1,3"
def test_monthly_nweekday(self):
rule = rrule(
MONTHLY,
count=3,
byweekday=(TU(1), TH(-1)),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;COUNT=3;BYDAY=+1TU,-1TH"
def test_monthly_month_nweekday(self):
rule = rrule(
MONTHLY,
bymonth=(1, 3),
byweekday=(TU(1), TH(-1)),
dtstart=datetime.datetime(1997, 9, 2, 9, 0),
)
vrecurr = utils.build_rrule_from_dateutil_rrule(rule)
vRecur(vrecurr).to_ical().decode() == "FREQ=MONTHLY;BYDAY=+1TU,-1TH;BYMONTH=1,3"
class FromDjangoRecurrenceRruleTests(TestCase):
"""Build an ical string from a django-recurrence rrule."""
def test_rule(self):
rule = recurrence.Rule(recurrence.WEEKLY)
vrecurr = utils.build_rrule_from_recurrences_rrule(rule)
vRecur(vrecurr).to_ical().decode() == "FREQ=WEEKLY"
def test_complex_rule_serialization(self):
rule = recurrence.Rule(
recurrence.WEEKLY,
interval=17,
wkst=1,
count=7,
byday=[recurrence.to_weekday("-1MO"), recurrence.to_weekday("TU")],
bymonth=[1, 3],
)
vrecurr = utils.build_rrule_from_recurrences_rrule(rule)
assert (
vRecur(vrecurr).to_ical().decode()
== "FREQ=WEEKLY;COUNT=7;INTERVAL=17;BYDAY=-1MO,TU;BYMONTH=1,3;WKST=TU"
)
def test_complex_rule_serialization_with_weekday_instance(self):
rule = recurrence.Rule(
recurrence.WEEKLY,
interval=17,
wkst=recurrence.to_weekday(1),
count=7,
byday=[recurrence.to_weekday("-1MO"), recurrence.to_weekday("TU")],
bymonth=[1, 3],
)
vrecurr = utils.build_rrule_from_recurrences_rrule(rule)
assert (
vRecur(vrecurr).to_ical().decode()
== "FREQ=WEEKLY;COUNT=7;INTERVAL=17;BYDAY=-1MO,TU;BYMONTH=1,3;WKST=TU"
)
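# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original test suite): the helper exercised
# above can also be called directly.  Only membership in the serialised
# string is checked because icalendar decides the ordering of the parts.
# ---------------------------------------------------------------------------
def _build_rrule_demo():
    """Build a recurrence dict and serialise it with icalendar's vRecur."""
    rule = utils.build_rrule(freq="WEEKLY", byday=["MO", "WE"], count=10)
    serialised = vRecur(rule).to_ical().decode()
    assert "FREQ=WEEKLY" in serialised
    assert "COUNT=10" in serialised
    assert "BYDAY=MO,WE" in serialised
    return serialised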
| Pinkerton/django-ical | django_ical/tests/test_recurrence.py | Python | mit | 25,045 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Author: Lokal_Profil
# License: MIT
#
"""A class for encoding the contents of a qualifier."""
from __future__ import unicode_literals
from builtins import object
from wikidatastuff.helpers import std_p
class Qualifier(object):
"""
A class for encoding the contents of a qualifier.
Essentially pywikibot.Claim without having to provide an instantiated
repo.
@todo: redo as SimpleClaim (if so reuse in Reference) or
retire in favor of pywikibot.Claim
"""
def __init__(self, prop, itis):
"""
Make a correctly formatted qualifier object for claims.
@param prop: the property (with or without "P")
@type prop: basestring
@param itis: a valid claim target e.g. pywikibot.ItemPage
@type itis: object
"""
self.prop = std_p(prop)
self.itis = itis
def __repr__(self):
"""Return a more complete string representation."""
return 'WD.Qualifier({0}, {1})'.format(self.prop, self.itis)
def __eq__(self, other):
"""Implement equality comparison."""
if isinstance(other, self.__class__):
return self.__dict__ == other.__dict__
return NotImplemented
def __ne__(self, other):
"""Implement non-equality comparison."""
return not self.__eq__(other)
def __hash__(self):
"""Implement hash to allow for e.g. sorting and sets."""
return hash((self.prop, self.itis))
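# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): Qualifier only
# stores the standardised property and its target, so a plain string target
# is enough for illustration.  The exact return value of std_p() is an
# assumption; it is only expected to normalise the "P" prefix.
# ---------------------------------------------------------------------------
def _qualifier_demo():
    """Show that identically built qualifiers compare and hash as equal."""
    first = Qualifier('P123', 'some target')   # hypothetical property/target
    second = Qualifier('P123', 'some target')
    assert first == second                      # compares the __dict__ contents
    assert len({first, second}) == 1            # hashable, so usable in sets
    return repr(first)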
| lokal-profil/wikidata-stuff | wikidatastuff/qualifier.py | Python | mit | 1,509 |
from trace_nums import setup_data_workspace, DataInt
class Calculation(object):
def __init__(self):
self.diffs = {}
def set_data(self, a, b, c):
self.a = a
self.b = b
self.c = c
def operation(self, data):
        # original + data => changed; returns the new changed calculation object
        # override this to implement functionality
        # Calculation() takes no constructor arguments, so create the new
        # state and fill it in through set_data()
        changed = Calculation()
        changed.set_data(self.a + data.real, self.b + data.real, self.c + data.real)
        return changed
def update(self, data):
# call this method with data to update in a reversible way
new_state = self.operation(data)
diff = Diff(self, new_state)
data_id = data.unique_id
self.store_diff(data_id, diff)
return diff.apply(self)
def reverse(self, unique_id):
new_state = self
for uid in unique_id:
if uid in self.diffs:
for diff in self.diffs[uid]:
new_state = diff.reverse(new_state)
return new_state
def store_diff(self, data_id_set, diff):
for id_ in data_id_set:
if id_ not in self.diffs:
self.diffs[id_] = []
self.diffs[id_].append(diff)
def data_to_value(self):
# calculate value from the data. This should still work even if data changes
return self.a
class Diff(object):
def __init__(self, orig, chng, debug=False):
for attr in set(dir(orig)).union(dir(chng)):
if hasattr(getattr(orig,attr),'__call__'):
pass
elif getattr(orig,attr) is None:
pass
# elif len(attr) > 2 and attr[0:2] == '__'
elif attr in dir(orig) and attr in dir(chng):
# this is a value update
try:
diff = getattr(chng,attr) - getattr(orig,attr)
if debug:
print str(attr)+' diff: '+str(diff)
setattr(self,attr,diff)
except TypeError:
if debug:
print str(attr)+" can't do simple diff"
elif attr in dir(orig):
# delete this attr
# print 'attr '+str(attr)+' in old only'
pass
else: # attr in dir(chng)
# add this value
# print 'attr '+str(attr)+' in new only'
pass
def apply(self, orig, debug=False):
# make stored changes to an original
for attr in set(dir(self)).union(dir(orig)):
if hasattr(self, attr) and hasattr(getattr(self,attr),'__call__'):
pass
elif hasattr(orig, attr) and hasattr(getattr(orig,attr),'__call__'):
pass
elif hasattr(self, attr) and getattr(self,attr) is None:
pass
elif hasattr(orig, attr) and getattr(orig,attr) is None:
pass
elif hasattr(orig, attr) and hasattr(self, attr):
# this is a value update
try:
# it was: diff = getattr(chng,attr) - getattr(orig,attr)
# now it is: getattr(chng,attr) = getattr(orig,attr) + diff
change = getattr(orig,attr) + getattr(self,attr)
if debug:
print str(attr)+' chng: '+str(change)
setattr(orig,attr,change)
except TypeError:
if debug:
print str(attr)+" can't do simple diff"
return orig
def reverse(self, chng, debug=False):
# remove stored changes from a changed object
for attr in set(dir(self)).union(dir(chng)):
if hasattr(self, attr) and hasattr(getattr(self,attr),'__call__'):
pass
elif hasattr(chng, attr) and hasattr(getattr(chng,attr),'__call__'):
pass
elif hasattr(self, attr) and getattr(self,attr) is None:
pass
elif hasattr(chng, attr) and getattr(chng,attr) is None:
pass
elif hasattr(chng, attr) and hasattr(self, attr):
# this is a value update
try:
                    # it was: diff = getattr(chng,attr) - getattr(orig,attr)
# now it is: getattr(orig,attr) = getattr(chng,attr) - diff
change = getattr(chng,attr) - getattr(self,attr)
if debug:
print str(attr)+' chng: '+str(change)
setattr(chng,attr,change)
except TypeError:
if debug:
print str(attr)+" can't do simple diff"
return chng | buckbaskin/data-1 | trace_calculation.py | Python | mit | 4,737 |
#!/usr/bin/env python
# coding: utf-8
date = raw_input("Please Input a Year(YYYY/MM/DD): ")
date = [int(x) for x in date.split("/")]
month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# day of year = days in the months before this one + the day of the month
days = sum(month_days[:date[1]-1]) + date[2]
# add the leap day when the date falls after February in a leap year
if date[1] > 2 and (date[0] % 400 == 0 or date[0] % 4 == 0 and date[0] % 100 != 0):
    days += 1
print days
| 51reboot/actual_13_homework | 03/peter/homework3.py | Python | mit | 353 |
from ..base_settings import BaseSettings
class TestBaseSettings(object):
def setup(self):
self.settings = BaseSettings()
def test_leaf(self):
"""Test setting a leaf node."""
self.settings.set('test', 'thing')
assert(self.settings.get('test') == 'thing')
def test_doesnt_exist(self):
"""Test getting a key that doesn't exists."""
assert(self.settings.get('notathing') is None)
def test_doesnt_exist_with_default(self):
"""Test getting a key that doesn't exists with default."""
assert(self.settings.get('notathing', default=1) == 1)
def test_initial_flag(self):
"""
Test initial flag only sets values if they do not
already exist.
"""
self.settings.set('initial_test', '1', initial=True)
self.settings.set('initial_test', '2', initial=True)
assert(self.settings.get('initial_test') == '1')
def test_nested_keys(self):
"""Test setting a nested key."""
self.settings.set('nested.thing', 'yeah')
assert(self.settings.get('nested.thing') == 'yeah')
assert(self.settings.get('nested') == {'thing': 'yeah'})
def test_nested_keys_dont_break_siblings(self):
"""
Test setting a nested key does not unset any of
the siblings.
"""
self.settings.set('nested.thing', 'yeah')
self.settings.set('nested.otherthing', 'what')
assert(self.settings.get('nested.thing') == 'yeah')
assert(self.settings.get('nested.otherthing') == 'what')
assert(
self.settings.get('nested') ==
{'thing': 'yeah', 'otherthing': 'what'}
)
def test_setting_dict(self):
"""Test setting a tree of keys using a dictionary."""
self.settings.set('dict_test', {'a': 'A', 'b': {'deep': 'works too'}})
assert(self.settings.get('dict_test.b.deep') == 'works too')
def test_set_multi(self):
"""Test multiple keys at once."""
self.settings.setMulti({'a': 'A', 'b': 'B'})
assert(self.settings.get('a') == 'A')
assert(self.settings.get('b') == 'B')
def test_get_multi(self):
"""Test getting multiple keys at once."""
self.settings.setMulti({'a': 'A', 'b': 'B'})
multi = self.settings.getMulti(['a', 'b'])
assert(multi['a'] == 'A')
assert(multi['b'] == 'B')
def test_get_multi_doesnt_exist(self):
"""Test getting multiple keys, some of which don't exist."""
self.settings.setMulti({'a': 'A', 'b': 'B'})
multi = self.settings.getMulti(['a', 'b', 'c'])
assert(multi['c'] is None)
def test_get_multi_doesnt_exist_default(self):
"""Test getting multiple keys, some of which don't exist."""
self.settings.setMulti({'a': 'A', 'b': 'B'})
multi = self.settings.getMulti(['a', 'b', 'c'], default='default')
assert(multi['c'] == 'default')
def test_lists(self):
"""Test setting lists."""
test_list = ['a', 'b', 'c']
self.settings.set('list', test_list)
test_list2 = self.settings.get('list')
assert(test_list == test_list2)
| collingreen/yaib | modules/settings/test/test_base_settings.py | Python | mit | 3,181 |
"""
Evaluators to produce dummy data from DummyData models.
"""
import re
from collections import OrderedDict
from . import functions
from .exceptions import DDEvaluatorException
TAG_PATTERN = re.compile(r"""
\{% \s* # open tag
(?P<function> \b \w+ \b) # function name
(?P<args> # function arguments
(?: \s* # separated by white-space
[^\s]+ )*? )? # non-white-space, allowed characters
\s* %\} # close tag
""", re.VERBOSE)
ARG_PATTERN = re.compile(r"""
(?<!\S) # do not allow non-white-space
(?<![:/\d]) # do not match date or time
-? # negative sign
(?= [1-9]|0(?!\d) ) # digits or zero before decimal
\d+ # pre-decimal digits
(?: \. # decimal
\d+ )? # post-decimal digits
(?:[eE] [+-]? \d+)? # scientific notation
(?![:/\d]) # do not match date or time
(?!\S) # do not allow non-white-space
|
(?<!\S) # do not allow non-white-space
" # begin quote
(?:[^"\\] # non-control characters
| \\ ["\\bfnrt/] # escaped characters
| \\ u [0-9a-f]{4} # Unicode characters
| \\\\ \\\" )*? # double-escaped quotation mark
" # end quote
(?!\S) # do not allow non-white-space
|
(?<!\S) # do not allow non-white-space
(?:[^"\\\s])+? # unenclosed string without white-space
(?!\S) # do not allow non-white-space
""", re.VERBOSE)
def evaluate_json(json, allow_callable=False, iteration=None):
"""
Traverse parsed JSON data and evaluate tags.
"""
def call_function(match):
"""
Call matched function.
"""
args = ARG_PATTERN.findall(match.group('args'))
args = [x[1:-1] if x[0] == '"' and x[-1] == '"' else x for x in args]
try:
value = getattr(
functions,
match.group('function')
)(*args, iteration=iteration)
except AttributeError:
raise DDEvaluatorException(
'attempted call to non-existent function {0}'.format(
match.group('function')
)
)
        if hasattr(value, '__call__') and not allow_callable:
raise DDEvaluatorException(
'function {0} called from illegal location'.format(
match.group('function')
)
)
if match.start() != 0 or match.end() != len(match.string):
value = str(value)
return value
def evaluate_object(json):
"""
Evaluate tags in parsed JSON object.
"""
evaluated = OrderedDict()
for k in json:
evaluated[
evaluate_json(k, iteration=iteration)
] = evaluate_json(json[k], iteration=iteration)
return evaluated
def evaluate_array(json):
"""
Evaluate tags in parsed JSON array.
"""
evaluated = []
index = 0
while index < len(json):
item = evaluate_json(
json[index],
allow_callable=True,
iteration=iteration
)
if hasattr(item, '__call__'):
if index + 1 >= len(json):
raise DDEvaluatorException(
'invalid use of {0} function at end of array'.format(
item.parent_function
)
)
if 'repeat' == item.parent_function:
evaluated.extend(item(json[index + 1], evaluate_json))
index += 1
elif 'random' == item.parent_function:
return item(json[index + 1:], evaluate_json)
else:
evaluated.append(item)
index += 1
return evaluated
if isinstance(json, dict):
return evaluate_object(json)
elif isinstance(json, list):
return evaluate_array(json)
elif isinstance(json, str):
try:
return re.sub(TAG_PATTERN, call_function, json)
except TypeError:
# function returned a type other than string
return call_function(TAG_PATTERN.search(json))
return json
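# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): evaluate_json rewrites
# only strings that contain "{% ... %}" tags, so a structure without tags
# comes back unchanged.  The available tag functions live in the separate
# ``functions`` module and are deliberately not assumed here.
# ---------------------------------------------------------------------------
def _evaluate_json_demo():
    """Evaluate a tag-free structure and get an equivalent copy back."""
    document = OrderedDict([("name", "example"), ("values", [1, 2, 3])])
    evaluated = evaluate_json(document)
    assert evaluated == document
    return evaluated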
| blcook223/DummyData | evaluators.py | Python | mit | 4,844 |
from django.db import models
from clube.models import Clube
from .constants import TIPO_CAMPEONATO
class Campeonato(models.Model):
descricao = models.CharField(max_length=200)
tipo_campeonato = models.CharField(max_length=1, choices=TIPO_CAMPEONATO, default="Q")
data_inicio = models.DateTimeField(blank=True, null=True)
data_fim = models.DateTimeField(blank=True, null=True)
data_cadastro = models.DateTimeField(auto_now_add=True)
ativo = models.BooleanField(default=False)
patrocinador = models.CharField(max_length=200, blank=True)
campeonato_principal = models.BooleanField(default=False)
def __str__(self):
return self.descricao
class Meta:
db_table = 'campeonato'
class Grupo(models.Model):
campeonato = models.ForeignKey(Campeonato, on_delete=models.CASCADE, related_name='grupo_campeonato')
descricao = models.CharField(max_length=100)
data_cadastro = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.descricao
class Meta:
db_table = 'grupo'
class GrupoClube(models.Model):
grupo = models.ForeignKey(Grupo, on_delete=models.CASCADE, related_name='grupoclube_grupo')
clube = models.ForeignKey(Clube, on_delete=models.CASCADE, related_name='grupoclube_clube')
data_cadastro = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{} | {} no {}'.format(self.grupo.campeonato.descricao, self.clube.nome, self.grupo.descricao)
class Meta:
db_table = 'grupo_clube' | leonardocintra/campeonato | core/models.py | Python | mit | 1,544 |
#
# File:
# TRANS_write_netCDF.py
#
# Synopsis:
# Illustrates how to write a netCDF file
#
# Categories:
# I/O
#
# Author:
# Karin Meier-Fleischer, based on NCL example
#
# Date of initial publication:
# September 2018
#
# Description:
# This example shows how to write a netCDF file.
#
# Effects illustrated:
# o Reading netCDF file
# o Converting data from Kelvin to degC
# o Writing data to new netCDF file
# o Detailed version
#
# Output:
# netCDF data file.
#
# Notes: The data for this example can be downloaded from
# http://www.ncl.ucar.edu/Document/Manuals/NCL_User_Guide/Data/
#
"""
Transition Guide Python Example: TRANS_write_netCDF.py
- Reading netCDF file
- Converting data from Kelvin to degC
- Writing data to new netCDF file
- Detailed version
2018-08-28 kmf
"""
from __future__ import print_function
import os
import numpy as np
import Ngl,Nio
#-- data file name
fname = "../read_data/rectilinear_grid_3D.nc"
#-- open file
f = Nio.open_file(fname, "r")
#-- read temperature, time, latitude and longitude arrays
var = f.variables["t"]
time = f.variables["time"]
lat = f.variables["lat"]
lon = f.variables["lon"]
#-- convert data from units Kelvin to degC
varC = var[:,0,:,:] #-- copy variable at level=0; retain metadata
varC = varC-273.15 #-- convert to degC
#-- open new netCDF file
os.system("rm -rf t_degC_py.nc") #-- delete file if it exists
outf = Nio.open_file("t_degC_py.nc","c") #-- open new netCDF file
#-- create dimensions
outf.create_dimension('time',None)
outf.create_dimension('lat',f.dimensions['lat'])
outf.create_dimension('lon',f.dimensions['lon'])
#-- create dimension variables
outf.create_variable('time',time.typecode(),time.dimensions)
varAtts = list(time.__dict__.keys())
varAtts.sort()
for att in varAtts:
value = getattr(time,att) #-- get attributes
setattr(outf.variables['time'],att,value) #-- copy attributes
outf.create_variable('lat',lat.typecode(),lat.dimensions)
varAtts = list(lat.__dict__.keys())
varAtts.sort()
for att in varAtts:
value = getattr(lat,att) #-- get attributes
setattr(outf.variables['lat'],att,value) #-- copy attributes
outf.create_variable('lon',lon.typecode(),lon.dimensions)
varAtts = list(lon.__dict__.keys())
varAtts.sort()
for att in varAtts:
value = getattr(lon,att) #-- get attributes
setattr(outf.variables['lon'],att,value) #-- copy attributes
#-- create variable
outf.create_variable('t','f',('time','lat','lon'))
setattr(outf.variables['t'], 'standard_name', 'temperature')
setattr(outf.variables['t'], 'units', 'degC')
#-- assign values --> write data to file
outf.variables['time'].assign_value(time)
outf.variables['lat'].assign_value(lat)
outf.variables['lon'].assign_value(lon)
outf.variables['t'].assign_value(varC)
#-- close output stream (not necessary)
outf.close()
| KMFleischer/PyEarthScience | Transition_examples_NCL_to_PyNGL/write_data/TRANS_write_netCDF.py | Python | mit | 2,979 |
from django.db import models
from citations.models import Citation
class Chemical(models.Model):
id = models.BigAutoField(primary_key=True)
pmid = models.ForeignKey(Citation, models.DO_NOTHING, db_column='pmid', blank=True, null=True)
idx = models.SmallIntegerField(blank=True, null=True)
uid = models.CharField(max_length=256, blank=True, null=True)
name = models.CharField(max_length=256)
class Meta:
managed = False
db_table = 'chemicals'
unique_together = (('pmid', 'idx'),) | Radmor/med_search | src/chemicals/models.py | Python | mit | 528 |
from setuptools import setup
setup(
name='jidoka',
version='0.1.3',
author = "Johannes Daniel Nuemm",
author_email = "[email protected]",
description = ("Simple build tool for sass, coffescript and many more..."),
license = "MIT",
url = "https://github.com/monocult/jidoka",
py_modules=['jidoka'],
install_requires=[
'CoffeeScript',
'click',
'jsmin',
'jsonschema',
'libsass',
'watchdog',
'csscompressor',
'htmlmin',
'Markdown'
],
entry_points='''
[console_scripts]
jidoka=jidoka:cli
''',
)
| monocult/jidoka | setup.py | Python | mit | 728 |
"""
Unspecific helper classes
@author: Martin Kuemmel, Jonas Haase
@organization: Space Telescope - European Coordinating Facility (ST-ECF)
@license: Gnu Public Licence
@contact: [email protected]
@since: 2005/09/13
$LastChangedBy: mkuemmel $
$LastChangedDate: 2008-07-03 10:27:47 +0200 (Thu, 03 Jul 2008) $
$HeadURL: http://astropy.scipy.org/svn/astrolib/trunk/asciidata/Lib/asciiutils.py $
"""
__version__ = "Version 1.0 $LastChangedRevision: 503 $"
import string, sys, os, types
class NicePrinter(object):
"""
Class to print to I/O-streams
The class is a wrapper around an I/O stream. It offers
methods to format strings and print to a given I/O stream.
Linend, delimiter and linestarts are attributes of the
class and allow a nice formatting of the print.
"""
def __init__(self, stream=None, delimiter=None, linestart=None, linend=None):
"""
Initializes the class
A simple initializer. Most of the class attributes
are given as parameters
@param stream: I/O stream to write to
@type stream: I/O stream
@param delimiter: optional delimiter
@type delimiter: string
@param linend: optional linenend
@type linend: string
"""
#set the stream
self._stream = stream
# set a start value
self._start = ''
# set the delimiter
if delimiter != None:
# self._delimiter = ' '+delimiter+' '
self._delimiter = delimiter
else:
self._delimiter = ' '
# set the linend
if linend != None:
self._linend = linend
else:
self._linend = '\n'
# set the linestart
if linestart != None:
self._start = linestart
else:
self._linestart = ''
def print_string(self, hstring):
"""
Prints a string to the stream
This general method prints any string
to stream.
@param hstring: the header to print
@type hstring: string
"""
# that's easy up to now
self._stream.write(hstring)
def print_list(self, strlist):
"""
Prints a list to the stream.
The method combines a string list from the input
to a string which represents a line. Delimiter,
linend and linestart are taken into account.
The lines is directly sent to the I/O stream.
@param strlist: list
@type strlist: [string]
"""
self._stream.write(self._start
+ self._delimiter.join(strlist) + self._linend)
class Separator(object):
"""
Class to separate an ascii line into items
Instance of this class split an ascii line into
the different items. The methods on how to split
a line work with a delimiter, or according to
whitespace or according to a fixed format given
in a file (not yet implemented.
"""
def __init__(self, delimiter=None, file=None):
"""
The class constructor
"""
self._delimiter = delimiter
self._file = file
def separate(self, line):
"""
Separates a line into its items
@param line: the ascii line to be separated
@type line: string
@return: the list of items
@rtype: [string]
"""
# delete the trailing newline
if line[-1] == '\n':
line = line[:len(line)-1]
# separate either along a delimiter
if self._delimiter != None:
items = self.separate_delim(line)
# or along whitespaces
else:
items = self.separate_white(line)
return items
def separate_white(self, line):
"""
Separates a line along the whitespace
The method transforms a line into the list
of its space-separated items. The first space
is the delimiter, any further spaces are interpreted
to belong to the item and are preserved.
This is advantageous to keep the item length for
string columns with leading spaces.
@param line: the ascii line to be separated
@type line: string
@return: the list of items
@rtype: [string]
"""
# create the item list
witems = []
# split it conventionally
items = line.strip().split()
# go again over the line and identify
# the exact starting position of each
# item, preserving the leading spaces
start=0
for item in items:
pos = line.find(item,start)
if pos > -1:
witems.append(line[start:pos+len(item)])
start = pos+len(item)+1
# return the list
return witems
def separate_delim(self, line):
"""
Separates a line along a delimiter
The method transforms a line into the list
of its delimiter separated items.
@param line: the ascii line to be separated
@type line: string
@return: the list of items
@rtype: [string]
"""
# split the line
        items = line.split(self._delimiter)
# return the list
return items
class AsciiLenGetIter(object):
"""
A general purpose iteratorfor any class with len() and get[]
"""
def __init__(self, len_get_object):
"""
The class constructor
"""
# store the associated AsciiData object
self._len_get_object = len_get_object
# set the index of the actual column
self._index = -1
# set the maximum column index
self._max_index = len(self._len_get_object) - 1
    def __iter__(self):
"""
Mandatory method for an iterator class
"""
return self
def __next__(self):
"""
Mandatory method for an iterator class
The method gives the next object in the iterator sequence.
In case that a next object does no longer exist,
a corresponding exception is thrown to indicate
the end of the iterator sequence.
"""
# check whether the next iteration does exist
if self._index >= self._max_index:
# no next iteration, raise exception
raise StopIteration
# enhance the actual index
self._index += 1
# return the next iteration
return self._len_get_object[self._index]
class AsciiColumnIter(object):
"""
An iterator class for the AsciiData class
"""
def __init__(self, ascii_column):
"""
The class constructor
"""
# store the associated AsciiColumn object
self.ascii_column = ascii_column
# set the index of the actual row
self._row_index = -1
# set the maximum column index
self._max_index = ascii_column.get_nrows() - 1
    def __iter__(self):
"""
Mandatory method for an iterator class
"""
return self
def __next__(self):
"""
Mandatory method for an iterator class
The method gives the next object in the iterator sequence.
In case that a next object does no longer exist,
a corresponding exception is thrown to indicate
the end of the iterator sequence.
"""
# check whether the next iteration does exist
if self._row_index >= self._max_index:
# no next iteration, raise exception
raise StopIteration
# enhance the actual column index
self._row_index += 1
# return the next iteration
return self.ascii_column[self._row_index]
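# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): NicePrinter writes
# delimited rows to any stream and Separator splits such a line back into
# items, so both can be shown with plain in-memory objects.
# ---------------------------------------------------------------------------
def _asciiutils_demo():
    """Round-trip one row through NicePrinter and Separator."""
    import io
    stream = io.StringIO()
    printer = NicePrinter(stream=stream, delimiter=',', linend='\n')
    printer.print_list(['1', 'alpha', '3.5'])
    line = stream.getvalue()            # '1,alpha,3.5\n'
    separator = Separator(delimiter=',')
    items = separator.separate(line)    # ['1', 'alpha', '3.5']
    return line, items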
| davidharvey1986/pyRRG | lib/asciidata/asciiutils.py | Python | mit | 7,799 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from rakuten_ws.baseapi import ApiService, ApiEndpoint, BaseWebService, ApiRequest, ApiMethod
class SimpleAPI(ApiService):
api_url = "https://testapi"
api_version = "20140222"
format_version = 2
item = ApiEndpoint(ApiMethod('search'), ApiMethod('ranking'))
product = ApiEndpoint(ApiMethod('get'), api_endpoint="Product")
order = ApiEndpoint(ApiMethod('get'), name='orders')
class SimpleWebService(BaseWebService):
test_api = SimpleAPI()
def test_class_api_description():
assert SimpleWebService.test_api == SimpleAPI
assert SimpleAPI.item == ApiEndpoint
ws = SimpleWebService(application_id="4K95553C260362")
assert ws.test_api.name == "test_api"
assert ws.test_api.api_version == "20140222"
assert ws.test_api.api_url == "https://testapi"
assert isinstance(ws.test_api.item.search, ApiRequest)
assert ws.test_api.item.search.build_url(item_id=23) == 'https://testapi/TestApiItem/Search/20140222?applicationId=4K95553C260362&formatVersion=2&itemId=23' # noqa
assert ws.test_api.product.get.build_url(product_id=23) == 'https://testapi/Product/Get/20140222?applicationId=4K95553C260362&formatVersion=2&productId=23' # noqa
assert ws.test_api.order.name == "orders"
def test_multiple_credentials():
ws = SimpleWebService(application_id="4K95553C260362")
assert ws.test_api.item.search.build_url(item_id=23) == 'https://testapi/TestApiItem/Search/20140222?applicationId=4K95553C260362&formatVersion=2&itemId=23' # noqa
ws = SimpleWebService(application_id="TOTOOTOTOTO")
assert ws.test_api.item.search.build_url(item_id=23) == 'https://testapi/TestApiItem/Search/20140222?applicationId=TOTOOTOTOTO&formatVersion=2&itemId=23' # noqa
def test_aliases_methods():
class AnotherAPI(ApiService):
api_version = "20131024"
endpoint = ApiEndpoint(ApiMethod('simple_search', 'simple_hotel_search'),
ApiMethod('detail_search', 'hotel_detail_search'),
api_endpoint="Custom")
class SimpleWebService(BaseWebService):
api = AnotherAPI()
ws = SimpleWebService(application_id="4K9")
assert isinstance(ws.api.endpoint.simple_search, ApiRequest)
assert isinstance(ws.api.endpoint.detail_search, ApiRequest)
assert ws.api.endpoint.simple_search.build_url(item_id=23) == 'https://app.rakuten.co.jp/services/api/Custom/SimpleHotelSearch/20131024?applicationId=4K9&formatVersion=2&itemId=23' # noqa
assert ws.api.endpoint.detail_search.build_url(item_id=23) == 'https://app.rakuten.co.jp/services/api/Custom/HotelDetailSearch/20131024?applicationId=4K9&formatVersion=2&itemId=23' # noqa
| alexandriagroup/rakuten-ws | tests/test_baseapi.py | Python | mit | 2,747 |
"""Support for lights through the SmartThings cloud API."""
from __future__ import annotations
import asyncio
from collections.abc import Sequence
from pysmartthings import Capability
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
import homeassistant.util.color as color_util
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Add lights for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
async_add_entities(
[
SmartThingsLight(device)
for device in broker.devices.values()
if broker.any_assigned(device.device_id, "light")
],
True,
)
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
"""Return all capabilities supported if minimum required are present."""
supported = [
Capability.switch,
Capability.switch_level,
Capability.color_control,
Capability.color_temperature,
]
# Must be able to be turned on/off.
if Capability.switch not in capabilities:
return None
# Must have one of these
light_capabilities = [
Capability.color_control,
Capability.color_temperature,
Capability.switch_level,
]
if any(capability in capabilities for capability in light_capabilities):
return supported
return None
def convert_scale(value, value_scale, target_scale, round_digits=4):
"""Convert a value to a different scale."""
return round(value * target_scale / value_scale, round_digits)
class SmartThingsLight(SmartThingsEntity, LightEntity):
"""Define a SmartThings Light."""
def __init__(self, device):
"""Initialize a SmartThingsLight."""
super().__init__(device)
self._brightness = None
self._color_temp = None
self._hs_color = None
self._supported_features = self._determine_features()
def _determine_features(self):
"""Get features supported by the device."""
features = 0
# Brightness and transition
if Capability.switch_level in self._device.capabilities:
features |= SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
# Color Temperature
if Capability.color_temperature in self._device.capabilities:
features |= SUPPORT_COLOR_TEMP
# Color
if Capability.color_control in self._device.capabilities:
features |= SUPPORT_COLOR
return features
async def async_turn_on(self, **kwargs) -> None:
"""Turn the light on."""
tasks = []
# Color temperature
if self._supported_features & SUPPORT_COLOR_TEMP and ATTR_COLOR_TEMP in kwargs:
tasks.append(self.async_set_color_temp(kwargs[ATTR_COLOR_TEMP]))
# Color
if self._supported_features & SUPPORT_COLOR and ATTR_HS_COLOR in kwargs:
tasks.append(self.async_set_color(kwargs[ATTR_HS_COLOR]))
if tasks:
# Set temp/color first
await asyncio.gather(*tasks)
# Switch/brightness/transition
if self._supported_features & SUPPORT_BRIGHTNESS and ATTR_BRIGHTNESS in kwargs:
await self.async_set_level(
kwargs[ATTR_BRIGHTNESS], kwargs.get(ATTR_TRANSITION, 0)
)
else:
await self._device.switch_on(set_status=True)
# State is set optimistically in the commands above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state(True)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the light off."""
# Switch/transition
if self._supported_features & SUPPORT_TRANSITION and ATTR_TRANSITION in kwargs:
await self.async_set_level(0, int(kwargs[ATTR_TRANSITION]))
else:
await self._device.switch_off(set_status=True)
# State is set optimistically in the commands above, therefore update
# the entity state ahead of receiving the confirming push updates
self.async_schedule_update_ha_state(True)
async def async_update(self):
"""Update entity attributes when the device status has changed."""
# Brightness and transition
if self._supported_features & SUPPORT_BRIGHTNESS:
self._brightness = int(
convert_scale(self._device.status.level, 100, 255, 0)
)
# Color Temperature
if self._supported_features & SUPPORT_COLOR_TEMP:
self._color_temp = color_util.color_temperature_kelvin_to_mired(
self._device.status.color_temperature
)
# Color
if self._supported_features & SUPPORT_COLOR:
self._hs_color = (
convert_scale(self._device.status.hue, 100, 360),
self._device.status.saturation,
)
async def async_set_color(self, hs_color):
"""Set the color of the device."""
hue = convert_scale(float(hs_color[0]), 360, 100)
hue = max(min(hue, 100.0), 0.0)
saturation = max(min(float(hs_color[1]), 100.0), 0.0)
await self._device.set_color(hue, saturation, set_status=True)
async def async_set_color_temp(self, value: float):
"""Set the color temperature of the device."""
kelvin = color_util.color_temperature_mired_to_kelvin(value)
kelvin = max(min(kelvin, 30000), 1)
await self._device.set_color_temperature(kelvin, set_status=True)
async def async_set_level(self, brightness: int, transition: int):
"""Set the brightness of the light over transition."""
level = int(convert_scale(brightness, 255, 100, 0))
# Due to rounding, set level to 1 (one) so we don't inadvertently
# turn off the light when a low brightness is set.
level = 1 if level == 0 and brightness > 0 else level
level = max(min(level, 100), 0)
duration = int(transition)
await self._device.set_level(level, duration, set_status=True)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self._color_temp
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return self._hs_color
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._device.status.switch
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
# SmartThings does not expose this attribute, instead it's
# implemented within each device-type handler. This value is the
# lowest kelvin found supported across 20+ handlers.
return 500 # 2000K
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
# SmartThings does not expose this attribute, instead it's
# implemented within each device-type handler. This value is the
# highest kelvin found supported across 20+ handlers.
return 111 # 9000K
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
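# ---------------------------------------------------------------------------
# Hedged sketch (not part of the integration): convert_scale is a plain
# proportional rescaling helper.  The values below use the same scales as
# the entity (SmartThings level 0-100, Home Assistant brightness 0-255).
# ---------------------------------------------------------------------------
def _convert_scale_demo():
    """Map a SmartThings level to HA brightness and back."""
    assert convert_scale(100, 100, 255, 0) == 255.0
    assert convert_scale(255, 255, 100) == 100.0
    # 50% maps to 127.5, which rounding turns into 127 or 128
    assert int(convert_scale(50, 100, 255, 0)) in (127, 128)
    return True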
| rohitranjan1991/home-assistant | homeassistant/components/smartthings/light.py | Python | mit | 7,919 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3
# create a connection to an in-memory (RAM) database
con = sqlite3.connect(':memory:')
# allow access to columns both by index and by name
con.row_factory = sqlite3.Row
# create a cursor object
cur = con.cursor()
# create the tables
cur.executescript("""
DROP TABLE IF EXISTS klasa;
CREATE TABLE IF NOT EXISTS klasa (
id INTEGER PRIMARY KEY ASC,
nazwa varchar(250) NOT NULL,
profil varchar(250) DEFAULT ''
);
DROP TABLE IF EXISTS uczen;
CREATE TABLE IF NOT EXISTS uczen (
id INTEGER PRIMARY KEY ASC,
imie varchar(250) NOT NULL,
nazwisko varchar(250) NOT NULL,
klasa_id INTEGER NOT NULL,
FOREIGN KEY(klasa_id) REFERENCES klasa(id)
)""")
# insert the class records
cur.execute('INSERT INTO klasa VALUES(NULL, ?, ?);', ('1A', 'matematyczny'))
cur.execute('INSERT INTO klasa VALUES(NULL, ?, ?);', ('1B', 'humanistyczny'))
# run an SQL query that fetches the id of class "1A" from the "klasa" table
cur.execute('SELECT id FROM klasa WHERE nazwa = ?', ('1A',))
klasa_id = cur.fetchone()[0]
# insert the student records
cur.execute('INSERT INTO uczen VALUES(?,?,?,?)',
(None, 'Tomasz', 'Nowak', klasa_id))
cur.execute('INSERT INTO uczen VALUES(?,?,?,?)',
(None, 'Adam', 'Kowalski', klasa_id))
# commit the changes to the database
con.commit()
def czytajdane():
"""Funkcja pobiera i wyświetla dane z bazy"""
cur.execute(
"""
SELECT uczen.id,imie,nazwisko,nazwa FROM uczen,klasa
WHERE uczen.klasa_id=klasa.id
""")
uczniowie = cur.fetchall()
for uczen in uczniowie:
print uczen['id'], uczen['imie'], uczen['nazwisko'], uczen['nazwa']
print ""
czytajdane()
# move a student to a different class
cur.execute('SELECT id FROM uczen WHERE nazwisko="Nowak"')
uczen_id = cur.fetchone()[0]
cur.execute('SELECT id FROM klasa WHERE nazwa = ?', ('1B',))
klasa_id = cur.fetchone()[0]
cur.execute('UPDATE uczen SET klasa_id=? WHERE id=?', (klasa_id, uczen_id))
czytajdane()
# delete the student with id 1
cur.execute('DELETE FROM uczen WHERE id=?', (1,))
czytajdane()
con.close()
| koduj-z-klasa/python101 | bazy/sqlorm/sqlraw05.py | Python | mit | 2,212 |
import re
from zope.interface import implements
from formal import iformal
class FormsError(Exception):
"""
Base class for all Forms errors. A single string, message, is accepted and
stored as an attribute.
    The message is also passed on to the Exception base class.
"""
def __init__(self, message):
Exception.__init__(self, message)
self.message = message
class FormError(FormsError):
"""
Form validation error. Raise this, typically from a submit callback, to
signal that the form (not an individual field) failed to validate.
"""
pass
class FieldError(FormsError):
"""
Base class for field-related exceptions. The failure message and the failing
field name are stored as attributes.
"""
def __init__(self, message, fieldName=None):
FormsError.__init__(self, message)
self.fieldName = fieldName
class FieldValidationError(FieldError):
"""
Exception that signals that a field failed to validate.
"""
pass
class FieldRequiredError(FieldValidationError):
"""
Exception that signals that a field that is marked as required was not
entered.
"""
pass
class RequiredValidator(object):
implements(iformal.IValidator)
def validate(self, field, value):
if value is None:
raise FieldRequiredError, 'Required'
class LengthValidator(object):
"""Validate the length of the value is within a given range.
"""
implements(iformal.IValidator)
def __init__(self, min=None, max=None):
self.min = min
self.max = max
assert self.min is not None or self.max is not None
def validationErrorMessage(self, field):
if self.min is not None and self.max is None:
return 'Must be longer than %r characters'%(self.min,)
if self.min is None and self.max is not None:
return 'Must be shorter than %r characters'%(self.max,)
return 'Must be between %r and %r characters'%(self.min, self.max)
def validate(self, field, value):
if value is None:
return
length = len(value)
if self.min is not None and length < self.min:
raise FieldValidationError, self.validationErrorMessage(field)
if self.max is not None and length > self.max:
raise FieldValidationError, self.validationErrorMessage(field)
class RangeValidator(object):
"""Validate the size of the value is within is given range.
"""
implements(iformal.IValidator)
def __init__(self, min=None, max=None):
self.min = min
self.max = max
assert self.min is not None or self.max is not None
def validationErrorMessage(self, field):
if self.min is not None and self.max is None:
return 'Must be greater than %r'%(self.min,)
if self.min is None and self.max is not None:
return 'Must be less than %r'%(self.max,)
return 'Must be between %r and %r'%(self.min, self.max)
def validate(self, field, value):
if value is None:
return
if self.min is not None and value < self.min:
raise FieldValidationError, self.validationErrorMessage(field)
if self.max is not None and value > self.max:
raise FieldValidationError, self.validationErrorMessage(field)
class PatternValidator(object):
"""Validate the value is a certain pattern.
The required pattern is defined as a regular expression. The regex will be
compiled automatically if necessary.
"""
implements(iformal.IValidator)
def __init__(self, regex):
self.regex = regex
def validate(self, field, value):
if value is None:
return
# If it doesn't look like a regex object then compile it now
if not hasattr(self.regex, 'match'):
self.regex = re.compile(self.regex)
if self.regex.match(value) is None:
raise FieldValidationError, 'Invalid format'
class CallableValidator(object):
"""
A validator that delegates the validation of non-None values to a callable
with the same signature as IValidator.validate.
"""
implements(iformal.IValidator)
def __init__(self, callable):
self.callable = callable
def validate(self, field, value):
if value is None:
return
self.callable(field, value)
__all__ = [
'FormError', 'FieldError', 'FieldValidationError', 'FieldRequiredError',
'RequiredValidator', 'LengthValidator', 'RangeValidator', 'PatternValidator',
'CallableValidator',
]
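# Illustrative sketch (not part of the original module): the built-in validators
# ignore the `field` argument, so they can be exercised directly like this:
#   LengthValidator(min=2, max=5).validate(None, 'abc')    # passes silently
#   RangeValidator(max=10).validate(None, 42)              # raises FieldValidationError
#   PatternValidator(r'^\d+$').validate(None, '123')       # passes silently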
| emgee/formal | formal/validation.py | Python | mit | 4,845 |
'''
Created on 2014-6-22
@author: xiajie
'''
import numpy as np
from SupervisedBasic import zipcode
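# Locally weighted LDA: for each query point, class priors, means and the pooled
# covariance are re-estimated with Gaussian kernel weights before applying the
# usual LDA discriminant.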
def weight(x0, x, lmbda):
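    # Gaussian kernel weight: exp(-||x0 - x||^2 / (2 * lambda))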
return np.exp(-np.transpose(x0-x).dot(x0-x)/(2*lmbda))
def cal_pi(x0, data_set, K, lmbda):
pi = np.zeros(K)
n = 0.
for k in range(K):
for i in range(len(data_set[k])):
w = weight(x0, data_set[k][i], lmbda)
pi[k] += w
n += w
    return pi/n
def cal_mean(x0, data_set, K, lmbda):
means = np.zeros((K,len(data_set[0][0])))
for k in range(K):
n = 0.
for i in range(len(data_set[k])):
x = data_set[k][i]
w = weight(x0, x, lmbda)
means[k] = means[k] + x*w
n += w
means[k] /= n
return means
def cal_thegma(x0, data_set, means, K, N, lmbda):
p = len(means[0])
thegma = np.zeros((p,p))
for k in range(K):
g = np.zeros((p,p))
for i in range(len(data_set[k])):
x = data_set[k][i]
w = weight(x0, x, lmbda)
diff = x - means[k]
g += w*np.outer(diff, diff)/(N-K)
thegma += g
return thegma
def cookdata(data):
cls = {}
inputs = data[:,1:].tolist()
outputs = data[:,0].tolist()
for i in range(len(inputs)):
cls.setdefault(outputs[i],[])
cls[outputs[i]].append(inputs[i])
for k in cls.keys():
cls[k] = np.array(cls[k])
return cls
def discriminant(x, u, thegma, pi):
inthegma = np.linalg.inv(thegma)
return np.transpose(x).dot(inthegma).dot(u) - 0.5*np.transpose(u).dot(inthegma).dot(u) + np.log(pi)
def classify(x, U, T, P, K):
dmax = -999999999
index = 99
for i in range(K):
dis = discriminant(x, U[i], T, P[i])
if dis > dmax:
dmax = dis
index = i
return index
def run_classify(train, test, N):
lmbda = 100
error = 0.
for i in range(len(test)):
x0 = np.array(test[i][1:])
y0 = test[i][0]
pi = cal_pi(x0, train, len(train), lmbda)
means = cal_mean(x0, train, len(train), lmbda)
sigma = cal_thegma(x0, train, means, len(train), N, lmbda)
res = classify(x0, means, sigma, pi, len(train))
if res != y0:
error += 1.
print 'error rate:', error/len(test)
if __name__ == '__main__':
train, test = zipcode.loaddata()
N = len(train)
train_cls = cookdata(train)
#test_cls = cookdata(test)
K = len(train_cls)
run_classify(train_cls, test, N)
| jayshonzs/ESL | KernelSmoothing/LocalLDA.py | Python | mit | 2,510 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Holds fixtures for the smif package tests
"""
from __future__ import absolute_import, division, print_function
import json
import logging
import os
from copy import deepcopy
import numpy as np
import pandas as pd
from pytest import fixture
from smif.data_layer import Store
from smif.data_layer.data_array import DataArray
from smif.data_layer.file.file_config_store import _write_yaml_file as dump
from smif.data_layer.memory_interface import (MemoryConfigStore,
MemoryDataStore,
MemoryMetadataStore)
from smif.metadata import Spec
logging.basicConfig(filename='test_logs.log',
level=logging.DEBUG,
format='%(asctime)s %(name)-12s: %(levelname)-8s %(message)s',
filemode='w')
@fixture
def empty_store():
"""Store fixture
"""
# implement each part using the memory classes, simpler than mocking
# each other implementation of a part is tested fully by e.g. test_config_store.py
return Store(
config_store=MemoryConfigStore(),
metadata_store=MemoryMetadataStore(),
data_store=MemoryDataStore()
)
@fixture
def setup_empty_folder_structure(tmpdir_factory):
folder_list = ['models', 'results', 'config', 'data']
config_folders = [
'dimensions',
'model_runs',
'scenarios',
'sector_models',
'sos_models',
]
for folder in config_folders:
folder_list.append(os.path.join('config', folder))
data_folders = [
'coefficients',
'dimensions',
'initial_conditions',
'interventions',
'narratives',
'scenarios',
'strategies',
'parameters'
]
for folder in data_folders:
folder_list.append(os.path.join('data', folder))
test_folder = tmpdir_factory.mktemp("smif")
for folder in folder_list:
test_folder.mkdir(folder)
return test_folder
@fixture
def setup_folder_structure(setup_empty_folder_structure, oxford_region, remap_months,
initial_system, planned_interventions):
"""
Returns
-------
:class:`LocalPath`
Path to the temporary folder
"""
test_folder = setup_empty_folder_structure
region_file = test_folder.join('data', 'dimensions', 'test_region.geojson')
region_file.write(json.dumps(oxford_region))
intervals_file = test_folder.join('data', 'dimensions', 'annual.yml')
intervals_file.write("""\
- name: '1'
interval: [[P0Y, P1Y]]
""")
intervals_file = test_folder.join('data', 'dimensions', 'hourly.yml')
intervals_file.write("""\
- name: '1'
interval: [[PT0H, PT1H]]
""")
initial_conditions_dir = str(test_folder.join('data', 'initial_conditions'))
dump(initial_conditions_dir, 'init_system', initial_system)
interventions_dir = str(test_folder.join('data', 'interventions'))
dump(interventions_dir, 'planned_interventions', planned_interventions)
dimensions_dir = str(test_folder.join('data', 'dimensions'))
dump(dimensions_dir, 'remap', remap_months)
units_file = test_folder.join('data', 'user_units.txt')
with units_file.open(mode='w') as units_fh:
units_fh.write("blobbiness = m^3 * 10^6\n")
units_fh.write("people = [people]\n")
units_fh.write("mcm = 10^6 * m^3\n")
units_fh.write("GBP=[currency]\n")
return test_folder
@fixture
def initial_system():
"""Initial system (interventions with build_date)
"""
return [
{'name': 'water_asset_a', 'build_year': 2017},
{'name': 'water_asset_b', 'build_year': 2017},
{'name': 'water_asset_c', 'build_year': 2017},
]
@fixture
def parameters():
return [
{
'name': 'smart_meter_savings',
'description': 'The savings from smart meters',
'absolute_range': (0, 100),
'suggested_range': (3, 10),
'default_value': 3,
'unit': '%'
}
]
@fixture
def planned_interventions():
"""Return pre-specified planning intervention data
"""
return [
{
'name': 'water_asset_a',
'capacity': {'value': 6, 'unit': 'Ml'},
'description': 'Existing water treatment plants',
'location': {'lat': 51.74556, 'lon': -1.240528}
},
{
'name': 'water_asset_b',
'capacity': {'value': 6, 'unit': 'Ml'},
'description': 'Existing water treatment plants',
'location': {'lat': 51.74556, 'lon': -1.240528}
},
{
'name': 'water_asset_c',
'capacity': {'value': 6, 'unit': 'Ml'},
'description': 'Existing water treatment plants',
'location': {'lat': 51.74556, 'lon': -1.240528}
},
]
@fixture
def oxford_region():
data = {
"type": "FeatureCollection",
"crs": {
"type": "name",
"properties": {
"name": "urn:ogc:def:crs:EPSG::27700"
}
},
"features": [
{
"type": "Feature",
"properties": {
"name": "oxford"
},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[448180, 209366],
[449500, 211092],
[450537, 211029],
[450873, 210673],
[451250, 210793],
[451642, 210023],
[453855, 208466],
[454585, 208468],
[456077, 207967],
[456146, 207738],
[456668, 207779],
[456708, 207444],
[456278, 207122],
[456149, 206615],
[455707, 206798],
[455749, 204521],
[456773, 204488],
[457014, 204184],
[456031, 203475],
[456444, 202854],
[456087, 202044],
[455369, 201799],
[454396, 202203],
[453843, 201634],
[452499, 203209],
[452052, 203566],
[451653, 203513],
[450645, 205137],
[449497, 205548],
[449051, 206042],
[448141, 208446],
[448180, 209366]
]
]
}
},
]
}
return data
@fixture
def initial_conditions():
return [{'name': 'solar_installation', 'build_year': 2017}]
@fixture
def interventions():
return {
'solar_installation': {
'name': 'solar_installation',
'capacity': 5,
'capacity_units': 'MW'
},
'wind_installation': {
'name': 'wind_installation',
'capacity': 4,
'capacity_units': 'MW'
}
}
@fixture
def water_interventions_abc():
return [
{
"name": "water_asset_a",
"location": "oxford",
"capital_cost": {
"units": "£",
"value": 1000
},
"economic_lifetime": {
"units": "years",
"value": 25
},
"operational_lifetime": {
"units": "years",
"value": 25
}
},
{
"name": "water_asset_b",
"location": "oxford",
"capital_cost": {
"units": "£",
"value": 1500
},
"economic_lifetime": {
"units": "years",
"value": 25
},
"operational_lifetime": {
"units": "years",
"value": 25
}
},
{
"name": "water_asset_c",
"location": "oxford",
"capital_cost": {
"units": "£",
"value": 3000
},
"economic_lifetime": {
"units": "years",
"value": 25
},
"operational_lifetime": {
"units": "years",
"value": 25
}
}
]
@fixture
def model_run():
"""Return sample model_run
"""
return {
'name': 'unique_model_run_name',
'description': 'a description of what the model run contains',
'stamp': '2017-09-20T12:53:23+00:00',
'timesteps': [
2015,
2020,
2025
],
'sos_model': 'energy',
'scenarios': {
'population': 'High Population (ONS)'
},
'strategies': [
{
'type': 'pre-specified-planning',
'name': 'energy_supply',
'description': 'description of the strategy',
'model_name': 'energy_supply',
}
],
'narratives': {
'technology': [
'Energy Demand - High Tech'
],
'governance': [
'Central Planning'
]
}
}
@fixture
def get_sos_model(sample_narratives):
"""Return sample sos_model
"""
return {
'name': 'energy',
'description': "A system of systems model which encapsulates "
"the future supply and demand of energy for the UK",
'scenarios': [
'population'
],
'narratives': sample_narratives,
'sector_models': [
'energy_demand',
'energy_supply'
],
'scenario_dependencies': [
{
'source': 'population',
'source_output': 'population_count',
'sink': 'energy_demand',
'sink_input': 'population'
}
],
'model_dependencies': [
{
'source': 'energy_demand',
'source_output': 'gas_demand',
'sink': 'energy_supply',
'sink_input': 'natural_gas_demand'
}
]
}
@fixture
def get_sector_model(annual, hourly, lad):
"""Return sample sector_model
"""
return {
'name': 'energy_demand',
'description': "Computes the energy demand of the"
"UK population for each timestep",
'classname': 'EnergyDemandWrapper',
'path': '../../models/energy_demand/run.py',
'inputs': [
{
'name': 'population',
'dtype': 'int',
'dims': ['lad', 'annual'],
'coords': {
'lad': lad,
'annual': annual
},
'absolute_range': [0, int(1e12)],
'expected_range': [0, 100000],
'unit': 'people'
}
],
'outputs': [
{
'name': 'gas_demand',
'dtype': 'float',
'dims': ['lad', 'hourly'],
'coords': {
'lad': lad,
'hourly': hourly
},
'absolute_range': [0, float('inf')],
'expected_range': [0.01, 10],
'unit': 'GWh'
}
],
'parameters': [
{
'name': 'smart_meter_savings',
'description': "Difference in floor area per person"
"in end year compared to base year",
'absolute_range': [0, float('inf')],
'expected_range': [0.5, 2],
'unit': '%',
'dtype': 'float'
},
{
'name': 'homogeneity_coefficient',
'description': "How homegenous the centralisation"
"process is",
'absolute_range': [0, 1],
'expected_range': [0, 1],
'unit': 'percentage',
'dtype': 'float'
}
],
'interventions': [],
'initial_conditions': []
}
@fixture
def energy_supply_sector_model(hourly):
"""Return sample sector_model
"""
return {
'name': 'energy_supply',
'description': "Supply system model",
'classname': 'EnergySupplyWrapper',
'path': '../../models/energy_supply/run.py',
'inputs': [
{
'name': 'natural_gas_demand',
'dims': ['lad', 'hourly'],
'coords': {
'lad': ['a', 'b'],
'hourly': hourly
},
'absolute_range': [0, float('inf')],
'expected_range': [0, 100],
'dtype': 'float',
'unit': 'GWh'
}
],
'outputs': [],
'parameters': [],
'interventions': [],
'initial_conditions': []
}
@fixture
def water_supply_sector_model(hourly):
"""Return sample sector_model
"""
return {
'name': 'water_supply',
'description': "Supply system model",
'classname': 'WaterSupplyWrapper',
'path': '../../models/water_supply/run.py',
'inputs': [],
'outputs': [],
'parameters': [
{
'name': 'clever_water_meter_savings',
'description': "",
'absolute_range': [0, 1],
'expected_range': [0, 0.2],
'unit': 'percentage',
'dtype': 'float'
},
{
'name': 'per_capita_water_demand',
'description': "",
'absolute_range': [0, float('inf')],
'expected_range': [0, 0.05],
'unit': 'Ml/day',
'dtype': 'float'
}
],
'interventions': [],
'initial_conditions': []
}
@fixture
def get_sector_model_parameter_defaults(get_sector_model):
"""DataArray for each parameter default
"""
data = {
'smart_meter_savings': np.array(0.5),
'homogeneity_coefficient': np.array(0.1)
}
for param in get_sector_model['parameters']:
nda = data[param['name']]
spec = Spec.from_dict(param)
data[param['name']] = DataArray(spec, nda)
return data
@fixture
def get_multidimensional_param():
spec = Spec.from_dict({
'name': 'ss_t_base_heating',
'description': 'Industrial base temperature',
'default': '../energy_demand/parameters/ss_t_base_heating.csv',
'unit': '',
'dims': ['interpolation_params', 'end_yr'],
'coords': {
'interpolation_params': ['diffusion_choice', 'value_ey'],
'end_yr': [2030, 2050]
},
'dtype': 'float'
})
dataframe = pd.DataFrame([
{
'interpolation_params': 'diffusion_choice',
'end_yr': 2030,
'ss_t_base_heating': 0
},
{
'interpolation_params': 'diffusion_choice',
'end_yr': 2050,
'ss_t_base_heating': 0
},
{
'interpolation_params': 'value_ey',
'end_yr': 2030,
'ss_t_base_heating': 15.5
},
{
'interpolation_params': 'value_ey',
'end_yr': 2050,
'ss_t_base_heating': 15.5
},
]).set_index(['interpolation_params', 'end_yr'])
return DataArray.from_df(spec, dataframe)
@fixture
def get_sector_model_no_coords(get_sector_model):
model = deepcopy(get_sector_model)
for spec_group in ('inputs', 'outputs', 'parameters'):
for spec in model[spec_group]:
try:
del spec['coords']
except KeyError:
pass
return model
@fixture
def sample_scenarios():
"""Return sample scenario
"""
return [
{
'name': 'population',
'description': 'The annual change in UK population',
'provides': [
{
'name': "population_count",
'description': "The count of population",
'unit': 'people',
'dtype': 'int',
'dims': ['lad', 'annual']
},
],
'variants': [
{
'name': 'High Population (ONS)',
'description': 'The High ONS Forecast for UK population out to 2050',
'data': {
'population_count': 'population_high.csv'
}
},
{
'name': 'Low Population (ONS)',
'description': 'The Low ONS Forecast for UK population out to 2050',
'data': {
'population_count': 'population_low.csv'
}
},
],
},
]
@fixture
def sample_scenario_data(scenario, get_sector_model, energy_supply_sector_model,
water_supply_sector_model):
scenario_data = {}
for scenario in [scenario]:
for variant in scenario['variants']:
for data_key, data_value in variant['data'].items():
spec = Spec.from_dict(
[provides for provides in scenario['provides']
if provides['name'] == data_key][0])
nda = np.random.random(spec.shape)
da = DataArray(spec, nda)
key = (scenario['name'], variant['name'], data_key)
scenario_data[key] = da
return scenario_data
@fixture
def get_scenario():
"""Return sample scenario
"""
return {
"name": "Economy",
"description": "Economic projections for the UK",
"provides": [
{
'name': "gva",
'description': "GVA",
'dtype': "float",
'unit': "million GBP"
}
],
"variants": [
{
"name": "Central Economy (High)",
"data": {
"gva": 3,
}
}
]
}
@fixture(scope='function')
def get_narrative():
"""Return sample narrative
"""
return {
'name': 'technology',
'description': 'Describes the evolution of technology',
'provides': {
'energy_demand': ['smart_meter_savings']
},
'variants': [
{
'name': 'high_tech_dsm',
'description': 'High takeup of smart technology on the demand side',
'data': {
'smart_meter_savings': 'high_tech_dsm.csv'
}
}
]
}
@fixture
def sample_narratives(get_narrative):
"""Return sample narratives
"""
return [
get_narrative,
{
'name': 'governance',
'description': 'Defines the nature of governance and influence upon decisions',
'provides': {
'energy_demand': ['homogeneity_coefficient']
},
'variants': [
{
'name': 'Central Planning',
'description': 'Stronger role for central government in planning and ' +
'regulation, less emphasis on market-based solutions',
'data': {
'homogeneity_coefficient': 'homogeneity_coefficient.csv'
}
},
],
},
]
@fixture
def sample_narrative_data(sample_narratives, get_sector_model, energy_supply_sector_model,
water_supply_sector_model):
narrative_data = {}
sos_model_name = 'energy'
sector_models = {}
sector_models[get_sector_model['name']] = get_sector_model
sector_models[energy_supply_sector_model['name']] = energy_supply_sector_model
sector_models[water_supply_sector_model['name']] = water_supply_sector_model
for narrative in sample_narratives:
for sector_model_name, param_names in narrative['provides'].items():
sector_model = sector_models[sector_model_name]
for param_name in param_names:
param = _pick_from_list(sector_model['parameters'], param_name)
for variant in narrative['variants']:
spec = Spec.from_dict(param)
nda = np.random.random(spec.shape)
da = DataArray(spec, nda)
key = (sos_model_name, narrative['name'], variant['name'], param_name)
narrative_data[key] = da
return narrative_data
@fixture
def sample_results():
spec = Spec(name='energy_use', dtype='float')
data = np.array(1, dtype=float)
return DataArray(spec, data)
def _pick_from_list(list_, name):
for item in list_:
if item['name'] == name:
return item
assert False, '{} not found in {}'.format(name, list_)
@fixture
def sample_dimensions(remap_months, hourly, annual, lad):
"""Return sample dimensions
"""
return [
{
'name': 'lad',
'description': 'Local authority districts for the UK',
'elements': lad
},
{
'name': 'hourly',
'description': 'The 8760 hours in the year named by hour',
'elements': hourly
},
{
'name': 'annual',
'description': 'One annual timestep, used for aggregate yearly data',
'elements': annual,
},
{
'name': 'remap_months',
'description': 'Remapped months to four representative months',
'elements': remap_months,
},
{
'name': 'technology_type',
'description': 'Technology dimension for narrative fixture',
'elements': [
{'name': 'water_meter'},
{'name': 'electricity_meter'},
]
},
{
'name': 'county',
'elements': [
{'name': 'oxford'}
]
},
{
'name': 'season',
'elements': [
{'name': 'cold_month'},
{'name': 'spring_month'},
{'name': 'hot_month'},
{'name': 'fall_month'}
]
}
]
@fixture
def get_dimension():
return {
"name": "annual",
"description": "Single annual interval of 8760 hours",
"elements":
[
{
"id": 1,
"interval": [["PT0H", "PT8760H"]]
}
]
}
@fixture
def lad():
return [{'name': 'a'}, {'name': 'b'}]
@fixture
def hourly():
return [
{
'name': n,
'interval': [['PT{}H'.format(n), 'PT{}H'.format(n+1)]]
}
for n in range(8) # should be 8760
]
@fixture
def annual():
return [
{
'name': 1,
'interval': [['PT0H', 'PT8760H']]
}
]
@fixture
def remap_months():
"""Remapping four representative months to months across the year
In this case we have a model which represents the seasons through
the year using one month for each season. We then map the four
model seasons 1, 2, 3 & 4 onto the months throughout the year that
they represent.
The data will be presented to the model using the four time intervals,
1, 2, 3 & 4. When converting to hours, the data will be replicated over
the year. When converting from hours to the model time intervals,
data will be averaged and aggregated.
"""
data = [
{'name': 'cold_month', 'interval': [['P0M', 'P1M'], ['P1M', 'P2M'], ['P11M', 'P12M']]},
{'name': 'spring_month', 'interval': [['P2M', 'P3M'], ['P3M', 'P4M'], ['P4M', 'P5M']]},
{'name': 'hot_month', 'interval': [['P5M', 'P6M'], ['P6M', 'P7M'], ['P7M', 'P8M']]},
{'name': 'fall_month', 'interval': [['P8M', 'P9M'], ['P9M', 'P10M'], ['P10M', 'P11M']]}
]
return data
@fixture
def minimal_model_run():
return {
'name': 'test_modelrun',
'timesteps': [2010, 2015, 2010],
'sos_model': 'energy'
}
@fixture
def strategies():
return [
{
'type': 'pre-specified-planning',
'description': 'a description',
'model_name': 'test_model',
'interventions': [
{'name': 'a', 'build_year': 2020},
{'name': 'b', 'build_year': 2025},
]
},
{
'type': 'rule-based',
'description': 'reduce emissions',
'path': 'planning/energyagent.py',
'classname': 'EnergyAgent'
}
]
@fixture
def unit_definitions():
return ['kg = kilograms']
@fixture
def dimension():
return {'name': 'category', 'elements': [{'name': 1}, {'name': 2}, {'name': 3}]}
@fixture
def conversion_source_spec():
return Spec(name='a', dtype='float', unit='ml')
@fixture
def conversion_sink_spec():
return Spec(name='b', dtype='float', unit='ml')
@fixture
def conversion_coefficients():
return np.array([[1]])
@fixture
def scenario(sample_dimensions):
return deepcopy({
'name': 'mortality',
'description': 'The annual mortality rate in UK population',
'provides': [
{
'name': 'mortality',
'dims': ['lad'],
'coords': {'lad': sample_dimensions[0]['elements']},
'dtype': 'float',
}
],
'variants': [
{
'name': 'low',
'description': 'Mortality (Low)',
'data': {
'mortality': 'mortality_low.csv',
},
}
]
})
@fixture
def scenario_no_coords(scenario):
scenario = deepcopy(scenario)
for spec in scenario['provides']:
try:
del spec['coords']
except KeyError:
pass
return scenario
@fixture
def narrative_no_coords(get_narrative):
get_narrative = deepcopy(get_narrative)
for spec in get_narrative['provides']:
try:
del spec['coords']
except KeyError:
pass
return get_narrative
@fixture
def state():
return [
{
'name': 'test_intervention',
'build_year': 1900
}
]
| willu47/smif | tests/conftest.py | Python | mit | 27,175 |
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
# return HttpResponse("<h1>The Fett homepage</h1>")
return render(request, 'bokplot/index.html', {})
from bokeh.plotting import figure
from bokeh.resources import CDN
from bokeh.embed import components
def simple_chart(request):
plot = figure()
import numpy as np
# x = np.arange(-10,10,0.1)
# y = np.arange(-10,10,0.1)
# plot.circle([1,2], [3,4])
script, div = components(plot, CDN)
tp, hp = np.loadtxt('/Users/sebastian/phd/data/phenEOB-data/phenP/hp.dat').T
tc, hc = np.loadtxt('/Users/sebastian/phd/data/phenEOB-data/phenP/hc.dat').T
# select the tools we want
# TOOLS="reset,pan,wheel_zoom,box_zoom,save"
# p1 = figure(tools=TOOLS, plot_width=300*2, plot_height=300)
p1 = figure(plot_width=300*2, plot_height=300)
p1.line(tp, hp, color="red", alpha=0.5)
# p2 = figure(tools=TOOLS, plot_width=300*2, plot_height=300, x_range=p1.x_range, y_range=p1.y_range,)
p2 = figure(plot_width=300*2, plot_height=300, x_range=p1.x_range, y_range=p1.y_range,)
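    # sharing p1's x_range/y_range links pan/zoom between the two figures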
p2.line(tc, hc, color="blue", alpha=0.5)
from bokeh.plotting import gridplot
p = gridplot([[p1],[p2]])
# plots = {'Red': p1, 'Blue': p2}
# script, div = components(plots)
script, div = components(p)
return render(request, "bokplot/simple_chart.html", {"the_script": script, "the_div": div})
def simple_chart_same_axis(request):
plot = figure()
import numpy as np
# x = np.arange(-10,10,0.1)
# y = np.arange(-10,10,0.1)
# plot.circle([1,2], [3,4])
script, div = components(plot, CDN)
tp, hp = np.loadtxt('/Users/sebastian/phd/data/phenEOB-data/phenP/hp.dat').T
tc, hc = np.loadtxt('/Users/sebastian/phd/data/phenEOB-data/phenP/hc.dat').T
# select the tools we want
TOOLS="pan,wheel_zoom,box_zoom,reset,save"
p1 = figure(tools=TOOLS, plot_width=300*4, plot_height=300*2)
p1.line(tp, hp, color="red", alpha=0.5)
p1.line(tc, hc, color="blue", alpha=0.5)
script, div = components(p1, CDN)
return render(request, "bokplot/simple_chart.html", {"the_script": script, "the_div": div})
def simple_chart_2(request):
from bokeh.plotting import figure
from bokeh.models import Range1d
from bokeh.embed import components
# create some data
x1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    y1 = [0, 8, 2, 4, 6, 9, 5, 6, 25, 28, 4]
x2 = [2, 5, 7, 15, 18, 19, 25, 28, 9, 10, 4]
y2 = [2, 4, 6, 9, 15, 18, 0, 8, 2, 25, 28]
x3 = [0, 1, 0, 8, 2, 4, 6, 9, 7, 8, 9]
y3 = [0, 8, 4, 6, 9, 15, 18, 19, 19, 25, 28]
# select the tools we want
TOOLS="pan,wheel_zoom,box_zoom,reset,save"
# the red and blue graphs will share this data range
xr1 = Range1d(start=0, end=30)
yr1 = Range1d(start=0, end=30)
# only the green will use this data range
xr2 = Range1d(start=0, end=30)
yr2 = Range1d(start=0, end=30)
# build our figures
p1 = figure(x_range=xr1, y_range=yr1, tools=TOOLS, plot_width=300, plot_height=300)
p1.scatter(x1, y1, size=12, color="red", alpha=0.5)
p2 = figure(x_range=xr1, y_range=yr1, tools=TOOLS, plot_width=300, plot_height=300)
p2.scatter(x2, y2, size=12, color="blue", alpha=0.5)
p3 = figure(x_range=xr2, y_range=yr2, tools=TOOLS, plot_width=300, plot_height=300)
p3.scatter(x3, y3, size=12, color="green", alpha=0.5)
# plots can be a single PlotObject, a list/tuple, or even a dictionary
plots = {'Red': p1, 'Blue': p2, 'Green': p3}
script, div = components(plots)
return render(request, "bokplot/simple_chart.html", {"the_script": script, "the_div": div})
| Cyberface/django-fett | mysite/bokplot/views.py | Python | mit | 3,716 |
import zmq
import time
PORT = "5555";
c = zmq.Context()
s = c.socket(zmq.PULL)
s.bind("tcp://*:" + PORT)
print "Listening for ZMQ connections on port " + PORT
while True:
print s.recv()
| Institute-Web-Science-and-Technologies/LiveGovWP1 | scripts/zmq-pull-socket.py | Python | mit | 193 |
"""HTTP/1.1 client library
<intro stuff goes here>
<other stuff, too>
HTTPConnection goes through a number of "states", which define when a client
may legally make another request or fetch the response for a particular
request. This diagram details these state transitions:
    (null)
      |
      | HTTPConnection()
      v
    Idle
      |
      | putrequest()
      v
    Request-started
      |
      | ( putheader() )*  endheaders()
      v
    Request-sent
      |
      | response = getresponse()
      v
    Unread-response   [Response-headers-read]
      |\____________________
      |                     |
      | response.read()     | putrequest()
      v                     v
    Idle                  Req-started-unread-response
                     ______/|
                   /        |
   response.read() |        | ( putheader() )*  endheaders()
                   v        v
       Request-started    Req-sent-unread-response
                            |
                            | response.read()
                            v
                          Request-sent
This diagram presents the following rules:
-- a second request may not be started until {response-headers-read}
-- a response [object] cannot be retrieved until {request-sent}
-- there is no differentiation between an unread response body and a
partially read response body
Note: this enforcement is applied by the HTTPConnection class. The
HTTPResponse class does not enforce this state machine, which
implies sophisticated clients may accelerate the request/response
pipeline. Caution should be taken, though: accelerating the states
beyond the above pattern may imply knowledge of the server's
connection-close behavior for certain requests. For example, it
is impossible to tell whether the server will close the connection
UNTIL the response headers have been read; this means that further
requests cannot be placed into the pipeline until it is known that
the server will NOT be closing the connection.
Logical State                __state            __response
-------------                -------            ----------
Idle                         _CS_IDLE           None
Request-started              _CS_REQ_STARTED    None
Request-sent                 _CS_REQ_SENT       None
Unread-response              _CS_IDLE           <response_class>
Req-started-unread-response  _CS_REQ_STARTED    <response_class>
Req-sent-unread-response     _CS_REQ_SENT       <response_class>
"""
import io
import socket
import email.parser
import email.message
from urllib.parse import urlsplit
import warnings
__all__ = ["HTTPResponse", "HTTPConnection",
"HTTPException", "NotConnected", "UnknownProtocol",
"UnknownTransferEncoding", "UnimplementedFileMode",
"IncompleteRead", "InvalidURL", "ImproperConnectionState",
"CannotSendRequest", "CannotSendHeader", "ResponseNotReady",
"BadStatusLine", "error", "responses"]
HTTP_PORT = 80
HTTPS_PORT = 443
_UNKNOWN = 'UNKNOWN'
# connection states
_CS_IDLE = 'Idle'
_CS_REQ_STARTED = 'Request-started'
_CS_REQ_SENT = 'Request-sent'
# status codes
# informational
CONTINUE = 100
SWITCHING_PROTOCOLS = 101
PROCESSING = 102
# successful
OK = 200
CREATED = 201
ACCEPTED = 202
NON_AUTHORITATIVE_INFORMATION = 203
NO_CONTENT = 204
RESET_CONTENT = 205
PARTIAL_CONTENT = 206
MULTI_STATUS = 207
IM_USED = 226
# redirection
MULTIPLE_CHOICES = 300
MOVED_PERMANENTLY = 301
FOUND = 302
SEE_OTHER = 303
NOT_MODIFIED = 304
USE_PROXY = 305
TEMPORARY_REDIRECT = 307
# client error
BAD_REQUEST = 400
UNAUTHORIZED = 401
PAYMENT_REQUIRED = 402
FORBIDDEN = 403
NOT_FOUND = 404
METHOD_NOT_ALLOWED = 405
NOT_ACCEPTABLE = 406
PROXY_AUTHENTICATION_REQUIRED = 407
REQUEST_TIMEOUT = 408
CONFLICT = 409
GONE = 410
LENGTH_REQUIRED = 411
PRECONDITION_FAILED = 412
REQUEST_ENTITY_TOO_LARGE = 413
REQUEST_URI_TOO_LONG = 414
UNSUPPORTED_MEDIA_TYPE = 415
REQUESTED_RANGE_NOT_SATISFIABLE = 416
EXPECTATION_FAILED = 417
UNPROCESSABLE_ENTITY = 422
LOCKED = 423
FAILED_DEPENDENCY = 424
UPGRADE_REQUIRED = 426
# server error
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
BAD_GATEWAY = 502
SERVICE_UNAVAILABLE = 503
GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
INSUFFICIENT_STORAGE = 507
NOT_EXTENDED = 510
# Mapping status codes to official W3C names
responses = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
306: '(Unused)',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
}
# maximal amount of data to read at one time in _safe_read
MAXAMOUNT = 1048576
class HTTPMessage(email.message.Message):
def getallmatchingheaders(self, name):
"""Find all header lines matching a given header name.
Look through the list of headers and find all lines matching a given
header name (and their continuation lines). A list of the lines is
returned, without interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple times, all
occurrences are returned. Case is not important in the header name.
"""
# XXX: copied from rfc822.Message for compatibility
name = name.lower() + ':'
n = len(name)
lst = []
hit = 0
for line in self.keys():
if line[:n].lower() == name:
hit = 1
elif not line[:1].isspace():
hit = 0
if hit:
lst.append(line)
return lst
def parse_headers(fp):
"""Parses only RFC2822 headers from a file pointer.
email Parser wants to see strings rather than bytes.
But a TextIOWrapper around self.rfile would buffer too many bytes
from the stream, bytes which we later need to read as bytes.
So we read the correct bytes here, as bytes, for email Parser
to parse.
"""
# XXX: Copied from http.server.BaseHTTPRequestHandler.parse_request,
# maybe we can just call this function from there.
headers = []
while True:
line = fp.readline()
headers.append(line)
if line in (b'\r\n', b'\n', b''):
break
hstring = b''.join(headers).decode('iso-8859-1')
return email.parser.Parser(_class=HTTPMessage).parsestr(hstring)
class HTTPResponse:
# strict: If true, raise BadStatusLine if the status line can't be
# parsed as a valid HTTP/1.0 or 1.1 status line. By default it is
# false because it prevents clients from talking to HTTP/0.9
# servers. Note that a response with a sufficiently corrupted
# status line will look like an HTTP/0.9 response.
# See RFC 2616 sec 19.6 and RFC 1945 sec 6 for details.
# The bytes from the socket object are iso-8859-1 strings.
# See RFC 2616 sec 2.2 which notes an exception for MIME-encoded
# text following RFC 2047. The basic status line parsing only
# accepts iso-8859-1.
def __init__(self, sock, debuglevel=0, strict=0, method=None):
# XXX If the response includes a content-length header, we
# need to make sure that the client doesn't read more than the
# specified number of bytes. If it does, it will block until
# the server times out and closes the connection. (The only
# applies to HTTP/1.1 connections.) Since some clients access
# self.fp directly rather than calling read(), this is a little
# tricky.
self.fp = sock.makefile("rb", 0)
self.debuglevel = debuglevel
self.strict = strict
self._method = method
self.msg = None
# from the Status-Line of the response
self.version = _UNKNOWN # HTTP-Version
self.status = _UNKNOWN # Status-Code
self.reason = _UNKNOWN # Reason-Phrase
self.chunked = _UNKNOWN # is "chunked" being used?
self.chunk_left = _UNKNOWN # bytes left to read in current chunk
self.length = _UNKNOWN # number of bytes left in response
self.will_close = _UNKNOWN # conn will close at end of response
def _read_status(self):
# Initialize with Simple-Response defaults.
line = str(self.fp.readline(), "iso-8859-1")
if self.debuglevel > 0:
print("reply:", repr(line))
if not line:
# Presumably, the server closed the connection before
# sending a valid response.
raise BadStatusLine(line)
try:
[version, status, reason] = line.split(None, 2)
except ValueError:
try:
[version, status] = line.split(None, 1)
reason = ""
except ValueError:
# empty version will cause next test to fail and status
# will be treated as 0.9 response.
version = ""
if not version.startswith("HTTP/"):
if self.strict:
self.close()
raise BadStatusLine(line)
else:
# Assume it's a Simple-Response from an 0.9 server.
# We have to convert the first line back to raw bytes
# because self.fp.readline() needs to return bytes.
self.fp = LineAndFileWrapper(bytes(line, "ascii"), self.fp)
return "HTTP/0.9", 200, ""
# The status code is a three-digit number
try:
status = int(status)
if status < 100 or status > 999:
raise BadStatusLine(line)
except ValueError:
raise BadStatusLine(line)
return version, status, reason
def begin(self):
if self.msg is not None:
# we've already started reading the response
return
# read until we get a non-100 response
while True:
version, status, reason = self._read_status()
if status != CONTINUE:
break
# skip the header from the 100 response
while True:
skip = self.fp.readline().strip()
if not skip:
break
if self.debuglevel > 0:
print("header:", skip)
self.status = status
self.reason = reason.strip()
if version == "HTTP/1.0":
self.version = 10
elif version.startswith("HTTP/1."):
self.version = 11 # use HTTP/1.1 code for HTTP/1.x where x>=1
elif version == "HTTP/0.9":
self.version = 9
else:
raise UnknownProtocol(version)
if self.version == 9:
self.length = None
self.chunked = 0
self.will_close = 1
self.msg = email.message_from_string('')
return
self.msg = parse_headers(self.fp)
if self.debuglevel > 0:
for hdr in self.msg:
print("header:", hdr, end=" ")
# are we using the chunked-style of transfer encoding?
tr_enc = self.msg.get("transfer-encoding")
if tr_enc and tr_enc.lower() == "chunked":
self.chunked = 1
self.chunk_left = None
else:
self.chunked = 0
# will the connection close at the end of the response?
self.will_close = self._check_close()
# do we have a Content-Length?
# NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
self.length = None
length = self.msg.get("content-length")
# are we using the chunked-style of transfer encoding?
tr_enc = self.msg.get("transfer-encoding")
if length and not self.chunked:
try:
self.length = int(length)
except ValueError:
self.length = None
else:
if self.length < 0: # ignore nonsensical negative lengths
self.length = None
else:
self.length = None
# does the body have a fixed length? (of zero)
if (status == NO_CONTENT or status == NOT_MODIFIED or
100 <= status < 200 or # 1xx codes
self._method == "HEAD"):
self.length = 0
# if the connection remains open, and we aren't using chunked, and
# a content-length was not provided, then assume that the connection
# WILL close.
if (not self.will_close and
not self.chunked and
self.length is None):
self.will_close = 1
def _check_close(self):
conn = self.msg.get("connection")
if self.version == 11:
# An HTTP/1.1 proxy is assumed to stay open unless
# explicitly closed.
conn = self.msg.get("connection")
if conn and "close" in conn.lower():
return True
return False
# Some HTTP/1.0 implementations have support for persistent
# connections, using rules different than HTTP/1.1.
# For older HTTP, Keep-Alive indicates persistent connection.
if self.msg.get("keep-alive"):
return False
# At least Akamai returns a "Connection: Keep-Alive" header,
# which was supposed to be sent by the client.
if conn and "keep-alive" in conn.lower():
return False
# Proxy-Connection is a netscape hack.
pconn = self.msg.get("proxy-connection")
if pconn and "keep-alive" in pconn.lower():
return False
# otherwise, assume it will close
return True
def close(self):
if self.fp:
self.fp.close()
self.fp = None
# These implementations are for the benefit of io.BufferedReader.
# XXX This class should probably be revised to act more like
# the "raw stream" that BufferedReader expects.
@property
def closed(self):
return self.isclosed()
def flush(self):
self.fp.flush()
# End of "raw stream" methods
def isclosed(self):
# NOTE: it is possible that we will not ever call self.close(). This
# case occurs when will_close is TRUE, length is None, and we
# read up to the last byte, but NOT past it.
#
# IMPLIES: if will_close is FALSE, then self.close() will ALWAYS be
# called, meaning self.isclosed() is meaningful.
return self.fp is None
# XXX It would be nice to have readline and __iter__ for this, too.
def read(self, amt=None):
if self.fp is None:
return b""
if self.chunked:
return self._read_chunked(amt)
if amt is None:
# unbounded read
if self.length is None:
s = self.fp.read()
else:
s = self._safe_read(self.length)
self.length = 0
self.close() # we read everything
return s
if self.length is not None:
if amt > self.length:
# clip the read to the "end of response"
amt = self.length
# we do not use _safe_read() here because this may be a .will_close
# connection, and the user is reading more bytes than will be provided
# (for example, reading in 1k chunks)
s = self.fp.read(amt)
if self.length is not None:
self.length -= len(s)
if not self.length:
self.close()
return s
def _read_chunked(self, amt):
assert self.chunked != _UNKNOWN
chunk_left = self.chunk_left
value = b""
# XXX This accumulates chunks by repeated string concatenation,
# which is not efficient as the number or size of chunks gets big.
while True:
if chunk_left is None:
line = self.fp.readline()
i = line.find(b";")
if i >= 0:
line = line[:i] # strip chunk-extensions
try:
chunk_left = int(line, 16)
except ValueError:
# close the connection as protocol synchronisation is
# probably lost
self.close()
raise IncompleteRead(value)
if chunk_left == 0:
break
if amt is None:
value += self._safe_read(chunk_left)
elif amt < chunk_left:
value += self._safe_read(amt)
self.chunk_left = chunk_left - amt
return value
elif amt == chunk_left:
value += self._safe_read(amt)
self._safe_read(2) # toss the CRLF at the end of the chunk
self.chunk_left = None
return value
else:
value += self._safe_read(chunk_left)
amt -= chunk_left
# we read the whole chunk, get another
self._safe_read(2) # toss the CRLF at the end of the chunk
chunk_left = None
# read and discard trailer up to the CRLF terminator
### note: we shouldn't have any trailers!
while True:
line = self.fp.readline()
if not line:
# a vanishingly small number of sites EOF without
# sending the trailer
break
if line == b"\r\n":
break
# we read everything; close the "file"
self.close()
return value
def _safe_read(self, amt):
"""Read the number of bytes requested, compensating for partial reads.
Normally, we have a blocking socket, but a read() can be interrupted
by a signal (resulting in a partial read).
Note that we cannot distinguish between EOF and an interrupt when zero
bytes have been read. IncompleteRead() will be raised in this
situation.
This function should be used when <amt> bytes "should" be present for
reading. If the bytes are truly not available (due to EOF), then the
IncompleteRead exception can be used to detect the problem.
"""
s = []
while amt > 0:
chunk = self.fp.read(min(amt, MAXAMOUNT))
if not chunk:
raise IncompleteRead(s)
s.append(chunk)
amt -= len(chunk)
return b"".join(s)
def getheader(self, name, default=None):
if self.msg is None:
raise ResponseNotReady()
return ', '.join(self.msg.get_all(name, default))
def getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise ResponseNotReady()
return list(self.msg.items())
class HTTPConnection:
_http_vsn = 11
_http_vsn_str = 'HTTP/1.1'
response_class = HTTPResponse
default_port = HTTP_PORT
auto_open = 1
debuglevel = 0
strict = 0
def __init__(self, host, port=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
self.timeout = timeout
self.sock = None
self._buffer = []
self.__response = None
self.__state = _CS_IDLE
self._method = None
self._set_hostport(host, port)
if strict is not None:
self.strict = strict
def _set_hostport(self, host, port):
if port is None:
i = host.rfind(':')
j = host.rfind(']') # ipv6 addresses have [...]
if i > j:
try:
port = int(host[i+1:])
except ValueError:
raise InvalidURL("nonnumeric port: '%s'" % host[i+1:])
host = host[:i]
else:
port = self.default_port
if host and host[0] == '[' and host[-1] == ']':
host = host[1:-1]
self.host = host
self.port = port
def set_debuglevel(self, level):
self.debuglevel = level
def connect(self):
"""Connect to the host and port specified in __init__."""
self.sock = socket.create_connection((self.host,self.port),
self.timeout)
def close(self):
"""Close the connection to the HTTP server."""
if self.sock:
self.sock.close() # close it manually... there may be other refs
self.sock = None
if self.__response:
self.__response.close()
self.__response = None
self.__state = _CS_IDLE
def send(self, str):
"""Send `str' to the server."""
if self.sock is None:
if self.auto_open:
self.connect()
else:
raise NotConnected()
# send the data to the server. if we get a broken pipe, then close
# the socket. we want to reconnect when somebody tries to send again.
#
# NOTE: we DO propagate the error, though, because we cannot simply
# ignore the error... the caller will know if they can retry.
if self.debuglevel > 0:
print("send:", repr(str))
try:
blocksize=8192
if hasattr(str,'read') :
if self.debuglevel > 0: print("sendIng a read()able")
data=str.read(blocksize)
while data:
self.sock.sendall(data)
data=str.read(blocksize)
else:
self.sock.sendall(str)
except socket.error as v:
if v.args[0] == 32: # Broken pipe
self.close()
raise
def _output(self, s):
"""Add a line of output to the current request buffer.
Assumes that the line does *not* end with \\r\\n.
"""
self._buffer.append(s)
def _send_output(self, message_body=None):
"""Send the currently buffered request and clear the buffer.
Appends an extra \\r\\n to the buffer.
"""
self._buffer.extend((b"", b""))
msg = b"\r\n".join(self._buffer)
del self._buffer[:]
# If msg and message_body are sent in a single send() call,
# it will avoid performance problems caused by the interaction
        # between delayed ack and the Nagle algorithm.
if message_body is not None:
msg += message_body
self.send(msg)
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
"""Send a request to the server.
`method' specifies an HTTP request method, e.g. 'GET'.
`url' specifies the object being requested, e.g. '/index.html'.
`skip_host' if True does not add automatically a 'Host:' header
`skip_accept_encoding' if True does not add automatically an
'Accept-Encoding:' header
"""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
# in certain cases, we cannot issue another request on this connection.
# this occurs when:
# 1) we are in the process of sending a request. (_CS_REQ_STARTED)
# 2) a response to a previous request has signalled that it is going
# to close the connection upon completion.
# 3) the headers for the previous response have not been read, thus
# we cannot determine whether point (2) is true. (_CS_REQ_SENT)
#
# if there is no prior response, then we can request at will.
#
# if point (2) is true, then we will have passed the socket to the
# response (effectively meaning, "there is no prior response"), and
# will open a new one when a new request is made.
#
# Note: if a prior response exists, then we *can* start a new request.
# We are not allowed to begin fetching the response to this new
# request, however, until that prior response is complete.
#
if self.__state == _CS_IDLE:
self.__state = _CS_REQ_STARTED
else:
raise CannotSendRequest()
# Save the method we use, we need it later in the response phase
self._method = method
if not url:
url = '/'
request = '%s %s %s' % (method, url, self._http_vsn_str)
# Non-ASCII characters should have been eliminated earlier
self._output(request.encode('ascii'))
if self._http_vsn == 11:
# Issue some standard headers for better HTTP/1.1 compliance
if not skip_host:
# this header is issued *only* for HTTP/1.1
# connections. more specifically, this means it is
# only issued when the client uses the new
# HTTPConnection() class. backwards-compat clients
# will be using HTTP/1.0 and those clients may be
# issuing this header themselves. we should NOT issue
# it twice; some web servers (such as Apache) barf
# when they see two Host: headers
                # If we need a non-standard port, include it in the
                # header. If the request is going through a proxy,
                # use the host of the actual URL, not the host of the
# proxy.
netloc = ''
if url.startswith('http'):
nil, netloc, nil, nil, nil = urlsplit(url)
if netloc:
try:
netloc_enc = netloc.encode("ascii")
except UnicodeEncodeError:
netloc_enc = netloc.encode("idna")
self.putheader('Host', netloc_enc)
else:
try:
host_enc = self.host.encode("ascii")
except UnicodeEncodeError:
host_enc = self.host.encode("idna")
if self.port == self.default_port:
self.putheader('Host', host_enc)
else:
host_enc = host_enc.decode("ascii")
self.putheader('Host', "%s:%s" % (host_enc, self.port))
# note: we are assuming that clients will not attempt to set these
# headers since *this* library must deal with the
# consequences. this also means that when the supporting
# libraries are updated to recognize other forms, then this
# code should be changed (removed or updated).
# we only want a Content-Encoding of "identity" since we don't
# support encodings such as x-gzip or x-deflate.
if not skip_accept_encoding:
self.putheader('Accept-Encoding', 'identity')
# we can accept "chunked" Transfer-Encodings, but no others
# NOTE: no TE header implies *only* "chunked"
#self.putheader('TE', 'chunked')
# if TE is supplied in the header, then it must appear in a
# Connection header.
#self.putheader('Connection', 'TE')
else:
# For HTTP/1.0, the server will assume "not chunked"
pass
def putheader(self, header, *values):
"""Send a request header line to the server.
For example: h.putheader('Accept', 'text/html')
"""
if self.__state != _CS_REQ_STARTED:
raise CannotSendHeader()
if hasattr(header, 'encode'):
header = header.encode('ascii')
values = list(values)
for i, one_value in enumerate(values):
if hasattr(one_value, 'encode'):
values[i] = one_value.encode('ascii')
value = b'\r\n\t'.join(values)
header = header + b': ' + value
self._output(header)
def endheaders(self, message_body=None):
"""Indicate that the last header line has been sent to the server.
This method sends the request to the server. The optional
message_body argument can be used to pass message body
associated with the request. The message body will be sent in
the same packet as the message headers if possible. The
message_body should be a string.
"""
if self.__state == _CS_REQ_STARTED:
self.__state = _CS_REQ_SENT
else:
raise CannotSendHeader()
self._send_output(message_body)
def request(self, method, url, body=None, headers={}):
"""Send a complete request to the server."""
try:
self._send_request(method, url, body, headers)
except socket.error as v:
# trap 'Broken pipe' if we're allowed to automatically reconnect
if v.args[0] != 32 or not self.auto_open:
raise
# try one more time
self._send_request(method, url, body, headers)
def _set_content_length(self, body):
# Set the content-length based on the body.
thelen = None
try:
thelen = str(len(body))
except TypeError as te:
# If this is a file-like object, try to
# fstat its file descriptor
import os
try:
thelen = str(os.fstat(body.fileno()).st_size)
except (AttributeError, OSError):
# Don't send a length if this failed
if self.debuglevel > 0: print("Cannot stat!!")
if thelen is not None:
self.putheader('Content-Length', thelen)
def _send_request(self, method, url, body, headers):
# honour explicitly requested Host: and Accept-Encoding headers
header_names = dict.fromkeys([k.lower() for k in headers])
skips = {}
if 'host' in header_names:
skips['skip_host'] = 1
if 'accept-encoding' in header_names:
skips['skip_accept_encoding'] = 1
self.putrequest(method, url, **skips)
if body and ('content-length' not in header_names):
self._set_content_length(body)
for hdr, value in headers.items():
self.putheader(hdr, value)
if isinstance(body, str):
self.endheaders(body.encode('ascii'))
else:
self.endheaders()
if body: # when body is a file rather than a string
self.send(body)
def getresponse(self):
"""Get the response from the server."""
# if a prior response has been completed, then forget about it.
if self.__response and self.__response.isclosed():
self.__response = None
#
# if a prior response exists, then it must be completed (otherwise, we
# cannot read this response's header to determine the connection-close
# behavior)
#
# note: if a prior response existed, but was connection-close, then the
# socket and response were made independent of this HTTPConnection
# object since a new request requires that we open a whole new
# connection
#
# this means the prior response had one of two states:
# 1) will_close: this connection was reset and the prior socket and
# response operate independently
# 2) persistent: the response was retained and we await its
# isclosed() status to become true.
#
if self.__state != _CS_REQ_SENT or self.__response:
raise ResponseNotReady()
if self.debuglevel > 0:
response = self.response_class(self.sock, self.debuglevel,
strict=self.strict,
method=self._method)
else:
response = self.response_class(self.sock, strict=self.strict,
method=self._method)
response.begin()
assert response.will_close != _UNKNOWN
self.__state = _CS_IDLE
if response.will_close:
# this effectively passes the connection to the response
self.close()
else:
# remember this, so we can tell when it is complete
self.__response = response
return response
try:
import ssl
except ImportError:
pass
else:
class HTTPSConnection(HTTPConnection):
"This class allows communication via SSL."
default_port = HTTPS_PORT
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
HTTPConnection.__init__(self, host, port, strict, timeout)
self.key_file = key_file
self.cert_file = cert_file
def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket.create_connection((self.host, self.port), self.timeout)
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
def FakeSocket (sock, sslobj):
warnings.warn("FakeSocket is deprecated, and won't be in 3.x. " +
"Use the result of ssl.wrap_socket() directly instead.",
DeprecationWarning, stacklevel=2)
return sslobj
__all__.append("HTTPSConnection")
class HTTPException(Exception):
# Subclasses that define an __init__ must call Exception.__init__
# or define self.args. Otherwise, str() will fail.
pass
class NotConnected(HTTPException):
pass
class InvalidURL(HTTPException):
pass
class UnknownProtocol(HTTPException):
def __init__(self, version):
self.args = version,
self.version = version
class UnknownTransferEncoding(HTTPException):
pass
class UnimplementedFileMode(HTTPException):
pass
class IncompleteRead(HTTPException):
def __init__(self, partial):
self.args = partial,
self.partial = partial
class ImproperConnectionState(HTTPException):
pass
class CannotSendRequest(ImproperConnectionState):
pass
class CannotSendHeader(ImproperConnectionState):
pass
class ResponseNotReady(ImproperConnectionState):
pass
class BadStatusLine(HTTPException):
def __init__(self, line):
self.args = line,
self.line = line
# for backwards compatibility
error = HTTPException
class LineAndFileWrapper:
"""A limited file-like object for HTTP/0.9 responses."""
# The status-line parsing code calls readline(), which normally
# get the HTTP status line. For a 0.9 response, however, this is
# actually the first line of the body! Clients need to get a
# readable file object that contains that line.
def __init__(self, line, file):
self._line = line
self._file = file
self._line_consumed = 0
self._line_offset = 0
self._line_left = len(line)
def __getattr__(self, attr):
return getattr(self._file, attr)
def _done(self):
# called when the last byte is read from the line. After the
# call, all read methods are delegated to the underlying file
# object.
self._line_consumed = 1
self.read = self._file.read
self.readline = self._file.readline
self.readlines = self._file.readlines
def read(self, amt=None):
if self._line_consumed:
return self._file.read(amt)
assert self._line_left
if amt is None or amt > self._line_left:
s = self._line[self._line_offset:]
self._done()
if amt is None:
return s + self._file.read()
else:
return s + self._file.read(amt - len(s))
else:
assert amt <= self._line_left
i = self._line_offset
j = i + amt
s = self._line[i:j]
self._line_offset = j
self._line_left -= amt
if self._line_left == 0:
self._done()
return s
def readline(self):
if self._line_consumed:
return self._file.readline()
assert self._line_left
s = self._line[self._line_offset:]
self._done()
return s
def readlines(self, size=None):
if self._line_consumed:
return self._file.readlines(size)
assert self._line_left
L = [self._line[self._line_offset:]]
self._done()
if size is None:
return L + self._file.readlines()
else:
return L + self._file.readlines(size)
| MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.0/Lib/http/client.py | Python | mit | 37,702 |
class Friends:
@staticmethod
def _get_friends(session, user_id):
"""
https://vk.com/dev/friends.get
"""
response = session.fetch('friends.get', user_id=user_id)
return response["items"]
@staticmethod
def _get_friends_count(session, user_id):
"""
https://vk.com/dev/friends.get
"""
response = session.fetch('friends.get', user_id=user_id, count=1)
return response["count"]
| sgaynetdinov/py-vkontakte | vk/friends.py | Python | mit | 469 |
from amqpstorm.management import ManagementApi
from amqpstorm.management.exception import ApiConnectionError
from amqpstorm.management.exception import ApiError
from amqpstorm.tests import CAFILE
from amqpstorm.tests import HTTP_URL
from amqpstorm.tests import HTTPS_URL
from amqpstorm.tests import PASSWORD
from amqpstorm.tests import USERNAME
from amqpstorm.tests.functional.utility import TestFunctionalFramework
class ApiFunctionalTests(TestFunctionalFramework):
def test_api_url_with_slash(self):
api = ManagementApi(HTTP_URL + '/', USERNAME, PASSWORD)
self.assertEqual(api.aliveness_test('/'), {'status': 'ok'})
def test_api_with_invalid_url(self):
api = ManagementApi('abc', USERNAME, PASSWORD)
self.assertRaisesRegex(
ApiConnectionError,
'Invalid URL',
api.aliveness_test, '/'
)
def test_api_with_inaccessible(self):
api = ManagementApi('http://192.168.1.50', USERNAME, PASSWORD,
timeout=0.1)
self.assertRaisesRegex(
ApiConnectionError,
'Max retries exceeded with url',
api.aliveness_test
)
def test_api_with_invalid_credentials(self):
api = ManagementApi(HTTP_URL, 'travis_ci', PASSWORD)
self.assertRaisesRegex(
ApiError,
'401 Client Error: Unauthorized',
api.aliveness_test
)
def test_api_ssl_test(self):
api = ManagementApi(HTTPS_URL, USERNAME, PASSWORD,
verify=CAFILE)
self.assertEqual(api.aliveness_test(), {'status': 'ok'})
def test_api_aliveness_test(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
self.assertEqual(api.aliveness_test(), {'status': 'ok'})
def test_api_context_manager(self):
with ManagementApi(HTTP_URL, USERNAME, PASSWORD) as api:
self.assertEqual(api.aliveness_test(), {'status': 'ok'})
def test_api_overview(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
result = api.overview()
self.assertIsInstance(result, dict)
self.assertIn('node', result)
self.assertIn('management_version', result)
def test_api_cluster_name(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
result = api.cluster_name()
self.assertIsInstance(result, dict)
self.assertIn('name', result)
self.assertEqual('[email protected]', result['name'])
def test_api_nodes(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
result = api.nodes()
self.assertIsInstance(result, list)
self.assertTrue(result)
self.assertEqual('[email protected]', result[0]['name'])
def test_api_node(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
result = api.node('[email protected]')
self.assertIsInstance(result, dict)
self.assertTrue(result)
self.assertEqual('[email protected]', result['name'])
def test_api_whoami(self):
api = ManagementApi(HTTP_URL, USERNAME, PASSWORD)
result = api.whoami()
self.assertIsInstance(result, dict)
self.assertEqual(result['name'], USERNAME)
# RabbitMQ 3.9.X compatibility
if isinstance(result['tags'], list):
tag = result['tags'][0]
else:
tag = result['tags']
self.assertEqual('administrator', tag)
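# --- Editor's note: a standalone usage sketch, not part of the original tests. ---
# The URL and credentials are placeholders; the calls mirror what the tests above
# exercise (context-manager support, aliveness_test, overview).
def _example_healthcheck(url="http://localhost:15672", username="guest", password="guest"):
    """Hedged sketch: ping a broker and return its overview document."""
    with ManagementApi(url, username, password) as api:
        if api.aliveness_test() != {'status': 'ok'}:
            raise RuntimeError("broker is not alive")
        return api.overview()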
| eandersson/amqpstorm | amqpstorm/tests/functional/management/test_api.py | Python | mit | 3,496 |
#
# This file is part of python-corosync. Python-Corosync is free software
# that is made available under the MIT license. Consult the file "LICENSE"
# that is distributed together with this file for the exact licensing terms.
#
# Python-Corosync is copyright (c) 2008 by the python-corosync authors. See
# the file "AUTHORS" for a complete overview.
class Service(object):
"""Base class for python-corosync services."""
def start(self):
"""Register to the executive and start receiving events."""
raise NotImplementedError
def stop(self):
"""Stop receiving events."""
raise NotImplementedError
def active(self):
"""Return True if this service is active, false otherwise."""
raise NotImplementedError
def fileno(self):
"""Return a file descriptor that can be used to wait for events."""
raise NotImplementedError
def dispatch(self, type=None):
"""Dispatch events for this service."""
raise NotImplementedError
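# --- Editor's note: an illustrative sketch, not part of the original module. ---
# Service is abstract; the loop below relies only on the interface documented
# above (start/stop/active/fileno/dispatch) plus the standard-library select
# module, and would be driven by a concrete python-corosync service instance.
def _example_event_loop(service):
    """Hedged sketch: start a service and dispatch its events until it stops."""
    import select
    service.start()
    try:
        while service.active():
            # fileno() lets the service take part in a select()-based loop.
            readable, _, _ = select.select([service.fileno()], [], [], 1.0)
            if readable:
                service.dispatch()
    finally:
        service.stop()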
| geertj/python-corosync | lib/corosync/service.py | Python | mit | 951 |
from collections import UserList, UserDict
from contextlib import contextmanager
from functools import partial
def fnv32a(text):
h = 0x811c9dc5
for c in text:
h = ((h ^ ord(c)) * 0x01000193) & 0xffffffff
return h
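# --- Editor's note: a tiny property sketch, not part of the original module. ---
# fnv32a is a pure FNV-1a 32-bit hash, so unlike the builtin hash() (mentioned in
# Hashed's docstring below) it is stable across interpreter runs.
def _example_fnv32a_is_stable(text="spam"):
    """Hedged sketch: the hash is deterministic and fits in 32 bits."""
    value = fnv32a(text)
    assert value == fnv32a(text)
    assert 0 <= value < 2 ** 32
    return value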
def numbered_columns(array):
# Can't use `not array` because numpy interprets it differently.
if len(array) == 0:
return []
return [str(i) for i in range(len(array[0]))]
def iterate_items(obj):
"""Iterates over the object's key value pairs (key, value), where obj[key] == value."""
if hasattr(obj, "items"):
return obj.items()
# If the object lacks the items() function, assume it's a list with keys [0, len(obj)-1].
return enumerate(obj)
class Array(UserList):
"""Array stores the real-valued features.
Args:
columns: [str], the name of each column.
"""
def __init__(self, columns=None):
super().__init__()
self._columns = list(columns) if columns is not None else []
self.data = []
def concat(self, other, prefix=""):
"""Adds the columns from `other` to `self`. `other` can be
of any type that supports length, indexing and enumeration. Furthermore,
column names can be supplied in a `columns` attribute.
Args:
other: contains the new columns.
prefix: str, optional prefix added to the new column names
to avoid name clashes.
"""
if len(self) == 0:
self.data = [[] for i in range(len(other))]
if len(self) != len(other):
raise ValueError("array length does not match - have {} and {}".format(len(self), len(other)))
columns = other.columns if hasattr(other, "columns") else numbered_columns(other)
if prefix:
columns = ["{}_{}".format(prefix, name) for name in columns]
# Make sure the column names do not clash.
for column in columns:
if column in self._columns:
raise ValueError("a column named '{}' already exists".format(column))
self._columns += columns
for i, row in enumerate(other):
self.data[i].extend(row)
@property
def shape(self):
"""Returns the array shape."""
return (len(self.data), len(self.data[0]))
@property
def columns(self):
"""Returns the column names."""
if not self._columns:
self._columns = numbered_columns(self)
return self._columns
class BaseFeature(object):
def discard(self):
raise NotImplementedError()
def set(self, *args, **kwargs):
raise NotImplementedError()
def push(self):
raise NotImplementedError()
def array(self):
raise NotImplementedError()
@contextmanager
def new(self):
self.discard()
try:
yield self
except:
self.discard()
raise
else:
self.push()
class CurriedSet(object):
"""Helper class that allows partially applying arguments to the object's set function
by placing the arguments in the function name. This allows more readable function calls:
>>> obj.set("a", "b", "c", 1.0)
>>> obj.set_a_b_c(1.0)
"""
def __getattr__(self, name):
function, *partial_applied_args = name.split("_")
if function != "set" or not partial_applied_args:
return self.__getattribute__(name)
return partial(self.set, *partial_applied_args)
class Pipe(object):
"""Pipe applies one ore more functions to a feature array.
Args:
feature: Feature|Group, of which the array shall be transformed.
functions: callable, one or more functions that take and return an array.
"""
def __init__(self, feature, *functions):
self.feature = feature
self.functions = functions
def array(self):
"""Returns the feature array with all functions of the pipe applied."""
result = self.feature.array()
for function in self.functions:
result = function(result)
return result
def __getattr__(self, name):
return getattr(self.feature, name)
class Group(BaseFeature, CurriedSet):
"""Group combines one or more Feature/Group instances.
Args:
features: {str: Feature|Group}, dictionary of Feature/Group instances
stored under their names.
"""
def __init__(self, features):
super().__init__()
self.features = features
def set(self, *args, **kwargs):
# If there's only a single feature, allow omitting the feature name.
# Otherwise name should be the first argument and the remaining arguments
# get passed on to the feature's set() function.
if len(self.features) == 1:
name, = self.features.keys()
if name == args[0]:
args = args[1:]
else:
name = args[0]
args = args[1:]
self.features[name].set(*args, **kwargs)
def discard(self):
for feature in self.features.values():
feature.discard()
def push(self):
for feature in self.features.values():
feature.push()
def array(self):
result = Array()
for name, feature in sorted(self.features.items()):
result.concat(feature.array(), prefix=name)
return result
class Feature(BaseFeature):
"""Base class of all features.
A feature produces one or more numerical values. These values
are stored in so-called fields.
Args:
dimensions: int or [str] or None, either the number of dimensions this feature
produces or a list of their names.
If dimensions is an integer, the dimension names will be indexes from 0 to `dimensions-1`.
If dimensions is None, the number of dimensions and their names will be
determined dynamically.
Attributes:
dimensions: [int|str] or None, the names of the fields produced by this feature.
"""
def __init__(self, dimensions=None):
if hasattr(self, "Dimensions"):
dimensions = self.Dimensions
if type(dimensions) is int:
dimensions = list(range(dimensions))
self.dimensions = sorted(set(dimensions)) if dimensions else None
self.slot = {}
self.rows = []
def discard(self):
self.slot = {}
def push(self):
if self.dimensions:
for dimension, value in iterate_items(self.slot):
if dimension not in self.dimensions:
raise KeyError("unknown dimension '{}' (have dimensions {})".format(dimension, self.dimensions))
self.rows.append(self.slot)
self.slot = {}
def array(self):
# If dimensions is None, determine the dimensions by looking at the data.
if self.dimensions is None:
self.dimensions = sorted({dimension for row in self.rows for dimension, value in iterate_items(row)})
result = Array(columns=self.dimensions)
for i, row in enumerate(self.rows):
values = [0.0] * len(self.dimensions)
for dimension, value in iterate_items(row):
values[self.dimensions.index(dimension)] = value
result.data.append(values)
return result
class Numerical(Feature, CurriedSet):
"""Produces a single numerical value."""
def set(self, *args):
index = 0
if len(args) == 1:
value, = args
else:
index, value = args
self.slot[index] = value
class Categorical(Feature, CurriedSet):
"""Performs one hot encoding on categorical data.
Args:
values: list, the values to encode.
"""
def __init__(self, values):
super().__init__(dimensions=values)
def set(self, token, weight=1.0):
if token in self.dimensions:
self.slot[token] = weight
class Hashed(Feature):
"""Hashes arbitrary values into a fixed number of buckets.
Args:
hash: callable, the hash function to map features to buckets.
Please note that Python's hash() function is randomized. Using it
here maps values to different buckets on each program run.
buckets: int, the number of buckets to use.
replace: str or callable, defines a replacement strategy if a value
gets assigned to a non-empty bucket. Supported strategies are
- 'sum', which adds the value to bucket
- 'max', which stores the maximum in the bucket
Additionally, a callable function can be passed to implement
a custom replacement strategy.
random_sign: bool, if True, makes the sign of the weights depend on
the hash values.
"""
def __init__(self, hash=fnv32a, buckets=100, replace=None, random_sign=False):
super().__init__(dimensions=buckets)
self.buckets = buckets
self.random_sign = random_sign
self.replace = None
if replace == "sum":
self.replace = lambda new, old: new + old
if replace == "max":
self.replace = lambda new, old: max(new, old)
if callable(replace):
self.replace = replace
self.hash = hash
def set(self, token, weight=1.0):
key = self.hash(token)
if self.random_sign:
if key & 0x80000000 != 0:
weight = -weight
index = key % self.buckets
if self.replace is not None and index in self.slot:
weight = self.replace(weight, self.slot[index])
self.slot[index] = weight
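# --- Editor's note: an end-to-end usage sketch, not part of the original module. ---
# It only uses classes defined above; the feature names and sample records are
# placeholders.
def _example_build_array():
    """Hedged sketch: compose Numerical, Categorical and Hashed via Group."""
    features = Group({
        "age": Numerical(dimensions=1),
        "color": Categorical(["red", "green", "blue"]),
        "words": Hashed(buckets=8, replace="sum"),
    })
    records = [
        {"age": 34, "color": "red", "text": "spam ham"},
        {"age": 27, "color": "blue", "text": "ham"},
    ]
    for record in records:
        with features.new():                      # discard on error, push on success
            features.set("age", record["age"])
            features.set("color", record["color"])
            for word in record["text"].split():
                features.set("words", word)
    return features.array()                       # one row per record, prefixed column names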
| slyrz/feature | feature/feature.py | Python | mit | 9,670 |
import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import asc
from sqlalchemy import desc
from sqlalchemy import exc as sa_exc
from sqlalchemy import exists
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import literal_column
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy import text
from sqlalchemy import true
from sqlalchemy import union
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.orm import aliased
from sqlalchemy.orm import backref
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import column_property
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm.context import ORMSelectCompileState
from sqlalchemy.orm.util import join
from sqlalchemy.sql import column
from sqlalchemy.sql import table
from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from test.orm import _fixtures
class QueryTest(_fixtures.FixtureTest):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
(
Node,
composite_pk_table,
users,
Keyword,
items,
Dingaling,
order_items,
item_keywords,
Item,
User,
dingalings,
Address,
keywords,
CompositePk,
nodes,
Order,
orders,
addresses,
) = (
cls.classes.Node,
cls.tables.composite_pk_table,
cls.tables.users,
cls.classes.Keyword,
cls.tables.items,
cls.classes.Dingaling,
cls.tables.order_items,
cls.tables.item_keywords,
cls.classes.Item,
cls.classes.User,
cls.tables.dingalings,
cls.classes.Address,
cls.tables.keywords,
cls.classes.CompositePk,
cls.tables.nodes,
cls.classes.Order,
cls.tables.orders,
cls.tables.addresses,
)
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, backref="user", order_by=addresses.c.id
),
"orders": relationship(
Order, backref="user", order_by=orders.c.id
), # o2m, m2o
},
)
cls.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"dingaling": relationship(
Dingaling, uselist=False, backref="address"
) # o2o
},
)
cls.mapper_registry.map_imperatively(Dingaling, dingalings)
cls.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, order_by=items.c.id
), # m2m
"address": relationship(Address), # m2o
},
)
cls.mapper_registry.map_imperatively(
Item,
items,
properties={
"keywords": relationship(Keyword, secondary=item_keywords)
},
) # m2m
cls.mapper_registry.map_imperatively(Keyword, keywords)
cls.mapper_registry.map_imperatively(
Node,
nodes,
properties={
"children": relationship(
Node, backref=backref("parent", remote_side=[nodes.c.id])
)
},
)
cls.mapper_registry.map_imperatively(CompositePk, composite_pk_table)
configure_mappers()
class QueryCorrelatesLikeSelect(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
query_correlated = (
"SELECT users.name AS users_name, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE addresses.user_id = users.id) AS anon_1 FROM users"
)
query_not_correlated = (
"SELECT users.name AS users_name, "
"(SELECT count(addresses.id) AS count_1 FROM addresses, users "
"WHERE addresses.user_id = users.id) AS anon_1 FROM users"
)
def test_scalar_subquery_select_auto_correlate(self):
addresses, users = self.tables.addresses, self.tables.users
query = (
select(func.count(addresses.c.id))
.where(addresses.c.user_id == users.c.id)
.scalar_subquery()
)
query = select(users.c.name.label("users_name"), query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_select_explicit_correlate(self):
addresses, users = self.tables.addresses, self.tables.users
query = (
select(func.count(addresses.c.id))
.where(addresses.c.user_id == users.c.id)
.correlate(users)
.scalar_subquery()
)
query = select(users.c.name.label("users_name"), query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_select_correlate_off(self):
addresses, users = self.tables.addresses, self.tables.users
query = (
select(func.count(addresses.c.id))
.where(addresses.c.user_id == users.c.id)
.correlate(None)
.scalar_subquery()
)
query = select(users.c.name.label("users_name"), query)
self.assert_compile(
query, self.query_not_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_query_auto_correlate(self):
sess = fixture_session()
Address, User = self.classes.Address, self.classes.User
query = (
sess.query(func.count(Address.id))
.filter(Address.user_id == User.id)
.scalar_subquery()
)
query = sess.query(User.name, query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_query_explicit_correlate(self):
sess = fixture_session()
Address, User = self.classes.Address, self.classes.User
query = (
sess.query(func.count(Address.id))
.filter(Address.user_id == User.id)
.correlate(self.tables.users)
.scalar_subquery()
)
query = sess.query(User.name, query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
@testing.combinations(False, None)
def test_scalar_subquery_query_correlate_off(self, value):
sess = fixture_session()
Address, User = self.classes.Address, self.classes.User
query = (
sess.query(func.count(Address.id))
.filter(Address.user_id == User.id)
.correlate(value)
.scalar_subquery()
)
query = sess.query(User.name, query)
self.assert_compile(
query, self.query_not_correlated, dialect=default.DefaultDialect()
)
def test_correlate_to_union(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User)
q = sess.query(User).union(q)
u_alias = aliased(User)
raw_subq = exists().where(u_alias.id > User.id)
orm_subq = sess.query(u_alias).filter(u_alias.id > User.id).exists()
self.assert_compile(
q.add_columns(raw_subq),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"EXISTS (SELECT * FROM users AS users_1 "
"WHERE users_1.id > anon_1.users_id) AS anon_2 "
"FROM ("
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users) AS anon_1",
)
# only difference is "1" vs. "*" (not sure why that is)
self.assert_compile(
q.add_columns(orm_subq),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"EXISTS (SELECT 1 FROM users AS users_1 "
"WHERE users_1.id > anon_1.users_id) AS anon_2 "
"FROM ("
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users) AS anon_1",
)
def test_correlate_to_union_w_labels_newstyle(self):
User = self.classes.User
q = select(User).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
q = (
select(User)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.union(q)
.subquery()
)
u_alias = aliased(User)
raw_subq = exists().where(u_alias.id > q.c[0])
self.assert_compile(
select(q, raw_subq).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"EXISTS (SELECT * FROM users AS users_1 "
"WHERE users_1.id > anon_1.users_id) AS anon_2 "
"FROM ("
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users) AS anon_1",
)
def test_correlate_to_union_newstyle(self):
User = self.classes.User
q = select(User)
q = select(User).union(q).subquery()
u_alias = aliased(User)
raw_subq = exists().where(u_alias.id > q.c[0])
self.assert_compile(
select(q, raw_subq),
"SELECT anon_1.id, anon_1.name, EXISTS "
"(SELECT * FROM users AS users_1 WHERE users_1.id > anon_1.id) "
"AS anon_2 FROM (SELECT users.id AS id, users.name AS name "
"FROM users "
"UNION SELECT users.id AS id, users.name AS name FROM users) "
"AS anon_1",
)
class RawSelectTest(QueryTest, AssertsCompiledSQL):
"""compare a bunch of select() tests with the equivalent Query using
straight table/columns.
Results should be the same as Query should act as a select() pass-
thru for ClauseElement entities.
"""
__dialect__ = "default"
def test_select(self):
addresses, users = self.tables.addresses, self.tables.users
sess = fixture_session()
self.assert_compile(
sess.query(users)
.select_entity_from(users.select().subquery())
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.statement,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, "
"(SELECT users.id AS id, users.name AS name FROM users) AS anon_1",
)
self.assert_compile(
sess.query(users, exists(text("1")).select_from(addresses))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.statement,
"SELECT users.id AS users_id, users.name AS users_name, EXISTS "
"(SELECT 1 FROM addresses) AS anon_1 FROM users",
)
# a little tedious here, adding labels to work around Query's
# auto-labelling.
s = (
sess.query(
addresses.c.id.label("id"),
addresses.c.email_address.label("email"),
)
.filter(addresses.c.user_id == users.c.id)
.correlate(users)
.statement.alias()
)
self.assert_compile(
sess.query(users, s.c.email)
.select_entity_from(users.join(s, s.c.id == users.c.id))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.statement,
"SELECT users.id AS users_id, users.name AS users_name, "
"anon_1.email AS anon_1_email "
"FROM users JOIN (SELECT addresses.id AS id, "
"addresses.email_address AS email FROM addresses, users "
"WHERE addresses.user_id = users.id) AS anon_1 "
"ON anon_1.id = users.id",
)
x = func.lala(users.c.id).label("foo")
self.assert_compile(
sess.query(x).filter(x == 5).statement,
"SELECT lala(users.id) AS foo FROM users WHERE "
"lala(users.id) = :param_1",
)
self.assert_compile(
sess.query(func.sum(x).label("bar")).statement,
"SELECT sum(lala(users.id)) AS bar FROM users",
)
class EntityFromSubqueryTest(QueryTest, AssertsCompiledSQL):
# formerly FromSelfTest
__dialect__ = "default"
def test_filter(self):
User = self.classes.User
subq = select(User).filter(User.id.in_([8, 9])).subquery()
q = fixture_session().query(aliased(User, subq))
eq_(
[User(id=8), User(id=9)],
q.all(),
)
subq = select(User).order_by(User.id).slice(1, 3).subquery()
q = fixture_session().query(aliased(User, subq))
eq_([User(id=8), User(id=9)], q.all())
subq = select(User).filter(User.id.in_([8, 9])).subquery()
u = aliased(User, subq)
q = fixture_session().query(u).order_by(u.id)
eq_(
[User(id=8)],
list(q[0:1]),
)
def test_join(self):
User, Address = self.classes.User, self.classes.Address
stmt = select(User).filter(User.id.in_([8, 9])).subquery()
u = aliased(User, stmt)
q = (
fixture_session()
.query(u)
.join(u.addresses)
.add_entity(Address)
.order_by(u.id, Address.id)
)
eq_(
[
(User(id=8), Address(id=2)),
(User(id=8), Address(id=3)),
(User(id=8), Address(id=4)),
(User(id=9), Address(id=5)),
],
q.all(),
)
def test_group_by(self):
Address = self.classes.Address
subq = (
select(Address.user_id, func.count(Address.id).label("count"))
.group_by(Address.user_id)
.order_by(Address.user_id)
.subquery()
)
# there's no reason to do aliased(Address) in this case but we're just
# testing
aq = aliased(Address, subq)
q = fixture_session().query(aq.user_id, subq.c.count)
eq_(
q.all(),
[(7, 1), (8, 3), (9, 1)],
)
subq = select(Address.user_id, Address.id)
aq = aliased(Address, subq)
q = (
fixture_session()
.query(aq.user_id, func.count(aq.id))
.group_by(aq.user_id)
.order_by(aq.user_id)
)
eq_(
q.all(),
[(7, 1), (8, 3), (9, 1)],
)
def test_error_w_aliased_against_select(self):
User = self.classes.User
s = fixture_session()
stmt = select(User.id)
assert_raises_message(
sa_exc.ArgumentError,
"Column expression or FROM clause expected, got "
"<sqlalchemy.sql.selectable.Select .*> object resolved from "
"<AliasedClass .* User> object. To create a FROM clause from "
"a <class 'sqlalchemy.sql.selectable.Select'> object",
s.query,
aliased(User, stmt),
)
def test_having(self):
User = self.classes.User
s = fixture_session()
stmt = (
select(User.id)
.group_by(User.id)
.having(User.id > 5)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
q = s.query(aliased(User, stmt))
self.assert_compile(
q,
"SELECT anon_1.users_id AS anon_1_users_id FROM "
"(SELECT users.id AS users_id FROM users GROUP "
"BY users.id HAVING users.id > :id_1) AS anon_1",
)
def test_no_joinedload(self):
User = self.classes.User
s = fixture_session()
subq = (
select(User)
.options(joinedload(User.addresses))
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
uq = aliased(User, subq)
q = s.query(uq)
# in 2.0 style, joinedload in the subquery is just ignored
self.assert_compile(
q.statement,
"SELECT anon_1.users_id, anon_1.users_name FROM (SELECT "
"users.id AS users_id, users.name AS users_name FROM users) "
"AS anon_1",
)
# needs to be on the outside
self.assert_compile(
q.options(joinedload(uq.addresses)).statement,
"SELECT anon_1.users_id, anon_1.users_name, addresses_1.id, "
"addresses_1.user_id, addresses_1.email_address FROM "
"(SELECT users.id AS users_id, users.name AS "
"users_name FROM users) AS anon_1 LEFT OUTER JOIN "
"addresses AS addresses_1 ON anon_1.users_id = "
"addresses_1.user_id ORDER BY addresses_1.id",
)
def test_aliases(self):
"""test that aliased objects are accessible externally to a from_self()
call."""
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
ualias = aliased(User)
subq = select(User, ualias).filter(User.id > ualias.id).subquery()
uq1 = aliased(User, subq)
uq2 = aliased(ualias, subq)
q = s.query(uq1.name, uq2.name).order_by(uq1.name, uq2.name)
eq_(
q.all(),
[
("chuck", "ed"),
("chuck", "fred"),
("chuck", "jack"),
("ed", "jack"),
("fred", "ed"),
("fred", "jack"),
],
)
q = (
s.query(uq1.name, uq2.name)
.filter(uq2.name == "ed")
.order_by(uq1.name, uq2.name)
)
eq_(
q.all(),
[("chuck", "ed"), ("fred", "ed")],
)
q = (
s.query(uq2.name, Address.email_address)
.join(uq2.addresses)
.order_by(uq2.name, Address.email_address)
)
eq_(
q.all(),
[
("ed", "[email protected]"),
("jack", "[email protected]"),
("jack", "[email protected]"),
("jack", "[email protected]"),
("jack", "[email protected]"),
],
)
def test_multiple_entities(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
subq = (
select(User, Address)
.filter(User.id == Address.user_id)
.filter(Address.id.in_([2, 5]))
.subquery()
)
uq = aliased(User, subq)
aq = aliased(Address, subq)
eq_(
sess.query(uq, aq).all(),
[(User(id=8), Address(id=2)), (User(id=9), Address(id=5))],
)
eq_(
sess.query(uq, aq).options(joinedload(uq.addresses)).first(),
(
User(id=8, addresses=[Address(), Address(), Address()]),
Address(id=2),
),
)
def test_multiple_with_column_entities_oldstyle(self):
# this is now very awkward and not very useful
User = self.classes.User
subq = select(User.id).subquery()
uq = aliased(User, subq)
subq2 = (
select(uq.id)
.add_columns(func.count().label("foo"))
.group_by(uq.id)
.order_by(uq.id)
.subquery()
)
uq2 = aliased(User, subq2)
sess = fixture_session()
eq_(
sess.query(uq2.id, subq2.c.foo).all(),
[(7, 1), (8, 1), (9, 1), (10, 1)],
)
def test_multiple_with_column_entities_newstyle(self):
User = self.classes.User
sess = fixture_session()
q1 = sess.query(User.id)
subq1 = aliased(User, q1.subquery())
q2 = sess.query(subq1.id).add_columns(func.count().label("foo"))
q2 = q2.group_by(subq1.id).order_by(subq1.id).subquery()
q3 = sess.query(q2)
eq_(
q3.all(),
[(7, 1), (8, 1), (9, 1), (10, 1)],
)
q3 = select(q2)
eq_(sess.execute(q3).fetchall(), [(7, 1), (8, 1), (9, 1), (10, 1)])
class ColumnAccessTest(QueryTest, AssertsCompiledSQL):
"""test access of columns after _from_selectable has been applied"""
__dialect__ = "default"
def test_select_entity_from(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User)
q = sess.query(User).select_entity_from(q.statement.subquery())
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name "
"FROM (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE anon_1.name = :name_1",
)
def test_select_entity_from_no_entities(self):
User = self.classes.User
sess = fixture_session()
assert_raises_message(
sa.exc.ArgumentError,
r"A selectable \(FromClause\) instance is "
"expected when the base alias is being set",
sess.query(User).select_entity_from(User)._compile_context,
)
def test_select_from_no_aliasing(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User)
q = sess.query(User).select_from(q.statement.subquery())
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE users.name = :name_1",
)
def test_anonymous_expression_oldstyle(self):
# relies upon _orm_only_from_obj_alias setting
from sqlalchemy.sql import column
sess = fixture_session()
c1, c2 = column("c1"), column("c2")
q1 = sess.query(c1, c2).filter(c1 == "dog")
q2 = sess.query(c1, c2).filter(c1 == "cat")
q3 = q1.union(q2)
self.assert_compile(
q3.order_by(c1),
"SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 "
"AS anon_1_c2 FROM (SELECT c1, c2 WHERE "
"c1 = :c1_1 UNION SELECT c1, c2 "
"WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1",
)
def test_anonymous_expression_newstyle(self):
from sqlalchemy.sql import column
c1, c2 = column("c1"), column("c2")
q1 = select(c1, c2).where(c1 == "dog")
q2 = select(c1, c2).where(c1 == "cat")
subq = q1.union(q2).subquery()
q3 = select(subq).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
q3.order_by(subq.c.c1),
"SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 "
"AS anon_1_c2 FROM (SELECT c1, c2 WHERE "
"c1 = :c1_1 UNION SELECT c1, c2 "
"WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1",
)
def test_table_anonymous_expression_from_self_twice_newstyle(self):
from sqlalchemy.sql import column
t1 = table("t1", column("c1"), column("c2"))
stmt = (
select(t1.c.c1, t1.c.c2)
.where(t1.c.c1 == "dog")
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
subq1 = (
stmt.subquery("anon_2")
.select()
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
subq2 = subq1.subquery("anon_1")
q1 = select(subq2).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
# as in test_anonymous_expression_from_self_twice_newstyle_wlabels,
# set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL) means the
# subquery cols have long names. however,
# here we illustrate if they did use
# set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL), but they also
# named the subqueries explicitly as one would certainly do if they
# were using set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL),
# we can get at that column based on how
# it is aliased, no different than plain SQL.
q1.order_by(subq2.c.anon_2_t1_c1),
"SELECT anon_1.anon_2_t1_c1 "
"AS anon_1_anon_2_t1_c1, anon_1.anon_2_t1_c2 "
"AS anon_1_anon_2_t1_c2 "
"FROM (SELECT anon_2.t1_c1 AS anon_2_t1_c1, "
"anon_2.t1_c2 AS anon_2_t1_c2 FROM (SELECT t1.c1 AS t1_c1, t1.c2 "
"AS t1_c2 FROM t1 WHERE t1.c1 = :c1_1) AS anon_2) AS anon_1 "
"ORDER BY anon_1.anon_2_t1_c1",
)
def test_anonymous_expression_from_self_twice_newstyle_wlabels(self):
from sqlalchemy.sql import column
c1, c2 = column("c1"), column("c2")
subq = select(c1, c2).where(c1 == "dog").subquery()
subq2 = (
select(subq)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
stmt = select(subq2).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
# because of the apply labels we don't have simple keys on
# subq2.c
stmt.order_by(subq2.c.corresponding_column(c1)),
"SELECT anon_1.anon_2_c1 AS anon_1_anon_2_c1, anon_1.anon_2_c2 AS "
"anon_1_anon_2_c2 FROM (SELECT anon_2.c1 AS anon_2_c1, anon_2.c2 "
"AS anon_2_c2 "
"FROM (SELECT c1, c2 WHERE c1 = :c1_1) AS "
"anon_2) AS anon_1 ORDER BY anon_1.anon_2_c1",
)
def test_anonymous_expression_from_self_twice_newstyle_wolabels(self):
from sqlalchemy.sql import column
c1, c2 = column("c1"), column("c2")
subq = select(c1, c2).where(c1 == "dog").subquery()
subq2 = select(subq).subquery()
stmt = select(subq2)
self.assert_compile(
# without labels we can access .c1 but the statement will not
# have the same labeling applied (which does not matter)
stmt.order_by(subq2.c.c1),
"SELECT anon_1.c1, anon_1.c2 FROM "
"(SELECT anon_2.c1 AS c1, anon_2.c2 AS c2 "
"FROM (SELECT c1, c2 WHERE c1 = :c1_1) AS "
"anon_2) AS anon_1 ORDER BY anon_1.c1",
)
def test_anonymous_labeled_expression_oldstyle(self):
# relies upon _orm_only_from_obj_alias setting
sess = fixture_session()
c1, c2 = column("c1"), column("c2")
q1 = sess.query(c1.label("foo"), c2.label("bar")).filter(c1 == "dog")
q2 = sess.query(c1.label("foo"), c2.label("bar")).filter(c1 == "cat")
q3 = q1.union(q2)
self.assert_compile(
q3.order_by(c1),
"SELECT anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar FROM "
"(SELECT c1 AS foo, c2 AS bar WHERE c1 = :c1_1 UNION SELECT "
"c1 AS foo, c2 AS bar "
"WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.foo",
)
def test_anonymous_labeled_expression_newstyle(self):
c1, c2 = column("c1"), column("c2")
q1 = select(c1.label("foo"), c2.label("bar")).where(c1 == "dog")
q2 = select(c1.label("foo"), c2.label("bar")).where(c1 == "cat")
subq = union(q1, q2).subquery()
q3 = select(subq).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
q3.order_by(subq.c.foo),
"SELECT anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar FROM "
"(SELECT c1 AS foo, c2 AS bar WHERE c1 = :c1_1 UNION SELECT "
"c1 AS foo, c2 AS bar "
"WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.foo",
)
def test_anonymous_expression_plus_flag_aliased_join_newstyle(self):
User = self.classes.User
Address = self.classes.Address
addresses = self.tables.addresses
sess = fixture_session()
q1 = sess.query(User.id).filter(User.id > 5)
uq = aliased(
User, q1.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).subquery()
)
aa = aliased(Address)
q1 = (
sess.query(uq.id)
.join(uq.addresses.of_type(aa))
.order_by(uq.id, aa.id, addresses.c.id)
)
self.assert_compile(
q1,
"SELECT anon_1.users_id AS anon_1_users_id "
"FROM (SELECT users.id AS users_id FROM users "
"WHERE users.id > :id_1) AS anon_1 JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_id, addresses_1.id, addresses.id",
)
def test_anonymous_expression_plus_explicit_aliased_join_newstyle(self):
"""test that the 'dont alias non-ORM' rule remains for other
kinds of aliasing when _from_selectable() is used."""
User = self.classes.User
Address = self.classes.Address
addresses = self.tables.addresses
sess = fixture_session()
q1 = (
sess.query(User.id)
.filter(User.id > 5)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.subquery()
)
uq = aliased(User, q1)
aa = aliased(Address)
q1 = (
sess.query(uq.id)
.join(aa, uq.addresses)
.order_by(uq.id, aa.id, addresses.c.id)
)
self.assert_compile(
q1,
"SELECT anon_1.users_id AS anon_1_users_id "
"FROM (SELECT users.id AS users_id FROM users "
"WHERE users.id > :id_1) AS anon_1 JOIN addresses AS addresses_1 "
"ON anon_1.users_id = addresses_1.user_id "
"ORDER BY anon_1.users_id, addresses_1.id, addresses.id",
)
class AddEntityEquivalenceTest(fixtures.MappedTest, AssertsCompiledSQL):
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("type", String(20)),
Column("bid", Integer, ForeignKey("b.id")),
)
Table(
"b",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
Column("type", String(20)),
)
Table(
"c",
metadata,
Column("id", Integer, ForeignKey("b.id"), primary_key=True),
Column("age", Integer),
)
Table(
"d",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("dede", Integer),
)
@classmethod
def setup_classes(cls):
class A(cls.Comparable):
pass
class B(cls.Comparable):
pass
class C(B):
pass
class D(A):
pass
@classmethod
def setup_mappers(cls):
a, c, b, d = (cls.tables.a, cls.tables.c, cls.tables.b, cls.tables.d)
A, B, C, D = cls.classes("A", "B", "C", "D")
cls.mapper_registry.map_imperatively(
A,
a,
polymorphic_identity="a",
polymorphic_on=a.c.type,
with_polymorphic=("*", None),
properties={
"link": relationship(B, uselist=False, backref="back")
},
)
cls.mapper_registry.map_imperatively(
B,
b,
polymorphic_identity="b",
polymorphic_on=b.c.type,
with_polymorphic=("*", None),
)
cls.mapper_registry.map_imperatively(
C, c, inherits=B, polymorphic_identity="c"
)
cls.mapper_registry.map_imperatively(
D, d, inherits=A, polymorphic_identity="d"
)
@classmethod
def insert_data(cls, connection):
A, C, B = (cls.classes.A, cls.classes.C, cls.classes.B)
sess = Session(connection)
sess.add_all(
[
B(name="b1"),
A(name="a1", link=C(name="c1", age=3)),
C(name="c2", age=6),
A(name="a2"),
]
)
sess.flush()
def test_add_entity_equivalence(self):
A, C, B = (self.classes.A, self.classes.C, self.classes.B)
sess = fixture_session()
for q in [
sess.query(A, B).join(A.link),
sess.query(A).join(A.link).add_entity(B),
]:
eq_(
q.all(),
[
(
A(bid=2, id=1, name="a1", type="a"),
C(age=3, id=2, name="c1", type="c"),
)
],
)
for q in [
sess.query(B, A).join(B.back),
sess.query(B).join(B.back).add_entity(A),
sess.query(B).add_entity(A).join(B.back),
]:
eq_(
q.all(),
[
(
C(age=3, id=2, name="c1", type="c"),
A(bid=2, id=1, name="a1", type="a"),
)
],
)
class InstancesTest(QueryTest, AssertsCompiledSQL):
def test_from_alias_two_needs_nothing(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select()
.where(users.c.id == 7)
.union(users.select().where(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select()
.order_by(text("ulist.id"), addresses.c.id)
)
sess = fixture_session()
q = sess.query(User)
def go():
result = (
q.options(contains_eager("addresses"))
.from_statement(query)
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_two(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select()
.where(users.c.id == 7)
.union(users.select().where(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select()
.order_by(text("ulist.id"), addresses.c.id)
)
sess = fixture_session()
q = sess.query(User)
def go():
ulist = query.alias("ulist")
ulist_alias = aliased(User, alias=ulist)
result = (
q.options(contains_eager("addresses", alias=ulist))
.select_entity_from(ulist_alias)
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_three(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select()
.where(users.c.id == 7)
.union(users.select().where(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select()
.order_by(text("ulist.id"), addresses.c.id)
)
sess = fixture_session()
# better way. use select_entity_from()
def go():
result = (
sess.query(User)
.select_entity_from(query.subquery())
.options(contains_eager("addresses"))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_four(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
sess = fixture_session()
# same thing, but alias addresses, so that the adapter
# generated by select_entity_from() is wrapped within
# the adapter created by contains_eager()
adalias = addresses.alias()
query = (
users.select()
.where(users.c.id == 7)
.union(users.select().where(users.c.id > 7))
.alias("ulist")
.outerjoin(adalias)
.select()
.order_by(text("ulist.id"), adalias.c.id)
)
def go():
result = (
sess.query(User)
.select_entity_from(query.subquery())
.options(contains_eager("addresses", alias=adalias))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_one(self):
addresses, User = (self.tables.addresses, self.classes.User)
sess = fixture_session()
# test that contains_eager suppresses the normal outer join rendering
q = (
sess.query(User)
.outerjoin(User.addresses)
.options(contains_eager(User.addresses))
.order_by(User.id, addresses.c.id)
)
self.assert_compile(
q.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).statement,
"SELECT addresses.id AS addresses_id, "
"addresses.user_id AS addresses_user_id, "
"addresses.email_address AS "
"addresses_email_address, users.id AS "
"users_id, users.name AS users_name FROM "
"users LEFT OUTER JOIN addresses ON "
"users.id = addresses.user_id ORDER BY "
"users.id, addresses.id",
dialect=default.DefaultDialect(),
)
def go():
assert self.static.user_address_result == q.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_two(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
sess = fixture_session()
adalias = addresses.alias()
q = (
sess.query(User)
.select_entity_from(users.outerjoin(adalias))
.options(contains_eager(User.addresses, alias=adalias))
.order_by(User.id, adalias.c.id)
)
def go():
eq_(self.static.user_address_result, q.all())
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_four(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
sess = fixture_session()
selectquery = (
users.outerjoin(addresses)
.select()
.where(users.c.id < 10)
.order_by(users.c.id, addresses.c.id)
)
q = sess.query(User)
def go():
result = (
q.options(contains_eager("addresses"))
.from_statement(selectquery)
.all()
)
assert self.static.user_address_result[0:3] == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_four_future(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
sess = fixture_session(future=True)
selectquery = (
users.outerjoin(addresses)
.select()
.where(users.c.id < 10)
.order_by(users.c.id, addresses.c.id)
)
q = select(User)
def go():
result = (
sess.execute(
q.options(contains_eager("addresses")).from_statement(
selectquery
)
)
.scalars()
.unique()
.all()
)
assert self.static.user_address_result[0:3] == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_aliased(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
q = sess.query(User)
# Aliased object
adalias = aliased(Address)
def go():
result = (
q.options(contains_eager("addresses", alias=adalias))
.outerjoin(adalias, User.addresses)
.order_by(User.id, adalias.id)
)
assert self.static.user_address_result == result.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_alias(self):
orders, items, users, order_items, User = (
self.tables.orders,
self.tables.items,
self.tables.users,
self.tables.order_items,
self.classes.User,
)
sess = fixture_session()
q = sess.query(User)
oalias = orders.alias("o1")
ialias = items.alias("i1")
query = (
users.outerjoin(oalias)
.outerjoin(order_items)
.outerjoin(ialias)
.select()
.order_by(users.c.id, oalias.c.id, ialias.c.id)
)
# test using Alias with more than one level deep
# new way:
# from sqlalchemy.orm.strategy_options import Load
# opt = Load(User).contains_eager('orders', alias=oalias).
# contains_eager('items', alias=ialias)
def go():
result = list(
q.options(
contains_eager("orders", alias=oalias),
contains_eager("orders.items", alias=ialias),
).from_statement(query)
)
assert self.static.user_order_result == result
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_aliased(self):
Item, User, Order = (
self.classes.Item,
self.classes.User,
self.classes.Order,
)
sess = fixture_session()
q = sess.query(User)
# test using Aliased with more than one level deep
oalias = aliased(Order)
ialias = aliased(Item)
def go():
result = (
q.options(
contains_eager(User.orders, alias=oalias),
contains_eager(User.orders, Order.items, alias=ialias),
)
.outerjoin(oalias, User.orders)
.outerjoin(ialias, oalias.items)
.order_by(User.id, oalias.id, ialias.id)
)
assert self.static.user_order_result == result.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_multi_aliased_of_type(self):
# test newer style that does not use the alias parameter
Item, User, Order = (
self.classes.Item,
self.classes.User,
self.classes.Order,
)
sess = fixture_session()
q = sess.query(User)
# test using Aliased with more than one level deep
oalias = aliased(Order)
ialias = aliased(Item)
def go():
result = (
q.options(
contains_eager(User.orders.of_type(oalias)).contains_eager(
oalias.items.of_type(ialias)
)
)
.outerjoin(User.orders.of_type(oalias))
.outerjoin(oalias.items.of_type(ialias))
.order_by(User.id, oalias.id, ialias.id)
)
assert self.static.user_order_result == result.all()
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_chaining(self):
"""test that contains_eager() 'chains' by default."""
Dingaling, User, Address = (
self.classes.Dingaling,
self.classes.User,
self.classes.Address,
)
sess = fixture_session()
q = (
sess.query(User)
.join(User.addresses)
.join(Address.dingaling)
.options(contains_eager(User.addresses, Address.dingaling))
)
def go():
eq_(
q.all(),
# note we only load the Address records that
# have a Dingaling here due to using the inner
# join for the eager load
[
User(
name="ed",
addresses=[
Address(
email_address="[email protected]",
dingaling=Dingaling(data="ding 1/2"),
)
],
),
User(
name="fred",
addresses=[
Address(
email_address="[email protected]",
dingaling=Dingaling(data="ding 2/5"),
)
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
def test_contains_eager_chaining_aliased_endpoint(self):
"""test that contains_eager() 'chains' by default and supports
an alias at the end."""
Dingaling, User, Address = (
self.classes.Dingaling,
self.classes.User,
self.classes.Address,
)
sess = fixture_session()
da = aliased(Dingaling, name="foob")
q = (
sess.query(User)
.join(User.addresses)
.join(da, Address.dingaling)
.options(
contains_eager(User.addresses, Address.dingaling, alias=da)
)
)
def go():
eq_(
q.all(),
# note we only load the Address records that
# have a Dingaling here due to using the inner
# join for the eager load
[
User(
name="ed",
addresses=[
Address(
email_address="[email protected]",
dingaling=Dingaling(data="ding 1/2"),
)
],
),
User(
name="fred",
addresses=[
Address(
email_address="[email protected]",
dingaling=Dingaling(data="ding 2/5"),
)
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
def test_mixed_eager_contains_with_limit(self):
Order, User, Address = (
self.classes.Order,
self.classes.User,
self.classes.Address,
)
sess = fixture_session()
q = sess.query(User)
def go():
# outerjoin to User.orders, offset 1/limit 2 so we get user
# 7 + second two orders. then joinedload the addresses.
# User + Order columns go into the subquery, address left
# outer joins to the subquery, joinedloader for User.orders
# applies context.adapter to result rows. This was
# [ticket:1180].
result = (
q.outerjoin(User.orders)
.options(
joinedload(User.addresses), contains_eager(User.orders)
)
.order_by(User.id, Order.id)
.offset(1)
.limit(2)
.all()
)
eq_(
result,
[
User(
id=7,
addresses=[
Address(
email_address="[email protected]", user_id=7, id=1
)
],
name="jack",
orders=[
Order(
address_id=1,
user_id=7,
description="order 3",
isopen=1,
id=3,
),
Order(
address_id=None,
user_id=7,
description="order 5",
isopen=0,
id=5,
),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
# same as above, except Order is aliased, so two adapters
# are applied by the eager loader
oalias = aliased(Order)
result = (
q.outerjoin(oalias, User.orders)
.options(
joinedload(User.addresses),
contains_eager(User.orders, alias=oalias),
)
.order_by(User.id, oalias.id)
.offset(1)
.limit(2)
.all()
)
eq_(
result,
[
User(
id=7,
addresses=[
Address(
email_address="[email protected]", user_id=7, id=1
)
],
name="jack",
orders=[
Order(
address_id=1,
user_id=7,
description="order 3",
isopen=1,
id=3,
),
Order(
address_id=None,
user_id=7,
description="order 5",
isopen=0,
id=5,
),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
class MixedEntitiesTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_alias_naming(self):
User = self.classes.User
sess = fixture_session()
ua = aliased(User, name="foobar")
q = sess.query(ua)
self.assert_compile(
q,
"SELECT foobar.id AS foobar_id, "
"foobar.name AS foobar_name FROM users AS foobar",
)
def test_correlated_subquery(self):
"""test that a subquery constructed from ORM attributes doesn't leak
out those entities to the outermost query."""
Address, users, User = (
self.classes.Address,
self.tables.users,
self.classes.User,
)
sess = fixture_session()
subq = (
select(func.count())
.where(User.id == Address.user_id)
.correlate(users)
.label("count")
)
# we don't want Address to be outside of the subquery here
eq_(
list(sess.query(User, subq)[0:3]),
[
(User(id=7, name="jack"), 1),
(User(id=8, name="ed"), 3),
(User(id=9, name="fred"), 1),
],
)
# same thing without the correlate, as it should
# not be needed
subq = (
select(func.count())
.where(User.id == Address.user_id)
.label("count")
)
# we don't want Address to be outside of the subquery here
eq_(
list(sess.query(User, subq)[0:3]),
[
(User(id=7, name="jack"), 1),
(User(id=8, name="ed"), 3),
(User(id=9, name="fred"), 1),
],
)
@testing.combinations((True,), (False,))
def test_no_uniquing_cols_legacy(self, with_entities):
"""test #6924"""
User = self.classes.User
Address = self.classes.Address
sess = fixture_session()
if with_entities:
q = (
sess.query(User)
.join(Address)
.filter(Address.user_id == 8)
.with_entities(User.id, User.name)
.order_by(User.id)
)
else:
q = (
sess.query(User.id, User.name)
.join(Address)
.filter(Address.user_id == 8)
.order_by(User.id)
)
is_(q._compile_state()._primary_entity, None)
eq_(q.all(), [(8, "ed"), (8, "ed"), (8, "ed")])
@testing.combinations((True,), (False,))
def test_no_uniquing_cols(self, with_entities):
"""test #6924"""
User = self.classes.User
Address = self.classes.Address
if with_entities:
stmt = (
select(User)
.join(Address)
.filter(Address.user_id == 8)
.with_only_columns(User.id, User.name)
.order_by(User.id)
)
else:
stmt = (
select(User.id, User.name)
.join(Address)
.filter(Address.user_id == 8)
.order_by(User.id)
)
compile_state = ORMSelectCompileState.create_for_statement(stmt, None)
is_(compile_state._primary_entity, None)
def test_column_queries_one(self):
User = self.classes.User
sess = fixture_session()
eq_(
sess.query(User.name).all(),
[("jack",), ("ed",), ("fred",), ("chuck",)],
)
def test_column_queries_two(self):
users, User = (
self.tables.users,
self.classes.User,
)
sess = fixture_session()
sel = users.select().where(User.id.in_([7, 8])).alias()
q = sess.query(User.name)
q2 = q.select_entity_from(sel).all()
eq_(list(q2), [("jack",), ("ed",)])
def test_column_queries_three(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
eq_(
sess.query(User.name, Address.email_address)
.filter(User.id == Address.user_id)
.all(),
[
("jack", "[email protected]"),
("ed", "[email protected]"),
("ed", "[email protected]"),
("ed", "[email protected]"),
("fred", "[email protected]"),
],
)
def test_column_queries_four(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
eq_(
sess.query(User.name, func.count(Address.email_address))
.outerjoin(User.addresses)
.group_by(User.id, User.name)
.order_by(User.id)
.all(),
[("jack", 1), ("ed", 3), ("fred", 1), ("chuck", 0)],
)
def test_column_queries_five(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
eq_(
sess.query(User, func.count(Address.email_address))
.outerjoin(User.addresses)
.group_by(User)
.order_by(User.id)
.all(),
[
(User(name="jack", id=7), 1),
(User(name="ed", id=8), 3),
(User(name="fred", id=9), 1),
(User(name="chuck", id=10), 0),
],
)
def test_column_queries_six(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
eq_(
sess.query(func.count(Address.email_address), User)
.outerjoin(User.addresses)
.group_by(User)
.order_by(User.id)
.all(),
[
(1, User(name="jack", id=7)),
(3, User(name="ed", id=8)),
(1, User(name="fred", id=9)),
(0, User(name="chuck", id=10)),
],
)
def test_column_queries_seven(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
adalias = aliased(Address)
eq_(
sess.query(User, func.count(adalias.email_address))
.outerjoin(adalias, "addresses")
.group_by(User)
.order_by(User.id)
.all(),
[
(User(name="jack", id=7), 1),
(User(name="ed", id=8), 3),
(User(name="fred", id=9), 1),
(User(name="chuck", id=10), 0),
],
)
def test_column_queries_eight(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
adalias = aliased(Address)
eq_(
sess.query(func.count(adalias.email_address), User)
.outerjoin(adalias, User.addresses)
.group_by(User)
.order_by(User.id)
.all(),
[
(1, User(name="jack", id=7)),
(3, User(name="ed", id=8)),
(1, User(name="fred", id=9)),
(0, User(name="chuck", id=10)),
],
)
def test_column_queries_nine(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
adalias = aliased(Address)
subq = (
sess.query(User, adalias.email_address, adalias.id)
.outerjoin(adalias, User.addresses)
.subquery()
)
ua = aliased(User, subq)
aa = aliased(adalias, subq)
q = sess.query(ua, aa.email_address).order_by(ua.id, aa.id)
# select from aliasing + explicit aliasing
eq_(
q.all(),
[
(User(name="jack", id=7), "[email protected]"),
(User(name="ed", id=8), "[email protected]"),
(User(name="ed", id=8), "[email protected]"),
(User(name="ed", id=8), "[email protected]"),
(User(name="fred", id=9), "[email protected]"),
(User(name="chuck", id=10), None),
],
)
def test_column_queries_ten(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
# anon + select from aliasing
aa = aliased(Address)
subq = (
sess.query(User)
.join(aa, User.addresses)
.filter(aa.email_address.like("%ed%"))
.subquery()
)
ua = aliased(User, subq)
eq_(
sess.query(ua).all(),
[User(name="ed", id=8), User(name="fred", id=9)],
)
def test_column_queries_eleven(self):
Address, User = (
self.classes.Address,
self.classes.User,
)
sess = fixture_session()
adalias = aliased(Address)
q1 = (
sess.query(User, adalias.email_address)
.outerjoin(adalias, User.addresses)
.options(joinedload(User.addresses))
.order_by(User.id, adalias.id)
.limit(10)
)
subq = (
sess.query(User, adalias.email_address, adalias.id)
.outerjoin(adalias, User.addresses)
.subquery()
)
ua = aliased(User, subq)
aa = aliased(adalias, subq)
q2 = (
sess.query(ua, aa.email_address)
.options(joinedload(ua.addresses))
.order_by(ua.id, aa.id)
.limit(10)
)
# test eager aliasing, with/without select_entity_from aliasing
for q in [q1, q2]:
eq_(
q.all(),
[
(
User(
addresses=[
Address(
user_id=7,
email_address="[email protected]",
id=1,
)
],
name="jack",
id=7,
),
"[email protected]",
),
(
User(
addresses=[
Address(
user_id=8,
email_address="[email protected]",
id=2,
),
Address(
user_id=8,
email_address="[email protected]",
id=3,
),
Address(
user_id=8,
email_address="[email protected]",
id=4,
),
],
name="ed",
id=8,
),
"[email protected]",
),
(
User(
addresses=[
Address(
user_id=8,
email_address="[email protected]",
id=2,
),
Address(
user_id=8,
email_address="[email protected]",
id=3,
),
Address(
user_id=8,
email_address="[email protected]",
id=4,
),
],
name="ed",
id=8,
),
"[email protected]",
),
(
User(
addresses=[
Address(
user_id=8,
email_address="[email protected]",
id=2,
),
Address(
user_id=8,
email_address="[email protected]",
id=3,
),
Address(
user_id=8,
email_address="[email protected]",
id=4,
),
],
name="ed",
id=8,
),
"[email protected]",
),
(
User(
addresses=[
Address(
user_id=9,
email_address="[email protected]",
id=5,
)
],
name="fred",
id=9,
),
"[email protected]",
),
(User(addresses=[], name="chuck", id=10), None),
],
)
def test_column_from_limited_joinedload(self):
User = self.classes.User
sess = fixture_session()
def go():
results = (
sess.query(User)
.limit(1)
.options(joinedload("addresses"))
.add_columns(User.name)
.all()
)
eq_(results, [(User(name="jack"), "jack")])
self.assert_sql_count(testing.db, go, 1)
def test_self_referential_from_self(self):
Order = self.classes.Order
sess = fixture_session()
oalias = aliased(Order)
q1 = (
sess.query(Order, oalias)
.filter(Order.user_id == oalias.user_id)
.filter(Order.user_id == 7)
.filter(Order.id > oalias.id)
.order_by(Order.id, oalias.id)
)
subq = (
sess.query(Order, oalias).filter(Order.id > oalias.id).subquery()
)
oa, oaa = aliased(Order, subq), aliased(oalias, subq)
q2 = (
sess.query(oa, oaa)
.filter(oa.user_id == oaa.user_id)
.filter(oa.user_id == 7)
.order_by(oa.id, oaa.id)
)
# same thing, but reversed.
subq = (
sess.query(oalias, Order).filter(Order.id < oalias.id).subquery()
)
oa, oaa = aliased(Order, subq), aliased(oalias, subq)
q3 = (
sess.query(oaa, oa)
.filter(oaa.user_id == oa.user_id)
.filter(oaa.user_id == 7)
.order_by(oaa.id, oa.id)
)
subq = (
sess.query(Order, oalias)
.filter(Order.user_id == oalias.user_id)
.filter(Order.user_id == 7)
.filter(Order.id > oalias.id)
.subquery()
)
oa, oaa = aliased(Order, subq), aliased(oalias, subq)
# here we go....two layers of aliasing (due to joinedload w/ limit)
q4 = (
sess.query(oa, oaa)
.order_by(oa.id, oaa.id)
.limit(10)
.options(joinedload(oa.items))
)
# gratuitous four layers
subq4 = subq
for i in range(4):
oa, oaa = aliased(Order, subq4), aliased(oaa, subq4)
subq4 = sess.query(oa, oaa).subquery()
oa, oaa = aliased(Order, subq4), aliased(oaa, subq4)
q5 = (
sess.query(oa, oaa)
.order_by(oa.id, oaa.id)
.limit(10)
.options(joinedload(oa.items))
)
for q in [
q1,
q2,
q3,
q4,
q5,
]:
eq_(
q.all(),
[
(
Order(
address_id=1,
description="order 3",
isopen=1,
user_id=7,
id=3,
),
Order(
address_id=1,
description="order 1",
isopen=0,
user_id=7,
id=1,
),
),
(
Order(
address_id=None,
description="order 5",
isopen=0,
user_id=7,
id=5,
),
Order(
address_id=1,
description="order 1",
isopen=0,
user_id=7,
id=1,
),
),
(
Order(
address_id=None,
description="order 5",
isopen=0,
user_id=7,
id=5,
),
Order(
address_id=1,
description="order 3",
isopen=1,
user_id=7,
id=3,
),
),
],
)
def test_from_self_internal_literals_newstyle(self):
Order = self.classes.Order
stmt = select(
Order.id, Order.description, literal_column("'q'").label("foo")
).where(Order.description == "order 3")
subq = aliased(
Order,
stmt.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL).subquery(),
)
stmt = select(subq).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
self.assert_compile(
stmt,
"SELECT anon_1.orders_id AS "
"anon_1_orders_id, "
"anon_1.orders_description AS anon_1_orders_description "
"FROM (SELECT "
"orders.id AS orders_id, "
"orders.description AS orders_description, "
"'q' AS foo FROM orders WHERE "
"orders.description = :description_1) AS "
"anon_1",
)
def test_multi_mappers(self):
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
test_session = fixture_session()
(user7, user8, user9, user10) = test_session.query(User).all()
(
address1,
address2,
address3,
address4,
address5,
) = test_session.query(Address).all()
expected = [
(user7, address1),
(user8, address2),
(user8, address3),
(user8, address4),
(user9, address5),
(user10, None),
]
sess = fixture_session(future=True)
selectquery = (
users.outerjoin(addresses)
.select()
.order_by(users.c.id, addresses.c.id)
)
result = sess.execute(
select(User, Address).from_statement(selectquery)
)
eq_(
list(result),
expected,
)
sess.expunge_all()
for address_entity in (Address, aliased(Address)):
q = (
sess.query(User)
.add_entity(address_entity)
.outerjoin(address_entity, "addresses")
.order_by(User.id, address_entity.id)
)
eq_(q.all(), expected)
sess.expunge_all()
q = sess.query(User).add_entity(address_entity)
q = q.join(address_entity, "addresses")
q = q.filter_by(email_address="[email protected]")
eq_(q.all(), [(user8, address3)])
sess.expunge_all()
q = (
sess.query(User, address_entity)
.join(address_entity, "addresses")
.filter_by(email_address="[email protected]")
)
eq_(q.all(), [(user8, address3)])
sess.expunge_all()
q = (
sess.query(User, address_entity)
.join(address_entity, "addresses")
.options(joinedload("addresses"))
.filter_by(email_address="[email protected]")
)
eq_(list(util.OrderedSet(q.all())), [(user8, address3)])
sess.expunge_all()
def test_aliased_multi_mappers(self):
User, addresses, users, Address = (
self.classes.User,
self.tables.addresses,
self.tables.users,
self.classes.Address,
)
sess = fixture_session()
(user7, user8, user9, user10) = sess.query(User).all()
(address1, address2, address3, address4, address5) = sess.query(
Address
).all()
expected = [
(user7, address1),
(user8, address2),
(user8, address3),
(user8, address4),
(user9, address5),
(user10, None),
]
q = sess.query(User)
adalias = addresses.alias("adalias")
q = q.add_entity(Address, alias=adalias).select_entity_from(
users.outerjoin(adalias)
)
result = q.order_by(User.id, adalias.c.id).all()
assert result == expected
sess.expunge_all()
q = sess.query(User).add_entity(Address, alias=adalias)
result = (
q.select_entity_from(users.outerjoin(adalias))
.filter(adalias.c.email_address == "[email protected]")
.all()
)
assert result == [(user8, address3)]
def test_with_entities(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
q = sess.query(User).filter(User.id == 7).order_by(User.name)
self.assert_compile(
q.with_entities(User.id, Address).filter(
Address.user_id == User.id
),
"SELECT users.id AS users_id, addresses.id "
"AS addresses_id, addresses.user_id AS "
"addresses_user_id, addresses.email_address"
" AS addresses_email_address FROM users, "
"addresses WHERE users.id = :id_1 AND "
"addresses.user_id = users.id ORDER BY "
"users.name",
)
def test_multi_columns(self):
users, User = self.tables.users, self.classes.User
sess = fixture_session()
expected = [(u, u.name) for u in sess.query(User).all()]
for add_col in (User.name, users.c.name):
assert sess.query(User).add_columns(add_col).all() == expected
sess.expunge_all()
assert_raises(
sa_exc.ArgumentError, sess.query(User).add_columns, object()
)
def test_add_multi_columns(self):
"""test that add_column accepts a FROM clause."""
users, User = self.tables.users, self.classes.User
sess = fixture_session()
eq_(
sess.query(User.id).add_columns(users).all(),
[(7, 7, "jack"), (8, 8, "ed"), (9, 9, "fred"), (10, 10, "chuck")],
)
def test_multi_columns_2(self):
"""test aliased/nonalised joins with the usage of add_columns()"""
User, Address, addresses, users = (
self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users,
)
sess = fixture_session()
(user7, user8, user9, user10) = sess.query(User).all()
expected = [(user7, 1), (user8, 3), (user9, 1), (user10, 0)]
q = sess.query(User)
q = (
q.group_by(users)
.order_by(User.id)
.outerjoin("addresses")
.add_columns(func.count(Address.id).label("count"))
)
eq_(q.all(), expected)
sess.expunge_all()
adalias = aliased(Address)
q = sess.query(User)
q = (
q.group_by(users)
.order_by(User.id)
.outerjoin(adalias, "addresses")
.add_columns(func.count(adalias.id).label("count"))
)
eq_(q.all(), expected)
sess.expunge_all()
# TODO: figure out why group_by(users) doesn't work here
count = func.count(addresses.c.id).label("count")
s = (
select(users, count)
.select_from(users.outerjoin(addresses))
.group_by(*[c for c in users.c])
.order_by(User.id)
)
q = sess.query(User)
result = (
q.add_columns(s.selected_columns.count).from_statement(s).all()
)
assert result == expected
def test_multi_columns_3(self):
User = self.classes.User
users = self.tables.users
sess = fixture_session()
q = sess.query(User.id, User.name)
stmt = select(users).order_by(users.c.id)
q = q.from_statement(stmt)
eq_(q.all(), [(7, "jack"), (8, "ed"), (9, "fred"), (10, "chuck")])
def test_raw_columns(self):
addresses, users, User = (
self.tables.addresses,
self.tables.users,
self.classes.User,
)
sess = fixture_session()
(user7, user8, user9, user10) = sess.query(User).all()
expected = [
(user7, 1, "Name:jack"),
(user8, 3, "Name:ed"),
(user9, 1, "Name:fred"),
(user10, 0, "Name:chuck"),
]
adalias = addresses.alias()
with fixture_session() as sess:
q = (
sess.query(User)
.add_columns(
func.count(adalias.c.id), ("Name:" + users.c.name)
)
.outerjoin(adalias)
.group_by(users)
.order_by(users.c.id)
)
eq_(q.all(), expected)
# test with a straight statement
s = (
select(
users,
func.count(addresses.c.id).label("count"),
("Name:" + users.c.name).label("concat"),
)
.select_from(users.outerjoin(addresses))
.group_by(*[c for c in users.c])
.order_by(users.c.id)
)
with fixture_session() as sess:
q = sess.query(User)
result = (
q.add_columns(
s.selected_columns.count, s.selected_columns.concat
)
.from_statement(s)
.all()
)
eq_(result, expected)
with fixture_session() as sess:
# test with select_entity_from()
q = (
fixture_session()
.query(User)
.add_columns(
func.count(addresses.c.id), ("Name:" + users.c.name)
)
.select_entity_from(users.outerjoin(addresses))
.group_by(users)
.order_by(users.c.id)
)
eq_(q.all(), expected)
with fixture_session() as sess:
q = (
sess.query(User)
.add_columns(
func.count(addresses.c.id), ("Name:" + users.c.name)
)
.outerjoin("addresses")
.group_by(users)
.order_by(users.c.id)
)
eq_(q.all(), expected)
with fixture_session() as sess:
q = (
sess.query(User)
.add_columns(
func.count(adalias.c.id), ("Name:" + users.c.name)
)
.outerjoin(adalias)
.group_by(users)
.order_by(users.c.id)
)
eq_(q.all(), expected)
def test_expression_selectable_matches_mzero(self):
User, Address = self.classes.User, self.classes.Address
ua = aliased(User)
aa = aliased(Address)
s = fixture_session()
for crit, j, exp in [
(
User.id + Address.id,
User.addresses,
"SELECT users.id + addresses.id AS anon_1 "
"FROM users JOIN addresses ON users.id = "
"addresses.user_id",
),
(
User.id + Address.id,
Address.user,
"SELECT users.id + addresses.id AS anon_1 "
"FROM addresses JOIN users ON users.id = "
"addresses.user_id",
),
(
Address.id + User.id,
User.addresses,
"SELECT addresses.id + users.id AS anon_1 "
"FROM users JOIN addresses ON users.id = "
"addresses.user_id",
),
(
User.id + aa.id,
(aa, User.addresses),
"SELECT users.id + addresses_1.id AS anon_1 "
"FROM users JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id",
),
]:
q = s.query(crit)
mzero = q._compile_state()._entity_zero()
is_(mzero, q._compile_state()._entities[0].entity_zero)
q = q.join(j)
self.assert_compile(q, exp)
for crit, j, exp in [
(
ua.id + Address.id,
ua.addresses,
"SELECT users_1.id + addresses.id AS anon_1 "
"FROM users AS users_1 JOIN addresses "
"ON users_1.id = addresses.user_id",
),
(
ua.id + aa.id,
(aa, ua.addresses),
"SELECT users_1.id + addresses_1.id AS anon_1 "
"FROM users AS users_1 JOIN addresses AS "
"addresses_1 ON users_1.id = addresses_1.user_id",
),
(
ua.id + aa.id,
(ua, aa.user),
"SELECT users_1.id + addresses_1.id AS anon_1 "
"FROM addresses AS addresses_1 JOIN "
"users AS users_1 "
"ON users_1.id = addresses_1.user_id",
),
]:
q = s.query(crit)
mzero = q._compile_state()._entity_zero()
is_(mzero, q._compile_state()._entities[0].entity_zero)
q = q.join(j)
self.assert_compile(q, exp)
def test_aliased_adapt_on_names(self):
User, Address = self.classes.User, self.classes.Address
sess = fixture_session()
agg_address = sess.query(
Address.id,
func.sum(func.length(Address.email_address)).label(
"email_address"
),
).group_by(Address.user_id)
ag1 = aliased(Address, agg_address.subquery())
ag2 = aliased(Address, agg_address.subquery(), adapt_on_names=True)
# first, without adapt on names, 'email_address' isn't matched up - we
# get the raw "address" element in the SELECT
self.assert_compile(
sess.query(User, ag1.email_address)
.join(ag1, User.addresses)
.filter(ag1.email_address > 5),
"SELECT users.id "
"AS users_id, users.name AS users_name, addresses.email_address "
"AS addresses_email_address FROM addresses, users JOIN "
"(SELECT addresses.id AS id, sum(length(addresses.email_address)) "
"AS email_address FROM addresses GROUP BY addresses.user_id) AS "
"anon_1 ON users.id = addresses.user_id "
"WHERE addresses.email_address > :email_address_1",
)
# second, 'email_address' matches up to the aggregate, and we get a
# smooth JOIN from users->subquery and that's it
self.assert_compile(
sess.query(User, ag2.email_address)
.join(ag2, User.addresses)
.filter(ag2.email_address > 5),
"SELECT users.id AS users_id, users.name AS users_name, "
"anon_1.email_address AS anon_1_email_address FROM users "
"JOIN ("
"SELECT addresses.id AS id, sum(length(addresses.email_address)) "
"AS email_address FROM addresses GROUP BY addresses.user_id) AS "
"anon_1 ON users.id = addresses.user_id "
"WHERE anon_1.email_address > :email_address_1",
)
class SelectFromTest(QueryTest, AssertsCompiledSQL):
run_setup_mappers = None
__dialect__ = "default"
def test_replace_with_select(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
sel = users.select().where(users.c.id.in_([7, 8])).alias()
sess = fixture_session()
eq_(
sess.query(User).select_entity_from(sel).all(),
[User(id=7), User(id=8)],
)
eq_(
sess.query(User)
.select_entity_from(sel)
.filter(User.id == 8)
.all(),
[User(id=8)],
)
eq_(
sess.query(User)
.select_entity_from(sel)
.order_by(desc(User.name))
.all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
eq_(
sess.query(User)
.select_entity_from(sel)
.order_by(asc(User.name))
.all(),
[User(name="ed", id=8), User(name="jack", id=7)],
)
eq_(
sess.query(User)
.select_entity_from(sel)
.options(joinedload("addresses"))
.first(),
User(name="jack", addresses=[Address(id=1)]),
)
def test_select_from_aliased_one(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
not_users = table("users", column("id"), column("name"))
ua = aliased(User, select(not_users).alias(), adapt_on_names=True)
q = sess.query(User.name).select_entity_from(ua).order_by(User.name)
self.assert_compile(
q,
"SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, "
"users.name AS name FROM users) AS anon_1 ORDER BY anon_1.name",
)
eq_(q.all(), [("chuck",), ("ed",), ("fred",), ("jack",)])
def test_select_from_aliased_two(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
ua = aliased(User)
q = sess.query(User.name).select_entity_from(ua).order_by(User.name)
self.assert_compile(
q,
"SELECT users_1.name AS users_1_name FROM users AS users_1 "
"ORDER BY users_1.name",
)
eq_(q.all(), [("chuck",), ("ed",), ("fred",), ("jack",)])
def test_select_from_core_alias_one(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
ua = users.alias()
q = sess.query(User.name).select_entity_from(ua).order_by(User.name)
self.assert_compile(
q,
"SELECT users_1.name AS users_1_name FROM users AS users_1 "
"ORDER BY users_1.name",
)
eq_(q.all(), [("chuck",), ("ed",), ("fred",), ("jack",)])
def test_differentiate_self_external(self):
"""test some different combinations of joining a table to a subquery of
itself."""
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
sel = sess.query(User).filter(User.id.in_([7, 8])).subquery()
ualias = aliased(User)
self.assert_compile(
sess.query(User).join(sel, User.id > sel.c.id),
"SELECT users.id AS users_id, users.name AS users_name FROM "
"users JOIN (SELECT users.id AS id, users.name AS name FROM users "
"WHERE users.id IN ([POSTCOMPILE_id_1])) "
"AS anon_1 ON users.id > anon_1.id",
)
self.assert_compile(
sess.query(ualias)
.select_entity_from(sel)
.filter(ualias.id > sel.c.id),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users AS users_1, ("
"SELECT users.id AS id, users.name AS name FROM users "
"WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 "
"WHERE users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
self.assert_compile(
sess.query(ualias)
.select_entity_from(sel)
.join(ualias, ualias.id > sel.c.id),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM (SELECT users.id AS id, users.name AS name "
"FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 "
"JOIN users AS users_1 ON users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
self.assert_compile(
sess.query(ualias)
.select_entity_from(sel)
.join(ualias, ualias.id > User.id),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM (SELECT users.id AS id, users.name AS name FROM "
"users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 "
"JOIN users AS users_1 ON users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
salias = aliased(User, sel)
self.assert_compile(
sess.query(salias).join(ualias, ualias.id > salias.id),
"SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users WHERE users.id IN ([POSTCOMPILE_id_1])) AS anon_1 "
"JOIN users AS users_1 ON users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
self.assert_compile(
sess.query(ualias).select_entity_from(
join(sel, ualias, ualias.id > sel.c.id)
),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM "
"(SELECT users.id AS id, users.name AS name "
"FROM users WHERE users.id "
"IN ([POSTCOMPILE_id_1])) AS anon_1 "
"JOIN users AS users_1 ON users_1.id > anon_1.id",
check_post_param={"id_1": [7, 8]},
)
def test_aliased_class_vs_nonaliased(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
ua = aliased(User)
sess = fixture_session()
self.assert_compile(
sess.query(User).select_from(ua).join(User, ua.name > User.name),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users AS users_1 JOIN users ON users_1.name > users.name",
)
self.assert_compile(
sess.query(User.name)
.select_from(ua)
.join(User, ua.name > User.name),
"SELECT users.name AS users_name FROM users AS users_1 "
"JOIN users ON users_1.name > users.name",
)
self.assert_compile(
sess.query(ua.name)
.select_from(ua)
.join(User, ua.name > User.name),
"SELECT users_1.name AS users_1_name FROM users AS users_1 "
"JOIN users ON users_1.name > users.name",
)
self.assert_compile(
sess.query(ua).select_from(User).join(ua, ua.name > User.name),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users JOIN users AS users_1 ON users_1.name > users.name",
)
self.assert_compile(
sess.query(ua).select_from(User).join(ua, User.name > ua.name),
"SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
"FROM users JOIN users AS users_1 ON users.name > users_1.name",
)
# this is tested in many other places here, just adding it
# here for comparison
self.assert_compile(
sess.query(User.name).select_entity_from(
users.select().where(users.c.id > 5).subquery()
),
"SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, "
"users.name AS name FROM users WHERE users.id > :id_1) AS anon_1",
)
def test_join_no_order_by(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sel = users.select().where(users.c.id.in_([7, 8]))
sess = fixture_session()
eq_(
sess.query(User).select_entity_from(sel.subquery()).all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_join_relname_from_selected_from(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses),
backref="user",
)
},
)
sess = fixture_session()
self.assert_compile(
sess.query(User).select_from(Address).join("user"),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM addresses JOIN users ON users.id = addresses.user_id",
)
def test_filter_by_selected_from(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses)
)
},
)
sess = fixture_session()
self.assert_compile(
sess.query(User)
.select_from(Address)
.filter_by(email_address="ed")
.join(User),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM addresses JOIN users ON users.id = addresses.user_id "
"WHERE addresses.email_address = :email_address_1",
)
def test_join_ent_selected_from(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
self.mapper_registry.map_imperatively(Address, addresses)
)
},
)
sess = fixture_session()
self.assert_compile(
sess.query(User).select_from(Address).join(User),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM addresses JOIN users ON users.id = addresses.user_id",
)
def test_join(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User, users, properties={"addresses": relationship(Address)}
)
self.mapper_registry.map_imperatively(Address, addresses)
sel = users.select().where(users.c.id.in_([7, 8]))
sess = fixture_session()
eq_(
sess.query(User)
.select_entity_from(sel.subquery())
.join("addresses")
.add_entity(Address)
.order_by(User.id)
.order_by(Address.id)
.all(),
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="[email protected]", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=4),
),
],
)
adalias = aliased(Address)
eq_(
sess.query(User)
.select_entity_from(sel.subquery())
.join(adalias, "addresses")
.add_entity(adalias)
.order_by(User.id)
.order_by(adalias.id)
.all(),
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="[email protected]", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=4),
),
],
)
def test_more_joins(self):
(
users,
Keyword,
orders,
items,
order_items,
Order,
Item,
User,
keywords,
item_keywords,
) = (
self.tables.users,
self.classes.Keyword,
self.tables.orders,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.keywords,
self.tables.item_keywords,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"orders": relationship(Order, backref="user")},
) # o2m, m2o
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, order_by=items.c.id
)
},
) # m2m
self.mapper_registry.map_imperatively(
Item,
items,
properties={
"keywords": relationship(
Keyword, secondary=item_keywords, order_by=keywords.c.id
)
},
) # m2m
self.mapper_registry.map_imperatively(Keyword, keywords)
sess = fixture_session()
sel = users.select().where(users.c.id.in_([7, 8]))
eq_(
sess.query(User)
.select_entity_from(sel.subquery())
.join(User.orders, Order.items, Item.keywords)
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[User(name="jack", id=7)],
)
def test_very_nested_joins_with_joinedload(self):
(
users,
Keyword,
orders,
items,
order_items,
Order,
Item,
User,
keywords,
item_keywords,
) = (
self.tables.users,
self.classes.Keyword,
self.tables.orders,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.keywords,
self.tables.item_keywords,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"orders": relationship(Order, backref="user")},
) # o2m, m2o
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item, secondary=order_items, order_by=items.c.id
)
},
) # m2m
self.mapper_registry.map_imperatively(
Item,
items,
properties={
"keywords": relationship(
Keyword, secondary=item_keywords, order_by=keywords.c.id
)
},
) # m2m
self.mapper_registry.map_imperatively(Keyword, keywords)
sess = fixture_session()
sel = users.select().where(users.c.id.in_([7, 8]))
def go():
eq_(
sess.query(User)
.select_entity_from(sel.subquery())
.options(
joinedload("orders")
.joinedload("items")
.joinedload("keywords")
)
.join(User.orders, Order.items, Item.keywords)
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[
User(
name="jack",
orders=[
Order(
description="order 1",
items=[
Item(
description="item 1",
keywords=[
Keyword(name="red"),
Keyword(name="big"),
Keyword(name="round"),
],
),
Item(
description="item 2",
keywords=[
Keyword(name="red", id=2),
Keyword(name="small", id=5),
Keyword(name="square"),
],
),
Item(
description="item 3",
keywords=[
Keyword(name="green", id=3),
Keyword(name="big", id=4),
Keyword(name="round", id=6),
],
),
],
),
Order(
description="order 3",
items=[
Item(
description="item 3",
keywords=[
Keyword(name="green", id=3),
Keyword(name="big", id=4),
Keyword(name="round", id=6),
],
),
Item(
description="item 4", keywords=[], id=4
),
Item(
description="item 5", keywords=[], id=5
),
],
),
Order(
description="order 5",
items=[
Item(description="item 5", keywords=[])
],
),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
sel2 = orders.select().where(orders.c.id.in_([1, 2, 3]))
eq_(
sess.query(Order)
.select_entity_from(sel2.subquery())
.join(Order.items)
.join(Item.keywords)
.filter(Keyword.name == "red")
.order_by(Order.id)
.all(),
[
Order(description="order 1", id=1),
Order(description="order 2", id=2),
],
)
def test_replace_with_eager(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, order_by=addresses.c.id)
},
)
self.mapper_registry.map_imperatively(Address, addresses)
sel = users.select().where(users.c.id.in_([7, 8]))
sess = fixture_session()
def go():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel.subquery())
.order_by(User.id)
.all(),
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel.subquery())
.filter(User.id == 8)
.order_by(User.id)
.all(),
[
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel.subquery())
.order_by(User.id)[1],
User(
id=8,
addresses=[Address(id=2), Address(id=3), Address(id=4)],
),
)
self.assert_sql_count(testing.db, go, 1)
class CustomJoinTest(QueryTest):
run_setup_mappers = None
def test_double_same_mappers_flag_alias(self):
"""test aliasing of joins with a custom join condition"""
(
addresses,
items,
order_items,
orders,
Item,
User,
Address,
Order,
users,
) = (
self.tables.addresses,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="select",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(Address, lazy="select"),
open_orders=relationship(
Order,
primaryjoin=and_(
orders.c.isopen == 1, users.c.id == orders.c.user_id
),
lazy="select",
viewonly=True,
),
closed_orders=relationship(
Order,
primaryjoin=and_(
orders.c.isopen == 0, users.c.id == orders.c.user_id
),
lazy="select",
viewonly=True,
),
),
)
q = fixture_session().query(User)
eq_(
q.join("open_orders", "items", aliased=True)
.filter(Item.id == 4)
.join("closed_orders", "items", aliased=True)
.filter(Item.id == 3)
.all(),
[User(id=7)],
)
def test_double_same_mappers_explicit_alias(self):
"""test aliasing of joins with a custom join condition"""
(
addresses,
items,
order_items,
orders,
Item,
User,
Address,
Order,
users,
) = (
self.tables.addresses,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.users,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order,
orders,
properties={
"items": relationship(
Item,
secondary=order_items,
lazy="select",
order_by=items.c.id,
)
},
)
self.mapper_registry.map_imperatively(Item, items)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(Address, lazy="select"),
open_orders=relationship(
Order,
primaryjoin=and_(
orders.c.isopen == 1, users.c.id == orders.c.user_id
),
lazy="select",
viewonly=True,
),
closed_orders=relationship(
Order,
primaryjoin=and_(
orders.c.isopen == 0, users.c.id == orders.c.user_id
),
lazy="select",
viewonly=True,
),
),
)
q = fixture_session().query(User)
oo = aliased(Order)
co = aliased(Order)
oi = aliased(Item)
ci = aliased(Item)
        # converted from aliased=True. This is about the worst case of query
        # we get when we don't have aliased=True. Two different styles are
        # illustrated here, but the important point is that the filter() is
        # not doing any trickery; you need to pass it the aliased entity
        # explicitly.
eq_(
q.join(oo, User.open_orders)
.join(oi, oo.items)
.filter(oi.id == 4)
.join(User.closed_orders.of_type(co))
.join(co.items.of_type(ci))
.filter(ci.id == 3)
.all(),
[User(id=7)],
)
class ExternalColumnsTest(QueryTest):
"""test mappers with SQL-expressions added as column properties."""
run_setup_mappers = None
def test_external_columns_bad(self):
users, User = self.tables.users, self.classes.User
assert_raises_message(
sa_exc.ArgumentError,
"not represented in the mapper's table",
self.mapper_registry.map_imperatively,
User,
users,
properties={"concat": (users.c.id * 2)},
)
clear_mappers()
def test_external_columns(self):
"""test querying mappings that reference external columns or
selectables."""
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"concat": column_property((users.c.id * 2)),
"count": column_property(
select(func.count(addresses.c.id))
.where(
users.c.id == addresses.c.user_id,
)
.correlate(users)
.scalar_subquery()
),
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User,
)
},
)
sess = fixture_session()
sess.query(Address).options(joinedload("user")).all()
eq_(
sess.query(User).all(),
[
User(id=7, concat=14, count=1),
User(id=8, concat=16, count=3),
User(id=9, concat=18, count=1),
User(id=10, concat=20, count=0),
],
)
address_result = [
Address(id=1, user=User(id=7, concat=14, count=1)),
Address(id=2, user=User(id=8, concat=16, count=3)),
Address(id=3, user=User(id=8, concat=16, count=3)),
Address(id=4, user=User(id=8, concat=16, count=3)),
Address(id=5, user=User(id=9, concat=18, count=1)),
]
# TODO: ISSUE: BUG: cached metadata is confusing the user.id
# column here with the anon_1 for some reason, when we
# use compiled cache. this bug may even be present in
# regular master / 1.3. right now the caching of result
# metadata is disabled.
eq_(sess.query(Address).all(), address_result)
# run the eager version twice to test caching of aliased clauses
for x in range(2):
sess.expunge_all()
def go():
eq_(
sess.query(Address)
.options(joinedload("user"))
.order_by(Address.id)
.all(),
address_result,
)
self.assert_sql_count(testing.db, go, 1)
ualias = aliased(User)
eq_(
sess.query(Address, ualias).join(ualias, "user").all(),
[(address, address.user) for address in address_result],
)
ualias2 = aliased(User)
eq_(
sess.query(Address, ualias.count)
.join(ualias, "user")
.join(ualias2, "user")
.order_by(Address.id)
.all(),
[
(Address(id=1), 1),
(Address(id=2), 3),
(Address(id=3), 3),
(Address(id=4), 3),
(Address(id=5), 1),
],
)
eq_(
sess.query(Address, ualias.concat, ualias.count)
.join(ualias, "user")
.join(ualias2, "user")
.order_by(Address.id)
.all(),
[
(Address(id=1), 14, 1),
(Address(id=2), 16, 3),
(Address(id=3), 16, 3),
(Address(id=4), 16, 3),
(Address(id=5), 18, 1),
],
)
ua = aliased(User)
eq_(
sess.query(Address, ua.concat, ua.count)
.select_entity_from(join(Address, ua, "user"))
.options(joinedload(Address.user))
.order_by(Address.id)
.all(),
[
(Address(id=1, user=User(id=7, concat=14, count=1)), 14, 1),
(Address(id=2, user=User(id=8, concat=16, count=3)), 16, 3),
(Address(id=3, user=User(id=8, concat=16, count=3)), 16, 3),
(Address(id=4, user=User(id=8, concat=16, count=3)), 16, 3),
(Address(id=5, user=User(id=9, concat=18, count=1)), 18, 1),
],
)
eq_(
list(
sess.query(Address)
.join("user")
.with_entities(Address.id, User.id, User.concat, User.count)
),
[
(1, 7, 14, 1),
(2, 8, 16, 3),
(3, 8, 16, 3),
(4, 8, 16, 3),
(5, 9, 18, 1),
],
)
eq_(
list(
sess.query(Address, ua)
.select_entity_from(join(Address, ua, "user"))
.with_entities(Address.id, ua.id, ua.concat, ua.count)
),
[
(1, 7, 14, 1),
(2, 8, 16, 3),
(3, 8, 16, 3),
(4, 8, 16, 3),
(5, 9, 18, 1),
],
)
def test_external_columns_joinedload(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
        # in this test, we have a subquery on User that accesses "addresses",
        # underneath a joinedload for "addresses". So the "addresses" alias
        # adapter needs to *not* hit the "addresses" table within the "user"
        # subquery, but "user" still needs to be adapted. Therefore the
        # long-standing practice of eager adapters being "chained" has been
        # removed, since it's unnecessary and breaks this exact condition.
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address, backref="user", order_by=addresses.c.id
),
"concat": column_property((users.c.id * 2)),
"count": column_property(
select(func.count(addresses.c.id))
.where(
users.c.id == addresses.c.user_id,
)
.correlate(users)
.scalar_subquery()
),
},
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Order, orders, properties={"address": relationship(Address)}
) # m2o
sess = fixture_session()
def go():
o1 = (
sess.query(Order)
.options(joinedload("address").joinedload("user"))
.get(1)
)
eq_(o1.address.user.count, 1)
self.assert_sql_count(testing.db, go, 1)
sess = fixture_session()
def go():
o1 = (
sess.query(Order)
.options(joinedload("address").joinedload("user"))
.first()
)
eq_(o1.address.user.count, 1)
self.assert_sql_count(testing.db, go, 1)
def test_external_columns_compound(self):
# see [ticket:2167] for background
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={"fullname": column_property(users.c.name.label("x"))},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"username": column_property(
select(User.fullname)
.where(User.id == addresses.c.user_id)
.label("y")
)
},
)
sess = fixture_session()
a1 = sess.query(Address).first()
eq_(a1.username, "jack")
sess = fixture_session()
subq = sess.query(Address).subquery()
aa = aliased(Address, subq)
a1 = sess.query(aa).first()
eq_(a1.username, "jack")
class TestOverlyEagerEquivalentCols(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"base",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
)
Table(
"sub1",
metadata,
Column("id", Integer, ForeignKey("base.id"), primary_key=True),
Column("data", String(50)),
)
Table(
"sub2",
metadata,
Column(
"id",
Integer,
ForeignKey("base.id"),
ForeignKey("sub1.id"),
primary_key=True,
),
Column("data", String(50)),
)
def test_equivs(self):
base, sub2, sub1 = (
self.tables.base,
self.tables.sub2,
self.tables.sub1,
)
class Base(fixtures.ComparableEntity):
pass
class Sub1(fixtures.ComparableEntity):
pass
class Sub2(fixtures.ComparableEntity):
pass
self.mapper_registry.map_imperatively(
Base,
base,
properties={
"sub1": relationship(Sub1),
"sub2": relationship(Sub2),
},
)
self.mapper_registry.map_imperatively(Sub1, sub1)
self.mapper_registry.map_imperatively(Sub2, sub2)
sess = fixture_session()
s11 = Sub1(data="s11")
s12 = Sub1(data="s12")
b1 = Base(data="b1", sub1=[s11], sub2=[])
b2 = Base(data="b1", sub1=[s12], sub2=[])
sess.add(b1)
sess.add(b2)
sess.flush()
class LabelCollideTest(fixtures.MappedTest):
"""Test handling for a label collision. This collision
is handled by core, see ticket:2702 as well as
test/sql/test_selectable->WithLabelsTest. here we want
to make sure the end result is as we expect.
"""
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column("id", Integer, primary_key=True),
Column("bar_id", Integer),
)
Table("foo_bar", metadata, Column("id", Integer, primary_key=True))
@classmethod
def setup_classes(cls):
class Foo(cls.Basic):
pass
class Bar(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
cls.mapper_registry.map_imperatively(cls.classes.Foo, cls.tables.foo)
cls.mapper_registry.map_imperatively(
cls.classes.Bar, cls.tables.foo_bar
)
@classmethod
def insert_data(cls, connection):
s = Session(connection)
s.add_all([cls.classes.Foo(id=1, bar_id=2), cls.classes.Bar(id=3)])
s.commit()
def test_overlap_plain(self):
s = fixture_session()
row = (
s.query(self.classes.Foo, self.classes.Bar)
.join(self.classes.Bar, true())
.all()[0]
)
def go():
eq_(row.Foo.id, 1)
eq_(row.Foo.bar_id, 2)
eq_(row.Bar.id, 3)
# all three columns are loaded independently without
# overlap, no additional SQL to load all attributes
self.assert_sql_count(testing.db, go, 0)
def test_overlap_subquery(self):
s = fixture_session()
subq = (
s.query(self.classes.Foo, self.classes.Bar)
.join(self.classes.Bar, true())
.subquery()
)
fa = aliased(self.classes.Foo, subq, name="Foo")
ba = aliased(self.classes.Bar, subq, name="Bar")
row = s.query(fa, ba).all()[0]
def go():
eq_(row.Foo.id, 1)
eq_(row.Foo.bar_id, 2)
eq_(row.Bar.id, 3)
# all three columns are loaded independently without
# overlap, no additional SQL to load all attributes
self.assert_sql_count(testing.db, go, 0)
class CorrelateORMTest(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
@testing.fixture
def mapping(self):
Base = declarative_base()
def go(include_property, correlate_style, include_from):
class Address(Base):
__tablename__ = "addresses"
id = Column(Integer, primary_key=True)
user_id = Column(
Integer, ForeignKey("users.id"), nullable=False
)
city = Column(Text)
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(Text)
stmt = select(func.count(Address.id)).where(
Address.user_id == User.id
)
if include_from:
stmt = stmt.select_from(Address)
if include_property:
if correlate_style == "correlate":
User.total_addresses = column_property(
stmt.correlate(User).scalar_subquery()
)
elif correlate_style == "correlate_except":
User.total_addresses = column_property(
stmt.correlate_except(Address).scalar_subquery()
)
elif correlate_style is None:
User.total_addresses = column_property(
stmt.scalar_subquery()
)
total_addresses = None
else:
def total_addresses(cls):
stmt = select(func.count(Address.id)).where(
Address.user_id == cls.id
)
if correlate_style == "correlate":
stmt = stmt.correlate(cls)
elif correlate_style == "correlate_except":
stmt = stmt.correlate_except(Address)
stmt = stmt.scalar_subquery()
return stmt
return User, Address, total_addresses
yield go
Base.registry.dispose()
def _combinations(fn):
return testing.combinations(
(True,), (False,), argnames="include_property"
)(
testing.combinations(
("correlate",),
("correlate_except",),
(None,),
argnames="correlate_style",
)(
testing.combinations(
(True,), (False), argnames="include_from"
)(fn)
)
)
@_combinations
def test_correlate_to_cte_legacy(
self, mapping, include_property, correlate_style, include_from
):
User, Address, total_addresses = mapping(
include_property, correlate_style, include_from
)
session = fixture_session()
filtered_users = (
session.query(User.id, User.name)
.join(Address)
.filter(Address.city == "somewhere")
.cte("filtered_users")
)
filtered_users_alias = aliased(User, filtered_users)
paginated_users = (
session.query(filtered_users_alias.id, filtered_users_alias.name)
.order_by(func.lower(filtered_users_alias.name).asc())
.limit(25)
.cte("paginated_users")
)
paginated_users_alias = aliased(User, paginated_users)
if total_addresses:
q = session.query(
paginated_users_alias, total_addresses(paginated_users_alias)
)
else:
q = session.query(paginated_users_alias)
self.assert_compile(
q,
"WITH filtered_users AS "
"(SELECT users.id AS id, users.name AS name "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"WHERE addresses.city = :city_1), "
"paginated_users AS (SELECT filtered_users.id AS id, "
"filtered_users.name AS name FROM filtered_users "
"ORDER BY lower(filtered_users.name) ASC LIMIT :param_1) "
"SELECT "
"paginated_users.id AS paginated_users_id, "
"paginated_users.name AS paginated_users_name, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE addresses.user_id = paginated_users.id) AS anon_1 "
"FROM paginated_users",
)
| monetate/sqlalchemy | test/orm/test_froms.py | Python | mit | 131,835 |
import champyongg.requests
from champyongg.common import ChampyonGGObject
from champyongg.classes.champion import Champion
from champyongg.classes.generaldata import GeneralData, Skills
from champyongg.classes.itemset import ItemSet
from champyongg.classes.matchup import Matchup
from champyongg.classes.runeset import RuneSet
from champyongg.classes.skillset import SkillSet
from champyongg.classes.summonerspellset import SummonerSpellSet
from .classes.stats import Stats
def set_api_key(key):
"""Set your API key
key str the key to use
"""
champyongg.requests.api_key = key
def print_calls(on):
"""Sets whether to print calls to stdout as they are made
    on bool whether to print calls to stdout
"""
champyongg.requests.print_calls = on
def get_champions():
"""return http://api.champion.gg/docs/#api-Champion-GetChampions"""
request = 'champion'
return {datum['key']: Champion(datum) for datum in champyongg.requests.get(request)}
def get_general_data(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionGeneralData
"""
request = 'champion/{name}/general'.format(name=champion)
return {datum['role']: GeneralData(datum) for datum in champyongg.requests.get(request)}
def get_matchups_by_role(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMatchups
"""
request = '/champion/{name}/matchup'.format(name=champion)
return {dictionary['role']: {datum['key']: Matchup(datum) for datum in dictionary['matchups']} for dictionary in champyongg.requests.get(request)}
def get_most_popular_items(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostPopularItems
"""
request = '/champion/{name}/items/finished/mostPopular'.format(name=champion)
return {datum['role']: ItemSet(datum) for datum in champyongg.requests.get(request)}
def get_most_popular_skills(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostPopularSkill
"""
request = '/champion/{name}/skills/mostPopular'.format(name=champion)
return {datum['role']: SkillSet(datum) for datum in champyongg.requests.get(request)}
def get_most_popular_starting_items(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostPopularStartingItems
"""
request = '/champion/{name}/items/starters/mostPopular'.format(name=champion)
return {datum['role']: ItemSet(datum) for datum in champyongg.requests.get(request)}
def get_most_popular_summoners(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostPopularSummoners
"""
request = '/champion/{name}/summoners/mostPopular'.format(name=champion)
return {datum['role']: SummonerSpellSet(datum) for datum in champyongg.requests.get(request)}
def get_most_popular_runes(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostPopularRunes
"""
request = '/champion/{name}/runes/mostPopular'.format(name=champion)
return {datum['role']: RuneSet(datum) for datum in champyongg.requests.get(request)}
def get_most_winning_items(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostWinningItems
"""
request = '/champion/{name}/items/finished/mostWins'.format(name=champion)
return {datum['role']: ItemSet(datum) for datum in champyongg.requests.get(request)}
def get_most_winning_starting_items(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostWinningStartingItems
"""
request = '/champion/{name}/items/starters/mostWins'.format(name=champion)
return {datum['role']: ItemSet(datum) for datum in champyongg.requests.get(request)}
def get_most_winning_summoners(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostWinningSummoners
"""
request = '/champion/{name}/summoners/mostWins'.format(name=champion)
return {datum['role']: SummonerSpellSet(datum) for datum in champyongg.requests.get(request)}
def get_most_winning_runes(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostWinningRunes
"""
request = '/champion/{name}/runes/mostWins'.format(name=champion)
return {datum['role']: RuneSet(datum) for datum in champyongg.requests.get(request)}
def get_most_winning_skills(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionMostWinningSkill
"""
request = '/champion/{name}/skills/mostWins'.format(name=champion)
return {datum['role']: SkillSet(datum) for datum in champyongg.requests.get(request)}
def get_skills(champion):
"""champion <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionSkills
"""
request = '/champion/{name}/skills'.format(name=champion)
return Skills(champyongg.requests.get(request))
#return {key: Skills(datum) for key, datum in champyongg.requests.get(request).items()}
def get_specific_matchup(champion, enemy):
"""champion <str> champion name
enemy <str> champion name
return http://api.champion.gg/docs/#api-Champion-GetChampionSpecificMatchup
"""
request = '/champion/{name}/matchup/{enemy}'.format(name=champion, enemy=enemy)
return {datum['role']: Matchup(datum) for datum in champyongg.requests.get(request)}
def get_stats():
"""return http://api.champion.gg/stats/overall"""
request = 'stats/overall'
return Stats(champyongg.requests.get(request))
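# Hedged usage sketch, not part of the original module: 'YOUR-API-KEY' is a
# placeholder and 'Ahri' is assumed to be a valid champion key. Guarded by
# __main__ so importing this module stays side-effect free.
if __name__ == '__main__':
    set_api_key('YOUR-API-KEY')              # placeholder key (assumption)
    print_calls(True)                        # echo each request as it is made
    champions = get_champions()              # {champion key: Champion}
    matchups = get_matchups_by_role('Ahri')  # {role: {enemy key: Matchup}}
    for role, enemies in matchups.items():
        print(role, len(enemies))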
| meraki-analytics/champyongg | api.py | Python | mit | 6,054 |
import os
import traceback
import copy
import re
import datetime
import renderdoc as rd
from . import util
from . import analyse
from . import capture
from .logging import log, TestFailureException
class ShaderVariableCheck:
def __init__(self, var: rd.ShaderVariable, name: str):
self.var = var
if self.var.name != name:
raise TestFailureException("Variable {} name mismatch, expected '{}' but got '{}'"
.format(self.var.name, name, self.var.name))
def rows(self, rows_: int):
if self.var.rows != rows_:
raise TestFailureException("Variable {} row count mismatch, expected {} but got {}"
.format(self.var.name, rows_, self.var.rows))
return self
def cols(self, cols_: int):
if self.var.columns != cols_:
raise TestFailureException("Variable {} column count mismatch, expected {} but got {}"
.format(self.var.name, cols_, self.var.columns))
return self
def type(self, type_: rd.VarType):
if self.var.type != type_:
raise TestFailureException("Variable {} type mismatch, expected {} but got {}"
.format(self.var.name, str(type_), str(self.var.type)))
return self
def value(self, value_: list):
count = len(value_)
if isinstance(value_[0], float):
if list(self.var.value.f32v[0:count]) != list(value_):
raise TestFailureException("Float variable {} value mismatch, expected {} but got {}"
.format(self.var.name, value_, self.var.value.f32v[0:count]))
else:
# hack - check signed and unsigned values
            if list(self.var.value.s32v[0:count]) != list(value_) and list(self.var.value.u32v[0:count]) != list(value_):
raise TestFailureException("Int variable {} value mismatch, expected {} but got {} / {}"
.format(self.var.name, value_, self.var.value.s32v[0:count],
self.var.value.u32v[0:count]))
return self
def longvalue(self, value_: list):
count = len(value_)
if isinstance(value_[0], float):
if list(self.var.value.f64v[0:count]) != list(value_):
raise TestFailureException("Float variable {} value mismatch, expected {} but got {}"
.format(self.var.name, value_, self.var.value.f64v[0:count]))
else:
# hack - check signed and unsigned values
if list(self.var.value.s64v[0:count]) != list(value_) and list(self.var.value.u64v[0:count]) != list(value_):
raise TestFailureException("Int variable {} value mismatch, expected {} but got {} / {}"
.format(self.var.name, value_, self.var.value.s64v[0:count],
self.var.value.u64v[0:count]))
return self
def row_major(self):
if not self.var.rowMajor:
raise TestFailureException("Variable {} is not row-major, as expected"
.format(self.var.name))
return self
def column_major(self):
if self.var.rowMajor:
raise TestFailureException("Variable {} is not row-major, as expected"
.format(self.var.name))
return self
def arraySize(self, elements_: int):
if len(self.var.members) != elements_:
raise TestFailureException("Variable {} array size mismatch, expected {} but got {}"
.format(self.var.name, elements_, len(self.var.members)))
return self
def structSize(self, elements_: int):
if not self.var.isStruct:
raise TestFailureException("Variable {} is not a struct as was expected"
.format(self.var.name))
if len(self.var.members) != elements_:
raise TestFailureException("Variable {} struct size mismatch, expected {} but got {}"
.format(self.var.name, elements_, len(self.var.members)))
return self
def members(self, member_callbacks: dict):
for i, m in enumerate(self.var.members):
if i in member_callbacks:
member_callbacks[i](ShaderVariableCheck(m, m.name))
elif m.name in member_callbacks:
member_callbacks[m.name](ShaderVariableCheck(m, m.name))
else:
raise TestFailureException("Unexpected member in {}: {}"
.format(self.var.name, m.name))
class ConstantBufferChecker:
def __init__(self, variables: list):
self._variables = variables
def check(self, name: str):
if len(self._variables) == 0:
raise TestFailureException("Too many variables checked, {} has no matching data".format(name))
return ShaderVariableCheck(self._variables.pop(0), name)
def next_var(self):
return self._variables[0]
def done(self):
if len(self._variables) != 0:
raise TestFailureException("Not all variables checked, {} still remain".format(len(self._variables)))
class TestCase:
slow_test = False
internal = False
demos_test_name = ''
demos_frame_cap = 5
demos_frame_count = 1
demos_timeout = None
demos_captures_expected = None
_test_list = {}
@staticmethod
def set_test_list(tests):
TestCase._test_list = tests
def check_support(self):
if self.demos_test_name != '':
if self.demos_test_name not in TestCase._test_list:
                return False, 'Test {} not in compiled tests'.format(self.demos_test_name)
return TestCase._test_list[self.demos_test_name]
# Otherwise assume we can run - child tests can override if they want to do some other check
return True,""
def __init__(self):
self.capture_filename = ""
self.controller: rd.ReplayController = None
self.sdfile: rd.SDFile = None
self._variables = []
def get_time(self):
return datetime.datetime.now(datetime.timezone.utc)
def get_ref_path(self, name: str, extra: bool = False):
if extra:
return util.get_data_extra_path(os.path.join(self.__class__.__name__, name))
else:
return util.get_data_path(os.path.join(self.__class__.__name__, name))
def check(self, expr, msg=None):
if not expr:
callstack = traceback.extract_stack()
callstack.pop()
assertion_line = callstack[-1].line
assert_msg = re.sub(r'[^(]*\((.*)?\)', r'\1', assertion_line)
if msg is None:
raise TestFailureException('Assertion Failure: {}'.format(assert_msg))
else:
raise TestFailureException('Assertion Failure: {}'.format(msg))
def get_replay_options(self):
"""
Method to overload if you want to override the replay options used.
:return: The renderdoc.ReplayOptions to use.
"""
return rd.ReplayOptions()
def get_capture_options(self):
"""
Method to overload if you want to override the capture options used.
:return: The renderdoc.CaptureOptions to use.
"""
return rd.CaptureOptions()
def get_capture(self):
"""
Method to overload if not implementing a run(), using the default run which
handles everything and calls get_capture() and check_capture() for you.
:return: The path to the capture to open. If in a temporary path, it will be
deleted if the test completes.
"""
if self.demos_test_name != '':
logfile = os.path.join(util.get_tmp_dir(), 'demos.log')
timeout = self.demos_timeout
if timeout is None:
timeout = util.get_demos_timeout()
return capture.run_and_capture(util.get_demos_binary(), self.demos_test_name + " --log " + logfile,
self.demos_frame_cap, frame_count=self.demos_frame_count,
captures_expected=self.demos_captures_expected, logfile=logfile,
opts=self.get_capture_options(), timeout=timeout)
raise NotImplementedError("If run() is not implemented in a test, then"
"get_capture() and check_capture() must be.")
def check_capture(self):
"""
Method to overload if not implementing a run(), using the default run which
handles everything and calls get_capture() and check_capture() for you.
"""
raise NotImplementedError("If run() is not implemented in a test, then"
"get_capture() and check_capture() must be.")
def action_name(self, action: rd.ActionDescription):
if len(action.customName) > 0:
return action.customName
return self.sdfile.chunks[action.events[-1].chunkIndex].name
def _find_action(self, name: str, start_event: int, action_list):
action: rd.ActionDescription
for action in action_list:
# If this action matches, return it
if action.eventId >= start_event and (name == '' or name in self.action_name(action)):
return action
# Recurse to children - depth-first search
ret: rd.ActionDescription = self._find_action(name, start_event, action.children)
# If we found our action, return
if ret is not None:
return ret
# Otherwise continue to next in the list
# If we didn't find anything, return None
return None
def find_action(self, name: str, start_event: int = 0):
"""
Finds the first action matching given criteria
:param name: The name to search for within the actions
:param start_event: The first eventId to search from.
:return:
"""
return self._find_action(name, start_event, self.controller.GetRootActions())
def get_action(self, event: int = 0):
"""
Finds the action for the given event
:param event: The eventId to search for.
:return:
"""
return self._find_action('', event, self.controller.GetRootActions())
def get_vsin(self, action: rd.ActionDescription, first_index: int=0, num_indices: int=0, instance: int=0, view: int=0):
ib: rd.BoundVBuffer = self.controller.GetPipelineState().GetIBuffer()
if num_indices == 0:
num_indices = action.numIndices
else:
num_indices = min(num_indices, action.numIndices)
ioffs = action.indexOffset * ib.byteStride
mesh = rd.MeshFormat()
mesh.numIndices = num_indices
mesh.indexByteOffset = ib.byteOffset + ioffs
mesh.indexByteStride = ib.byteStride
mesh.indexResourceId = ib.resourceId
mesh.baseVertex = action.baseVertex
if ib.byteSize > ioffs:
mesh.indexByteSize = ib.byteSize - ioffs
else:
mesh.indexByteSize = 0
if not (action.flags & rd.ActionFlags.Indexed):
mesh.indexByteOffset = 0
mesh.indexByteStride = 0
mesh.indexResourceId = rd.ResourceId.Null()
attrs = analyse.get_vsin_attrs(self.controller, action.vertexOffset, mesh)
first_index = min(first_index, action.numIndices-1)
indices = analyse.fetch_indices(self.controller, action, mesh, 0, first_index, num_indices)
return analyse.decode_mesh_data(self.controller, indices, indices, attrs, 0, 0)
def get_postvs(self, action: rd.ActionDescription, data_stage: rd.MeshDataStage, first_index: int = 0,
num_indices: int = 0, instance: int = 0, view: int = 0):
mesh: rd.MeshFormat = self.controller.GetPostVSData(instance, view, data_stage)
if mesh.numIndices == 0:
return []
if num_indices == 0:
num_indices = mesh.numIndices
else:
num_indices = min(num_indices, mesh.numIndices)
first_index = min(first_index, mesh.numIndices-1)
ib: rd.BoundVBuffer = self.controller.GetPipelineState().GetIBuffer()
ioffs = action.indexOffset * ib.byteStride
in_mesh = rd.MeshFormat()
in_mesh.numIndices = num_indices
in_mesh.indexByteOffset = ib.byteOffset + ioffs
in_mesh.indexByteStride = ib.byteStride
in_mesh.indexResourceId = ib.resourceId
in_mesh.baseVertex = action.baseVertex
if ib.byteSize > ioffs:
in_mesh.indexByteSize = ib.byteSize - ioffs
else:
in_mesh.indexByteSize = 0
if not (action.flags & rd.ActionFlags.Indexed):
in_mesh.indexByteOffset = 0
in_mesh.indexByteStride = 0
in_mesh.indexResourceId = rd.ResourceId.Null()
indices = analyse.fetch_indices(self.controller, action, mesh, 0, first_index, num_indices)
in_indices = analyse.fetch_indices(self.controller, action, in_mesh, 0, first_index, num_indices)
attrs = analyse.get_postvs_attrs(self.controller, mesh, data_stage)
return analyse.decode_mesh_data(self.controller, indices, in_indices, attrs, 0, mesh.baseVertex)
def check_mesh_data(self, mesh_ref, mesh_data):
for idx in mesh_ref:
ref = mesh_ref[idx]
if idx >= len(mesh_data):
raise TestFailureException('Mesh data doesn\'t have expected element {}'.format(idx))
data = mesh_data[idx]
for key in ref:
if key not in data:
raise TestFailureException('Mesh data[{}] doesn\'t contain data {} as expected. Data is: {}'.format(idx, key, list(data.keys())))
if not util.value_compare(ref[key], data[key]):
raise TestFailureException('Mesh data[{}] \'{}\': {} is not as expected: {}'.format(idx, key, data[key], ref[key]))
log.success("Mesh data is identical to reference")
def check_pixel_value(self, tex: rd.ResourceId, x, y, value, *, sub=None, cast=None, eps=util.FLT_EPSILON):
tex_details = self.get_texture(tex)
res_details = self.get_resource(tex)
if sub is None:
sub = rd.Subresource(0,0,0)
if cast is None:
cast = rd.CompType.Typeless
if tex_details is not None:
if type(x) is float:
x = int(((tex_details.width >> sub.mip) - 1) * x)
if type(y) is float:
y = int(((tex_details.height >> sub.mip) - 1) * y)
if cast == rd.CompType.Typeless and tex_details.creationFlags & rd.TextureCategory.SwapBuffer:
cast = rd.CompType.UNormSRGB
# Reduce epsilon for RGBA8 textures if it's not already reduced
if tex_details.format.compByteWidth == 1 and eps == util.FLT_EPSILON:
eps = (1.0 / 255.0)
if tex_details.format.compByteWidth == 2 and eps == util.FLT_EPSILON:
eps = (1.0 / 16384.0)
picked: rd.PixelValue = self.controller.PickPixel(tex, x, y, sub, cast)
picked_value = picked.floatValue
if cast == rd.CompType.UInt:
picked_value = picked.uintValue
elif cast == rd.CompType.SInt:
picked_value = picked.intValue
if not util.value_compare(picked_value, value, eps):
save_data = rd.TextureSave()
save_data.resourceId = tex
save_data.destType = rd.FileType.PNG
save_data.slice.sliceIndex = sub.slice
save_data.mip = sub.mip
save_data.sample.sampleIndex = sub.sample
img_path = util.get_tmp_path('output.png')
self.controller.SaveTexture(save_data, img_path)
raise TestFailureException(
"Picked value {} at {},{} doesn't match expectation of {}".format(picked_value, x, y, value),
img_path)
name = "Texture"
if res_details is not None:
name = res_details.name
log.success("Picked value at {},{} in {} is as expected".format(x, y, name))
def check_triangle(self, out = None, back = None, fore = None, vp = None):
pipe: rd.PipeState = self.controller.GetPipelineState()
# if no output is specified, check the current colour output at this action
if out is None:
out = pipe.GetOutputTargets()[0].resourceId
tex_details = self.get_texture(out)
# if no colours are specified, default to green on our dark grey
if back is None:
back = [0.2, 0.2, 0.2, 1.0]
if fore is None:
fore = [0.0, 1.0, 0.0, 1.0]
if vp is None:
vp = (0.0, 0.0, float(tex_details.width), float(tex_details.height))
self.check_pixel_value(out, int(0.5*vp[2]+vp[0]), int(0.5*vp[3]+vp[1]), fore)
self.check_pixel_value(out, int(0.5*vp[2]+vp[0]), int(0.3*vp[3]+vp[1]), fore)
self.check_pixel_value(out, int(0.3*vp[2]+vp[0]), int(0.7*vp[3]+vp[1]), fore)
self.check_pixel_value(out, int(0.7*vp[2]+vp[0]), int(0.7*vp[3]+vp[1]), fore)
self.check_pixel_value(out, int(0.3*vp[2]+vp[0]), int(0.5*vp[3]+vp[1]), back)
self.check_pixel_value(out, int(0.7*vp[2]+vp[0]), int(0.5*vp[3]+vp[1]), back)
self.check_pixel_value(out, int(0.5*vp[2]+vp[0]), int(0.8*vp[3]+vp[1]), back)
self.check_pixel_value(out, int(0.5*vp[2]+vp[0]), int(0.2*vp[3]+vp[1]), back)
log.success("Simple triangle is as expected")
def run(self):
self.capture_filename = self.get_capture()
self.check(os.path.exists(self.capture_filename), "Didn't generate capture in make_capture")
log.print("Loading capture")
self.controller = analyse.open_capture(self.capture_filename, opts=self.get_replay_options())
self.sdfile = self.controller.GetStructuredFile()
log.print("Checking capture")
self.check_capture()
if self.controller is not None:
self.controller.Shutdown()
def invoketest(self, debugMode):
start_time = self.get_time()
self.run()
duration = self.get_time() - start_time
log.print("Test ran in {}".format(duration))
self.debugMode = debugMode
def get_first_action(self):
first_action: rd.ActionDescription = self.controller.GetRootActions()[0]
while len(first_action.children) > 0:
first_action = first_action.children[0]
return first_action
def get_texture(self, id: rd.ResourceId):
texs = self.controller.GetTextures()
for t in texs:
t: rd.TextureDescription
if t.resourceId == id:
return t
return None
def get_resource(self, id: rd.ResourceId):
resources = self.controller.GetResources()
for r in resources:
r: rd.ResourceDescription
if r.resourceId == id:
return r
return None
def get_resource_by_name(self, name: str):
resources = self.controller.GetResources()
for r in resources:
r: rd.ResourceDescription
if r.name == name:
return r
return None
def get_last_action(self):
last_action: rd.ActionDescription = self.controller.GetRootActions()[-1]
while len(last_action.children) > 0:
last_action = last_action.children[-1]
return last_action
def check_final_backbuffer(self):
img_path = util.get_tmp_path('backbuffer.png')
ref_path = self.get_ref_path('backbuffer.png')
last_action: rd.ActionDescription = self.get_last_action()
self.controller.SetFrameEvent(last_action.eventId, True)
save_data = rd.TextureSave()
save_data.resourceId = last_action.copyDestination
save_data.destType = rd.FileType.PNG
self.controller.SaveTexture(save_data, img_path)
if not util.png_compare(img_path, ref_path):
raise TestFailureException("Reference and output backbuffer image differ", ref_path, img_path)
log.success("Backbuffer is identical to reference")
def process_trace(self, trace: rd.ShaderDebugTrace):
variables = {}
cycles = 0
while True:
states = self.controller.ContinueDebug(trace.debugger)
if len(states) == 0:
break
for state in states:
for change in state.changes:
variables[change.after.name] = change.after
cycles = states[-1].stepIndex
return cycles, variables
def get_sig_index(self, signature, builtin: rd.ShaderBuiltin, reg_index: int = -1):
search = (builtin, reg_index)
signature_mapped = [(sig.systemValue, sig.regIndex) for sig in signature]
if reg_index == -1:
search = builtin
signature_mapped = [x[0] for x in signature_mapped]
if search in signature_mapped:
return signature_mapped.index(search)
return -1
def find_source_var(self, sourceVars, signatureIndex, varType):
vars = [x for x in sourceVars if x.signatureIndex == signatureIndex and x.variables[0].type == varType]
if len(vars) == 0:
return None
return vars[0]
def find_input_source_var(self, trace: rd.ShaderDebugTrace, builtin: rd.ShaderBuiltin, reg_index: int = -1):
refl: rd.ShaderReflection = self.controller.GetPipelineState().GetShaderReflection(trace.stage)
sig_index = self.get_sig_index(refl.inputSignature, builtin, reg_index)
return self.find_source_var(trace.sourceVars, sig_index, rd.DebugVariableType.Input)
def find_output_source_var(self, trace: rd.ShaderDebugTrace, builtin: rd.ShaderBuiltin, reg_index: int = -1):
refl: rd.ShaderReflection = self.controller.GetPipelineState().GetShaderReflection(trace.stage)
sig_index = self.get_sig_index(refl.outputSignature, builtin, reg_index)
return self.find_source_var(trace.sourceVars, sig_index, rd.DebugVariableType.Variable)
def get_debug_var(self, debugVars, path: str):
# first look for exact match
for name, var in debugVars.items():
if name == path:
return var
child = ''
remaining = ''
# Otherwise, take off any child if we haven't started recursing
        m = re.match(r"([a-zA-Z0-9_]+)(\[.*|\..*)", path)
if m:
child = m.group(1)
remaining = m.group(2)
else:
# array index
            m = re.match(r"(\[[0-9]*\])(.*)", path)
if m:
child = m.group(1)
remaining = m.group(2)
else:
                m = re.match(r"\.([a-zA-Z0-9_]+)(.*)", path)
if m:
child = m.group(1)
remaining = m.group(2)
if child != '':
for name, var in debugVars.items():
var: rd.ShaderVariable
if name == child:
if remaining == '':
return var
else:
return self.get_debug_var({mem.name: mem for mem in var.members}, remaining)
raise KeyError("Couldn't find {} in debug vars".format(path))
raise KeyError("Couldn't parse path {}".format(path))
def evaluate_source_var(self, sourceVar: rd.SourceVariableMapping, debugVars):
debugged = rd.ShaderVariable()
debugged.name = sourceVar.name
debugged.type = sourceVar.type
debugged.rows = sourceVar.rows
debugged.columns = sourceVar.columns
f32v = [0.0] * 16
for i, debugVarPath in enumerate(sourceVar.variables):
debugVar = self.get_debug_var(debugVars, debugVarPath.name)
debugged.rowMajor = debugVar.rowMajor
f32v[i] = debugVar.value.f32v[debugVarPath.component]
debugged.value.f32v = f32v
return debugged
def combine_source_vars(self, vars):
NOT_FOUND = 100000
processed = []
# Keep looping until we're done
while len(vars) > 0:
# find the first member that contains a . or [ character in its name
base = ''
bare_array = False
first_var = len(vars)
for i,v in enumerate(vars):
idx = NOT_FOUND
if '.' in v.name:
idx = v.name.index('.')
if '[' in v.name:
idx2 = v.name.index('[')
if idx2 < idx:
if idx == NOT_FOUND:
bare_array = True
idx = idx2
if idx2 == 0:
idx = v.name.index(']')+1
if idx == NOT_FOUND:
processed.append(v)
else:
first_var = i
base = v.name[:idx]
break
del vars[0:first_var]
# If no vars are found, we're done
if base == '':
continue
members = []
combined = rd.ShaderVariable()
combined.name = base
last_var = -1
for i in range(len(vars)):
check = vars[i].name[:len(base)+1]
if check == base + '.' or check == base + '[':
last_var = i
v = vars[i]
v.name = v.name[len(base):]
if v.name[0] == '.':
v.name = v.name[1:]
combined.isStruct = True
if check == base + '.':
combined.isStruct = True
members.append(vars[i])
if not bare_array:
members = self.combine_source_vars(members)
combined.members = members
del vars[0:last_var+1]
processed.append(combined)
# Continue and combine the next set of vars (there could be multiple structs/arrays on the same level,
# and we only combined the first set)
return processed
def check_export(self, capture_filename):
recomp_path = util.get_tmp_path('recompressed.rdc')
conv_zipxml_path = util.get_tmp_path('conv.zip.xml')
conv_path = util.get_tmp_path('conv.rdc')
origrdc = rd.OpenCaptureFile()
status = origrdc.OpenFile(capture_filename, '', None)
self.check(status == rd.ReplayStatus.Succeeded, "Couldn't open '{}': {}".format(capture_filename, str(status)))
# Export to rdc, to recompress
origrdc.Convert(recomp_path, '', None, None)
origrdc.Convert(conv_zipxml_path, 'zip.xml', None, None)
origrdc.Shutdown()
# Load up the zip.xml file
zipxml = rd.OpenCaptureFile()
status = zipxml.OpenFile(conv_zipxml_path, 'zip.xml', None)
self.check(status == rd.ReplayStatus.Succeeded, "Couldn't open '{}': {}".format(conv_zipxml_path, str(status)))
# Convert out to rdc
zipxml.Convert(conv_path, '', None, None)
zipxml.Shutdown()
if not util.md5_compare(recomp_path, conv_path):
raise TestFailureException("Recompressed capture file doesn't match re-imported capture file", conv_path, recomp_path, conv_zipxml_path)
log.success("Recompressed and re-imported capture files are identical")
| moradin/renderdoc | util/test/rdtest/testcase.py | Python | mit | 27,966 |
# -*- coding: utf-8 -*-
# http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
import index, get, post, delete, put | VojtechBartos/smsgw | smsgw/resources/contacts/datasets/__init__.py | Python | mit | 126 |
from django.db import models
from tinymce import models as tinymce_models
class TestModel(models.Model):
foobar = tinymce_models.HTMLField()
class TestPage(models.Model):
content1 = models.TextField()
content2 = models.TextField()
class TestInline(models.Model):
page = models.ForeignKey(TestPage, on_delete=models.CASCADE)
content1 = models.TextField()
content2 = models.TextField()
| aljosa/django-tinymce | tests/testapp/models.py | Python | mit | 415 |
# Generated by Django 1.9.7 on 2016-07-12 15:16
from __future__ import unicode_literals
from django.db import migrations
from django.utils import translation
from django.utils.translation import gettext_lazy as _
sitesettings = [
{
'slug': 'site_name',
'description': _('Site name'),
'type': 'text'
},
{
'slug': 'email_for_feedback',
'description': _('Email for feedback'),
'type': 'text'
},
]
def insert_settings(apps, schema):
from django.conf import settings
translation.activate(settings.LANGUAGE_CODE)
SiteSettings = apps.get_model('sitesettings', 'SiteSettings')
for sitesetting in sitesettings:
SiteSettings.objects.create(**sitesetting)
translation.deactivate()
def delete_settings(apps, schema):
SiteSettings = apps.get_model('sitesettings', 'SiteSettings')
for sitesetting in sitesettings:
SiteSettings.objects.get(slug=sitesetting['slug']).delete()
class Migration(migrations.Migration):
dependencies = [
('sitesettings', '0001_initial'),
]
operations = [
migrations.RunPython(insert_settings, delete_settings)
]
| astrikov-d/dartcms | dartcms/apps/sitesettings/migrations/0002_insert_settings.py | Python | mit | 1,175 |
"""
Utilities for the rdb backend.
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jan 7, 2013.
"""
from everest.constants import RESOURCE_ATTRIBUTE_KINDS
from everest.entities.system import UserMessage
from everest.repositories.utils import GlobalObjectManager
from inspect import isdatadescriptor
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import func as sa_func
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import clear_mappers as sa_clear_mappers
from sqlalchemy.orm import mapper as sa_mapper
from sqlalchemy.orm.interfaces import MANYTOMANY
from sqlalchemy.orm.interfaces import MANYTOONE
from sqlalchemy.orm.interfaces import ONETOMANY
from sqlalchemy.orm.mapper import _mapper_registry
from sqlalchemy.sql.expression import cast
from threading import Lock
__docformat__ = 'reStructuredText en'
__all__ = ['OrmAttributeInspector',
'as_slug_expression',
'clear_mappers',
'empty_metadata',
'get_metadata',
'hybrid_descriptor',
'is_metadata_initialized',
'map_system_entities',
'mapper',
'reset_metadata',
'set_metadata',
'synonym',
]
class _MetaDataManager(GlobalObjectManager):
_globs = {}
_lock = Lock()
@classmethod
def reset(cls):
# This removes all attribute instrumentation from the entity classes.
clear_mappers()
# This is *very* important - the ORM attribute inspector caches
# attributes which have become invalidated by the clearing of the
# mappers.
OrmAttributeInspector.reset()
for md in cls._globs.values():
md.clear()
super(_MetaDataManager, cls).reset()
get_metadata = _MetaDataManager.get
set_metadata = _MetaDataManager.set
is_metadata_initialized = _MetaDataManager.is_initialized
reset_metadata = _MetaDataManager.reset
def clear_mappers():
"""
Clears all mappers set up by SA and also clears all custom "id" and
"slug" attributes inserted by the :func:`mapper` function in this module.
This should only ever be needed in a testing context.
"""
# Remove our hybrid property constructs.
for mpr, is_primary in _mapper_registry.items():
if is_primary:
for attr_name in ('id', 'slug'):
try:
attr = object.__getattribute__(mpr.class_, attr_name)
if isinstance(attr, hybrid_property):
if attr_name == 'id':
delattr(mpr.class_, attr_name)
else:
setattr(mpr.class_, attr_name, attr.descriptor)
except AttributeError:
pass
sa_clear_mappers()
def as_slug_expression(attr):
"""
Converts the given instrumented string attribute into an SQL expression
that can be used as a slug.
Slugs are identifiers for members in a collection that can be used in an
URL. We create slug columns by replacing non-URL characters with dashes
and lower casing the result. We need this at the ORM level so that we can
use the slug in a query expression.
"""
slug_expr = sa_func.replace(attr, ' ', '-')
slug_expr = sa_func.replace(slug_expr, '_', '-')
slug_expr = sa_func.lower(slug_expr)
return slug_expr
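# Illustration with a hypothetical mapped column: as_slug_expression(MyEntity.title)
# builds an expression equivalent to lower(replace(replace(title, ' ', '-'), '_', '-')),
# so a stored title of "My Item_1" is matched by the slug "my-item-1".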
class hybrid_descriptor(hybrid_property):
"""
Helper class wrapping a data descriptor into a hybrid property.
"""
def __init__(self, descriptor, expr=None):
self.__descriptor = descriptor
hybrid_property.__init__(self, descriptor.fget,
fset=descriptor.fset, fdel=descriptor.fdel,
expr=expr)
@property
def descriptor(self):
return self.__descriptor
def mapper(class_, local_table=None, id_attribute='id', slug_expression=None,
*args, **kwargs):
"""
Convenience wrapper around the SA mapper which will set up the hybrid
"id" and "slug" attributes required by everest after calling the SA
mapper.
If you (e.g., for testing purposes) want to clear mappers created with
this function, use the :func:`clear_mappers` function in this module.
:param str id_attribute: the name of the column in the table to use as
ID column (will be aliased to a new "id" attribute in the mapped class)
:param slug_expression: function to generate a slug SQL expression given
the mapped class as argument.
"""
mpr = sa_mapper(class_, local_table=local_table, *args, **kwargs)
# Set up the ID attribute as a hybrid property, if necessary.
if id_attribute != 'id':
# Make sure we are not overwriting an already mapped or customized
# 'id' attribute.
if 'id' in mpr.columns:
mpr.dispose()
raise ValueError('Attempting to overwrite the mapped "id" '
'attribute.')
elif isdatadescriptor(getattr(class_, 'id', None)):
mpr.dispose()
raise ValueError('Attempting to overwrite the custom data '
'descriptor defined for the "id" attribute.')
class_.id = synonym(id_attribute)
# If this is a polymorphic class, a base class may already have a
# hybrid descriptor set as slug attribute.
slug_descr = None
for base_cls in class_.__mro__:
try:
slug_descr = object.__getattribute__(base_cls, 'slug')
except AttributeError:
pass
else:
break
if isinstance(slug_descr, hybrid_descriptor):
if not slug_expression is None:
raise ValueError('Attempting to overwrite the expression for '
'an inherited slug hybrid descriptor.')
hyb_descr = slug_descr
else:
# Set up the slug attribute as a hybrid property.
if slug_expression is None:
cls_expr = lambda cls: cast(getattr(cls, 'id'), String)
else:
cls_expr = slug_expression
hyb_descr = hybrid_descriptor(slug_descr, expr=cls_expr)
class_.slug = hyb_descr
return mpr
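# A minimal usage sketch with a hypothetical entity and table (compare the
# UserMessage mapping in map_system_entities below):
#   mapper(Person, person_tbl, id_attribute='person_id',
#          slug_expression=lambda cls: as_slug_expression(cls.name))
# This aliases Person.person_id to Person.id and installs a slug hybrid
# property that works on instances as well as in query expressions.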
def synonym(name):
"""
Utility function mimicking the behavior of the old SA synonym function
with the new hybrid property semantics.
"""
return hybrid_property(lambda inst: getattr(inst, name),
lambda inst, value: setattr(inst, name, value),
expr=lambda cls: getattr(cls, name))
def map_system_entities(engine, metadata, reset):
"""
Maps all system entities.
"""
# Map the user message system entity.
msg_tbl = Table('_user_messages', metadata,
Column('guid', String, nullable=False, primary_key=True),
Column('text', String, nullable=False),
Column('time_stamp', DateTime(timezone=True),
nullable=False, default=sa_func.now()),
)
mapper(UserMessage, msg_tbl, id_attribute='guid')
if reset:
metadata.drop_all(bind=engine, tables=[msg_tbl])
metadata.create_all(bind=engine, tables=[msg_tbl])
def empty_metadata(engine):
"""
The default metadata factory.
"""
metadata = MetaData()
metadata.create_all(bind=engine)
return metadata
class OrmAttributeInspector(object):
"""
Helper class inspecting class attributes mapped by the ORM.
"""
__cache = {}
@staticmethod
def reset():
"""
This clears the attribute cache this inspector maintains.
Only needed in a testing context.
"""
OrmAttributeInspector.__cache.clear()
@staticmethod
def inspect(orm_class, attribute_name):
"""
:param attribute_name: name of the mapped attribute to inspect.
:returns: list of 2-tuples containing information about the inspected
attribute (first element: mapped entity attribute kind; second
attribute: mapped entity attribute)
"""
key = (orm_class, attribute_name)
elems = OrmAttributeInspector.__cache.get(key)
if elems is None:
elems = OrmAttributeInspector.__inspect(key)
OrmAttributeInspector.__cache[key] = elems
return elems
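    # Shape of the result for a hypothetical mapped class:
    #   OrmAttributeInspector.inspect(Project, 'owner.name')
    # would return [(MEMBER, Project.owner), (TERMINAL, Owner.name)], i.e. one
    # (kind, attribute) pair per dotted path element.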
@staticmethod
def __inspect(key):
orm_class, attribute_name = key
elems = []
entity_type = orm_class
ent_attr_tokens = attribute_name.split('.')
count = len(ent_attr_tokens)
for idx, ent_attr_token in enumerate(ent_attr_tokens):
entity_attr = getattr(entity_type, ent_attr_token)
kind, attr_type = OrmAttributeInspector.__classify(entity_attr)
if idx == count - 1:
pass
# We are at the last name token - this must be a TERMINAL
# or an ENTITY.
# if kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION:
# raise ValueError('Invalid attribute name "%s": the '
# 'last element (%s) references an '
# 'aggregate attribute.'
# % (attribute_name, ent_attr_token))
else:
if kind == RESOURCE_ATTRIBUTE_KINDS.TERMINAL:
# We should not get here - the last attribute was a
# terminal.
raise ValueError('Invalid attribute name "%s": the '
'element "%s" references a terminal '
'attribute.'
% (attribute_name, ent_attr_token))
entity_type = attr_type
elems.append((kind, entity_attr))
return elems
@staticmethod
def __classify(attr):
# Looks up the entity attribute kind and target type for the given
# entity attribute.
# We look for an attribute "property" to identify mapped attributes
# (instrumented attributes and attribute proxies).
if not hasattr(attr, 'property'):
raise ValueError('Attribute "%s" is not mapped.' % attr)
# We detect terminals by the absence of an "argument" attribute of
# the attribute's property.
if not hasattr(attr.property, 'argument'):
kind = RESOURCE_ATTRIBUTE_KINDS.TERMINAL
target_type = None
else: # We have a relationship.
target_type = attr.property.argument
if attr.property.direction in (ONETOMANY, MANYTOMANY):
if not attr.property.uselist:
# 1:1
kind = RESOURCE_ATTRIBUTE_KINDS.MEMBER
else:
# 1:n or n:m
kind = RESOURCE_ATTRIBUTE_KINDS.COLLECTION
elif attr.property.direction == MANYTOONE:
kind = RESOURCE_ATTRIBUTE_KINDS.MEMBER
else:
raise ValueError('Unsupported relationship direction "%s".' # pragma: no cover
% attr.property.direction)
return kind, target_type
| helixyte/everest | everest/repositories/rdb/utils.py | Python | mit | 11,376 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'psychicwight.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^$', 'quiz.views.home', name='home'),
url(r'^admin/', include(admin.site.urls)),
url(r'^quiz/', include('quiz.urls', namespace="quiz")),
)
| noracami/psychic-wight | psychicwight/psychicwight/urls.py | Python | mit | 389 |
# -*- coding: utf-8 -*-
"""
Cli tool for odin
"""
import os
import argparse
import queue
import logging
import uuid
import json
from pprint import pprint
import odin
from odin.static import __version__
from odin.utils import (run_scan, assembler, get_filter)
from odin.store import OpenDnsModel
from odin import utils
# Default logging capabilities (logging nowhere)
log = odin.get_logger()
def get_args():
"""Parse input parameters
:returns: an Namespace object with inputted arguments
:rtype: argparse.Namespace
"""
parser = argparse.ArgumentParser()
parser_exclusive = parser.add_mutually_exclusive_group(required=False)
parser_exclusive.add_argument('-v', '--verbose',
action='count', default=None)
parser_exclusive.add_argument('-q', '--quiet',
action='store_const', const=0, default=None)
hold_parser = argparse.ArgumentParser(
description='{0}{1}{2}{3}{4}{5}{6}'.format(
            'Odin is a command line tool to scan a host or a ',
            'CIDR formatted network and find Open Resolvers. ',
            'As you would probably run the script over some 1000~ hosts at ',
            'a time, the chunk switch is provided in order to select the ',
            'thread pool number to spawn to process the requests. (bear in ',
            'mind your ISP could easily throttle you out if you burst too much ',
'traffic in a single run!)'))
hold_parser.add_argument("--version", action="version",
version="%(prog)s " + __version__)
subparsers = hold_parser.add_subparsers(dest="subparser")
# Scan sub parser
scan = subparsers.add_parser('scan', parents=[parser],
conflict_handler='resolve',
description="{}{}{}".format(
"Perform a scan and return the ",
"result, optionally save the ",
"scan into the DB"))
scan.add_argument("-t", "--target", dest="target",
action="store", type=str, default=None,
help="Set target: IP or CIDR range.", required=True)
    scan.add_argument("-c", "--chunk", dest="chunk", action="store",
                      default=50, type=int,
                      help="Set Ip Range chunk size; 2048 max.")
scan.add_argument("-f", "--filter", dest="filter", action="store",
                      help='{} {} {}'.format(
                          "show only if target is (or has):",
                          "is_dns, is_resolver, version, all.",
                          "It defaults to 'is_resolver'."),
                      required=False, default='is_resolver', type=str)
scan.add_argument("--store", action="store_true",
help="if specified store the result in Dynamo")
scan.add_argument("--dump", action="store_true",
help="{}{}".format(
"if specified dump the result ",
"into randomly generated file in CWD"))
# Query sub parser
query = subparsers.add_parser('query', parents=[parser],
conflict_handler='resolve',
)
query.add_argument("-t", "--type", dest="type",
action="store", type=str,
required=True, help="{}{}".format(
"Specify the type of record to query for: ",
"is_dns | is_resolver"))
query.add_argument("-r", "--range", dest="range",
action="store", type=str, default=None,
help="{}{}".format(
"Set target range in the form like: ",
"192 | 192.168 | 192.168.0"))
query.add_argument("-V", "--version", dest="version", nargs='?',
                       const=1, type=str,
help='{} {} {}'.format(
"query only for target with ",
"or without a particular dns version in the form:",
"'dnsmasq' or '!dnsmasq'"))
query.add_argument("--reversed", action="store_false",
help="{}{}".format(
"if specified, the resultset ",
"is sorted from older to newer"),
default=True)
query.add_argument("-l", "--limit", dest="limit", action="store",
default=None, type=int,
help="Set a limit for results")
query.add_argument("-e", "--extended", action="store_true", default=None,
help="if specified show all from returned object")
# Db sun parser
db = subparsers.add_parser('db', parents=[parser],
conflict_handler='resolve',
)
exclusive = db.add_mutually_exclusive_group(required=True)
exclusive.add_argument("--dump", dest="dump", action="store",
help="Output file")
exclusive.add_argument("--load", dest="load", action="store",
help="Input file")
exclusive.add_argument("--create", dest="create",
nargs=2, metavar=('read', 'write'),
help="{}{}".format(
"Create the DB, if not exists already,",
" giving read and write capacity"))
exclusive.add_argument("--delete", action="store_true",
help="delete the DB table")
exclusive.add_argument("--describe", action="store_true",
help="describe the DB tables")
# Delete sub parser
delete = subparsers.add_parser('delete', parents=[parser],
conflict_handler='resolve'
)
delete.add_argument("-t", "--target", dest="target",
action="store", type=str, default=None,
help="Set target for deletion: IP or CIDR range.",
required=True)
return (hold_parser.parse_args(), hold_parser)
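# Example argument sets accepted by the parsers above (addresses, limits and
# capacities are placeholders; invoke however this script is installed, e.g.
# python scripts.py ...):
#   scan -t 192.168.0.0/24 -c 100 --store
#   query -t is_resolver -r 192.168 -l 10
#   db --create 5 5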
def do_query(subject, index=None,
nets=[], scan_index_forward=None, limit=None,
version_string=None, negate_version=None):
"""Run a query against the DB"""
if negate_version:
version_name = 'version__not_contains'
elif version_string is True:
version_name = 'version__not_null'
else:
version_name = 'version__contains'
query = {'scan_index_forward': scan_index_forward}
if limit is not None:
query.update({'limit': limit})
if version_string:
query.update({version_name: version_string})
if subject == 'is_resolver':
query_over = getattr(OpenDnsModel, 'openresolvers_index')
elif subject == 'is_dns':
query_over = getattr(OpenDnsModel, 'dns_index')
query.update({'is_resolver__eq': False,
'is_dns__eq': True}) # FIXME
    if len(nets) == 0:
log.info('performing a single query for %s type', subject)
for ip in query_over.query(1, **query):
log.debug('returned ip: %s', ip.ip)
yield ip
else:
log.info('performing a batch of queries for %s type', subject)
for net in nets:
log.debug('query for net: %s', net)
query.update({index + '__eq': str(net)})
for ip in query_over.query(1, **query):
log.debug('returned ip: %s', ip.ip)
yield ip
def main():
""" the main script."""
args, parser = get_args()
if args.subparser is None:
parser.print_help()
return
# setup logging
levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
# taking care to check either verbosity or quiet flag
if args.verbose is not None:
arg = args.verbose + 1
elif args.quiet is not None:
arg = args.quiet
else:
arg = 1
level = levels[min(len(levels)-1, arg)]
log = odin.get_logger(level)
# SCAN case
if args.subparser == "scan":
log.info('Preparing for scanning..')
targets = utils.findip(args.target)
log.debug('list of ip to be scanned: %s', targets)
assert args.chunk > 1 and args.chunk <= 2048, (
'You have to specify a chunk between 1 and 2048.')
targets = utils.chunker(targets, args.chunk)
assert args.filter in ['is_dns',
'is_resolver',
'version',
'all'], ('{}{}'.format(
'You have to specify a filter in:',
' is_dns, is_resolver, version, all.'))
my_queue = queue.Queue()
result = []
printing = {}
for obj in run_scan(my_queue, targets):
log.debug('adding %s to the resultset', obj.ip)
printing[obj.ip] = obj.serialize
result.append(obj)
if args.dump:
log.info('dump flag passed: saving results to file..')
filename = str(uuid.uuid4())
with open(filename, 'w') as f:
json.dump(
[r.serialize for r in result if r.serialize['is_dns']], f)
if args.store:
log.info('store flag passed: saving results into the DB..')
try:
with OpenDnsModel.batch_write() as batch:
for ip in result:
log.debug('storing ip: %s', ip)
batch.save(ip)
except Exception as err:
log.error('batch failed to save to db: %s', err, exc_info=True)
pass
pprint(printing)
# QUERY case
elif args.subparser == "query":
log.info('Preparing for querying..')
# query needs:
# * show all openresolvers
# * show all openresolvers with version X
# * show all dns
# * show all dns with version X
# * show all
# - all above with given range 192, 192.168, 192.168.55
        # - all above with timerange first to last or vice versa
        # - all above with filter option to show only certain info
        # * show last X [anything, dns, resolver], with possible class filter
        # * count number per: all, dns, resolver, with possible class filter
if args.range:
cidr, class_range = assembler(args.range)
nets = [args.range]
else:
class_range = None
nets = []
if args.version == 1:
version_string, negate_version = True, None
elif args.version:
version_string, negate_version = get_filter(args.version)
else:
version_string, negate_version = None, None
log.debug(
'cheking version params: version_string %s, negate_version %s',
version_string, negate_version)
for result in do_query(args.type, index=class_range,
nets=nets, scan_index_forward=args.reversed,
version_string=version_string, limit=args.limit,
negate_version=negate_version):
log.debug('perform serialization of obj: %s', result.ip)
if args.extended:
pprint(result.serialize)
else:
pprint({'ip': result.ip, 'version': result.version})
# DELETE CASE
elif args.subparser == "delete":
log.info('starting deletion of IP addresses')
targets = utils.findip(args.target)
        # FIXME BUG in pynamo: single delete is ok, but batching complains about
        # a key that cannot be null
objects = []
for target in targets:
log.debug('preparing to delete item: %s', target)
try:
objects.append(OpenDnsModel.get(target))
# obj does not exist
except Exception as err:
log.info('ip: %s does not exist in DB: %s', target,
err, exc_info=True)
try:
with OpenDnsModel.batch_write() as batch:
for ip in objects:
batch.delete(ip)
except Exception as err:
log.error("unable to delete the specified ips",
exc_info=True)
return
log.info('delete operation finished successfully')
# DB manipulation case
elif args.subparser == "db":
if args.describe:
log.info('describe table %s', OpenDnsModel.Meta.table_name)
return pprint(OpenDnsModel.describe_table())
if args.load:
log.info('preparing to load dataset from file..')
try:
OpenDnsModel.load(args.load)
except IOError as err:
log.error('%s%s%s',
'\nUnable to load data from ',
args.load,
'. are you sure the file exisits?\n', exc_info=True)
return
            log.info('data successfully loaded from %s', args.load)
elif args.dump:
log.info('dumping the DB in %s', args.dump)
if os.path.exists(args.dump):
answer = input(
'file {} exist: overwrite? '.format(args.dump))
if answer in ['yes', 'y']:
OpenDnsModel.dump(args.dump)
else:
                    log.warning('user choice exception, exiting..')
else:
OpenDnsModel.dump(args.dump)
log.info('DB dumped correctly')
elif args.create:
log.info("creating database... ")
try:
read, write = (int(param) for param in args.create)
except Exception as err:
log.error(
'wrong read or write parameter specified: %s',
args.create, exc_info=True)
return
result = OpenDnsModel.create_table(wait=True,
read_capacity_units=read,
write_capacity_units=write)
if result is None:
log.debug('DB already exist')
return pprint(OpenDnsModel.describe_table())
else:
return pprint(result)
elif args.delete:
log.info("deleting database.. ")
result = OpenDnsModel.delete_table()
log.info("delete successful")
return pprint(result)
elif args.modify:
# TODO make a function I saved output because no documentation is
# there
# q=TableConnection(table_name='OpenDns',
# host='http://127.0.0.1:8000')
# q.update_table(
# read_capacity_units=50,
# write_capacity_units=50,
# global_secondary_index_updates=[
# {'read_capacity_units': 50,
# 'write_capacity_units': 50,
# 'index_name': 'ClassA'}])
pass
else:
parser.print_help()
if __name__ == "__main__":
main()
| j0lly/Odin | odin/scripts.py | Python | mit | 15,495 |
#!/usr/bin/env python3
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that release executables only contain certain symbols
and are only linked against allowed libraries.
Example usage:
find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
import subprocess
import sys
from typing import List, Optional
import lief
import pixie
from utils import determine_wellknown_cmd
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
# - g++ version 4.9.2 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.19 (https://packages.debian.org/search?suite=jessie&arch=any&searchon=names&keywords=libc6)
#
# Ubuntu 16.04 (Xenial) EOL: 2024. https://wiki.ubuntu.com/Releases
#
# - g++ version 5.3.1 (https://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=xenial§ion=all)
# - libc version 2.23.0 (https://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=xenial§ion=all)
#
# CentOS 7 EOL: 2024. https://wiki.centos.org/FAQ/General
#
# - g++ version 4.8.5 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
# - libc version 2.17 (http://mirror.centos.org/centos/7/os/x86_64/Packages/)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.8.5: GCC_4.8.0
# (glibc) GLIBC_2_17
#
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': {
pixie.EM_386: (2,17),
pixie.EM_X86_64: (2,17),
pixie.EM_ARM: (2,17),
pixie.EM_AARCH64:(2,17),
pixie.EM_PPC64: (2,17),
pixie.EM_RISCV: (2,27),
},
'LIBATOMIC': (1,0),
'V': (0,5,0), # xkb (fujicoin-qt only)
}
# See here for a description of _IO_stdin_used:
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# fujicoind and fujicoin-qt
'libgcc_s.so.1', # GCC base support
'libc.so.6', # C library
'libpthread.so.0', # threading
'libm.so.6', # math library
'librt.so.1', # real-time (clock)
'libatomic.so.1',
'ld-linux-x86-64.so.2', # 64-bit dynamic linker
'ld-linux.so.2', # 32-bit dynamic linker
'ld-linux-aarch64.so.1', # 64-bit ARM dynamic linker
'ld-linux-armhf.so.3', # 32-bit ARM dynamic linker
'ld64.so.1', # POWER64 ABIv1 dynamic linker
'ld64.so.2', # POWER64 ABIv2 dynamic linker
'ld-linux-riscv64-lp64d.so.1', # 64-bit RISC-V dynamic linker
# fujicoin-qt only
'libxcb.so.1', # part of X11
'libxkbcommon.so.0', # keyboard keymapping
'libxkbcommon-x11.so.0', # keyboard keymapping
'libfontconfig.so.1', # font support
'libfreetype.so.6', # font parsing
'libdl.so.2' # programming interface to dynamic linker
}
MACHO_ALLOWED_LIBRARIES = {
# fujicoind and fujicoin-qt
'libc++.1.dylib', # C++ Standard Library
'libSystem.B.dylib', # libc, libm, libpthread, libinfo
# fujicoin-qt only
'AppKit', # user interface
'ApplicationServices', # common application tasks.
'Carbon', # deprecated c back-compat API
'CoreFoundation', # low level func, data types
'CoreGraphics', # 2D rendering
'CoreServices', # operating system services
'CoreText', # interface for laying out text and handling fonts.
'CoreVideo', # video processing
'Foundation', # base layer functionality for apps/frameworks
'ImageIO', # read and write image file formats.
'IOKit', # user-space access to hardware devices and drivers.
'IOSurface', # cross process image/drawing buffers
'libobjc.A.dylib', # Objective-C runtime library
'Metal', # 3D graphics
'Security', # access control and authentication
'QuartzCore', # animation
}
PE_ALLOWED_LIBRARIES = {
'ADVAPI32.dll', # security & registry
'IPHLPAPI.DLL', # IP helper API
'KERNEL32.dll', # win32 base APIs
'msvcrt.dll', # C standard library for MSVC
'SHELL32.dll', # shell API
'USER32.dll', # user interface
'WS2_32.dll', # sockets
# fujicoin-qt only
'dwmapi.dll', # desktop window manager
'GDI32.dll', # graphics device interface
'IMM32.dll', # input method editor
'NETAPI32.dll',
'ole32.dll', # component object model
'OLEAUT32.dll', # OLE Automation API
'SHLWAPI.dll', # light weight shell API
'USERENV.dll',
'UxTheme.dll',
'VERSION.dll', # version checking
'WINMM.dll', # WinMM audio API
'WTSAPI32.dll',
}
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(determine_wellknown_cmd('CPPFILT', 'c++filt'), stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
self.proc.stdin.flush()
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
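# Rough usage sketch (the mangled name is an arbitrary example, not taken from
# a real binary):
#   cppfilt = CPPFilt()
#   readable = cppfilt('_ZN7Example3runEv')  # expected to demangle to 'Example::run()'
#   cppfilt.close()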
def check_version(max_versions, version, arch) -> bool:
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
if isinstance(max_versions[lib], tuple):
return ver <= max_versions[lib]
else:
return ver <= max_versions[lib][arch]
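# For instance, given the MAX_VERSIONS table above:
#   check_version(MAX_VERSIONS, 'GLIBC_2.17', pixie.EM_X86_64)  # True
#   check_version(MAX_VERSIONS, 'GLIBC_2.28', pixie.EM_X86_64)  # False: too new
#   check_version(MAX_VERSIONS, 'FOO_1.0', pixie.EM_X86_64)     # False: unknown lib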
def check_imported_symbols(filename) -> bool:
elf = pixie.load(filename)
cppfilt = CPPFilt()
ok: bool = True
for symbol in elf.dyn_symbols:
if not symbol.is_import:
continue
sym = symbol.name.decode()
version = symbol.version.decode() if symbol.version is not None else None
if version and not check_version(MAX_VERSIONS, version, elf.hdr.e_machine):
print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
ok = False
return ok
def check_exported_symbols(filename) -> bool:
elf = pixie.load(filename)
cppfilt = CPPFilt()
ok: bool = True
for symbol in elf.dyn_symbols:
if not symbol.is_export:
continue
sym = symbol.name.decode()
if elf.hdr.e_machine == pixie.EM_RISCV or sym in IGNORE_EXPORTS:
continue
print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
ok = False
return ok
def check_ELF_libraries(filename) -> bool:
ok: bool = True
elf = pixie.load(filename)
for library_name in elf.query_dyn_tags(pixie.DT_NEEDED):
assert(isinstance(library_name, bytes))
if library_name.decode() not in ELF_ALLOWED_LIBRARIES:
print('{}: NEEDED library {} is not allowed'.format(filename, library_name.decode()))
ok = False
return ok
def check_MACHO_libraries(filename) -> bool:
ok: bool = True
binary = lief.parse(filename)
for dylib in binary.libraries:
split = dylib.name.split('/')
if split[-1] not in MACHO_ALLOWED_LIBRARIES:
print(f'{split[-1]} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
def check_MACHO_min_os(filename) -> bool:
binary = lief.parse(filename)
if binary.build_version.minos == [10,14,0]:
return True
return False
def check_MACHO_sdk(filename) -> bool:
binary = lief.parse(filename)
if binary.build_version.sdk == [10, 15, 6]:
return True
return False
def check_PE_libraries(filename) -> bool:
ok: bool = True
binary = lief.parse(filename)
for dylib in binary.libraries:
if dylib not in PE_ALLOWED_LIBRARIES:
print(f'{dylib} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
def check_PE_subsystem_version(filename) -> bool:
binary = lief.parse(filename)
major: int = binary.optional_header.major_subsystem_version
minor: int = binary.optional_header.minor_subsystem_version
if major == 6 and minor == 1:
return True
return False
CHECKS = {
'ELF': [
('IMPORTED_SYMBOLS', check_imported_symbols),
('EXPORTED_SYMBOLS', check_exported_symbols),
('LIBRARY_DEPENDENCIES', check_ELF_libraries)
],
'MACHO': [
('DYNAMIC_LIBRARIES', check_MACHO_libraries),
('MIN_OS', check_MACHO_min_os),
('SDK', check_MACHO_sdk),
],
'PE' : [
('DYNAMIC_LIBRARIES', check_PE_libraries),
('SUBSYSTEM_VERSION', check_PE_subsystem_version),
]
}
def identify_executable(executable) -> Optional[str]:
with open(filename, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
elif magic.startswith(b'\xcf\xfa'):
return 'MACHO'
return None
if __name__ == '__main__':
retval: int = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print(f'{filename}: unknown format')
retval = 1
continue
failed: List[str] = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
if failed:
print(f'{filename}: failed {" ".join(failed)}')
retval = 1
except IOError:
print(f'{filename}: cannot open')
retval = 1
sys.exit(retval)
| fujicoin/fujicoin | contrib/devtools/symbol-check.py | Python | mit | 9,699 |
import sys
import java
lineSeparator = java.lang.System.getProperty("line.separator")
global AdminConfig
cellName = ""
nodeName = ""
serverName = ""
auth_alias = ""
driverPath = ""
datasource_name = ""
jndiName = ""
datasource_url = ""
jdbcProvider = ""
implementationClassName = ""
minConnections = ""
maxConnections = ""
# Connection pool settings referenced by createDS below; fill in as needed.
connectionTimeout = ""
reapTime = ""
unusedTimeout = ""
agedTimeout = ""
purgePolicy = ""
datasource_helper_class = "give.specific.helper.class"
def createJDBCProvider(nodeName, serverName):
    print "Creating the database provider"
    n1 = ["name", jdbcProvider]
    # Use a distinct local name so the module-level implementationClassName
    # setting is not shadowed before it is read.
    impl_class_attr = ["implementationClassName",
                       implementationClassName]
    classPath = ["classPath", driverPath]
    description = ["description", jdbcProvider]
    jdbcAttrs = [n1, description, impl_class_attr, classPath]
    server = AdminConfig.getid("/Cell:" + cellName + "/Node:" + nodeName +
                               "/Server:" + serverName + "/")
    AdminConfig.create("JDBCProvider", server, jdbcAttrs)
    print "jdbc provider created"
    AdminConfig.save()
def createDS(nodeName, serverName):
    print("Creating Datasource: " + datasource_name + " Node: " + nodeName +
          " Server: " + serverName)
    # Look the provider up under a new name so the module-level jdbcProvider
    # setting can still be read on the same line.
    jdbc_provider_id = AdminConfig.getid("/Cell:" + cellName + "/Node:" + nodeName +
                                         "/Server:" + serverName + "/JDBCProvider:" + jdbcProvider + "/")
    # create Data source; attribute lists use their own local names so the
    # module-level configuration values are not shadowed before use.
    auth_alias_attr = ["authDataAlias", auth_alias]
    helper_class_attr = ["datasourceHelperClassname",
                         datasource_helper_class]
    description = ["description", datasource_name]
    jndi = ["jndiName", jndiName]
    name = ["name", datasource_name]
    datasource_attrs = [name, description, jndi, helper_class_attr,
                        auth_alias_attr]
    new_datasource = AdminConfig.create("DataSource", jdbc_provider_id,
                                        datasource_attrs)
    new_property_set = AdminConfig.create("J2EEResourcePropertySet",
                                          new_datasource, [])
    url_attrs = [["name", "URL"], ["value", datasource_url],
                 ["type", "java.lang.String"]]
    AdminConfig.create("J2EEResourceProperty", new_property_set, url_attrs)
    # Set connection pool
    cxAttr = [["connectionTimeout", connectionTimeout],
              ["maxConnections", maxConnections],
              ["minConnections", minConnections],
              ["reapTime", reapTime], ["unusedTimeout", unusedTimeout],
              ["agedTimeout", agedTimeout], ["purgePolicy", purgePolicy]]
    AdminConfig.create("ConnectionPool", new_datasource, cxAttr)
    print "Datasource created"
    AdminConfig.save()
print "Cell: " + cellName + " Node: " + nodeName
createDS(nodeName, serverName)
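# Hypothetical wsadmin invocation (assumes the empty configuration values at
# the top of this file have been filled in for the target cell/node/server):
#   wsadmin.sh -lang jython -f datasource_setup.py
# If the JDBC provider does not exist yet, call
# createJDBCProvider(nodeName, serverName) before createDS(nodeName, serverName).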
| infinite-Joy/websphere | datasource_setup.py | Python | mit | 2,690 |
from django import template
from django.template.defaultfilters import stringfilter
from .. import space_capital as sc
register = template.Library()
@register.filter
@stringfilter
def space_capital(value):
return sc(value)
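# Template usage sketch (assumes this tag library is loadable under the module
# name 'extra_tags'):
#   {% load extra_tags %}
#   {{ "SomeCamelCaseValue"|space_capital }}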
| mpdevilleres/tbpc_app | tbpc/utilities/templatetags/extra_tags.py | Python | mit | 231 |
import asyncio
import time
import traceback
from collections import defaultdict
import discord
from discord.ext import commands
from dwarf import formatting as f
from dwarf.bot import Cog
from dwarf.controllers import BaseController
from dwarf.errors import (ExtensionAlreadyInstalled, ExtensionNotFound, ExtensionNotInIndex,
PrefixAlreadyExists, PrefixNotFound)
from . import strings
from .controllers import CoreController
class Core(Cog):
"""All commands that relate to management operations."""
def __init__(self, bot, extension):
super().__init__(bot, extension)
self.core = CoreController(bot=bot)
self.base = BaseController(bot=bot)
@commands.command(name='eval')
@commands.is_owner()
async def evaluate(self, ctx, *, code: str):
"""Evaluates code.
Modified function, originally made by Rapptz"""
# [p]eval <code>
code = code.strip('` ')
result = None
global_vars = globals().copy()
global_vars['bot'] = self.bot
global_vars['ctx'] = ctx
global_vars['message'] = ctx.message
global_vars['author'] = ctx.message.author
global_vars['channel'] = ctx.message.channel
global_vars['guild'] = ctx.message.guild
try:
result = eval(code, global_vars, locals())
except Exception as ex:
await ctx.send(f.block(type(ex).__name__ + ': ' + str(ex), 'py'))
return
if asyncio.iscoroutine(result):
result = await result
result = f.block(result, 'py')
await ctx.send(result)
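    # Rough usage sketch (the command prefix depends on the bot configuration):
    #   [p]eval 2 + 2
    # replies with the evaluated result wrapped in a Python code block;
    # coroutine results are awaited before being displayed.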
@commands.command()
@commands.is_owner()
async def install(self, ctx, *, extensions: str):
"""Installs an extension."""
# [p] install <extensions>
extensions = extensions.lower().split()
installation_status = defaultdict(lambda: [])
def extension_check(message):
extension_name = message.content
is_same_author = ctx.message.author == message.author
is_same_channel = ctx.message.channel == message.channel
is_proper_name = ' ' not in extension_name
return is_same_author and is_same_channel and is_proper_name
async def _install(_extension):
repository = None
if _extension.startswith('https://'):
repository = _extension
await ctx.send(strings.specify_extension_name)
_extension = await self.bot.wait_for('message', check=extension_check, timeout=60)
if _extension is None:
await ctx.send(strings.skipping_this_extension)
return False
_extension = _extension.content
await ctx.send("Installing '**" + _extension + "**'...")
try:
unsatisfied = self.base.install_extension(_extension, repository)
except ExtensionAlreadyInstalled:
await ctx.send("The extension '**" + _extension + "**' is already installed.")
installation_status['failed_extensions'].append(_extension)
return False
except ExtensionNotInIndex:
await ctx.send("There is no extension called '**" + _extension + "**'.")
installation_status['failed_extensions'].append(_extension)
return False
else:
if unsatisfied is not None:
failure_message = strings.failed_to_install.format(_extension)
if unsatisfied['packages']:
failure_message += '\n' + strings.unsatisfied_requirements + '\n'
failure_message += "**" + "**\n**".join(unsatisfied['packages']) + "**"
if unsatisfied['extensions']:
failure_message += '\n' + strings.unsatisfied_dependencies + '\n'
failure_message += "**" + "**\n**".join(unsatisfied['extensions']) + "**"
await ctx.send(failure_message)
if unsatisfied['packages']:
await ctx.send("Do you want to install the required packages now? (yes/no)")
_answer = await self.bot.wait_for_answer(ctx)
if _answer is True:
for package in unsatisfied['packages']:
return_code = self.base.install_package(package)
                                if return_code == 0:
unsatisfied['packages'].remove(package)
await ctx.send("Installed package '**"
+ package + "**' successfully.")
installation_status['installed_packages'].append(package)
if unsatisfied['packages']:
await ctx.send("Failed to install packages: '**"
+ "**', '**".join(unsatisfied['packages']) + "**'.")
installation_status['failed_packages'] += unsatisfied['packages']
return False
else:
await ctx.send("Alright, I will not install any packages the '**"
+ _extension + "**' extension requires just now.")
installation_status['failed_extensions'].append(_extension)
return False
if not unsatisfied['packages'] and unsatisfied['extensions']:
await ctx.send("Do you want to install the extensions '**"
+ _extension + "**' depends on now? (yes/no)")
_answer = await self.bot.wait_for_answer(ctx)
if _answer is True:
for extension_to_install in unsatisfied['extensions']:
extension_install_return_code = await _install(extension_to_install)
if extension_install_return_code is True:
unsatisfied['extensions'].remove(extension_to_install)
if unsatisfied['extensions']:
await ctx.send("Failed to install one or more of the '**"
+ _extension + "**' extension's dependencies.")
installation_status['failed_extensions'].append(_extension)
return False
else:
return await _install(_extension)
else:
await ctx.send("Alright, I will not install any dependencies just now")
installation_status['failed_extensions'].append(_extension)
return False
else:
await ctx.send("The extension '**" + _extension + "**' was installed successfully.")
installation_status['installed_extensions'].append(_extension)
return True
for extension in extensions:
await _install(extension)
completed_message = "Installation completed.\n"
if installation_status['installed_extensions']:
completed_message += "Installed extensions:\n"
completed_message += "**" + "**\n**".join(installation_status['installed_extensions']) + "**\n"
if installation_status['installed_packages']:
completed_message += "Installed packages:\n"
completed_message += "**" + "**\n**".join(installation_status['installed_packages']) + "**\n"
if installation_status['failed_extensions']:
completed_message += "Failed to install extensions:\n"
completed_message += "**" + "**\n**".join(installation_status['failed_extensions']) + "**\n"
if installation_status['failed_packages']:
completed_message += "Failed to install packages:\n"
completed_message += "**" + "**\n**".join(installation_status['failed_packages']) + "**\n"
await ctx.send(completed_message)
if installation_status['installed_extensions']:
await ctx.send("Reboot Dwarf for changes to take effect.\n"
"Would you like to restart now? (yes/no)")
answer = await self.bot.wait_for_answer(ctx)
if answer is True:
await ctx.send("Okay, I'll be right back!")
await self.core.restart(restarted_from=ctx.message.channel)
@commands.command()
@commands.is_owner()
async def update(self, ctx, *, extensions: str):
"""Updates an extension."""
# [p]update <extensions>
extensions = extensions.lower().split()
update_status = defaultdict(lambda: [])
async def _update(_extension):
await ctx.send("Updating '**" + _extension + "**'...")
try:
unsatisfied = self.base.update_extension(_extension)
except ExtensionNotFound:
await ctx.send("The extension '**" + _extension + "**' could not be found.")
update_status['failed_extensions'].append(_extension)
return False
else:
if unsatisfied is not None:
failure_message = strings.failed_to_update.format(_extension)
if unsatisfied['packages']:
failure_message += '\n' + strings.unsatisfied_requirements + '\n'
failure_message += "**" + "**\n**".join(unsatisfied['packages']) + "**"
if unsatisfied['extensions']:
failure_message += '\n' + strings.unsatisfied_dependencies + '\n'
failure_message += "**" + "**\n**".join(unsatisfied['extensions']) + "**"
await ctx.send(failure_message)
if unsatisfied['packages']:
await ctx.send("Do you want to install the new requirements of "
+ _extension + " now? (yes/no)")
_answer = await self.bot.wait_for_answer(ctx)
if _answer is True:
for package in unsatisfied['packages']:
return_code = self.base.install_package(package)
                                if return_code == 0:
unsatisfied['packages'].remove(package)
await ctx.send("Installed package '**"
+ package + "**' successfully.")
update_status['installed_packages'].append(package)
if unsatisfied['packages']:
await ctx.send("Failed to install packages: '**"
+ "**', '**".join(unsatisfied['packages']) + "**'.")
update_status['failed_packages'] += unsatisfied['packages']
return False
else:
await ctx.send("Alright, I will not install any packages the '**"
+ _extension + "**' extension requires just now.")
update_status['failed_to_install_extensions'].append(_extension)
return False
if not unsatisfied['packages'] and unsatisfied['extensions']:
await ctx.send("Do you want to install the new dependencies of '**"
+ _extension + "**' now? (yes/no)")
_answer = await self.bot.wait_for_response(ctx)
if _answer is True:
await ctx.invoke(self.bot.get_command('install'), ' '.join(unsatisfied['extensions']))
exts = self.base.get_extensions()
for extension_to_check in unsatisfied['extensions']:
if extension_to_check in exts:
unsatisfied['extensions'].remove(extension_to_check)
if unsatisfied['extensions']:
                            await ctx.send("Failed to install one or more of the '**"
                                           + _extension + "**' extension's dependencies.")
update_status['failed_extensions'].append(_extension)
return False
else:
return await _update(_extension)
else:
await ctx.send("Alright, I will not install any dependencies just now")
update_status['failed_extensions'].append(_extension)
return False
else:
await ctx.send("The extension '**" + _extension + "**' was updated successfully.")
update_status['updated_extensions'].append(_extension)
return True
for extension in extensions:
await _update(extension)
completed_message = "Update completed.\n"
if update_status['updated_extensions']:
completed_message += "Updated extensions:\n"
completed_message += "**" + "**\n**".join(update_status['updated_extensions']) + "**\n"
if update_status['installed_packages']:
completed_message += "Installed packages:\n"
completed_message += "**" + "**\n**".join(update_status['installed_packages']) + "**\n"
if update_status['failed_extensions']:
completed_message += "Failed to update extensions:\n"
completed_message += "**" + "**\n**".join(update_status['failed_extensions']) + "**\n"
if update_status['failed_packages']:
completed_message += "Failed to install packages:\n"
completed_message += "**" + "**\n**".join(update_status['failed_packages']) + "**\n"
await ctx.send(completed_message)
if update_status['updated_extensions']:
await ctx.send("Reboot Dwarf for changes to take effect.\n"
"Would you like to restart now? (yes/no)")
            answer = await self.bot.wait_for_answer(ctx)
if answer is True:
await ctx.send("Okay, I'll be right back!")
await self.core.restart(restarted_from=ctx.message.channel)
@commands.command()
@commands.is_owner()
async def uninstall(self, ctx, *, extensions: str):
"""Uninstalls extensions."""
# [p]uninstall <extensions>
extensions = extensions.lower().split()
uninstall_status = defaultdict(lambda: [])
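        # Helper coroutine: uninstalls a single extension, first cascading through any
        # dependent extensions (after confirmation); returns True on success.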
async def _uninstall(_extension):
await ctx.send("Uninstalling '**" + _extension + "**'...")
try:
to_cascade = self.base.uninstall_extension(_extension)
except ExtensionNotFound:
await ctx.send("The extension '**" + _extension + "**' could not be found.")
uninstall_status['failed_extensions'].append(_extension)
return False
else:
if to_cascade:
await ctx.send(strings.would_be_uninstalled_too.format(_extension) + "\n"
+ "**" + "**\n**".join(to_cascade) + "**")
await ctx.send(strings.proceed_with_uninstallation)
_answer = await self.bot.wait_for_answer(ctx)
if _answer is True:
                        for extension_to_uninstall in list(to_cascade):
return_code = await _uninstall(extension_to_uninstall)
if return_code is True:
to_cascade.remove(extension_to_uninstall)
if to_cascade:
await ctx.send("Failed to uninstall '**"
+ "**', '**".join(to_cascade) + "**'.")
uninstall_status['failed_extensions'].append(_extension)
return False
else:
return await _uninstall(_extension)
else:
await ctx.send("Alright, I will not install any extensions just now.")
uninstall_status['failed_extensions'].append(_extension)
return False
else:
await ctx.send("The '**" + _extension + "**' extension was uninstalled successfully.")
uninstall_status['uninstalled_extensions'].append(_extension)
return True
for extension in extensions:
await _uninstall(extension)
completed_message = "Uninstallation completed.\n"
if uninstall_status['uninstalled_extensions']:
completed_message += "Uninstalled extensions:\n"
completed_message += "**" + "**\n**".join(uninstall_status['uninstalled_extensions']) + "**\n"
if uninstall_status['failed_extensions']:
completed_message += "Failed to uninstall extensions:\n"
completed_message += "**" + "**\n**".join(uninstall_status['failed_extensions']) + "**\n"
await ctx.send(completed_message)
if uninstall_status['uninstalled_extensions']:
await ctx.send("Reboot Dwarf for changes to take effect.\n"
"Would you like to restart now? (yes/no)")
answer = await self.bot.wait_for_answer(ctx)
if answer is True:
await ctx.send("Okay, I'll be right back!")
await self.core.restart(restarted_from=ctx.message.channel)
@commands.command()
@commands.is_owner()
async def set_name(self, ctx, *, name: str):
"""Sets the bot's name."""
# [p]set name <name>
name = name.strip()
if name != "":
await self.bot.user.edit(username=name)
else:
await self.bot.send_command_help(ctx)
@commands.command()
@commands.is_owner()
async def set_nickname(self, ctx, *, nickname: str=""):
"""Sets the bot's nickname on the current server.
Leaving this empty will remove it."""
# [p]set nickname <nickname>
nickname = nickname.strip()
if nickname == "":
nickname = None
try:
await ctx.me.edit(nick=nickname)
await ctx.send("Done.")
except discord.Forbidden:
await ctx.send("I cannot do that, I lack the \"Change Nickname\" permission.")
@commands.command()
@commands.is_owner()
async def set_game(self, ctx, *, game: discord.Game=None):
"""Sets the bot's playing status
Leaving this empty will clear it."""
# [p]set game <game>
guild = ctx.message.guild
current_status = guild.me.status if guild is not None else None
if game:
await self.bot.change_presence(game=game,
status=current_status)
await ctx.send('Game set to "{}".'.format(game))
else:
await self.bot.change_presence(game=None, status=current_status)
await ctx.send('Not playing a game now.')
@commands.command()
@commands.is_owner()
async def set_status(self, ctx, *, status: discord.Status=None):
"""Sets the bot's status
Statuses:
online
idle
dnd
invisible"""
# [p]set status <status>
guild = ctx.message.guild
current_game = guild.me.game if guild is not None else None
if status is None:
await self.bot.change_presence(status=discord.Status.online,
game=current_game)
await ctx.send("Status reset.")
else:
await self.bot.change_presence(status=status,
game=current_game)
await ctx.send("Status set to {0}.".format(status))
@commands.command()
@commands.is_owner()
async def set_stream(self, ctx, streamer: str=None, *, stream_title: str=None):
"""Sets the bot's streaming status.
Leaving both streamer and stream_title empty will clear it."""
# [p]set stream <streamer> <stream_title>
guild = ctx.message.guild
current_status = guild.me.status if guild is not None else None
if stream_title:
stream_title = stream_title.strip()
if "twitch.tv/" not in streamer:
streamer = "https://www.twitch.tv/" + streamer
game = discord.Game(type=1, url=streamer, name=stream_title)
await self.bot.change_presence(game=game, status=current_status)
elif streamer is not None:
await self.bot.send_command_help(ctx)
return
else:
await self.bot.change_presence(game=None, status=current_status)
self.log.debug('stream cleared by owner')
await ctx.send("Done.")
@commands.command()
@commands.is_owner()
async def set_avatar(self, ctx, url: str):
"""Sets the bot's avatar."""
# [p]set avatar <url>
try:
await self.core.set_avatar(url)
await ctx.send("Done.")
self.log.debug("Changed avatar.")
except discord.HTTPException as ex:
await ctx.send("Error, check your console or logs for "
"more information.")
self.log.exception(ex)
traceback.print_exc()
@commands.command()
@commands.is_owner()
async def set_token(self, ctx, token: str):
"""Sets the bot's login token."""
# [p]set token <token>
if len(token) > 50: # assuming token
self.base.set_token(token)
await ctx.send("Token set. Restart Dwarf to use the new token.")
self.log.info("Bot token changed.")
else:
await ctx.send("Invalid token.")
@commands.command()
@commands.is_owner()
async def set_description(self, ctx, *, description: str):
"""Sets the bot's description."""
self.core.set_description(description)
self.bot.description = description
await ctx.send("My description has been set.")
@commands.command()
@commands.is_owner()
async def set_repository(self, ctx, repository: str):
"""Sets the bot's repository."""
self.core.set_repository(repository)
await ctx.send("My repository is now located at:\n<" + repository + ">")
@commands.command()
@commands.is_owner()
async def set_officialinvite(self, ctx, invite: str):
"""Sets the bot's official server's invite URL."""
self.core.set_official_invite(invite)
await ctx.send("My official server invite is now:\n<" + invite + ">")
@commands.command()
@commands.is_owner()
async def add_prefix(self, ctx, prefix: str):
"""Adds a prefix to the bot."""
if prefix.startswith('"') and prefix.endswith('"'):
prefix = prefix[1:len(prefix) - 1]
try:
self.core.add_prefix(prefix)
self.bot.command_prefix = self.core.get_prefixes()
await ctx.send("The prefix '**{}**' was added successfully.".format(prefix))
except PrefixAlreadyExists:
await ctx.send("The prefix '**{}**' could not be added "
"as it is already a prefix.".format(prefix))
@commands.command()
@commands.is_owner()
async def remove_prefix(self, ctx, prefix: str):
"""Removes a prefix from the bot."""
try:
self.core.remove_prefix(prefix)
self.bot.command_prefix = self.core.get_prefixes()
await ctx.send("The prefix '**{}**' was removed successfully.".format(prefix))
except PrefixNotFound:
await ctx.send("'**{}**' is not a prefix of this bot.".format(prefix))
@commands.command()
@commands.is_owner()
async def prefixes(self, ctx):
"""Shows the bot's prefixes."""
prefixes = self.core.get_prefixes()
if len(prefixes) > 1:
await ctx.send("My prefixes are: {}".format("'**" + "**', '**".join(prefixes) + "**'"))
else:
await ctx.send("My prefix is '**{}**'.".format(prefixes[0]))
@commands.command()
async def ping(self, ctx):
"""Calculates the ping time."""
# [p]ping
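        # Round-trip latency is approximated by timing how long a single API call
        # (the typing indicator) takes to complete.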
t_1 = time.perf_counter()
await ctx.trigger_typing()
t_2 = time.perf_counter()
await ctx.send("Pong.\nTime: {}ms".format(round((t_2-t_1)*1000)))
@commands.command()
@commands.is_owner()
async def shutdown(self, ctx):
"""Shuts down Dwarf."""
# [p]shutdown
await ctx.send("Goodbye!")
await self.core.shutdown()
@commands.command()
@commands.is_owner()
async def restart(self, ctx):
"""Restarts Dwarf."""
# [p]restart
await ctx.send("I'll be right back!")
if ctx.guild is None:
restarted_from = ctx.message.author
else:
restarted_from = ctx.message.channel
await self.core.restart(restarted_from=restarted_from)
async def leave_confirmation(self, guild, ctx):
        if ctx.guild is not None:
current_guild = ctx.guild
else:
current_guild = None
await ctx.send("Are you sure you want me to leave **{}**? (yes/no)".format(guild.name))
answer = await self.bot.wait_for_answer(ctx, timeout=30)
if answer is None or answer is False:
await ctx.send("I'll stay then.")
else:
await guild.leave()
if guild != current_guild:
await ctx.send("Done.")
@commands.command(no_pm=True)
@commands.is_owner()
async def leave(self, ctx):
"""Makes the bot leave the current server."""
# [p]leave
await ctx.send("Are you sure you want me to leave this server? (yes/no)")
answer = await self.bot.wait_for_answer(ctx, timeout=30)
if answer is True:
await ctx.send("Alright. Bye :wave:")
await ctx.guild.leave()
else:
await ctx.send("Ok I'll stay here then.")
@commands.command()
@commands.is_owner()
async def servers(self, ctx):
"""Lists and allows to leave servers."""
# [p]servers
guilds = list(self.bot.guilds)
guild_list = {}
msg = ""
for i, guild in enumerate(guilds):
guild_list[i] = guilds[i]
msg += "{}: {}\n".format(i, guild.name)
msg += "\nTo leave a server just type its number."
for page in f.pagify(msg, ['\n']):
await ctx.send(page)
while msg is not None:
msg = await self.bot.wait_for_response(ctx, timeout=30)
if msg is not None:
msg = msg.content.strip()
                if msg.isdigit() and int(msg) in guild_list:
                    await self.leave_confirmation(guild_list[int(msg)], ctx)
else:
break
else:
break
await ctx.send("Reinvoke the {}{} command if you need to leave any servers in the "
"future.".format(ctx.prefix, ctx.invoked_with))
@commands.command(enabled=False)
async def contact(self, ctx, *, message: str):
"""Sends message to the owner of the bot."""
# [p]contact <message>
owner_id = self.core.get_owner_id()
if owner_id is None:
await ctx.send("I have no owner set.")
return
owner = self.bot.get_user(owner_id)
author = ctx.message.author
if isinstance(ctx.message.channel, discord.abc.GuildChannel):
guild = ctx.message.guild
source = ", server **{}** ({})".format(guild.name, guild.id)
else:
source = ", direct message"
sender = "From **{}** ({}){}:\n\n".format(author, author.id, source)
message = sender + message
try:
await owner.send(message)
except discord.errors.InvalidArgument:
await ctx.send("I cannot send your message, I'm unable to find "
"my owner... *sigh*")
except discord.errors.HTTPException:
await ctx.send("Your message is too long.")
else:
await ctx.send("Your message has been sent.")
@commands.command()
async def about(self, ctx):
"""Shows information about the bot."""
        # [p]about
await ctx.send("{}\n"
"**Repository:**\n"
"<{}>\n"
"**Official server:**\n"
"<{}>".format(self.core.get_description(),
self.core.get_repository(),
self.core.get_official_invite()))
@commands.command()
async def version(self, ctx):
"""Shows the bot's current version"""
# [p]version
await ctx.send("Current version: " + self.base.get_dwarf_version())
| Dwarf-Community/Dwarf | core/cogs.py | Python | mit | 29,709 |
from templeplus.pymod import PythonModifier
from toee import *
import tpdp
import char_class_utils
###################################################
def GetConditionName():
return "Eldritch Knight"
def GetSpellCasterConditionName():
return "Eldritch Knight Spellcasting"
print "Registering " + GetConditionName()
classEnum = stat_level_eldritch_knight
classSpecModule = __import__('class026_eldritch_knight')
###################################################
#### standard callbacks - BAB and Save values
def OnGetToHitBonusBase(attachee, args, evt_obj):
classLvl = attachee.stat_level_get(classEnum)
babvalue = game.get_bab_for_class(classEnum, classLvl)
evt_obj.bonus_list.add(babvalue, 0, 137) # untyped, description: "Class"
return 0
def OnGetSaveThrowFort(attachee, args, evt_obj):
value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Fortitude)
evt_obj.bonus_list.add(value, 0, 137)
return 0
def OnGetSaveThrowReflex(attachee, args, evt_obj):
value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Reflex)
evt_obj.bonus_list.add(value, 0, 137)
return 0
def OnGetSaveThrowWill(attachee, args, evt_obj):
value = char_class_utils.SavingThrowLevel(classEnum, attachee, D20_Save_Will)
evt_obj.bonus_list.add(value, 0, 137)
return 0
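# Register the BAB and saving-throw callbacks on the class condition.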
classSpecObj = PythonModifier(GetConditionName(), 0)
classSpecObj.AddHook(ET_OnToHitBonusBase, EK_NONE, OnGetToHitBonusBase, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_FORTITUDE, OnGetSaveThrowFort, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_REFLEX, OnGetSaveThrowReflex, ())
classSpecObj.AddHook(ET_OnSaveThrowLevel, EK_SAVE_WILL, OnGetSaveThrowWill, ())
##### Spell casting
# configure the spell casting condition to hold the highest Arcane classs
def OnAddSpellCasting(attachee, args, evt_obj):
#arg0 holds the arcane class
if (args.get_arg(0) == 0):
args.set_arg(0, char_class_utils.GetHighestArcaneClass(attachee))
return 0
# Extend caster level for base casting class
def OnGetBaseCasterLevel(attachee, args, evt_obj):
class_extended_1 = args.get_arg(0)
class_code = evt_obj.arg0
if (class_code != class_extended_1):
if (evt_obj.arg1 == 0): # arg1 != 0 means you're looking for this particular class's contribution
return 0
classLvl = attachee.stat_level_get(classEnum)
if classLvl > 1:
evt_obj.bonus_list.add(classLvl - 1, 0, 137)
return 0
def OnSpellListExtensionGet(attachee, args, evt_obj):
class_extended_1 = args.get_arg(0)
class_code = evt_obj.arg0
if (class_code != class_extended_1):
if (evt_obj.arg1 == 0): # arg1 != 0 means you're looking for this particular class's contribution
return 0
classLvl = attachee.stat_level_get(classEnum)
if classLvl > 1:
evt_obj.bonus_list.add(classLvl - 1, 0, 137)
return 0
def OnInitLevelupSpellSelection(attachee, args, evt_obj):
if (evt_obj.arg0 != classEnum):
return 0
classLvl = attachee.stat_level_get(classEnum)
if (classLvl == 0):
return 0
class_extended_1 = args.get_arg(0)
classSpecModule.InitSpellSelection(attachee, class_extended_1)
return 0
def OnLevelupSpellsCheckComplete(attachee, args, evt_obj):
if (evt_obj.arg0 != classEnum):
return 0
class_extended_1 = args.get_arg(0)
if (not classSpecModule.LevelupCheckSpells(attachee, class_extended_1) ):
evt_obj.bonus_list.add(-1, 0, 137) # denotes incomplete spell selection
return 1
def OnLevelupSpellsFinalize(attachee, args, evt_obj):
if (evt_obj.arg0 != classEnum):
return 0
classLvl = attachee.stat_level_get(classEnum)
if (classLvl == 0):
return 0
class_extended_1 = args.get_arg(0)
classSpecModule.LevelupSpellsFinalize(attachee, class_extended_1)
return
spellCasterSpecObj = PythonModifier(GetSpellCasterConditionName(), 8)
spellCasterSpecObj.AddHook(ET_OnConditionAdd, EK_NONE, OnAddSpellCasting, ())
spellCasterSpecObj.AddHook(ET_OnGetBaseCasterLevel, EK_NONE, OnGetBaseCasterLevel, ())
spellCasterSpecObj.AddHook(ET_OnSpellListExtensionGet, EK_NONE, OnSpellListExtensionGet, ())
spellCasterSpecObj.AddHook(ET_OnLevelupSystemEvent, EK_LVL_Spells_Activate, OnInitLevelupSpellSelection, ())
spellCasterSpecObj.AddHook(ET_OnLevelupSystemEvent, EK_LVL_Spells_Check_Complete, OnLevelupSpellsCheckComplete, ())
spellCasterSpecObj.AddHook(ET_OnLevelupSystemEvent, EK_LVL_Spells_Finalize, OnLevelupSpellsFinalize, ()) | GrognardsFromHell/TemplePlus | tpdatasrc/tpgamefiles/scr/tpModifiers/eldritch_knight.py | Python | mit | 4,322 |
from __future__ import division
import asyncio
import discord
import random
from discord.ext import commands
from Cogs import Settings, Utils
from pyparsing import (Literal,CaselessLiteral,Word,Combine,Group,Optional,
ZeroOrMore,Forward,nums,alphas,oneOf)
import math
import operator
def setup(bot):
# Add the bot
bot.add_cog(Calc(bot))
__author__='Paul McGuire'
__version__ = '$Revision: 0.0 $'
__date__ = '$Date: 2009-03-20 $'
__source__='''http://pyparsing.wikispaces.com/file/view/fourFn.py
http://pyparsing.wikispaces.com/message/view/home/15549426
'''
__note__='''
All I've done is rewrap Paul McGuire's fourFn.py as a class, so I can use it
more easily in other places.
'''
class NumericStringParser(object):
'''
Most of this code comes from the fourFn.py pyparsing example
'''
def pushFirst(self, strg, loc, toks ):
self.exprStack.append( toks[0] )
def pushUMinus(self, strg, loc, toks ):
if toks and toks[0]=='-':
self.exprStack.append( 'unary -' )
def __init__(self):
"""
expop :: '^'
multop :: 'x' | '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
"""
point = Literal( "." )
e = CaselessLiteral( "E" )
fnumber = Combine( Word( "+-"+nums, nums ) +
Optional( point + Optional( Word( nums ) ) ) +
Optional( e + Word( "+-"+nums, nums ) ) )
ident = Word(alphas, alphas+nums+"_$")
plus = Literal( "+" )
minus = Literal( "-" )
mult = Literal( "x" )
div = Literal( "/" )
lpar = Literal( "(" ).suppress()
rpar = Literal( ")" ).suppress()
addop = plus | minus
multop = mult | div
expop = Literal( "^" )
pi = CaselessLiteral( "PI" )
expr = Forward()
atom = ((Optional(oneOf("- +")) +
(pi|e|fnumber|ident+lpar+expr+rpar).setParseAction(self.pushFirst))
| Optional(oneOf("- +")) + Group(lpar+expr+rpar)
).setParseAction(self.pushUMinus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of
# "atom [ ^ atom ]...", we get right-to-left exponents, instead of left-to-right
# that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor << atom + ZeroOrMore( ( expop + factor ).setParseAction( self.pushFirst ) )
term = factor + ZeroOrMore( ( multop + factor ).setParseAction( self.pushFirst ) )
expr << term + ZeroOrMore( ( addop + term ).setParseAction( self.pushFirst ) )
# addop_term = ( addop + term ).setParseAction( self.pushFirst )
# general_term = term + ZeroOrMore( addop_term ) | OneOrMore( addop_term)
# expr << general_term
self.bnf = expr
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
self.opn = { "+" : operator.add,
"-" : operator.sub,
"x" : operator.mul,
"/" : operator.truediv,
"^" : operator.pow }
self.fn = { "sin" : math.sin,
"cos" : math.cos,
"tan" : math.tan,
"abs" : abs,
"trunc" : lambda a: int(a),
"round" : round,
"sgn" : lambda a: abs(a)>epsilon and cmp(a,0) or 0}
def evaluateStack(self, s ):
op = s.pop()
if op == 'unary -':
return -self.evaluateStack( s )
if op in "+-x/^":
op2 = self.evaluateStack( s )
op1 = self.evaluateStack( s )
return self.opn[op]( op1, op2 )
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in self.fn:
return self.fn[op]( self.evaluateStack( s ) )
elif op[0].isalpha():
return 0
else:
return float( op )
def eval(self,num_string,parseAll=True):
self.exprStack=[]
results=self.bnf.parseString(num_string,parseAll)
val=self.evaluateStack( self.exprStack[:] )
return val
class Calc(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot):
self.bot = bot
self.nsp=NumericStringParser()
global Utils
Utils = self.bot.get_cog("Utils")
@commands.command(pass_context=True)
async def calc(self, ctx, *, formula = None):
"""Do some math."""
if formula == None: return await ctx.send('Usage: `{}calc [formula]`'.format(ctx.prefix))
formula = formula.replace("*","x")
try:
answer=self.nsp.eval(formula)
except:
msg = 'I couldn\'t parse "{}" :(\n\n'.format(formula.replace('*', '\\*').replace('`', '\\`').replace('_', '\\_'))
msg += 'I understand the following syntax:\n```\n'
msg += "expop :: '^'\n"
msg += "multop :: 'x' | '*' | '/'\n"
msg += "addop :: '+' | '-'\n"
msg += "integer :: ['+' | '-'] '0'..'9'+\n"
msg += "atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'\n"
msg += "factor :: atom [ expop factor ]*\n"
msg += "term :: factor [ multop factor ]*\n"
msg += "expr :: term [ addop term ]*```"
return await ctx.send(Utils.suppressed(ctx,msg))
if int(answer) == answer:
# Check if it's a whole number and cast to int if so
answer = int(answer)
# Say message
await ctx.send('{} = {}'.format(formula, answer))
| corpnewt/CorpBot.py | Cogs/Calc.py | Python | mit | 6,092 |
"""A simple HTTP server."""
def response_ok():
"""Testing for 200 response code."""
pass
| pasaunders/http-server | src/step1.py | Python | mit | 99 |
import pandas as pd
import os.path
TRANSCRIPT_GTF_FILE = "TRANSCRIPT_GTF_FILE"
GENOME_FASTA_DIR = "GENOME_FASTA_DIR"
SIMULATED_READS = "SIMULATED_READS"
LEFT_SIMULATED_READS = "LEFT_SIMULATED_READS"
RIGHT_SIMULATED_READS = "RIGHT_SIMULATED_READS"
FASTQ_READS = "FASTQ_READS"
QUANTIFIER_DIRECTORY = "QUANTIFIER_DIRECTORY"
_QUANT_METHODS = {}
def get_quantification_methods():
return _QUANT_METHODS
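# Class decorator: instantiates each quantifier and registers it in _QUANT_METHODS
# under its get_name(), so quantifiers can be looked up by name at runtime.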
def _Quantifier(cls):
_QUANT_METHODS[cls.get_name()] = cls()
return cls
class _QuantifierBase(object):
def __init__(self):
self.abundances = None
def __str__(self):
return self.__class__.get_name()
@_Quantifier
class _Cufflinks(_QuantifierBase):
FPKM_COLUMN = "FPKM"
CALCULATE_BOWTIE_INDEX_DIRECTORY = \
"BOWTIE_INDEX_DIR=$(dirname {bowtie_index})"
CHECK_BOWTIE_INDEX_DIRECTORY_EXISTS = \
"! -d $BOWTIE_INDEX_DIR"
MAKE_BOWTIE_INDEX_DIRECTORY = \
"mkdir -p $BOWTIE_INDEX_DIR"
GET_GENOME_REFERENCE_FASTA_FILE_LIST = \
"REF_FILES=$(ls -1 {genome_fasta_dir}/*.fa | tr '\\n' ',')"
STRIP_TRAILING_COMMA_FROM_FASTA_FILE_LIST = \
"REF_FILES=${REF_FILES%,}"
BUILD_BOWTIE_INDEX = \
"bowtie-build $REF_FILES {bowtie_index}"
CONSTRUCT_BOWTIE_REFERENCE_FASTA = \
"bowtie-inspect {bowtie_index} > {bowtie_index}.fa"
MAP_READS_TO_GENOME_WITH_TOPHAT = \
"tophat {stranded_spec} --no-coverage-search -p 8 " + \
"-o tho {bowtie_index} {reads_spec}"
QUANTIFY_ISOFORM_EXPRESSION = \
"cufflinks -o transcriptome -u -b {bowtie_index}.fa -p 8 " + \
"{stranded_spec} -G {transcript_gtf} tho/accepted_hits.bam"
REMOVE_TOPHAT_OUTPUT_DIRECTORY = \
"rm -rf tho"
REMOVE_CUFFLINKS_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES = \
"find transcriptome \! -name 'isoforms.fpkm_tracking' -type f -delete"
@classmethod
def get_name(cls):
return "Cufflinks"
@classmethod
def _get_bowtie_index(cls, quantifier_dir):
return os.path.join(quantifier_dir, "bowtie-index", "index")
@classmethod
def write_preparatory_commands(cls, writer, params):
writer.add_comment(
"Prepare the bowtie index for read mapping if it doesn't " +
"already exist. Note that this step only needs to be done " +
"once for a particular reference genome")
bowtie_index = cls._get_bowtie_index(params[QUANTIFIER_DIRECTORY])
writer.add_line(cls.CALCULATE_BOWTIE_INDEX_DIRECTORY.format(
bowtie_index=bowtie_index))
with writer.section():
with writer.if_block(cls.CHECK_BOWTIE_INDEX_DIRECTORY_EXISTS):
writer.add_line(cls.MAKE_BOWTIE_INDEX_DIRECTORY)
writer.add_line(
cls.GET_GENOME_REFERENCE_FASTA_FILE_LIST.format(
genome_fasta_dir=params[GENOME_FASTA_DIR]))
writer.add_line(cls.STRIP_TRAILING_COMMA_FROM_FASTA_FILE_LIST)
writer.add_line(cls.BUILD_BOWTIE_INDEX.format(
bowtie_index=bowtie_index))
writer.add_line(cls.CONSTRUCT_BOWTIE_REFERENCE_FASTA.format(
bowtie_index=bowtie_index))
@classmethod
def write_quantification_commands(cls, writer, params):
bowtie_index = cls._get_bowtie_index(params[QUANTIFIER_DIRECTORY])
reads_spec = params[SIMULATED_READS] if SIMULATED_READS in params \
else "{l} {r}".format(
l=params[LEFT_SIMULATED_READS],
r=params[RIGHT_SIMULATED_READS])
stranded_spec = "--library-type " + \
("fr-unstranded" if SIMULATED_READS in params
else "fr-secondstrand")
writer.add_line(cls.MAP_READS_TO_GENOME_WITH_TOPHAT.format(
bowtie_index=bowtie_index,
reads_spec=reads_spec,
stranded_spec=stranded_spec))
writer.add_line(cls.QUANTIFY_ISOFORM_EXPRESSION.format(
bowtie_index=bowtie_index,
transcript_gtf=params[TRANSCRIPT_GTF_FILE],
stranded_spec=stranded_spec))
@classmethod
def write_post_quantification_cleanup(cls, writer):
writer.add_line(cls.REMOVE_TOPHAT_OUTPUT_DIRECTORY)
writer.add_line(cls.REMOVE_CUFFLINKS_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES)
def get_transcript_abundance(self, transcript_id):
if self.abundances is None:
self.abundances = pd.read_csv(
"transcriptome/isoforms.fpkm_tracking",
delim_whitespace=True, index_col="tracking_id")
self.norm_constant = \
1000000 / (self.abundances[_Cufflinks.FPKM_COLUMN].sum())
fpkm = self.abundances.ix[transcript_id][_Cufflinks.FPKM_COLUMN] \
if transcript_id in self.abundances.index else 0
return self.norm_constant * fpkm
class _TranscriptomeBasedQuantifierBase(_QuantifierBase):
CALCULATE_TRANSCRIPT_REFERENCE_DIRECTORY = \
"REF_DIR=$(dirname {ref_name})"
CHECK_TRANSCRIPT_REFERENCE_DIRECTORY_EXISTS = \
"! -d $REF_DIR"
MAKE_TRANSCRIPT_REFERENCE_DIRECTORY = \
"mkdir -p $REF_DIR"
PREPARE_TRANSCRIPT_REFERENCE = \
"rsem-prepare-reference --gtf {transcript_gtf} --no-polyA " + \
"{bowtie_spec} {genome_fasta_dir} {ref_name}"
@classmethod
def _get_ref_name(cls, quantifier_dir):
ref_name = cls.get_name().lower()
return os.path.join(quantifier_dir, ref_name, ref_name)
@classmethod
def write_preparatory_commands(cls, writer, params):
with writer.section():
writer.add_comment(
"Prepare the transcript reference if it doesn't already " +
"exist. We create the transcript reference using a tool " +
"from the RSEM package. Note that this step only needs to " +
"be done once for a particular set of transcripts.")
ref_name = cls._get_ref_name(params[QUANTIFIER_DIRECTORY])
# TODO: this will need to be changed when updating beyond RSEM
# 1.2.14, where building the Bowtie index must be specified
# explicitly
bowtie_spec = "" if cls._needs_bowtie_index() else "--no-bowtie"
writer.add_line(
cls.CALCULATE_TRANSCRIPT_REFERENCE_DIRECTORY.format(
ref_name=ref_name))
with writer.if_block(
cls.CHECK_TRANSCRIPT_REFERENCE_DIRECTORY_EXISTS):
writer.add_line(cls.MAKE_TRANSCRIPT_REFERENCE_DIRECTORY)
writer.add_line(cls.PREPARE_TRANSCRIPT_REFERENCE.format(
transcript_gtf=params[TRANSCRIPT_GTF_FILE],
genome_fasta_dir=params[GENOME_FASTA_DIR],
ref_name=ref_name,
bowtie_spec=bowtie_spec))
@_Quantifier
class _RSEM(_TranscriptomeBasedQuantifierBase):
QUANTIFY_ISOFORM_EXPRESSION = \
"rsem-calculate-expression --time {qualities_spec} --p 32 " + \
"{stranded_spec} {reads_spec} {ref_name} rsem_sample"
REMOVE_RSEM_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES = \
"find . -name \"rsem_sample*\" \! " + \
"-name rsem_sample.isoforms.results -type f -delete"
@classmethod
def get_name(cls):
return "RSEM"
@classmethod
def _needs_bowtie_index(cls):
return True
@classmethod
def write_quantification_commands(cls, writer, params):
qualities_spec = "" if params[FASTQ_READS] else "--no-qualities"
reads_spec = params[SIMULATED_READS] if SIMULATED_READS in params \
else "--paired-end {l} {r}".format(
l=params[LEFT_SIMULATED_READS],
r=params[RIGHT_SIMULATED_READS])
stranded_spec = "" if SIMULATED_READS in params \
else "--strand-specific"
ref_name = cls._get_ref_name(params[QUANTIFIER_DIRECTORY])
writer.add_line(cls.QUANTIFY_ISOFORM_EXPRESSION.format(
qualities_spec=qualities_spec,
reads_spec=reads_spec,
stranded_spec=stranded_spec,
ref_name=ref_name))
@classmethod
def write_post_quantification_cleanup(cls, writer):
writer.add_line(cls.REMOVE_RSEM_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES)
def get_transcript_abundance(self, transcript_id):
if self.abundances is None:
self.abundances = pd.read_csv(
"rsem_sample.isoforms.results", delim_whitespace=True,
index_col="transcript_id")
return self.abundances.ix[transcript_id]["TPM"] \
if transcript_id in self.abundances.index else 0
@_Quantifier
class _Express(_TranscriptomeBasedQuantifierBase):
MAP_READS_TO_TRANSCRIPT_REFERENCE = \
"bowtie {qualities_spec} -e 99999999 -l 25 -I 1 -X 1000 -a -S " + \
"-m 200 -p 32 {ref_name} {reads_spec}"
CONVERT_SAM_TO_BAM = \
"samtools view -Sb - > hits.bam"
QUANTIFY_ISOFORM_EXPRESSION = \
"express {stranded_spec} {ref_name}.transcripts.fa hits.bam"
REMOVE_MAPPED_READS = \
"rm hits.bam"
REMOVE_EXPRESS_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES = \
"rm params.xprs"
@classmethod
def get_name(cls):
return "Express"
@classmethod
def _needs_bowtie_index(cls):
return True
@classmethod
def write_quantification_commands(cls, writer, params):
ref_name = cls._get_ref_name(params[QUANTIFIER_DIRECTORY])
qualities_spec = "-q" if params[FASTQ_READS] else "-f"
reads_spec = params[SIMULATED_READS] if SIMULATED_READS in params \
else "-1 {l} -2 {r}".format(
l=params[LEFT_SIMULATED_READS],
r=params[RIGHT_SIMULATED_READS])
stranded_spec = "--fr-stranded " \
if SIMULATED_READS not in params else ""
writer.add_pipe(
cls.MAP_READS_TO_TRANSCRIPT_REFERENCE.format(
qualities_spec=qualities_spec,
ref_name=ref_name,
reads_spec=reads_spec),
cls.CONVERT_SAM_TO_BAM
)
writer.add_line(cls.QUANTIFY_ISOFORM_EXPRESSION.format(
stranded_spec=stranded_spec,
ref_name=ref_name))
@classmethod
def write_post_quantification_cleanup(cls, writer):
writer.add_line(cls.REMOVE_MAPPED_READS)
writer.add_line(cls.REMOVE_EXPRESS_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES)
def get_transcript_abundance(self, transcript_id):
if self.abundances is None:
self.abundances = pd.read_csv(
"results.xprs", delim_whitespace=True, index_col="target_id")
return self.abundances.ix[transcript_id]["tpm"] \
if transcript_id in self.abundances.index else 0
@_Quantifier
class _Sailfish(_TranscriptomeBasedQuantifierBase):
CREATE_SAILFISH_TRANSCRIPT_INDEX = \
"sailfish index -p 8 -t {ref_name}.transcripts.fa -k 20 -o {index_dir}"
QUANTIFY_ISOFORM_EXPRESSION = \
"sailfish quant -p 8 -i {index_dir} -l {library_spec} " + \
"{reads_spec} -o ."
FILTER_COMMENT_LINES = [
"grep -v '^# \[' quant_bias_corrected.sf",
"sed -e 's/# //'i > quant_filtered.csv"
]
REMOVE_SAILFISH_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES = \
"rm -rf logs quant_bias_corrected.sf quant.sf " + \
"reads.count_info reads.sfc"
@classmethod
def get_name(cls):
return "Sailfish"
@classmethod
def _needs_bowtie_index(cls):
return False
@classmethod
def _get_index_dir(cls, quantifier_dir):
return os.path.join(quantifier_dir, "sailfish", "index")
@classmethod
def write_preparatory_commands(cls, writer, params):
# For convenience, we use a tool from the RSEM package to create the
# transcript reference
super(_Sailfish, cls).write_preparatory_commands(writer, params)
with writer.section():
writer.add_comment(
"Now create the Sailfish transcript index (this will only " +
"perform indexing if the index does not already exist.)")
ref_name = cls._get_ref_name(params[QUANTIFIER_DIRECTORY])
index_dir = cls._get_index_dir(params[QUANTIFIER_DIRECTORY])
writer.add_line(cls.CREATE_SAILFISH_TRANSCRIPT_INDEX.format(
ref_name=ref_name, index_dir=index_dir))
@classmethod
def write_quantification_commands(cls, writer, params):
index_dir = cls._get_index_dir(params[QUANTIFIER_DIRECTORY])
library_spec = "\"T=SE:S=U\"" if SIMULATED_READS in params \
else "\"T=PE:O=><:S=SA\""
reads_spec = "-r {r}".format(r=params[SIMULATED_READS]) \
if SIMULATED_READS in params \
else "-1 {l} -2 {r}".format(
l=params[LEFT_SIMULATED_READS],
r=params[RIGHT_SIMULATED_READS])
writer.add_line(cls.QUANTIFY_ISOFORM_EXPRESSION.format(
index_dir=index_dir,
library_spec=library_spec,
reads_spec=reads_spec))
writer.add_pipe(*cls.FILTER_COMMENT_LINES)
@classmethod
def write_post_quantification_cleanup(cls, writer):
writer.add_line(cls.REMOVE_SAILFISH_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES)
def get_transcript_abundance(self, transcript_id):
if self.abundances is None:
self.abundances = pd.read_csv(
"quant_filtered.csv", delim_whitespace=True,
index_col="Transcript")
return self.abundances.ix[transcript_id]["TPM"] \
if transcript_id in self.abundances.index else 0
@_Quantifier
class _Salmon(_TranscriptomeBasedQuantifierBase):
CREATE_SALMON_TRANSCRIPT_INDEX = \
"salmon index -t {ref_name}.transcripts.fa -i {index_dir}"
QUANTIFY_ISOFORM_EXPRESSION = \
"salmon quant -p 8 -i {index_dir} -l {library_spec} {reads_spec} -o ."
FILTER_COMMENT_LINES = [
"grep -v '^# \[\|salmon' quant.sf",
"sed -e 's/# //'i > quant_filtered.csv"
]
REMOVE_SALMON_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES = \
"rm -rf logs quant.sf"
@classmethod
def get_name(cls):
return "Salmon"
@classmethod
def _needs_bowtie_index(cls):
return False
@classmethod
def _get_index_dir(cls, quantifier_dir):
return os.path.join(quantifier_dir, "salmon", "index")
@classmethod
def write_preparatory_commands(cls, writer, params):
# We again use a tool from the RSEM package to create the transcript
# reference sequences
super(_Salmon, cls).write_preparatory_commands(writer, params)
with writer.section():
index_dir = cls._get_index_dir(params[QUANTIFIER_DIRECTORY])
with writer.if_block("! -d " + index_dir):
writer.add_comment("Now create the Salmon transcript index.")
ref_name = cls._get_ref_name(params[QUANTIFIER_DIRECTORY])
writer.add_line(cls.CREATE_SALMON_TRANSCRIPT_INDEX.format(
ref_name=ref_name, index_dir=index_dir))
@classmethod
def write_quantification_commands(cls, writer, params):
index_dir = cls._get_index_dir(params[QUANTIFIER_DIRECTORY])
library_spec = "U" if SIMULATED_READS in params else "ISF"
reads_spec = "-r {r}".format(r=params[SIMULATED_READS]) \
if SIMULATED_READS in params \
else "-1 {l} -2 {r}".format(
l=params[LEFT_SIMULATED_READS],
r=params[RIGHT_SIMULATED_READS])
writer.add_line(cls.QUANTIFY_ISOFORM_EXPRESSION.format(
index_dir=index_dir,
library_spec=library_spec,
reads_spec=reads_spec))
writer.add_pipe(*cls.FILTER_COMMENT_LINES)
@classmethod
def write_post_quantification_cleanup(cls, writer):
writer.add_line(cls.REMOVE_SALMON_OUTPUT_EXCEPT_ISOFORM_ABUNDANCES)
def get_transcript_abundance(self, transcript_id):
if self.abundances is None:
self.abundances = pd.read_csv(
"quant_filtered.csv", delim_whitespace=True,
index_col="Name")
return self.abundances.ix[transcript_id]["TPM"] \
if transcript_id in self.abundances.index else 0
| COMBINE-lab/piquant | piquant/quantifiers.py | Python | mit | 16,279 |
import rethinkdb
rethinkdb.set_loop_type("asyncio")
__version__ = "0.2.2-pre"
# constants
ALL = 0
DECLARED_ONLY = 1
UNDECLARED_ONLY = 2
from .errors import *
from .db import *
from .values_and_valuetypes import *
from .field import *
from .document import *
| lars-tiede/aiorethink | aiorethink/__init__.py | Python | mit | 275 |
from datetime import datetime
from django.contrib.auth.models import User
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.conf import settings
from django.db.utils import DatabaseError
try:
TIMELINE_MODELS = settings.TIMELINE_MODELS
except AttributeError:
TIMELINE_MODELS = []
class Timeline(models.Model):
time = models.DateTimeField(db_index=True, auto_now_add=True)
day = models.DateField(db_index=True, auto_now_add=True) # for grouping by day
is_created = models.BooleanField()
content_type = models.ForeignKey(ContentType, db_index=True)
object_id = models.IntegerField()
object = generic.GenericForeignKey()
user = models.ForeignKey(User, blank=True, null=True)
def __unicode__(self):
return self.object
def get_absolute_url(self):
if hasattr(self.object, 'get_absolute_url'):
return self.object.get_absolute_url()
class Meta:
ordering = ('-time',)
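# post_save signal handler: records a Timeline entry for each saved instance of a
# model listed in settings.TIMELINE_MODELS, attributing it to a related User if found.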
def mark_timeline(instance, created=None, **kwargs):
ctype = ContentType.objects.get_for_model(instance)
if '.'.join(ctype.natural_key()) in TIMELINE_MODELS:
user = None
if isinstance(instance, User):
user = instance
else:
# see if there is a FK to User
for field_name in instance._meta.get_all_field_names():
field = instance._meta.get_field_by_name(field_name)[0]
if hasattr(field, 'rel') and hasattr(field.rel, 'to') and field.rel.to == User:
user = getattr(instance, field_name)
break
Timeline.objects.create(is_created=created, user=user, object_id=instance.id,
content_type=ContentType.objects.get_for_model(instance))
models.signals.post_save.connect(mark_timeline)
def remove_deleted(instance, **kwargs):
if isinstance(instance.pk, int):
Timeline.objects.filter(content_type=ContentType.objects.get_for_model(instance),
object_id=instance.pk).delete()
models.signals.pre_delete.connect(remove_deleted)
| teknolab/teknolab-django-timeline | timeline/models.py | Python | mit | 2,192 |
import os
import re
#requires python 2.6
#must be run in same directory as, and only after, count_unique_seq_per_barcode_pair.sh
#all files should look like "X*.counts"
#dir should be current dir
input_filename = ""
output_filename = "noise_sequences.fa"
outfile = open(output_filename,"w")
for filename in os.listdir("."):
if re.search( ".counts", filename):
input_filename = filename
        opens = open(input_filename)
        read = opens.read()
        opens.close()
splits = read.split("\n")
total = 0
for line in splits:
if len(line)>1:
splitline = line.split(" ")
count = 0
for tabs in splitline:
if not "A" in tabs:
if len(tabs)>0:
total = total + int(tabs)
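        # Second pass: entries whose count is below 5% of the file's total are
        # treated as noise and their barcode/sequence line is written to the output file.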
for line in splits:
if len(line)>1:
splitline = line.split(" ")
for tabs in splitline:
if not "A" in tabs:
if len(tabs)>0:
tot_1 = total
tot = float(tot_1)
acc_1 = int(tabs)
acc = float(acc_1)
if acc*100/tot<5:
#print (str(acc*100/tot)) # prints the actual %
#print line # will print with numerals associated
#print splitline[-1] #prints just barcode and seq.
#for each of the lines need to be removed from the all gtthn 265 etc then can repeat the rest as normal.
outfile.write(splitline[-1] + "\n")
outfile.close()
| sebastianevda/SEvdA_metagen | Script_2_identify_noise_sequences.py | Python | cc0-1.0 | 1,739 |
# -*- coding: utf-8 -*-
import surf
from datetime import datetime
from mmda.engine.utils import mmda_logger
from BeautifulSoup import BeautifulStoneSoup
from urllib2 import urlopen
ABSTRACT_TIMEOUT = 5 #seconds
def populate_abstract(artist_or_releasegroup):
"""
Populate CachedArtist or CachedRleaseGroup with short abstract.
High-level API aimed to replace populate_*_lastfm
@param artist_or_releasegroup: a CachedArtist or CachedReleaseGroup object
@return: a CachedArtist or CachedReleaseGroup object
"""
if 'abstract' not in artist_or_releasegroup:
# TODO: parallelize this
abstract = get_abstract_from_bbc(artist_or_releasegroup)
if not abstract:
abstract = get_abstract_from_dbpedia(artist_or_releasegroup)
# TODO: add other abstract sources here
if abstract:
artist_or_releasegroup.abstract = abstract
return artist_or_releasegroup
def get_abstract_from_dbpedia(artist_or_releasegroup):
"""
Populate CachedArtist or CachedRleaseGroup with short abstract.
@param artist_or_releasegroup: a CachedArtist or CachedReleaseGroup object
@return: a dictionary with an abstract structure
"""
abstract = {}
# if artist_or_releasegroup is ReleaseGroup, we look for release with wikipedia URL
# TODO: check performance, and if better - replace in other parts
# TODO: DRY: refactor
if 'dbpedia' not in artist_or_releasegroup.cache_state:
wiki_resource = None
cache_state = 0
if 'releases' in artist_or_releasegroup:
for release in artist_or_releasegroup['releases'].itervalues():
if 'urls' in release and 'Wikipedia' in release['urls']:
wiki_resource, wiki_lang, wiki_url = find_best_wikipedia_resource(release['urls']['Wikipedia'])
elif 'urls' in artist_or_releasegroup and 'Wikipedia' in artist_or_releasegroup['urls']:
wiki_resource, wiki_lang, wiki_url = find_best_wikipedia_resource(artist_or_releasegroup['urls']['Wikipedia'])
if wiki_resource:
store = surf.Store(reader = "sparql_protocol", endpoint = "http://dbpedia.org/sparql")
session = surf.Session(store)
sparql_query = "SELECT ?abstract WHERE {{ <http://dbpedia.org/resource/%s> <http://dbpedia.org/property/abstract> ?abstract FILTER langMatches( lang(?abstract), '%s') } }" % (wiki_resource, wiki_lang)
try:
t = mmda_logger('wiki','request','abstract',wiki_resource)
# TODO: timeout?
sparql_result = session.default_store.execute_sparql(sparql_query) # TODO: error handling
mmda_logger('wiki','result','found',len(sparql_result['results']['bindings']),t)
if sparql_result['results']['bindings'][0]['abstract']:
abstract = {'content':unicode(sparql_result['results']['bindings'][0]['abstract']), 'url':wiki_url, 'lang':wiki_lang, 'provider':'Wikipedia'}
# TODO: add cache_status dbpedia
except Exception, e:
# TODO: handle it?
mmda_logger('surf-dbpedia','ERROR',e)
else:
cache_state = 1
artist_or_releasegroup.cache_state['dbpedia'] = [cache_state,datetime.utcnow()]
artist_or_releasegroup.changes_present = True
return abstract
def find_best_wikipedia_resource(wikipedia_urls):
"""
Find wikipedia resource parameters. Prefer english one, if present.
@param wikipedia_urls: a list of URL strings
@return: a tuple with resource name, its language and URL
"""
for url in wikipedia_urls:
raped_url = url.split('/')
wiki_resource = raped_url[-1]
wiki_lang = raped_url[2].split('.')[0]
wiki_url = url
if wiki_lang == 'en':
break
return (wiki_resource, wiki_lang, wiki_url)
def get_abstract_from_bbc(artist):
"""
Populate CachedArtist with short Wikipedia abstract from BBC API.
BBC provide abstracts only for artists, so we skip it if argument is a release
@param artist_or_releasegroup: a CachedArtist or CachedReleaseGroup object
@return: a dictionary with an abstract structure
"""
abstract = {}
if artist._doc_type == 'CachedArtist' and 'bbc' not in artist.cache_state:
try:
t = mmda_logger('bbc','request','abstract',artist.get_id)
xml = urlopen("http://www.bbc.co.uk/music/artists/%s/wikipedia.xml" % artist.get_id, timeout=ABSTRACT_TIMEOUT).read()
xmlSoup = BeautifulStoneSoup(xml, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
abstract = {
'content':xmlSoup.wikipedia_article.content.text,
'url':xmlSoup.wikipedia_article.url.text,
'lang':'en',
'provider':'Wikipedia'
}
except Exception, e:
mmda_logger('bbc','ERROR',e)
cache_state = 0
else:
mmda_logger('bbc','result','found',abstract['url'],t)
cache_state = 1
artist.cache_state['bbc'] = [cache_state,datetime.utcnow()]
artist.changes_present = True
return abstract
| lidel/mmda | engine/abstract.py | Python | cc0-1.0 | 5,263 |
import sys
import os
import subprocess
initial_layout = [
"---",
"layout: default",
"permalink: 'reviews/{}.html'",
"title: '{}'",
"---\n",
"# {}",
"---\n",
"## Idea\n\n",
"## Method\n\n",
"## Observations\n\n"
]
def main():
paper_name = sys.argv[1]
formatted_name = subprocess.check_output(
["filename-formatter", paper_name]).decode("utf-8").strip()
file_contents = "\n".join(initial_layout)
with open("_reviews/{}.md".format(formatted_name), 'w') as f:
f.write(file_contents.format(formatted_name, paper_name, paper_name))
if __name__ == '__main__':
main()
| v1n337/research-review-notes | create_review.py | Python | cc0-1.0 | 642 |
import tablib
import subprocess
import collections
import os.path
import sys
import json
ver = sys.argv[2]
fn = sys.argv[1]
ofn = os.path.basename(os.path.splitext(sys.argv[1])[0]) + '.csv'
subprocess.call(['soffice', '--headless', '--convert-to', "csv", sys.argv[1]])
data = tablib.Dataset()
print('wot')
print(ofn)
with open(ofn, encoding='latin-1') as f:
data.csv = f.read()
headers = data.headers[:]
want = {
"Full name of License": "name",
"License Identifier": "id",
"Source/url": "sources",
"Notes": "notes",
"OSI Approved": "osi_approved",
"Standard License Header": "header",
"Template": "template"
}
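# Keep only the columns listed in `want`, renaming the headers to the short keys above.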
for i, header in enumerate(headers):
if header not in want:
del data[header]
else:
data.headers[i] = want[header]
# Add hidden column for special cases
data.headers.append('hidden')
# Delete rubbish lines
del data[-3:]
data = data.dict
# Convert to booleans
for row in data:
for k, v in row.items():
if v == '':
row[k] = None
row['osi_approved'] = True if row['osi_approved'] == 'YES' else False
if row['sources'] is not None:
row['sources'] = row['sources'].strip().split('\n')
o = collections.OrderedDict()
o['version'] = ver
o['licenses'] = data
print(json.dumps(o, indent=2))
| bbqsrc/spdx-python | scripts/convert-ods.py | Python | cc0-1.0 | 1,297 |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 24 10:59:39 2019
@author: CHaithcock
"""
| crhaithcock/RushHour | RHGraph/generators/GenerateStatesByClass.py | Python | cc0-1.0 | 90 |
def begining():
n = int(raw_input("Start counting down from..."))
countdown(n)
def countdown(n):
if n <= 0:
print "gg is done!"
else:
print n
countdown(n-1)
begining()
| isaiah2286-cmis/isaiah2286-cmis-cs2 | countingdown.py | Python | cc0-1.0 | 208 |