repo_name (string, lengths 5-92) | path (string, lengths 4-221) | copies (string, 19 classes) | size (string, lengths 4-6) | content (string, lengths 766-896k) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 32-997) | alpha_frac (float64, 0.25-0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5-13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
Rfam/rfam-production | scripts/support/mirnas/report_to_mirna_input.py | 1 | 3367 | import argparse
import json
import os
from datetime import date


# -------------------------------------------------------------------------------

def extract_new_mirnas_from_report(report_tsv, type='new'):
    """
    Extracts new miRNA families (and their thresholds) from a report in .tsv format.
    """
    new_mirnas = {}
    fp = open(report_tsv, 'r')
    count = 0
    for line in fp:
        line = line.strip().split('\t')
        # check if candidate mirna is a new family
        if line[6].lower() == "new family":
            # skip families requiring review
            if line[1] != '?' and line[1] != '' and line[2] != "1_SEED":
                if line[0] not in new_mirnas:
                    print(line)
                    new_mirnas[line[0]] = line[1]
        elif line[6].lower() == 'done':
            count += 1
    fp.close()
    return new_mirnas
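
# Illustrative sketch of the report layout this parser assumes (not taken from
# the original repository): column 0 holds the miRNA/family identifier, column 1
# the threshold, column 2 the seed label and column 6 the action, e.g. a line
#   candidate-mirna<TAB>50.0<TAB>2_SEED<TAB>...<TAB>new family
# would add {"candidate-mirna": "50.0"} to the dictionary returned above.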
# -------------------------------------------------------------------------------

def extract_rfam_family_accessions(report_file):
    fp = open(report_file, 'r')
    accession_map = {}
    for line in fp:
        line = line.strip().split('\t')
        overlap = float(line[4])
        if overlap <= 100.0:
            # converts to upper to ensure labels match the constant
            if line[6].strip().upper() == "UPDATE SEED":
                rfam_acc = line[3].strip()
                rfam_acc_list = []
                # split comma-separated accessions into a list
                if rfam_acc.find(',') != -1:
                    rfam_acc_list = rfam_acc.split(',')
                else:
                    rfam_acc_list = [rfam_acc]
                threshold = 0.0
                if line[1] != '':
                    threshold = float(line[1])
                # trim any whitespace characters
                mirbase_id = line[0].strip()
                accession_map[mirbase_id] = {"rfam_acc": rfam_acc_list,
                                             "threshold": threshold,
                                             "overlap": float(line[4])}
    fp.close()
    return accession_map
# -------------------------------------------------------------------------------

def parse_arguments():
    """
    Builds and returns the command line argument parser.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--report", help="miRNA report in .tsv format", action="store")
    parser.add_argument("--dest-dir", help="Destination directory", action="store", default=os.getcwd())
    parser.add_argument("--old-rfam", help="Fetches old Rfam miRNA accessions to be updated",
                        action="store_true", default=False)
    parser.add_argument("--create-dump", help="Generates a JSON (.json) dump in destination directory",
                        action="store_true", default=False)
    return parser


# -------------------------------------------------------------------------------

if __name__ == '__main__':
    parser = parse_arguments()
    args = parser.parse_args()

    accessions = None
    if not args.old_rfam:
        new_mirnas = extract_new_mirnas_from_report(args.report, type='new')
        accessions = new_mirnas
    else:
        accessions = extract_rfam_family_accessions(args.report)

    if args.create_dump:
        filename = "new_mirnas_"
        if args.old_rfam:
            filename = "mirna_families_to_update_"
        fp_out = open(os.path.join(args.dest_dir, filename + str(date.today()) + ".json"), 'w')
        json.dump(accessions, fp_out)
        fp_out.close()
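
# Example invocation (hypothetical file names), dumping the families that need
# updating to a JSON file in /tmp:
#
#   python report_to_mirna_input.py --report mirna_report.tsv --old-rfam --create-dump --dest-dir /tmp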
| apache-2.0 | -6,873,067,566,488,718,000 | 28.278261 | 105 | 0.471339 | false | 4.06152 | false | false | false |
jucacrispim/toxicbuild | toxicbuild/master/slave.py | 1 | 21372 | # -*- coding: utf-8 -*-
# Copyright 2016-2020 Juca Crispim <[email protected]>
# This file is part of toxicbuild.
# toxicbuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# toxicbuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with toxicbuild. If not, see <http://www.gnu.org/licenses/>.
import asyncio
from collections import defaultdict
import time
import traceback
from mongomotor.fields import (StringField, IntField, BooleanField,
DictField, ListField)
from toxicbuild.core.exceptions import ToxicClientException, BadJsonData
from toxicbuild.core.utils import (string2datetime, LoggerMixin, now,
localtime2utc)
from toxicbuild.common.exchanges import notifications
from toxicbuild.master.aws import EC2Instance
from toxicbuild.master.build import BuildStep, Builder
from toxicbuild.master.client import get_build_client
from toxicbuild.master.document import OwnedDocument
from toxicbuild.master.signals import (build_started, build_finished,
step_started, step_finished,
step_output_arrived, build_preparing)
class Slave(OwnedDocument, LoggerMixin):
""" Slaves are the entities that actualy do the work
of execute steps. The comunication to slaves is through
the network (using :class:`toxicbuild.master.client.BuildClient`).
The steps are actually decided by the slave.
"""
INSTANCE_TYPES = ('ec2',)
INSTANCE_CLS = {'ec2': EC2Instance}
DYNAMIC_HOST = '<DYNAMIC-HOST>'
host = StringField(required=True)
"""Slave's host."""
port = IntField(required=True)
"""Port for the slave to listen."""
token = StringField(required=True)
"""Token for authentication."""
is_alive = BooleanField(default=False)
"""Indicates if the slave is up and running."""
use_ssl = BooleanField(default=True)
"""Indicates if the build server in uses ssl connection."""
validate_cert = BooleanField(default=True)
"""Indicates if the certificate from the build server should be validated.
"""
on_demand = BooleanField(default=False)
"""If the slave is on-demand it will be started when needed and
will be stopped when all the builds for this slave are completed.
"""
instance_type = StringField(choices=INSTANCE_TYPES)
"""The type of instance used. Currently only 'ec2' is supported.
"""
instance_confs = DictField()
"""Configuration paramenters for the on-demand instance.
"""
parallel_builds = IntField(default=0)
"""Max number of builds in parallel that this slave exeutes.
If no parallel_builds there's no limit.
"""
queue_count = IntField(default=0)
"""How many builds are waiting to run in this repository."""
enqueued_builds = ListField(StringField())
"""Uuids of builds enqueued to run in this slave."""
running_count = IntField(default=0)
"""How many builds are running in this slave."""
running_repos = ListField(StringField())
"""The ids of the repositories that have builds running in this slave.
"""
meta = {
'ordering': ['name']
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# So, the thing here is that we may have a race condition
# with the last step output and the build finished messages.
# In fact, all the build management/build server communication already
# is on its limits. A new implementation is needed.
self._step_finished = defaultdict(lambda: False)
self._step_output_cache = defaultdict(list)
self._step_output_cache_time = defaultdict(float)
self._step_output_cache_limit = 1 # seconds
self._step_output_is_updating = defaultdict(lambda: False)
async def save(self, *args, **kwargs):
if self.on_demand and not self.host:
self.host = self.DYNAMIC_HOST
r = await super().save(*args, **kwargs)
return r
@classmethod
async def create(cls, **kwargs):
"""Creates a new slave"""
slave = cls(**kwargs)
await slave.save()
return slave
def to_dict(self, id_as_str=False):
"""Returns a dict representation of the object."""
host = self.host if self.host != self.DYNAMIC_HOST else ''
my_dict = {'name': self.name, 'host': host,
'port': self.port, 'token': self.token,
'full_name': self.full_name,
'is_alive': self.is_alive, 'id': self.id,
'on_demand': self.on_demand,
'use_ssl': self.use_ssl,
'validate_cert': self.validate_cert,
'instance_type': self.instance_type,
'instance_confs': self.instance_confs}
if id_as_str:
my_dict['id'] = str(self.id)
return my_dict
@classmethod
async def get(cls, **kwargs):
"""Returns a slave instance."""
slave = await cls.objects.get(**kwargs)
return slave
@property
def instance(self):
"""Returns an on-demand instance wrapper.
"""
cls = self.INSTANCE_CLS[self.instance_type]
return cls(**self.instance_confs)
async def enqueue_build(self, build):
"""Marks a build as enqueued in this slave. It does not enqueue
two times the same build, if the build is already enqueued simply
skip it returning False
"""
buuid = str(build.uuid)
if buuid in self.enqueued_builds:
return False
self.enqueued_builds.append(buuid)
await self.update(
inc__queue_count=1,
enqueued_builds=self.enqueued_builds)
return True
async def dequeue_build(self, build):
"""Unmark a build as enqueued. If the build is not enqueued returns
False.
"""
try:
i = self.enqueued_builds.index(str(build.uuid))
self.enqueued_builds.pop(i)
except ValueError:
return False
await self.update(dec__queue_count=1,
enqueued_builds=self.enqueued_builds)
return True
async def add_running_repo(self, repo_id):
"""Increments the number of running builds in this slave and
adds the repository id to the running repos list. Also decrements
the queue count.
:param repo_id: An id of a repository.
"""
self.running_repos.append(str(repo_id))
self.running_count += 1
self.queue_count -= 1
await self.update(dec__queue_count=1, inc__running_count=1,
set__running_repos=self.running_repos)
async def rm_running_repo(self, repo_id):
"""Decrements the number of running builds in this slave and
removes the repository id from the running repos list
:param repo_id: An id of a repository.
"""
self.running_repos.remove(str(repo_id))
self.running_count -= 1
await self.update(
dec__running_count=1, set__running_repos=self.running_repos)
async def start_instance(self):
"""Starts an on-demand instance if needed."""
if not self.on_demand:
return False
is_running = await self.instance.is_running()
if not is_running:
self.log('Starting on-demand instance for {}'.format(self.id),
level='debug')
await self.instance.start()
ip = await self.instance.get_ip()
if ip and self.host == self.DYNAMIC_HOST:
self.host = ip
await self.wait_service_start()
self.log('Instance for {} started with ip {}'.format(self.id, ip),
level='debug')
return ip
async def stop_instance(self):
"""Stops an on-demand instance"""
if not self.on_demand:
return False
if self.queue_count or self.running_count:
self.log('Instance still building, not stopping it.',
level='debug')
return False
self.log('Stopping on-demand instance for {}'.format(self.id),
level='debug')
is_running = await self.instance.is_running()
if not is_running:
self.log('Instance for {} already stopped. Leaving.'.format(
self.id), level='debug')
return False
await self.instance.stop()
self.log('Instance for {} stopped'.format(self.id), level='debug')
return True
async def get_client(self):
""" Returns a :class:`~toxicbuild.master.client.BuildClient` instance
already connected to the server.
"""
connected_client = await get_build_client(
self, self.host, self.port, use_ssl=self.use_ssl,
validate_cert=self.validate_cert)
return connected_client
async def healthcheck(self):
""" Check if the build server is up and running
"""
with (await self.get_client()) as client:
alive = await client.healthcheck()
return alive
async def wait_service_start(self, timeout=10):
"""Waits for the toxicslave service start in the on-demand
instance.
"""
self.log('waiting toxicslave service start for {}'.format(self.id),
level='debug')
i = 0
while i < timeout:
try:
await self.healthcheck()
return True
except ToxicClientException:
raise
except Exception as e:
self.log('Service down {}'.format(i), level='debug')
self.log(str(e), level='debug')
i += 1
await asyncio.sleep(1)
raise TimeoutError
async def list_builders(self, revision):
""" List builder available in for a given revision
:param revision: An instance of
:class:`toxicbuild.master.repository.RepositoryRevision`
"""
repository = await revision.repository
repo_url = repository.url
vcs_type = repository.vcs_type
branch = revision.branch
named_tree = revision.commit
with (await self.get_client()) as client:
builders = await client.list_builders(repo_url, vcs_type,
branch, named_tree)
builder_instances = []
for bname in builders:
builder = await Builder.get_or_create(repository=repository,
name=bname)
builder_instances.append(builder)
return list(builder_instances)
async def _finish_build_start_exception(self, build, repo, exc_out):
build.status = 'exception'
build.steps = [BuildStep(repository=repo, name='Exception',
command='exception',
output=exc_out, status='exception')]
await build.update()
async def build(self, build, **envvars):
""" Connects to a build server and requests a build on that server
:param build: An instance of :class:`toxicbuild.master.build.Build`
:param envvars: Environment variables to use in the builds.
"""
repo = await build.repository
await self.add_running_repo(repo.id)
await self.dequeue_build(build)
try:
build.status = build.PREPARING
await build.update()
repo = await build.repository
build_preparing.send(str(repo.id), build=build)
try:
await self.start_instance()
except Exception as e:
await self._finish_build_start_exception(build, repo, str(e))
return False
with (await self.get_client()) as client:
try:
build_info = await client.build(
build,
envvars=envvars,
process_coro=self._process_info)
except (ToxicClientException, BadJsonData):
output = traceback.format_exc()
build.status = 'exception'
build.started = build.started or localtime2utc(now())
build.finished = build.finished or localtime2utc(now())
exception_step = BuildStep(repository=repo, output=output,
started=localtime2utc(now()),
finished=localtime2utc(now()),
status='exception',
command='', name='exception')
build.steps.append(exception_step)
await build.update()
build_info = build.to_dict()
finally:
await self.rm_running_repo(repo.id)
return build_info
async def _process_info(self, build, repo, info):
""" Method used to process information sent by
the build server about an in progress build.
:param build: The build that is being executed
:param repo: The repository that owns the build.
:param info: A dictionary. The information sent by the
slave that is executing the build.
"""
# if we need one more conditional here, it is better to use
# a map...
if info['info_type'] == 'build_info':
await self._process_build_info(build, repo, info)
elif info['info_type'] == 'step_info':
await self._process_step_info(build, repo, info)
else:
await self._process_step_output_info(build, repo, info)
async def _process_build_info(self, build, repo, build_info):
build.status = build_info['status']
build.started = string2datetime(build_info['started'])
finished = build_info['finished']
if finished:
build.finished = string2datetime(finished)
build.total_time = (build.finished - build.started).seconds
await build.update()
if not build.finished:
msg = 'build started at {}'.format(build_info['started'])
self.log(msg)
build_started.send(str(repo.id), build=build)
await build.notify('build-started')
else:
msg = 'build finished at {} with status {}'.format(
build_info['finished'], build.status)
self.log(msg)
build_finished.send(str(repo.id), build=build)
step = build.steps[-1]
status = build_info['steps'][-1]['status']
finished = build_info['steps'][-1]['finished']
await self._fix_last_step_status(build, step, status, finished)
await build.notify('build-finished')
async def _process_step_info(self, build, repo, step_info):
cmd = step_info['cmd']
name = step_info['name']
status = step_info['status']
output = step_info['output']
started = step_info['started']
finished = step_info['finished']
index = step_info['index']
uuid = step_info['uuid']
if finished:
self._step_finished[uuid] = True
msg = 'step {} {} finished at {} with status {}'.format(
cmd, uuid, finished, status)
self.log(msg, level='debug')
requested_step = await self._get_step(build, uuid)
requested_step.status = status
if requested_step.status == 'exception':
requested_step.output = output if not requested_step.output \
else requested_step.output + output
else:
requested_step.output = output
requested_step.finished = string2datetime(finished)
requested_step.total_time = step_info['total_time']
await build.update()
step_finished.send(str(repo.id), build=build, step=requested_step)
msg = requested_step.to_dict()
msg.update({'repository_id': str(repo.id),
'event_type': 'step-finished'})
await notifications.publish(msg)
else:
requested_step = BuildStep(repository=repo, name=name, command=cmd,
status=status, output=output,
started=string2datetime(started),
index=index, uuid=uuid)
build.steps.append(requested_step)
await build.update()
msg = 'step {} started at {}'.format(requested_step.command,
started)
self.log(msg, level='debug')
step_started.send(str(repo.id), build=build, step=requested_step)
msg = requested_step.to_dict()
msg.update({'repository_id': str(repo.id),
'event_type': 'step-started'})
await notifications.publish(msg)
if step_info.get('last_step_status'):
last_step = build.steps[-2]
status = step_info.get('last_step_status')
finished = step_info.get('last_step_finished')
await self._fix_last_step_status(build, last_step,
status, finished)
async def _fix_last_step_status(self, build, step, status, finished):
# this fixes the bug with the status of the step that
# somehow was getting lost here in the slave.
step.status = status
step.finished = string2datetime(finished)
await build.update()
async def _update_build_step_info(self, build, step_info):
# we need this cache here to avoid excessive memory consumption
# if we try to update the step output every time a line arrives.
output = step_info['output']
uuid = step_info['uuid']
self._step_output_cache[uuid].append(output)
now = time.time()
if not self._step_output_cache_time[uuid]:
self._step_output_cache_time[
uuid] = now + self._step_output_cache_limit
is_updating = self._step_output_is_updating[uuid]
if self._step_output_cache_time[uuid] >= now or is_updating:
return False
self._step_output_is_updating[uuid] = True
step = await self._get_step(build, uuid, wait=True)
# the thing here is that while we are waiting for the step,
# the step may have finished, so we don't do anything in this case.
if self._step_finished[uuid]:
self.log('Step {} already finished. Leaving...'.format(uuid),
level='debug')
del self._step_output_cache[uuid]
return False
output = [step.output or ''] + self._step_output_cache[uuid]
step.output = ''.join(output)
del self._step_output_is_updating[uuid]
del self._step_output_cache[uuid]
del self._step_output_cache_time[uuid]
await build.update()
return True
async def _process_step_output_info(self, build, repo, info):
uuid = info['uuid']
msg = 'step_output_arrived for {}'.format(uuid)
self.log(msg, level='debug')
info['repository'] = {'id': str(repo.id)}
info['build'] = {'uuid': str(build.uuid),
'repository': {'id': str(repo.id)}}
info['output'] = info['output'] + '\n'
step_output_arrived.send(str(repo.id), step_info=info)
await self._update_build_step_info(build, info)
async def _get_step(self, build, step_uuid, wait=False):
"""Returns a step from ``build``. Returns None if the requested
step is not present in the build.
:param build: A :class:`toxicbuild.master.build.Build` instance.
:param step_uuid: The uuid of the requested step.
"""
# this is ridiculous, but the idea of waiting for the step is
# that sometimes an info - ie step_output_info - may arrive here
# before the step started info, so we need to wait a little.
build_inst = build
async def _get():
build = await type(build_inst).get(build_inst.uuid)
build_steps = build.steps
for i, step in enumerate(build_steps):
if str(step.uuid) == str(step_uuid):
build_inst.steps[i] = step
return step
step = await _get()
limit = 20
n = 0
while not step and wait:
await asyncio.sleep(0.001)
step = await _get()
n += 1
if n >= limit:
wait = False
return step
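# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# toxicslave instance is reachable and that `owner`, `revision` and `build`
# objects already exist; names, host, port and token are placeholders only.
#
# async def example(owner, revision, build):
#     slave = await Slave.create(name='local-slave', host='localhost',
#                                port=7777, token='some-token', owner=owner)
#     if await slave.healthcheck():
#         builders = await slave.list_builders(revision)
#         build_info = await slave.build(build, MY_ENV_VAR='1')
#         return builders, build_info
# ---------------------------------------------------------------------------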
| agpl-3.0 | 5,881,850,398,815,286,000 | 36.494737 | 79 | 0.57828 | false | 4.291566 | false | false | false |
ekwan/PyQuiver | src/constants.py | 1 | 5346 | # This file holds physical constants and reads atomic weights.
import sys
import re
import os
import inspect
###############
# Physical Constants
PHYSICAL_CONSTANTS = {
'h' : 6.626070E-34, # Planck's constants in J * s
'c' : 2.997925E+10, # speed of light in units of cm/s
'Eh' : 4.359745E-18, # energy of a hartree in units of J = kg m^2/s^2
'a0' : 5.291772E-11, # bohr radius in m
'atb': 5.291772E-01, # angstroms per bohr
'amu': 1.660468E-27, # atomic mass unit in units kg
'kB' : 1.380649E-23 # Boltzmann's constant in J/K
}
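# Worked example (added for illustration): the energy of a vibration with
# wavenumber 1000 cm^-1 follows from E = h * c * nu, i.e.
#   PHYSICAL_CONSTANTS['h'] * PHYSICAL_CONSTANTS['c'] * 1000.0
#   = 6.626070E-34 J*s * 2.997925E+10 cm/s * 1000 cm^-1 ~ 1.99E-20 J,
# which can then be compared against kB*T (~4.11E-21 J at 298 K).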
#CM/2.998E10/,EM/1.440E13/,HBC/1.4387/
###############
# Atomic Weight Information
class Element(object):
def __init__(self, full_name, atomic_number, symbol, default_mass):
# the name of this element, like "hydrogen"
full_name = str(full_name)
self.full_name = full_name
if re.match("[^a-z]", full_name):
print("Unexpected non-lowercase character in element name: %s" % full_name)
print("Quitting.")
sys.exit(1)
# the symbol of this element, like "H"
symbol = str(symbol)
self.symbol = symbol
if re.match("[^a-zA-Z]", symbol):
print("Unexpected non-letter character in element symbol: %s" % symbol)
print("Quitting.")
sys.exit(1)
if len(symbol) < 1 or len(symbol) > 2:
print("Unexpected length of element symbol (must be 1 or 2): %s" % symbol)
print("Quitting.")
sys.exit(1)
# the atomic number of this element, like 1
atomic_number = int(atomic_number)
self.atomic_number = atomic_number
if atomic_number < 1 or atomic_number > 200:
print("Unexpected atomic number: %d" % atomic_number)
print("Quitting.")
sys.exit(1)
# the average weight for this element, like 1.00783
default_mass = float(default_mass)
self.default_mass = default_mass
if default_mass < 0.0 or default_mass > 500.0:
print("Unexpected default mass: %d" % default_mass)
print("Quitting.")
sys.exit(1)
# pairs of tuples strings (like "2H") to masses (like 2.0141)
self.replacements = []
def __str__(self):
string = "%s (%s, Z=%d, default mass = %.4f" % (self.full_name.capitalize(), self.symbol, self.atomic_number, self.default_mass)
if len(self.replacements) == 0:
string += ", no isotopic replacements possible)\n"
else:
string += ")\n"
for s,m in self.replacements:
string += " %2s : %.4f\n" % (s,m)
return string[:-1]
def add_replacement(self, symbol, mass):
symbol = str(symbol)
if re.match("[^a-zA-Z0-9]", symbol):
print("Unexpected non-letter character in isotopic replacement symbol: %s" % symbol)
print("Quitting.")
sys.exit(1)
if len(symbol) < 1 or len(symbol) > 4:
print("Unexpected length of element symbol in replacement (must be 1-4 inclusive, found %d): %s" % (len(symbol), symbol))
print("Quitting.")
sys.exit(1)
for s,m in self.replacements:
if s == symbol:
print("Must use a unique symbol for every isotopic replacement: %s" % s)
sys.exit(1)
mass = float(mass)
if mass < 0.0 or mass > 500.0:
print("Unexpected isotopic replacement mass: %f" % mass)
sys.exit(1)
self.replacements.append((symbol,mass))
# read in atomic weight data
elements = []
root = os.path.split(os.path.abspath(__file__))[0]
for line in open(root + "/weights.dat", "r"):
# ignore comments and blank lines
line = line.strip()
if len(line) == 0 or line[0] == "#":
continue
line = line.split("#",1)[0]
# parse
fields = line.split(",") #line.encode("ascii","ignore").split(",")
if len(fields) < 4:
print("Error: not enough data on this line of weights.dat:")
print(line)
print("\nQuitting.")
sys.exit(1)
element = Element(*fields[0:4])
if (len(fields)-4) % 2 != 0:
print("Unexpected number of isotopic replacement fields on this line of weights.dat.")
print("The number of fields after the first four must be a multiple of 2 (found %d)." % (len(fields)-4))
print(line)
print("\nQuitting.")
sys.exit(1)
if (len(fields) > 4):
for i in range(4, len(fields), 2):
element.add_replacement(fields[i], fields[i+1])
elements.append(element)
#print element
print("Read atomic weight data for %d elements." % len(elements))
# map from atomic number to default masses
DEFAULT_MASSES = { e.atomic_number : e.default_mass for e in elements }
# map from valid isotopic replacements to masses
REPLACEMENTS = {}
for e in elements:
for replacement,mass in e.replacements:
REPLACEMENTS[replacement] = mass
# map from isotopic replacements to atomic numbers
REPLACEMENTS_Z = {}
for e in elements:
for replacement,mass in e.replacements:
REPLACEMENTS_Z[replacement]=e.atomic_number
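# Illustrative lookups (added for clarity; actual values come from weights.dat,
# the numbers below are the examples quoted in the comments above):
#   DEFAULT_MASSES[1]    -> 1.00783  (default mass of hydrogen)
#   REPLACEMENTS['2H']   -> 2.0141   (mass used for a "2H" isotopic replacement)
#   REPLACEMENTS_Z['2H'] -> 1        (atomic number that replacement applies to)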
# threshold to separate linear molecules from non-linear molecules
LINEARITY_THRESHOLD = 1e-06
DROP_NUM_LINEAR = 5
# DROP_NUM_NONLINEAR = 6
| apache-2.0 | -5,842,683,066,656,835,000 | 35.121622 | 136 | 0.589787 | false | 3.480469 | false | false | false |
kvidoo/MMexUpdater | mmupdater/MMexCategoryUpdater.py | 1 | 1514 | '''
Created on Aug 30, 2013
@author: novpa01
'''
import logging
import sys
import importlib
from mmupdater.Settings import Settings
from mmupdater.UserError import UserError
from mmupdater.MMexDb import MMexDb
from mmupdater.CategoryAssigner import CategoryAssigner
# Parse settings file
settings = Settings('settings.ini')
if __name__ == '__main__':
    try:
        # initialize log level
        logging.basicConfig(level=settings.loglevel)

        # initialize the component to talk to the MMex database
        db = MMexDb(settings)

        # initialize category assignments
        cat_assigner = CategoryAssigner(settings, db)

        # get transactions with no categories
        transactions = db.get_transactions_with_no_categories()
        print("Found " + str(len(transactions)) + " transactions with no category assigned")

        # fill-in categories where we can
        cat_assigner.assign(transactions)

        # get just those transactions that have some category assigned
        assigned_transactions = [t for t in transactions if 'CATEGID' in t]
        print("Categories found for " + str(len(assigned_transactions)) + " transactions")

        # save them to the database
        db.update_transactions(assigned_transactions, cat_assigner)

        # successful exit
        exit(0)
    except UserError as e:
        sys.stderr.write("ERROR: " + str(e) + '\n')
        # non-zero to report error
        exit(1)
| mit | 5,669,176,103,942,437,000 | 28.686275 | 92 | 0.645971 | false | 4.452941 | false | false | false |
FSUgenomics/TFLOW | tflow/segments/BUSCO_Analysis.py | 1 | 22978 | #TFLOW Segment: Analyze FASTA File for Gene Recapture using BUSCO Benchmark Database
#
#Dan Stribling
#Florida State University
#Center for Genomics and Personalized Medicine
#Version 0.9, 04/20/2015
#Project URL: http://www.github.com/fsugenomics/tflow
import os.path
import sys
import subprocess
import shutil
import gzip
BUSCO_FILES = {'arthropoda':'BUSCO_Arthropoda.fas',
'vertebrata':'BUSCO_Vertebrata.fas',
'fungi':'BUSCO_Fungi.fas',
'metazoa':'BUSCO_Metazoa.fas'}
if __name__ == "__main__" or __package__ is None:
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../'))
import tflow.segments
__package__ = "tflow.segments"
from .parser_class import OutputParser
from ..util import (print_exit, print_except, write_file, write_report, delete_pid_file,
percent_string, lowercase, stop_TFLOW_process)
from .. import util
from .. import local_settings
if hasattr(local_settings, 'BUSCO_LOCATION'):
BUSCO_LOCATION = local_settings.BUSCO_LOCATION
else:
BUSCO_LOCATION = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..',
'sequence_files')
if hasattr(local_settings, 'BLAST_LOCATION'):
BLAST_LOCATION = local_settings.BLAST_LOCATION
else:
BLAST_LOCATION = ''
if hasattr(local_settings, 'BLAST_EXEC'):
BLAST_EXEC = local_settings.BLAST_EXEC
else:
BLAST_EXEC = os.path.join(BLAST_LOCATION, 'blastx')
if hasattr(local_settings, 'MAKE_BLAST_DB_LOCATION'):
MAKE_BLAST_DB_LOCATION = local_settings.MAKE_BLAST_DB_LOCATION
else:
MAKE_BLAST_DB_LOCATION = ''
if hasattr(local_settings, 'MAKE_BLAST_DB_EXEC'):
MAKE_BLAST_DB_EXEC = local_settings.MAKE_BLAST_DB_EXEC
else:
MAKE_BLAST_DB_EXEC = os.path.join(MAKE_BLAST_DB_LOCATION, 'makeblastdb')
JOB_TYPE = 'BUSCO_Analysis'
PROGRAM_URL = 'http://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastDocs&DOC_TYPE=Download'
DATASET_DETAILS = ''' Benchmarking sets of Universal Single-Copy Orthologs (BUSCO): Metazoa, Arthropoda,
Vertebrata, and Fungi Datasets
Version: OrthoDB7, Acquired 2015-04-22
URL: ftp://cegg.unige.ch/OrthoDB7/BUSCO/
Citation: Waterhouse et al, Nucleic Acids Research, January 2013, PMID:23180791
OrthoDB: a hierarchical catalog of animal, fungal and bacterial orthologs.
'''
SEGMENT_FOR_VERSION = '2.2.29'
BLAST_COMMAND = BLAST_EXEC
BLAST_COMMAND_LIST = [BLAST_COMMAND]
BLAST_DB_COMMAND = MAKE_BLAST_DB_EXEC
BLAST_DB_COMMAND_LIST = [BLAST_DB_COMMAND]
TEST_COMMAND = '-h'
OUT_FILE = 'BUSCO_Analysis.out'
MILESTONES = ['BUSCO Benchmarking Analysis Complete']
TERMINAL_FLAGS = ['BUSCO Analysis Done']
FAILURE_FLAGS = ['Exiting Early...',
'Traceback',
'Not Found',
'Exception: ERROR',
]
DEFAULT_SETTINGS = {'working_directory':'BUSCO_Analysis',
'BUSCO_type':'vertebrata',
'BUSCO_location':BUSCO_LOCATION,
'copy_input_file':True,
'max_CPU':'4',
'evalue':'1e-5',
'evalue_cutoff':'1e-20',
'blast_result_file':'blast.out',
'print_missing_genes':False,
'print_matches':False,
#TFLOW BUSCO_Analysis Settings
'blast_command':BLAST_COMMAND,
'blast_command_list':BLAST_COMMAND_LIST,
'blast_db_command':BLAST_DB_COMMAND,
'blast_db_command_list':BLAST_DB_COMMAND_LIST,
'test_command':TEST_COMMAND,
'program_URL':PROGRAM_URL,
'segment_for_version':SEGMENT_FOR_VERSION,
'dataset_details':DATASET_DETAILS,
#TFLOW Writing Defaults, Used if Global Not Set
'write_report':True,
'write_command':True,
'write_pid':True,
}
REQUIRED_SETTINGS = ['blast_command_list', 'blast_db_command_list', 'working_directory',
'copy_input_file', 'evalue', 'max_CPU', 'blast_result_file', 'evalue_cutoff',
'print_missing_genes', 'write_command', 'write_report', 'write_pid',
'print_matches']
REQUIRED_ANALYSIS_SETTINGS = ['blast_result_file', 'evalue_cutoff', 'print_missing_genes',
'print_matches', 'write_report']
class Parser(OutputParser):
def set_local_defaults(self):
self.milestones = MILESTONES
self.terminal_flags = TERMINAL_FLAGS
self.failure_flags = FAILURE_FLAGS
self.job_type = JOB_TYPE
def check_done(options):
parser = Parser()
parser.out_file = options['out_file']
failure_exit = (options['mode'] in ['run', 'track'])
return parser.check_completion(failure_exit)
def track(options):
parser = Parser()
parser.out_file = options['out_file']
parser.track()
def read(options):
parser = Parser()
parser.out_file = options['out_file']
parser.read_or_notify()
def stop(options):
job_pid_file = os.path.join(options['working_directory'],
JOB_TYPE + '.auto.pid')
stop_TFLOW_process(job_pid_file, JOB_TYPE)
def clean(options):
files = ['BUSCO_Make_DB.auto.sh', 'BUSCO_tblastn.auto.sh']
for BUSCO_type in ['Arthropoda', 'Vertebrata', 'Metazoa', 'Fungi']:
for suffix in ['.pin', '.psq', '.phr']:
files.append('BUSCO_' + BUSCO_type + suffix)
if options['copy_input_file']:
for file_type in ['absolute_input_analysis_file', 'rel_input_analysis_file']:
if file_type in options:
files.append(os.path.basename(os.path.join(options['working_directory'],
options[file_type])))
break
out_files = [options['blast_result_file']]
remove_outfile = (options['mode'] == 'reset')
util.clean_TFLOW_auto_files(options['job_type'], options['project_directory'],
options['working_directory'], remove_outfile=remove_outfile,
confirm=options['confirm'], files=files, out_files=out_files)
def test(options, silent=False):
all_output = ''
for job_type, command_list in [(JOB_TYPE+':BLAST', 'blast_command_list'),
(JOB_TYPE+':Make_Blast_DB', 'blast_db_command_list')]:
try:
process = subprocess.Popen(options[command_list] + [options['test_command']],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
process.wait()
output, error = process.communicate()
all_output += output
print ' -- %s Found!' % job_type
except OSError as error:
if silent:
return False
print ('%s Cannot Be Found ' % job_type
+ ' With Shell Command: "%s"' % ' '.join(options[command_list]))
if PROGRAM_URL:
print 'If Not Installed, %s Can be Downloaded From:\n%s' % (JOB_TYPE, PROGRAM_URL)
all_output += 'Error Number: %s\nError Text:\n%s' % (str(error.errno), error.strerror)
return all_output
def run(options):
if __name__ != '__main__' and options['is_pipe']:
out_file_stream = open(options['out_file'], 'w')
terminal_out, terminal_error = sys.stdout, sys.stderr
sys.stdout, sys.stderr = out_file_stream, out_file_stream
#Ensure Required Settings in Options
for required_option in REQUIRED_SETTINGS:
if required_option not in options:
print_exit('Required Option: %s for %s not given.' % (required_option, JOB_TYPE))
#Ensure A Type of Input File is Given
if not any(x in options for x in ['absolute_input_analysis_file',
'rel_input_analysis_file',
'result_name_file']):
print_exit('Either absolute_input_analysis_file, rel_input_analysis_file, or'
+ ' result_name_file parameter required.')
#Ensure a BUSCO file or type is given
if not any(x in options for x in ['BUSCO_file', 'BUSCO_type']):
print_exit('Either BUSCO_file or BUSCO_type parameter required.')
#Ensure Working Directory Exists
if not os.path.isdir(options['working_directory']):
print_exit('Working Directory: %s Not Found.' % options['working_directory'])
#Print Dataset Details
if 'dataset_details' in options:
print 'Details on Benchmarking Dataset:'
print options['dataset_details']
print ''
#Assign Correct Input File Name
if 'absolute_input_analysis_file' in options:
full_input_file = options['absolute_input_analysis_file']
input_file = os.path.basename(full_input_file)
elif 'rel_input_analysis_file' in options:
full_input_file = os.path.join(options['project_directory'],
options['rel_input_analysis_file'])
input_file = os.path.basename(options['rel_input_analysis_file'])
elif 'result_name_file' in options:
full_result_name_file = os.path.join(options['project_directory'],
options['result_name_file'])
if os.path.isfile(full_result_name_file):
print ('Reading Result Sequence File Name from Provided '
+ 'File: %s' % full_result_name_file )
else:
print_exit('Provided File: %s Containing' % full_result_name_file
+ ' Result Sequence File Name Not Found.')
rf = open(full_result_name_file, 'r')
full_input_file = rf.read().strip()
rf.close()
if os.path.isfile(full_input_file):
print 'Read Result Sequence File Name: %s' % full_input_file
print 'File Found!'
print ''
else:
print_exit('Cannot Find Read Result Sequence File: %s' % full_input_file)
input_file = os.path.basename(full_input_file)
#Find/Validate BUSCO File Selection
if 'BUSCO_file' in options:
full_BUSCO_file_name = options['BUSCO_file']
print 'BUSCO File: %s Given.' % options['BUSCO_file']
elif 'BUSCO_type' in options:
if 'BUSCO_location' not in options or not options['BUSCO_location']:
print_exit('BUSCO_type: %s Given ' % options['BUSCO_type']
+ 'but BUSCO_location not given.')
if not os.path.isdir(options['BUSCO_location']):
print_exit('BUSCO File Location: %s Not Found.' % options['BUSCO_location'])
BUSCO_type = lowercase(options['BUSCO_type'])
if BUSCO_type in BUSCO_FILES:
print 'BUSCO File Type: %s Provided.' % BUSCO_type
full_BUSCO_file_name = os.path.join(options['BUSCO_location'],
BUSCO_FILES[BUSCO_type])
else:
print_exit([('Selected BUSCO Type: %s Not Available.' % BUSCO_type),
'Please Select from Types:', ', '.join(BUSCO_FILES.keys())])
#If Selected BUSCO File Not Yet Unzipped, Unzip it
if not os.path.isfile(full_BUSCO_file_name) and os.path.isfile(full_BUSCO_file_name +'.gz'):
print ('\nSelected BUSCO File: %s' % full_BUSCO_file_name
+ 'Found in Zipped Format: %s' % full_BUSCO_file_name + '.gz')
print 'Unzipping...'
print ''
sys.stdout.flush()
with gzip.open(full_BUSCO_file_name + '.gz', 'r') as zipped_BUSCO, \
open(full_BUSCO_file_name, 'w') as unzipped_BUSCO:
unzipped_BUSCO.writelines(zipped_BUSCO)
#Ensure Provided/Selected BUSCO File Exists
if os.path.isfile(full_BUSCO_file_name):
print 'Selected BUSCO File Found: %s' % full_BUSCO_file_name
if 'BUSCO_file' not in options:
options['BUSCO_file'] = full_BUSCO_file_name
else:
print_exit('Selected BUSCO File: %s Cannot Be Found.' % full_BUSCO_file_name)
#Check that Input File Exists
if not os.path.isfile(full_input_file):
print_exit('Input File: %s Not Found.' % full_input_file)
#If Selected, Copy Input File to Working Directory
if options['copy_input_file']:
print ('Copying Input File: %s' % input_file
+ ' to Working Directory: %s' % options['working_directory'])
working_input_file = os.path.join(options['working_directory'], input_file)
shutil.copyfile(full_input_file, working_input_file)
if not os.path.isfile(working_input_file):
print_exit('Copying of File: %s to Name: %s Unsuccesful.' % (full_input_file,
working_input_file))
else:
print 'Using Input File: %s' % full_input_file
working_input_file = full_input_file
#Prepare Blast Database Name
if 'BUSCO_type' in options:
title='BUSCO_' + options['BUSCO_type'].title()
else:
BUSCO_file_name = os.path.basename(options['BUSCO_file'])
if BUSCO_file_name in BUSCO_FILES.values():
for name in BUSCO_FILES:
if BUSCO_file_name == BUSCO_FILES[name]:
title = 'BUSCO_' + name.title()
break
else:
title = 'BUSCO'
#Prepare Blast Database
db_command_list = options['blast_db_command_list'][:]
db_command_list += ['-in', full_BUSCO_file_name, '-dbtype', 'prot', '-title', title,
'-out', title]
db_command = ' '.join(db_command_list)
if options['write_command']:
command_file = os.path.join(options['working_directory'],
'BUSCO_Make_DB.auto.sh')
write_file(command_file, '#!/bin/sh\n' + db_command)
print ''
print 'Running Command:\n ' + db_command
sys.stdout.flush()
try:
process = subprocess.Popen(db_command_list, stdout=sys.stdout, stderr=sys.stderr,
cwd=options['working_directory'])
if options['write_pid']:
pid_file_name = os.path.join(options['working_directory'],
options['job_type'] + '.auto.pid')
write_file(pid_file_name, str(process.pid))
process.wait()
if options['write_pid']:
delete_pid_file(pid_file_name)
sys.stdout.flush()
except KeyboardInterrupt:
if __name__ != '__main__' and options['is_pipe']:
sys.stdout, sys.stderr = terminal_out, terminal_error
out_file_stream.close()
print 'Killing %s Process.' % JOB_TYPE
process.kill()
raise
#Prepare BLAST Sequence Comparison Command
command_list = list(options['blast_command_list'])
command_list += ['-db', title, '-query', full_input_file, '-outfmt', '6', '-evalue',
options['evalue'], '-num_threads', options['max_CPU'], '-out',
options['blast_result_file']]
command = ' '.join(command_list)
#If Selected, Write Command to File
if options['write_command']:
command_file = os.path.join(options['working_directory'], 'BUSCO_blastx.auto.sh')
write_file(command_file, '#!/bin/sh\n' + command)
#Perform BLAST Sequence Comparisons
print ''
print 'Running Command:\n ' + command
sys.stdout.flush()
try:
process = subprocess.Popen(command_list, stdout=sys.stdout, stderr=sys.stderr,
cwd=options['working_directory'])
if options['write_pid']:
pid_file_name = os.path.join(options['working_directory'],
options['job_type'] + '.auto.pid')
write_file(pid_file_name, str(process.pid))
process.wait()
if options['write_pid']:
delete_pid_file(pid_file_name)
sys.stdout.flush()
except KeyboardInterrupt:
if __name__ != '__main__' and options['is_pipe']:
sys.stdout, sys.stderr = terminal_out, terminal_error
out_file_stream.close()
print 'Killing %s Process.' % JOB_TYPE
process.kill()
raise
print ''
print 'Blast Completed with Out File: %s' % options['blast_result_file']
print ''
analyze(options)
print ''
print 'BUSCO Benchmarking Analysis Complete'
if __name__ != '__main__' and options['is_pipe']:
sys.stdout, sys.stderr = terminal_out, terminal_error
out_file_stream.close()
#Analyze Results of Sequence Comparison
def analyze(options):
analysis = 'Analyzing BUSCO Recapture BLAST Result.\n\n'
#Ensure Required Settings in Options
for required_option in REQUIRED_ANALYSIS_SETTINGS:
if required_option not in options:
print_exit('Required Option: %s for %s not given.' % (required_option, JOB_TYPE))
#Ensure a BUSCO file or type is given
if not any(x in options for x in ['BUSCO_file', 'BUSCO_type']):
print_exit('Either BUSCO_file or BUSCO_type parameter required.')
#Ensure Working Directory Exists
if not os.path.isdir(options['working_directory']):
print_exit('Working Directory: %s Not Found.' % options['working_directory'])
#Find/Validate BUSCO File Selection
if 'BUSCO_file' in options:
full_BUSCO_file_name = options['BUSCO_file']
print 'BUSCO File: %s Given.' % options['BUSCO_file']
elif 'BUSCO_type' in options:
if 'BUSCO_location' not in options or not options['BUSCO_location']:
print_exit('BUSCO_type: %s Given ' % options['BUSCO_type']
+ 'but BUSCO_location not given.')
if not os.path.isdir(options['BUSCO_location']):
print_exit('BUSCO File Location: %s Not Found.' % options['BUSCO_location'])
BUSCO_type = lowercase(options['BUSCO_type'])
if BUSCO_type in BUSCO_FILES:
print 'BUSCO File Type: %s Provided.' % BUSCO_type
full_BUSCO_file_name = os.path.join(options['BUSCO_location'],
BUSCO_FILES[BUSCO_type])
else:
print_exit([('Selected BUSCO Type: %s Not Available.' % BUSCO_type),
'Please Select from Types:', ', '.join(BUSCO_FILES.keys())])
#Ensure Provided/Selected BUSCO File Exists
if os.path.isfile(full_BUSCO_file_name):
print 'Selected BUSCO File Found: %s' % full_BUSCO_file_name
else:
print_exit('Selected BUSCO File: %s Cannot Be Found.' % full_BUSCO_file_name)
full_blast = os.path.join(options['working_directory'], options['blast_result_file'])
#Ensure Blast Output File Exists
if not os.path.isfile(full_blast):
print_exit('Blast Result File: %s Not Found.' % full_blast)
analysis = '\nAnalyzing Blast Result File %s\n' % full_blast
analysis += ' With BUSCO file: %s\n' % full_BUSCO_file_name
#Read Expected Genes
BUSCO_sequences = {}
genes = {}
BUSCO_file = open(full_BUSCO_file_name, 'r')
for line in BUSCO_file:
if line.startswith('>'):
split_line = line.lstrip('>').split()
sequence = split_line[0]
gene = split_line[1]
BUSCO_sequences[sequence] = gene
genes[gene] = False
BUSCO_file.close()
expected_gene_count = len(genes)
analysis += '\nExpected Genes: %i\n' % expected_gene_count
cutoff_float = float(options['evalue_cutoff'])
#Read Blast File Outputs and Count Genes Found Over Threshold
blast_file = open(full_blast, 'r')
for (line_number, line) in enumerate(blast_file, start=1):
split_line = line.split()
if not split_line:
print_exit('Blank Line Found in Blast Results File at Line Number %i' % line_number)
elif len(split_line) < 11:
print_exit([('Problem with formatting of line number %i ' % line_number
+ 'in blast results file: %s' % full_blast), 'Line:', line.strip()])
sequence = split_line[0]
BUSCO_sequence = split_line[1]
if BUSCO_sequence in BUSCO_sequences:
gene = BUSCO_sequences[BUSCO_sequence]
else:
print_except(['Unexpected BUSCO Sequence Hit: %s Found.' % BUSCO_sequence,
'Cannot Identify Gene.'])
e_score_string = split_line[10]
e_score = float(e_score_string)
#Mark Gene as Present if Hit Exists over Threshold Value
if e_score <= cutoff_float:
if options['print_matches'] and not genes[gene]:
analysis += 'Match: %s %s %s %s\n' % (sequence, BUSCO_sequence, gene, e_score_string)
genes[gene] = True
#Count Number of Found and Missing Genes
found_gene_count = 0
missing_genes = []
for gene in genes:
if genes[gene]:
found_gene_count += 1
else:
missing_genes.append(gene)
missing_gene_count = len(missing_genes)
#Ensure that Found/Missing Genes Sums to Expected Total
if missing_gene_count + found_gene_count != expected_gene_count:
print_except('PROBLEM!, Found: %i + ' % found_gene_count
+ 'Missing: %i Genes != Expected: %i' % (missing_gene_count,
expected_gene_count))
#Report Results
analysis += 'Genes Found: %i\n' % found_gene_count
analysis += 'Genes Missing: %i\n' % missing_gene_count
if options['print_missing_genes'] and missing_genes:
analysis += 'Missing Genes: ' + ' '.join(missing_genes) + '\n'
percent = percent_string(found_gene_count, expected_gene_count)
analysis += 'Percent BUSCO Genes Present: %s\n' % percent
headers = ['Analys.', 'Cutoff', 'Expect.', 'Found', 'Missing', 'Total', 'Percent']
data_grid = ['BUSCO', options['evalue_cutoff'], expected_gene_count, found_gene_count,
missing_gene_count, expected_gene_count, percent]
formatted_data = [str(x) for x in data_grid]
analysis += '\n'
analysis += 'Tab Separated Output:\n'
analysis += '\t'.join(headers) + '\n'
analysis += '\t'.join(formatted_data) + '\n'
report_dict = dict(zip(headers, formatted_data))
report_dict['report_type'] = 'recapture'
#If Selected, Write Analysis Report
if options['write_report']:
report_file = os.path.join(options['working_directory'],
JOB_TYPE + '.report')
write_report(report_file, report_dict)
print analysis
return analysis
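# Note added for clarity (not in the original source): with "-outfmt 6" BLAST
# writes 12 tab-separated columns per hit
# (qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore),
# which is why split_line[0] above is the query sequence, split_line[1] the BUSCO
# sequence hit, and split_line[10] the e-value compared against evalue_cutoff.
# As a hypothetical example, a run finding hits below the cutoff for 2450 of
# 3000 expected genes would report a recapture of roughly 81.7%.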
| gpl-2.0 | -9,137,028,453,921,034,000 | 39.813499 | 104 | 0.581817 | false | 3.697184 | false | false | false |
infinity0n3/python-fabtotum | fabtotum/loaders/gerber/excellon_statements.py | 1 | 31011 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# copyright 2014 Hamilton Kibbe <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Excellon Statements
====================
**Excellon file statement classes**
"""
import re
import uuid
from .utils import (parse_gerber_value, write_gerber_value, decimal_string,
inch, metric)
__all__ = ['ExcellonTool', 'ToolSelectionStmt', 'CoordinateStmt',
'CommentStmt', 'HeaderBeginStmt', 'HeaderEndStmt',
'RewindStopStmt', 'EndOfProgramStmt', 'UnitStmt',
'IncrementalModeStmt', 'VersionStmt', 'FormatStmt', 'LinkToolStmt',
'MeasuringModeStmt', 'RouteModeStmt', 'LinearModeStmt', 'DrillModeStmt',
'AbsoluteModeStmt', 'RepeatHoleStmt', 'UnknownStmt',
'ExcellonStatement', 'ZAxisRoutPositionStmt',
'RetractWithClampingStmt', 'RetractWithoutClampingStmt',
'CutterCompensationOffStmt', 'CutterCompensationLeftStmt',
'CutterCompensationRightStmt', 'ZAxisInfeedRateStmt',
'NextToolSelectionStmt', 'SlotStmt']
class ExcellonStatement(object):
""" Excellon Statement abstract base class
"""
@classmethod
def from_excellon(cls, line):
raise NotImplementedError('from_excellon must be implemented in a '
'subclass')
def __init__(self, unit='inch', id=None):
self.units = unit
self.id = uuid.uuid4().int if id is None else id
def to_excellon(self, settings=None):
raise NotImplementedError('to_excellon must be implemented in a '
'subclass')
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
def offset(self, x_offset=0, y_offset=0):
pass
def __eq__(self, other):
return self.__dict__ == other.__dict__
class ExcellonTool(ExcellonStatement):
""" Excellon Tool class
Parameters
----------
settings : FileSettings (dict-like)
File-wide settings.
kwargs : dict-like
Tool settings from the excellon statement. Valid keys are:
- `diameter` : Tool diameter [expressed in file units]
- `rpm` : Tool RPM
- `feed_rate` : Z-axis tool feed rate
- `retract_rate` : Z-axis tool retraction rate
- `max_hit_count` : Number of hits allowed before a tool change
- `depth_offset` : Offset of tool depth from tip of tool.
Attributes
----------
number : integer
Tool number from the excellon file
diameter : float
Tool diameter in file units
rpm : float
Tool RPM
feed_rate : float
Tool Z-axis feed rate.
retract_rate : float
Tool Z-axis retract rate
depth_offset : float
Offset of depth measurement from tip of tool
max_hit_count : integer
Maximum number of tool hits allowed before a tool change
hit_count : integer
Number of tool hits in excellon file.
"""
PLATED_UNKNOWN = None
PLATED_YES = 'plated'
PLATED_NO = 'nonplated'
PLATED_OPTIONAL = 'optional'
@classmethod
def from_tool(cls, tool):
args = {}
args['depth_offset'] = tool.depth_offset
args['diameter'] = tool.diameter
args['feed_rate'] = tool.feed_rate
args['max_hit_count'] = tool.max_hit_count
args['number'] = tool.number
args['plated'] = tool.plated
args['retract_rate'] = tool.retract_rate
args['rpm'] = tool.rpm
return cls(None, **args)
@classmethod
def from_excellon(cls, line, settings, id=None, plated=None):
""" Create a Tool from an excellon file tool definition line.
Parameters
----------
line : string
Tool definition line from an excellon file.
settings : FileSettings (dict-like)
Excellon file-wide settings
Returns
-------
tool : Tool
An ExcellonTool representing the tool defined in `line`
"""
commands = re.split('([BCFHSTZ])', line)[1:]
commands = [(command, value) for command, value in pairwise(commands)]
args = {}
args['id'] = id
nformat = settings.format
zero_suppression = settings.zero_suppression
for cmd, val in commands:
if cmd == 'B':
args['retract_rate'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'C':
args['diameter'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'F':
args['feed_rate'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'H':
args['max_hit_count'] = parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'S':
args['rpm'] = 1000 * parse_gerber_value(val, nformat, zero_suppression)
elif cmd == 'T':
args['number'] = int(val)
elif cmd == 'Z':
args['depth_offset'] = parse_gerber_value(val, nformat, zero_suppression)
if plated != ExcellonTool.PLATED_UNKNOWN:
# Sometimes we can parse the plating status
args['plated'] = plated
return cls(settings, **args)
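# Illustrative example (not from the original source): a header definition such
# as "T01C0.0236" parsed with ExcellonTool.from_excellon(line, settings) yields
# a tool with number=1 and diameter=0.0236 in the file's units; optional
# B/F/H/S/Z fields populate retract_rate, feed_rate, max_hit_count, rpm and
# depth_offset in the same way.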
@classmethod
def from_dict(cls, settings, tool_dict):
""" Create an ExcellonTool from a dict.
Parameters
----------
settings : FileSettings (dict-like)
Excellon File-wide settings
tool_dict : dict
Excellon tool parameters as a dict
Returns
-------
tool : ExcellonTool
An ExcellonTool initialized with the parameters in tool_dict.
"""
return cls(settings, **tool_dict)
def __init__(self, settings, **kwargs):
if kwargs.get('id') is not None:
super(ExcellonTool, self).__init__(id=kwargs.get('id'))
self.settings = settings
self.number = kwargs.get('number')
self.feed_rate = kwargs.get('feed_rate')
self.retract_rate = kwargs.get('retract_rate')
self.rpm = kwargs.get('rpm')
self.diameter = kwargs.get('diameter')
self.max_hit_count = kwargs.get('max_hit_count')
self.depth_offset = kwargs.get('depth_offset')
self.plated = kwargs.get('plated')
self.hit_count = 0
def to_excellon(self, settings=None):
if self.settings and not settings:
settings = self.settings
fmt = settings.format
zs = settings.zero_suppression
stmt = 'T%02d' % self.number
if self.retract_rate is not None:
stmt += 'B%s' % write_gerber_value(self.retract_rate, fmt, zs)
if self.feed_rate is not None:
stmt += 'F%s' % write_gerber_value(self.feed_rate, fmt, zs)
if self.max_hit_count is not None:
stmt += 'H%s' % write_gerber_value(self.max_hit_count, fmt, zs)
if self.rpm is not None:
if self.rpm < 100000.:
stmt += 'S%s' % write_gerber_value(self.rpm / 1000., fmt, zs)
else:
stmt += 'S%g' % (self.rpm / 1000.)
if self.diameter is not None:
stmt += 'C%s' % decimal_string(self.diameter, fmt[1], True)
if self.depth_offset is not None:
stmt += 'Z%s' % write_gerber_value(self.depth_offset, fmt, zs)
return stmt
def to_inch(self):
if self.settings.units != 'inch':
self.settings.units = 'inch'
if self.diameter is not None:
self.diameter = inch(self.diameter)
def to_metric(self):
if self.settings.units != 'metric':
self.settings.units = 'metric'
if self.diameter is not None:
self.diameter = metric(self.diameter)
def _hit(self):
self.hit_count += 1
def equivalent(self, other):
"""
Is the other tool equal to this, ignoring the tool number, and other file specified properties
"""
if type(self) != type(other):
return False
return (self.diameter == other.diameter
and self.feed_rate == other.feed_rate
and self.retract_rate == other.retract_rate
and self.rpm == other.rpm
and self.depth_offset == other.depth_offset
and self.max_hit_count == other.max_hit_count
and self.plated == other.plated
and self.settings.units == other.settings.units)
def __repr__(self):
unit = 'in.' if self.settings.units == 'inch' else 'mm'
fmtstr = '<ExcellonTool %%02d: %%%d.%dg%%s dia.>' % self.settings.format
return fmtstr % (self.number, self.diameter, unit)
class ToolSelectionStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
""" Create a ToolSelectionStmt from an excellon file line.
Parameters
----------
line : string
Line from an Excellon file
Returns
-------
tool_statement : ToolSelectionStmt
ToolSelectionStmt representation of `line.`
"""
line = line[1:]
compensation_index = None
# up to 3 characters for tool number (Fritzing uses that)
if len(line) <= 3:
tool = int(line)
else:
tool = int(line[:2])
compensation_index = int(line[2:])
return cls(tool, compensation_index, **kwargs)
def __init__(self, tool, compensation_index=None, **kwargs):
super(ToolSelectionStmt, self).__init__(**kwargs)
tool = int(tool)
compensation_index = (int(compensation_index) if compensation_index
is not None else None)
self.tool = tool
self.compensation_index = compensation_index
def to_excellon(self, settings=None):
stmt = 'T%02d' % self.tool
if self.compensation_index is not None:
stmt += '%02d' % self.compensation_index
return stmt
class NextToolSelectionStmt(ExcellonStatement):
# TODO the statement exists outside of the context of the file,
# so it is impossible to know that it is really the next tool
def __init__(self, cur_tool, next_tool, **kwargs):
"""
Select the next tool in the wheel.
Parameters
----------
cur_tool : the tool that is currently selected
next_tool : the tool that is now selected
"""
super(NextToolSelectionStmt, self).__init__(**kwargs)
self.cur_tool = cur_tool
self.next_tool = next_tool
def to_excellon(self, settings=None):
stmt = 'M00'
return stmt
class ZAxisInfeedRateStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
""" Create a ZAxisInfeedRate from an excellon file line.
Parameters
----------
line : string
Line from an Excellon file
Returns
-------
z_axis_infeed_rate : ToolSelectionStmt
ToolSelectionStmt representation of `line.`
"""
rate = int(line[1:])
return cls(rate, **kwargs)
def __init__(self, rate, **kwargs):
super(ZAxisInfeedRateStmt, self).__init__(**kwargs)
self.rate = rate
def to_excellon(self, settings=None):
return 'F%02d' % self.rate
class CoordinateStmt(ExcellonStatement):
@classmethod
def from_point(cls, point, mode=None):
stmt = cls(point[0], point[1])
if mode:
stmt.mode = mode
return stmt
@classmethod
def from_excellon(cls, line, settings, **kwargs):
x_coord = None
y_coord = None
if line[0] == 'X':
splitline = line.strip('X').split('Y')
x_coord = parse_gerber_value(splitline[0], settings.format,
settings.zero_suppression)
if len(splitline) == 2:
y_coord = parse_gerber_value(splitline[1], settings.format,
settings.zero_suppression)
else:
y_coord = parse_gerber_value(line.strip(' Y'), settings.format,
settings.zero_suppression)
c = cls(x_coord, y_coord, **kwargs)
c.units = settings.units
return c
def __init__(self, x=None, y=None, **kwargs):
super(CoordinateStmt, self).__init__(**kwargs)
self.x = x
self.y = y
self.mode = None
def to_excellon(self, settings):
stmt = ''
if self.mode == "ROUT":
stmt += "G00"
if self.mode == "LINEAR":
stmt += "G01"
if self.x is not None:
stmt += 'X%s' % write_gerber_value(self.x, settings.format,
settings.zero_suppression)
if self.y is not None:
stmt += 'Y%s' % write_gerber_value(self.y, settings.format,
settings.zero_suppression)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x is not None:
self.x = inch(self.x)
if self.y is not None:
self.y = inch(self.y)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x is not None:
self.x = metric(self.x)
if self.y is not None:
self.y = metric(self.y)
def offset(self, x_offset=0, y_offset=0):
if self.x is not None:
self.x += x_offset
if self.y is not None:
self.y += y_offset
def __str__(self):
coord_str = ''
if self.x is not None:
coord_str += 'X: %g ' % self.x
if self.y is not None:
coord_str += 'Y: %g ' % self.y
return '<Coordinate Statement: %s>' % coord_str
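# Illustrative example (not from the original source): coordinate values written
# with an explicit decimal point are taken as-is, so
#   CoordinateStmt.from_excellon("X1.5Y2.25", settings)
# produces a statement with x=1.5 and y=2.25; values without a decimal point are
# scaled according to settings.format and settings.zero_suppression.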
class RepeatHoleStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, settings, **kwargs):
match = re.compile(r'R(?P<rcount>[0-9]*)X?(?P<xdelta>[+\-]?\d*\.?\d*)?Y?'
'(?P<ydelta>[+\-]?\d*\.?\d*)?').match(line)
stmt = match.groupdict()
count = int(stmt['rcount'])
xdelta = (parse_gerber_value(stmt['xdelta'], settings.format,
settings.zero_suppression)
if stmt['xdelta'] != '' else None)
ydelta = (parse_gerber_value(stmt['ydelta'], settings.format,
settings.zero_suppression)
if stmt['ydelta'] != '' else None)
c = cls(count, xdelta, ydelta, **kwargs)
c.units = settings.units
return c
def __init__(self, count, xdelta=0.0, ydelta=0.0, **kwargs):
super(RepeatHoleStmt, self).__init__(**kwargs)
self.count = count
self.xdelta = xdelta
self.ydelta = ydelta
def to_excellon(self, settings):
stmt = 'R%d' % self.count
if self.xdelta is not None and self.xdelta != 0.0:
stmt += 'X%s' % write_gerber_value(self.xdelta, settings.format,
settings.zero_suppression)
if self.ydelta is not None and self.ydelta != 0.0:
stmt += 'Y%s' % write_gerber_value(self.ydelta, settings.format,
settings.zero_suppression)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.xdelta is not None:
self.xdelta = inch(self.xdelta)
if self.ydelta is not None:
self.ydelta = inch(self.ydelta)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.xdelta is not None:
self.xdelta = metric(self.xdelta)
if self.ydelta is not None:
self.ydelta = metric(self.ydelta)
def __str__(self):
return '<Repeat Hole: %d times, offset X: %g Y: %g>' % (
self.count,
self.xdelta if self.xdelta is not None else 0,
self.ydelta if self.ydelta is not None else 0)
class CommentStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
return cls(line.lstrip(';'))
def __init__(self, comment, **kwargs):
super(CommentStmt, self).__init__(**kwargs)
self.comment = comment
def to_excellon(self, settings=None):
return ';%s' % self.comment
class HeaderBeginStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(HeaderBeginStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M48'
class HeaderEndStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(HeaderEndStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M95'
class RewindStopStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RewindStopStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return '%'
class ZAxisRoutPositionStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(ZAxisRoutPositionStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M15'
class RetractWithClampingStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RetractWithClampingStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M16'
class RetractWithoutClampingStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RetractWithoutClampingStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'M17'
class CutterCompensationOffStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(CutterCompensationOffStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G40'
class CutterCompensationLeftStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(CutterCompensationLeftStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G41'
class CutterCompensationRightStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(CutterCompensationRightStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G42'
class EndOfProgramStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, settings, **kwargs):
match = re.compile(r'M30X?(?P<x>\d*\.?\d*)?Y?'
'(?P<y>\d*\.?\d*)?').match(line)
stmt = match.groupdict()
x = (parse_gerber_value(stmt['x'], settings.format,
settings.zero_suppression)
             if stmt['x'] != '' else None)
y = (parse_gerber_value(stmt['y'], settings.format,
settings.zero_suppression)
             if stmt['y'] != '' else None)
c = cls(x, y, **kwargs)
c.units = settings.units
return c
def __init__(self, x=None, y=None, **kwargs):
super(EndOfProgramStmt, self).__init__(**kwargs)
self.x = x
self.y = y
def to_excellon(self, settings=None):
stmt = 'M30'
if self.x is not None:
stmt += 'X%s' % write_gerber_value(self.x)
if self.y is not None:
stmt += 'Y%s' % write_gerber_value(self.y)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x is not None:
self.x = inch(self.x)
if self.y is not None:
self.y = inch(self.y)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x is not None:
self.x = metric(self.x)
if self.y is not None:
self.y = metric(self.y)
def offset(self, x_offset=0, y_offset=0):
if self.x is not None:
self.x += x_offset
if self.y is not None:
self.y += y_offset
class UnitStmt(ExcellonStatement):
@classmethod
def from_settings(cls, settings):
"""Create the unit statement from the FileSettings"""
return cls(settings.units, settings.zeros)
@classmethod
def from_excellon(cls, line, **kwargs):
units = 'inch' if 'INCH' in line else 'metric'
zeros = 'leading' if 'LZ' in line else 'trailing'
if '0000.00' in line:
format = (4, 2)
elif '000.000' in line:
format = (3, 3)
elif '00.0000' in line:
format = (2, 4)
else:
format = None
return cls(units, zeros, format, **kwargs)
def __init__(self, units='inch', zeros='leading', format=None, **kwargs):
super(UnitStmt, self).__init__(**kwargs)
self.units = units.lower()
self.zeros = zeros
self.format = format
def to_excellon(self, settings=None):
# TODO This won't export the invalid format statement if it exists
stmt = '%s,%s' % ('INCH' if self.units == 'inch' else 'METRIC',
'LZ' if self.zeros == 'leading'
else 'TZ')
return stmt
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
class IncrementalModeStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
return cls('off', **kwargs) if 'OFF' in line else cls('on', **kwargs)
def __init__(self, mode='off', **kwargs):
super(IncrementalModeStmt, self).__init__(**kwargs)
if mode.lower() not in ['on', 'off']:
raise ValueError('Mode may be "on" or "off"')
self.mode = mode
def to_excellon(self, settings=None):
return 'ICI,%s' % ('OFF' if self.mode == 'off' else 'ON')
class VersionStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
version = int(line.split(',')[1])
return cls(version, **kwargs)
def __init__(self, version=1, **kwargs):
super(VersionStmt, self).__init__(**kwargs)
version = int(version)
if version not in [1, 2]:
raise ValueError('Valid versions are 1 or 2')
self.version = version
def to_excellon(self, settings=None):
return 'VER,%d' % self.version
class FormatStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
fmt = int(line.split(',')[1])
return cls(fmt, **kwargs)
def __init__(self, format=1, **kwargs):
super(FormatStmt, self).__init__(**kwargs)
format = int(format)
if format not in [1, 2]:
raise ValueError('Valid formats are 1 or 2')
self.format = format
def to_excellon(self, settings=None):
return 'FMAT,%d' % self.format
@property
def format_tuple(self):
return (self.format, 6 - self.format)
class LinkToolStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
linked = [int(tool) for tool in line.split('/')]
return cls(linked, **kwargs)
def __init__(self, linked_tools, **kwargs):
super(LinkToolStmt, self).__init__(**kwargs)
self.linked_tools = [int(x) for x in linked_tools]
def to_excellon(self, settings=None):
return '/'.join([str(x) for x in self.linked_tools])
class MeasuringModeStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
if not ('M71' in line or 'M72' in line):
raise ValueError('Not a measuring mode statement')
return cls('inch', **kwargs) if 'M72' in line else cls('metric', **kwargs)
def __init__(self, units='inch', **kwargs):
super(MeasuringModeStmt, self).__init__(**kwargs)
units = units.lower()
if units not in ['inch', 'metric']:
raise ValueError('units must be "inch" or "metric"')
self.units = units
def to_excellon(self, settings=None):
return 'M72' if self.units == 'inch' else 'M71'
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
class RouteModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(RouteModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G00'
class LinearModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(LinearModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G01'
class DrillModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(DrillModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G05'
class AbsoluteModeStmt(ExcellonStatement):
def __init__(self, **kwargs):
super(AbsoluteModeStmt, self).__init__(**kwargs)
def to_excellon(self, settings=None):
return 'G90'
class UnknownStmt(ExcellonStatement):
@classmethod
def from_excellon(cls, line, **kwargs):
return cls(line, **kwargs)
def __init__(self, stmt, **kwargs):
super(UnknownStmt, self).__init__(**kwargs)
self.stmt = stmt
def to_excellon(self, settings=None):
return self.stmt
def __str__(self):
return "<Unknown Statement: %s>" % self.stmt
class SlotStmt(ExcellonStatement):
"""
G85 statement. Defines a slot created by multiple drills between two specified points.
    Format is two coordinates, split by G85 in the middle, for example, XnYnG85XnYn
"""
@classmethod
def from_points(cls, start, end):
return cls(start[0], start[1], end[0], end[1])
@classmethod
def from_excellon(cls, line, settings, **kwargs):
# Split the line based on the G85 separator
sub_coords = line.split('G85')
(x_start_coord, y_start_coord) = SlotStmt.parse_sub_coords(sub_coords[0], settings)
(x_end_coord, y_end_coord) = SlotStmt.parse_sub_coords(sub_coords[1], settings)
# Some files seem to specify only one of the coordinates
        if x_end_coord is None:
x_end_coord = x_start_coord
        if y_end_coord is None:
y_end_coord = y_start_coord
c = cls(x_start_coord, y_start_coord, x_end_coord, y_end_coord, **kwargs)
c.units = settings.units
return c
@staticmethod
def parse_sub_coords(line, settings):
x_coord = None
y_coord = None
if line[0] == 'X':
splitline = line.strip('X').split('Y')
x_coord = parse_gerber_value(splitline[0], settings.format,
settings.zero_suppression)
if len(splitline) == 2:
y_coord = parse_gerber_value(splitline[1], settings.format,
settings.zero_suppression)
else:
y_coord = parse_gerber_value(line.strip(' Y'), settings.format,
settings.zero_suppression)
return (x_coord, y_coord)
def __init__(self, x_start=None, y_start=None, x_end=None, y_end=None, **kwargs):
super(SlotStmt, self).__init__(**kwargs)
self.x_start = x_start
self.y_start = y_start
self.x_end = x_end
self.y_end = y_end
self.mode = None
def to_excellon(self, settings):
stmt = ''
if self.x_start is not None:
stmt += 'X%s' % write_gerber_value(self.x_start, settings.format,
settings.zero_suppression)
if self.y_start is not None:
stmt += 'Y%s' % write_gerber_value(self.y_start, settings.format,
settings.zero_suppression)
stmt += 'G85'
if self.x_end is not None:
stmt += 'X%s' % write_gerber_value(self.x_end, settings.format,
settings.zero_suppression)
if self.y_end is not None:
stmt += 'Y%s' % write_gerber_value(self.y_end, settings.format,
settings.zero_suppression)
return stmt
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x_start is not None:
self.x_start = inch(self.x_start)
if self.y_start is not None:
self.y_start = inch(self.y_start)
if self.x_end is not None:
self.x_end = inch(self.x_end)
if self.y_end is not None:
self.y_end = inch(self.y_end)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x_start is not None:
self.x_start = metric(self.x_start)
if self.y_start is not None:
self.y_start = metric(self.y_start)
if self.x_end is not None:
self.x_end = metric(self.x_end)
if self.y_end is not None:
self.y_end = metric(self.y_end)
def offset(self, x_offset=0, y_offset=0):
if self.x_start is not None:
self.x_start += x_offset
if self.y_start is not None:
self.y_start += y_offset
if self.x_end is not None:
self.x_end += x_offset
if self.y_end is not None:
self.y_end += y_offset
def __str__(self):
start_str = ''
if self.x_start is not None:
start_str += 'X: %g ' % self.x_start
if self.y_start is not None:
start_str += 'Y: %g ' % self.y_start
end_str = ''
if self.x_end is not None:
end_str += 'X: %g ' % self.x_end
if self.y_end is not None:
end_str += 'Y: %g ' % self.y_end
return '<Slot Statement: %s to %s>' % (start_str, end_str)
def pairwise(iterator):
""" Iterate over list taking two elements at a time.
e.g. [1, 2, 3, 4, 5, 6] ==> [(1, 2), (3, 4), (5, 6)]
"""
itr = iter(iterator)
while True:
yield tuple([next(itr) for i in range(2)])
| gpl-3.0 | 4,613,957,387,060,365,000 | 30.708589 | 102 | 0.557447 | false | 3.721022 | false | false | false |
gmimano/commcaretest | corehq/apps/export/custom_export_helpers.py | 1 | 14645 | import json
from corehq.apps.reports.standard import export
from corehq.apps.reports.models import FormExportSchema, HQGroupExportConfiguration, CaseExportSchema
from corehq.apps.reports.standard.export import DeidExportReport
from couchexport.models import ExportTable, ExportSchema, ExportColumn
from django.utils.translation import ugettext as _
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.commtrack.models import StockExportColumn
from corehq.apps.domain.models import Domain
USERNAME_TRANSFORM = 'corehq.apps.export.transforms.user_id_to_username'
OWNERNAME_TRANSFORM = 'corehq.apps.export.transforms.owner_id_to_display'
CASENAME_TRANSFORM = 'corehq.apps.export.transforms.case_id_to_case_name'
class AbstractProperty(object):
def __get__(self, instance, owner):
raise NotImplementedError()
class CustomExportHelper(object):
ExportSchemaClass = AbstractProperty()
ExportReport = AbstractProperty()
export_title = AbstractProperty()
allow_deid = False
allow_repeats = True
subclasses_map = {} # filled in below
export_type = 'form'
@property
def default_order(self):
return {}
@classmethod
def make(cls, request, export_type, domain=None, export_id=None):
export_type = export_type or request.GET.get('request_type', 'form')
return cls.subclasses_map[export_type](request, domain, export_id=export_id)
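    # Illustrative dispatch (a sketch, not original code; 'demo' is a made-up
    # domain). `make` looks up the concrete helper in `subclasses_map`, which
    # is filled in at the bottom of this module:
    #
    #     helper = CustomExportHelper.make(request, 'form', domain='demo')
    #     # -> FormCustomExportHelper(request, 'demo', export_id=None)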
def update_custom_params(self):
if len(self.custom_export.tables) > 0:
if self.export_stock:
self.custom_export.tables[0].columns.append(
StockExportColumn(domain=self.domain, index='_id')
)
def format_config_for_javascript(self, table_configuration):
return table_configuration
def has_stock_column(self):
return any(
col.doc_type == 'StockExportColumn'
for col in self.custom_export.tables[0].columns
)
class DEID(object):
options = (
('', ''),
(_('Sensitive ID'), 'couchexport.deid.deid_ID'),
(_('Sensitive Date'), 'couchexport.deid.deid_date'),
)
json_options = [{'label': label, 'value': value}
for label, value in options]
def __init__(self, request, domain, export_id=None):
self.request = request
self.domain = domain
self.presave = False
self.transform_dates = False
self.creating_new_export = not bool(export_id)
if export_id:
self.custom_export = self.ExportSchemaClass.get(export_id)
# also update the schema to include potential new stuff
self.custom_export.update_schema()
# enable configuring saved exports from this page
saved_group = HQGroupExportConfiguration.get_for_domain(self.domain)
self.presave = export_id in saved_group.custom_export_ids
self.export_stock = self.has_stock_column()
assert(self.custom_export.doc_type == 'SavedExportSchema')
assert(self.custom_export.type == self.export_type)
assert(self.custom_export.index[0] == domain)
else:
self.custom_export = self.ExportSchemaClass(type=self.export_type)
self.export_stock = False
@property
@memoized
def post_data(self):
return json.loads(self.request.raw_post_data)
def update_custom_export(self):
"""
Updates custom_export object from the request
and saves to the db
"""
post_data = self.post_data
custom_export_json = post_data['custom_export']
SAFE_KEYS = ('default_format', 'is_safe', 'name', 'schema_id', 'transform_dates')
for key in SAFE_KEYS:
self.custom_export[key] = custom_export_json[key]
# update the custom export index (to stay in sync)
schema_id = self.custom_export.schema_id
schema = ExportSchema.get(schema_id)
self.custom_export.index = schema.index
self.presave = post_data['presave']
self.export_stock = post_data['export_stock']
self.custom_export.tables = [
ExportTable.wrap(table)
for table in custom_export_json['tables']
]
table_dict = dict((t.index, t) for t in self.custom_export.tables)
for table in self.custom_export.tables:
if table.index in table_dict:
table_dict[table.index].columns = table.columns
else:
self.custom_export.tables.append(
ExportTable(
index=table.index,
display=self.custom_export.name,
columns=table.columns
)
)
self.update_custom_params()
self.custom_export.save()
if self.presave:
HQGroupExportConfiguration.add_custom_export(self.domain, self.custom_export.get_id)
else:
HQGroupExportConfiguration.remove_custom_export(self.domain, self.custom_export.get_id)
def get_context(self):
table_configuration = self.format_config_for_javascript(self.custom_export.table_configuration)
return {
'custom_export': self.custom_export,
'default_order': self.default_order,
'deid_options': self.DEID.json_options,
'presave': self.presave,
'export_stock': self.export_stock,
'DeidExportReport_name': DeidExportReport.name,
'table_configuration': table_configuration,
'domain': self.domain,
'commtrack_domain': Domain.get_by_name(self.domain).commtrack_enabled,
'helper': {
'back_url': self.ExportReport.get_url(domain=self.domain),
'export_title': self.export_title,
'slug': self.ExportReport.slug,
'allow_deid': self.allow_deid,
'allow_repeats': self.allow_repeats
}
}
class FormCustomExportHelper(CustomExportHelper):
ExportSchemaClass = FormExportSchema
ExportReport = export.ExcelExportReport
allow_deid = True
allow_repeats = True
default_questions = ["form.case.@case_id", "form.meta.timeEnd", "_id", "id", "form.meta.username"]
questions_to_show = default_questions + ["form.meta.timeStart", "received_on"]
@property
def export_title(self):
return _('Export Submissions to Excel')
def __init__(self, request, domain, export_id=None):
super(FormCustomExportHelper, self).__init__(request, domain, export_id)
if not self.custom_export.app_id:
self.custom_export.app_id = request.GET.get('app_id')
def update_custom_params(self):
p = self.post_data['custom_export']
e = self.custom_export
e.include_errors = p['include_errors']
e.app_id = p['app_id']
@property
@memoized
def default_order(self):
return self.custom_export.get_default_order()
def update_table_conf_with_questions(self, table_conf):
column_conf = table_conf[0].get("column_configuration", [])
current_questions = set(self.custom_export.question_order)
remaining_questions = current_questions.copy()
def is_special_type(q):
return any([q.startswith('form.#'), q.startswith('form.@'), q.startswith('form.case.'),
q.startswith('form.meta.'), q.startswith('form.subcase_')])
def generate_additional_columns():
ret = []
case_name_col = CustomColumn(slug='case_name', index='form.case.@case_id', display='info.case_name',
transform=CASENAME_TRANSFORM, show=True, selected=True)
matches = filter(case_name_col.match, column_conf)
if matches:
for match in matches:
case_name_col.format_for_javascript(match)
elif filter(lambda col: col["index"] == case_name_col.index, column_conf):
ret.append(case_name_col.default_column())
return ret
for col in column_conf:
question = col["index"]
if question in remaining_questions:
remaining_questions.discard(question)
col["show"] = True
if question.startswith("form.") and not is_special_type(question) and question not in current_questions:
col["tag"] = "deleted"
col["show"] = False
if question in self.questions_to_show:
col["show"] = True
if self.creating_new_export and (question in self.default_questions or question in current_questions):
col["selected"] = True
column_conf.extend(generate_additional_columns())
column_conf.extend([
ExportColumn(
index=q,
display='',
show=True,
).to_config_format(selected=self.creating_new_export)
for q in remaining_questions
])
# show all questions in repeat groups by default
for conf in table_conf:
if conf["index"].startswith('#.form.'):
for col in conf.get("column_configuration", []):
col["show"] = True
table_conf[0]["column_configuration"] = column_conf
return table_conf
def get_context(self):
ctxt = super(FormCustomExportHelper, self).get_context()
self.update_table_conf_with_questions(ctxt["table_configuration"])
return ctxt
class CustomColumn(object):
def __init__(self, slug, index, display, transform, is_sensitive=False, tag=None, show=False, selected=False):
self.slug = slug
self.index = index
self.display = display
self.transform = transform
self.is_sensitive = is_sensitive
self.tag = tag
self.show = show
self.selected = selected
def match(self, col):
return col['index'] == self.index and col['transform'] == self.transform
def format_for_javascript(self, col):
# this is js --> js conversion so the name is pretty bad
# couch --> javascript UI code
col['special'] = self.slug
def default_column(self):
# this is kinda hacky - mirrors ExportColumn.to_config_format to add custom columns
# to the existing export UI
return {
'index': self.index,
'selected': self.selected,
'display': self.display,
'transform': self.transform,
"is_sensitive": self.is_sensitive,
'tag': self.tag,
'special': self.slug,
'show': self.show,
}
class CaseCustomExportHelper(CustomExportHelper):
ExportSchemaClass = CaseExportSchema
ExportReport = export.CaseExportReport
export_type = 'case'
default_properties = ["_id", "closed", "closed_on", "modified_on", "opened_on", "info.owner_name", "id"]
default_transformed_properties = ["info.closed_by_username", "info.last_modified_by_username",
"info.opened_by_username", "info.owner_name"]
meta_properties = ["_id", "closed", "closed_by", "closed_on", "domain", "computed_modified_on_",
"server_modified_on", "modified_on", "opened_by", "opened_on", "owner_id",
"user_id", "type", "version", "external_id"]
server_properties = ["_rev", "doc_type", "-deletion_id", "initial_processing_complete"]
row_properties = ["id"]
@property
def export_title(self):
return _('Export Cases, Referrals, and Users')
def format_config_for_javascript(self, table_configuration):
custom_columns = [
CustomColumn(slug='last_modified_by_username', index='user_id',
display='info.last_modified_by_username', transform=USERNAME_TRANSFORM),
CustomColumn(slug='opened_by_username', index='opened_by',
display='info.opened_by_username', transform=USERNAME_TRANSFORM),
CustomColumn(slug='closed_by_username', index='closed_by',
display='info.closed_by_username', transform=USERNAME_TRANSFORM),
CustomColumn(slug='owner_name', index='owner_id', display='info.owner_name',
transform=OWNERNAME_TRANSFORM),
]
main_table_columns = table_configuration[0]['column_configuration']
for custom in custom_columns:
matches = filter(custom.match, main_table_columns)
if not matches:
main_table_columns.append(custom.default_column())
else:
for match in matches:
custom.format_for_javascript(match)
return table_configuration
def update_table_conf(self, table_conf):
        column_conf = table_conf[0].get("column_configuration", [])
current_properties = set(self.custom_export.case_properties)
remaining_properties = current_properties.copy()
def is_special_type(p):
return any([p in self.meta_properties, p in self.server_properties, p in self.row_properties])
for col in column_conf:
prop = col["index"]
display = col.get('display') or prop
if prop in remaining_properties:
remaining_properties.discard(prop)
col["show"] = True
if not is_special_type(prop) and prop not in current_properties:
col["tag"] = "deleted"
col["show"] = False
if prop in self.default_properties + list(current_properties) or \
display in self.default_transformed_properties:
col["show"] = True
if self.creating_new_export:
col["selected"] = True
column_conf.extend([
ExportColumn(
index=prop,
display='',
show=True,
).to_config_format(selected=self.creating_new_export)
for prop in filter(lambda prop: not prop.startswith("parent/"), remaining_properties)
])
table_conf[0]["column_configuration"] = column_conf
return table_conf
def get_context(self):
ctxt = super(CaseCustomExportHelper, self).get_context()
self.update_table_conf(ctxt["table_configuration"])
return ctxt
CustomExportHelper.subclasses_map.update({
'form': FormCustomExportHelper,
'case': CaseCustomExportHelper,
})
| bsd-3-clause | 1,592,665,312,018,443,300 | 37.539474 | 116 | 0.601571 | false | 4.145202 | true | false | false |
Oslandia/vizitown_plugin | cyclone/web.py | 1 | 86048 | # coding: utf-8
#
# Copyright 2010 Alexandre Fiori
# based on the original Tornado by Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The cyclone web framework looks a bit like web.py (http://webpy.org/) or
Google's webapp (http://code.google.com/appengine/docs/python/tools/webapp/),
but with additional tools and optimizations to take advantage of the
non-blocking web server and tools.
Here is the canonical "Hello, world" example app::
import cyclone.web
from twisted.internet import reactor
    class MainHandler(cyclone.web.RequestHandler):
def get(self):
self.write("Hello, world")
if __name__ == "__main__":
        application = cyclone.web.Application([
(r"/", MainHandler),
])
reactor.listenTCP(8888, application)
reactor.run()
See the cyclone walkthrough on http://cyclone.io for more details and a good
getting started guide.
Thread-safety notes
-------------------
In general, methods on RequestHandler and elsewhere in cyclone are not
thread-safe. In particular, methods such as write(), finish(), and
flush() must only be called from the main thread. For more information on
using threads, please check the twisted documentation:
http://twistedmatrix.com/documents/current/core/howto/threading.html
"""
from __future__ import absolute_import, division, with_statement
import Cookie
import base64
import binascii
import calendar
import datetime
import email.utils
import functools
import gzip
import hashlib
import hmac
import httplib
import itertools
import mimetypes
import numbers
import os.path
import re
import stat
import sys
import threading
import time
import traceback
import types
import urllib
import urlparse
import uuid
import cyclone
from cyclone import escape
from cyclone import httpserver
from cyclone import locale
from cyclone import template
from cyclone.escape import utf8, _unicode
from cyclone.util import ObjectDict
from cyclone.util import bytes_type
from cyclone.util import import_object
from cyclone.util import unicode_type
from cStringIO import StringIO as BytesIO # python 2
from twisted.python import failure
from twisted.python import log
from twisted.internet import defer
from twisted.internet import protocol
from twisted.internet import reactor
class RequestHandler(object):
"""Subclass this class and define get() or post() to make a handler.
If you want to support more methods than the standard GET/HEAD/POST, you
should override the class variable SUPPORTED_METHODS in your
RequestHandler class.
If you want lists to be serialized when calling self.write() set
serialize_lists to True.
This may have some security implications if you are not protecting against
XSRF with other means (such as a XSRF token).
More details on this vulnerability here:
http://haacked.com/archive/2008/11/20/anatomy-of-a-subtle-json-vulnerability.aspx
"""
SUPPORTED_METHODS = ("GET", "HEAD", "POST", "DELETE", "PATCH", "PUT",
"OPTIONS")
serialize_lists = False
no_keep_alive = False
xsrf_cookie_name = "_xsrf"
_template_loaders = {} # {path: template.BaseLoader}
_template_loader_lock = threading.Lock()
def __init__(self, application, request, **kwargs):
super(RequestHandler, self).__init__()
self.application = application
self.request = request
self._headers_written = False
self._finished = False
self._auto_finish = True
self._transforms = None # will be set in _execute
self.path_args = None
self.path_kwargs = None
self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
application.ui_methods.items())
# UIModules are available as both `modules` and `_modules` in the
# template namespace. Historically only `modules` was available
# but could be clobbered by user additions to the namespace.
# The template {% module %} directive looks in `_modules` to avoid
# possible conflicts.
self.ui["_modules"] = ObjectDict((n, self._ui_module(n, m)) for n, m in
application.ui_modules.items())
self.ui["modules"] = self.ui["_modules"]
self.clear()
self.request.connection.no_keep_alive = self.no_keep_alive
self.initialize(**kwargs)
def initialize(self):
"""Hook for subclass initialization.
A dictionary passed as the third argument of a url spec will be
supplied as keyword arguments to initialize().
Example::
class ProfileHandler(RequestHandler):
def initialize(self, database):
self.database = database
def get(self, username):
...
app = Application([
(r'/user/(.*)', ProfileHandler, dict(database=database)),
])
"""
pass
@property
def settings(self):
"""An alias for `self.application.settings`."""
return self.application.settings
def head(self, *args, **kwargs):
raise HTTPError(405)
def get(self, *args, **kwargs):
raise HTTPError(405)
def post(self, *args, **kwargs):
raise HTTPError(405)
def delete(self, *args, **kwargs):
raise HTTPError(405)
def patch(self, *args, **kwargs):
raise HTTPError(405)
def put(self, *args, **kwargs):
raise HTTPError(405)
def options(self, *args, **kwargs):
raise HTTPError(405)
def prepare(self):
"""Called at the beginning of a request before `get`/`post`/etc.
Override this method to perform common initialization regardless
of the request method.
"""
pass
def on_finish(self):
"""Called after the end of a request.
Override this method to perform cleanup, logging, etc.
This method is a counterpart to `prepare`. ``on_finish`` may
not produce any output, as it is called after the response
has been sent to the client.
"""
pass
def on_connection_close(self, *args, **kwargs):
"""Called in async handlers if the client closed the connection.
Override this to clean up resources associated with
long-lived connections. Note that this method is called only if
the connection was closed during asynchronous processing; if you
need to do cleanup after every request override `on_finish`
instead.
Proxies may keep a connection open for a time (perhaps
indefinitely) after the client has gone away, so this method
may not be called promptly after the end user closes their
connection.
"""
pass
def clear(self):
"""Resets all headers and content for this response."""
# The performance cost of cyclone.httputil.HTTPHeaders is significant
# (slowing down a benchmark with a trivial handler by more than 10%),
# and its case-normalization is not generally necessary for
# headers we generate on the server side, so use a plain dict
# and list instead.
self._headers = {
"Server": "cyclone/%s" % cyclone.version,
"Content-Type": "text/html; charset=UTF-8",
"Date": datetime.datetime.utcnow().strftime(
"%a, %d %b %Y %H:%M:%S GMT"),
}
self._list_headers = []
self.set_default_headers()
if not self.request.supports_http_1_1():
if self.request.headers.get("Connection") == "Keep-Alive":
self.set_header("Connection", "Keep-Alive")
self._write_buffer = []
self._status_code = 200
self._reason = httplib.responses[200]
def set_default_headers(self):
"""Override this to set HTTP headers at the beginning of the request.
For example, this is the place to set a custom ``Server`` header.
Note that setting such headers in the normal flow of request
processing may not do what you want, since headers may be reset
during error handling.
"""
pass
def set_status(self, status_code, reason=None):
"""Sets the status code for our response.
:arg int status_code: Response status code. If `reason` is ``None``,
it must be present in `httplib.responses`.
:arg string reason: Human-readable reason phrase describing the status
code. If ``None``, it will be filled in from `httplib.responses`.
"""
self._status_code = status_code
if reason is not None:
self._reason = escape.native_str(reason)
else:
try:
self._reason = httplib.responses[status_code]
except KeyError:
raise ValueError("unknown status code %d", status_code)
def get_status(self):
"""Returns the status code for our response."""
return self._status_code
def set_header(self, name, value):
"""Sets the given response header name and value.
If a datetime is given, we automatically format it according to the
HTTP specification. If the value is not a string, we convert it to
a string. All header values are then encoded as UTF-8.
"""
self._headers[name] = self._convert_header_value(value)
def add_header(self, name, value):
"""Adds the given response header and value.
Unlike `set_header`, `add_header` may be called multiple times
to return multiple values for the same header.
"""
self._list_headers.append((name, self._convert_header_value(value)))
def clear_header(self, name):
"""Clears an outgoing header, undoing a previous `set_header` call.
Note that this method does not apply to multi-valued headers
set by `add_header`.
"""
if name in self._headers:
del self._headers[name]
def _convert_header_value(self, value):
if isinstance(value, bytes_type):
pass
elif isinstance(value, unicode_type):
value = value.encode("utf-8")
elif isinstance(value, numbers.Integral):
# return immediately since we know the converted value will be safe
return str(value)
elif isinstance(value, datetime.datetime):
t = calendar.timegm(value.utctimetuple())
return email.utils.formatdate(t, localtime=False, usegmt=True)
else:
raise TypeError("Unsupported header value %r" % value)
# If \n is allowed into the header, it is possible to inject
# additional headers or split the request. Also cap length to
# prevent obviously erroneous values.
if len(value) > 4000 or re.search(r"[\x00-\x1f]", value):
raise ValueError("Unsafe header value %r", value)
return value
_ARG_DEFAULT = []
def get_argument(self, name, default=_ARG_DEFAULT, strip=True):
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we throw an HTTP 400 exception if it is missing.
If the argument appears in the url more than once, we return the
last value.
The returned value is always unicode.
"""
args = self.get_arguments(name, strip=strip)
if not args:
if default is self._ARG_DEFAULT:
raise HTTPError(400, "Missing argument " + name)
return default
return args[-1]
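    # Illustrative usage inside a handler method (a sketch, not original code):
    #
    #     def post(self):
    #         username = self.get_argument("username")        # HTTPError(400) if absent
    #         page = self.get_argument("page", default="1")   # optional, defaults to "1"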
def get_arguments(self, name, strip=True):
"""Returns a list of the arguments with the given name.
If the argument is not present, returns an empty list.
The returned values are always unicode.
"""
values = []
for v in self.request.arguments.get(name, []):
v = self.decode_argument(v, name=name)
if isinstance(v, unicode_type):
# Get rid of any weird control chars (unless decoding gave
# us bytes, in which case leave it alone)
v = re.sub(r"[\x00-\x08\x0e-\x1f]", " ", v)
if strip:
v = v.strip()
values.append(v)
return values
def decode_argument(self, value, name=None):
"""Decodes an argument from the request.
The argument has been percent-decoded and is now a byte string.
By default, this method decodes the argument as utf-8 and returns
a unicode string, but this may be overridden in subclasses.
This method is used as a filter for both get_argument() and for
values extracted from the url and passed to get()/post()/etc.
The name of the argument is provided if known, but may be None
(e.g. for unnamed groups in the url regex).
"""
return _unicode(value)
@property
def cookies(self):
return self.request.cookies
def get_cookie(self, name, default=None):
"""Gets the value of the cookie with the given name, else default."""
if self.request.cookies is not None and name in self.request.cookies:
return self.request.cookies[name].value
return default
def set_cookie(self, name, value, domain=None, expires=None, path="/",
expires_days=None, **kwargs):
"""Sets the given cookie name/value with the given options.
Additional keyword arguments are set on the Cookie.Morsel directly.
See http://docs.python.org/library/cookie.html#morsel-objects
for available attributes.
"""
# The cookie library only accepts type str, in both python 2 and 3
name = escape.native_str(name)
value = escape.native_str(value)
if re.search(r"[\x00-\x20]", name + value):
# Don't let us accidentally inject bad stuff
raise ValueError("Invalid cookie %r: %r" % (name, value))
if not hasattr(self, "_new_cookie"):
self._new_cookie = Cookie.SimpleCookie()
if name in self._new_cookie:
del self._new_cookie[name]
self._new_cookie[name] = value
morsel = self._new_cookie[name]
if domain:
morsel["domain"] = domain
if expires_days is not None and not expires:
expires = datetime.datetime.utcnow() + datetime.timedelta(
days=expires_days)
if expires:
timestamp = calendar.timegm(expires.utctimetuple())
morsel["expires"] = email.utils.formatdate(
timestamp, localtime=False, usegmt=True)
if path:
morsel["path"] = path
for k, v in kwargs.items():
if k == 'max_age':
k = 'max-age'
morsel[k] = v
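    # Illustrative usage (a sketch, not original code): extra keyword arguments
    # are stored on the Cookie.Morsel, so they must be valid morsel attributes;
    # note that 'max_age' is rewritten to 'max-age' above.
    #
    #     self.set_cookie("theme", "dark", expires_days=30, httponly=True)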
def clear_cookie(self, name, path="/", domain=None):
"""Deletes the cookie with the given name."""
expires = datetime.datetime.utcnow() - datetime.timedelta(days=365)
self.set_cookie(name, value="", path=path, expires=expires,
domain=domain)
def clear_all_cookies(self):
"""Deletes all the cookies the user sent with this request."""
for name in self.request.cookies.iterkeys():
self.clear_cookie(name)
def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
"""Signs and timestamps a cookie so it cannot be forged.
You must specify the ``cookie_secret`` setting in your Application
to use this method. It should be a long, random sequence of bytes
to be used as the HMAC secret for the signature.
To read a cookie set with this method, use `get_secure_cookie()`.
Note that the ``expires_days`` parameter sets the lifetime of the
cookie in the browser, but is independent of the ``max_age_days``
parameter to `get_secure_cookie`.
Secure cookies may contain arbitrary byte values, not just unicode
strings (unlike regular cookies)
"""
self.set_cookie(name, self.create_signed_value(name, value),
expires_days=expires_days, **kwargs)
def create_signed_value(self, name, value):
"""Signs and timestamps a string so it cannot be forged.
Normally used via set_secure_cookie, but provided as a separate
method for non-cookie uses. To decode a value not stored
as a cookie use the optional value argument to get_secure_cookie.
"""
self.require_setting("cookie_secret", "secure cookies")
return create_signed_value(self.application.settings["cookie_secret"],
name, value)
def get_secure_cookie(self, name, value=None, max_age_days=31):
"""Returns the given signed cookie if it validates, or None.
The decoded cookie value is returned as a byte string (unlike
`get_cookie`).
"""
self.require_setting("cookie_secret", "secure cookies")
if value is None:
value = self.get_cookie(name)
return decode_signed_value(self.application.settings["cookie_secret"],
name, value, max_age_days=max_age_days)
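    # Illustrative pairing of the two methods above (a sketch; assumes the
    # Application was created with a `cookie_secret` setting):
    #
    #     def post(self):
    #         self.set_secure_cookie("session", "user-42")
    #
    #     def get(self):
    #         session = self.get_secure_cookie("session")   # byte string or None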
def redirect(self, url, permanent=False, status=None):
"""Sends a redirect to the given (optionally relative) URL.
If the ``status`` argument is specified, that value is used as the
HTTP status code; otherwise either 301 (permanent) or 302
(temporary) is chosen based on the ``permanent`` argument.
The default is 302 (temporary).
"""
if self._headers_written:
raise Exception("Cannot redirect after headers have been written")
if status is None:
status = 301 if permanent else 302
else:
assert isinstance(status, types.IntType) and 300 <= status <= 399
self.set_status(status)
# Remove whitespace
url = re.sub(r"[\x00-\x20]+", "", utf8(url))
self.set_header("Location", urlparse.urljoin(utf8(self.request.uri),
url))
self.finish()
def write(self, chunk):
"""Writes the given chunk to the output buffer.
To write the output to the network, use the flush() method below.
If the given chunk is a dictionary, we write it as JSON and set
the Content-Type of the response to be application/json.
(if you want to send JSON as a different Content-Type, call
set_header *after* calling write()).
Note that lists are not converted to JSON because of a potential
cross-site security vulnerability. All JSON output should be
wrapped in a dictionary. More details at
http://haacked.com/archive/2008/11/20/\
anatomy-of-a-subtle-json-vulnerability.aspx
"""
if self._finished:
raise RuntimeError("Cannot write() after finish(). May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if isinstance(chunk, types.DictType) or \
(self.serialize_lists and isinstance(chunk, types.ListType)):
chunk = escape.json_encode(chunk)
self.set_header("Content-Type", "application/json")
chunk = utf8(chunk)
self._write_buffer.append(chunk)
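    # Illustrative usage (a sketch, not original code): writing a dict switches
    # the Content-Type to application/json, while strings are simply buffered
    # until flush() or finish():
    #
    #     self.write({"status": "ok"})
    #     self.write("<html>...</html>")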
def render(self, template_name, **kwargs):
"""Renders the template with the given arguments as the response."""
html = self.render_string(template_name, **kwargs)
# Insert the additional JS and CSS added by the modules on the page
js_embed = []
js_files = []
css_embed = []
css_files = []
html_heads = []
html_bodies = []
for module in getattr(self, "_active_modules", {}).values():
embed_part = module.embedded_javascript()
if embed_part:
js_embed.append(utf8(embed_part))
file_part = module.javascript_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes_type)):
js_files.append(file_part)
else:
js_files.extend(file_part)
embed_part = module.embedded_css()
if embed_part:
css_embed.append(utf8(embed_part))
file_part = module.css_files()
if file_part:
if isinstance(file_part, (unicode_type, bytes_type)):
css_files.append(file_part)
else:
css_files.extend(file_part)
head_part = module.html_head()
if head_part:
html_heads.append(utf8(head_part))
body_part = module.html_body()
if body_part:
html_bodies.append(utf8(body_part))
def is_absolute(path):
return any(path.startswith(x) for x in ["/", "http:", "https:"])
if js_files:
# Maintain order of JavaScript files given by modules
paths = []
unique_paths = set()
for path in js_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
js = ''.join('<script src="' + escape.xhtml_escape(p) +
'" type="text/javascript"></script>'
for p in paths)
sloc = html.rindex('</body>')
html = html[:sloc] + utf8(js) + '\n' + html[sloc:]
if js_embed:
js = '<script type="text/javascript">\n//<![CDATA[\n' + \
'\n'.join(js_embed) + '\n//]]>\n</script>'
sloc = html.rindex('</body>')
html = html[:sloc] + js + '\n' + html[sloc:]
if css_files:
paths = []
unique_paths = set()
for path in css_files:
if not is_absolute(path):
path = self.static_url(path)
if path not in unique_paths:
paths.append(path)
unique_paths.add(path)
css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
'type="text/css" rel="stylesheet"/>'
for p in paths)
hloc = html.index('</head>')
html = html[:hloc] + utf8(css) + '\n' + html[hloc:]
if css_embed:
css = '<style type="text/css">\n' + '\n'.join(css_embed) + \
'\n</style>'
hloc = html.index('</head>')
html = html[:hloc] + css + '\n' + html[hloc:]
if html_heads:
hloc = html.index('</head>')
html = html[:hloc] + ''.join(html_heads) + '\n' + html[hloc:]
if html_bodies:
hloc = html.index('</body>')
html = html[:hloc] + ''.join(html_bodies) + '\n' + html[hloc:]
self.finish(html)
def render_string(self, template_name, **kwargs):
"""Generate the given template with the given arguments.
We return the generated string. To generate and write a template
as a response, use render() above.
"""
# If no template_path is specified, use the path of the calling file
template_path = self.get_template_path()
if not template_path:
frame = sys._getframe(0)
web_file = frame.f_code.co_filename
while frame.f_code.co_filename == web_file:
frame = frame.f_back
template_path = os.path.dirname(frame.f_code.co_filename)
with RequestHandler._template_loader_lock:
if template_path not in RequestHandler._template_loaders:
loader = self.create_template_loader(template_path)
RequestHandler._template_loaders[template_path] = loader
else:
loader = RequestHandler._template_loaders[template_path]
t = loader.load(template_name)
namespace = self.get_template_namespace()
namespace.update(kwargs)
return t.generate(**namespace)
def get_template_namespace(self):
"""Returns a dictionary to be used as the default template namespace.
May be overridden by subclasses to add or modify values.
The results of this method will be combined with additional
defaults in the `tornado.template` module and keyword arguments
to `render` or `render_string`.
"""
namespace = dict(
handler=self,
request=self.request,
current_user=self.current_user,
locale=self.locale,
_=self.locale.translate,
static_url=self.static_url,
xsrf_form_html=self.xsrf_form_html,
reverse_url=self.reverse_url
)
namespace.update(self.ui)
return namespace
def create_template_loader(self, template_path):
"""Returns a new template loader for the given path.
May be overridden by subclasses. By default returns a
directory-based loader on the given path, using the
``autoescape`` application setting. If a ``template_loader``
application setting is supplied, uses that instead.
"""
settings = self.application.settings
if "template_loader" in settings:
return settings["template_loader"]
kwargs = {}
if "autoescape" in settings:
# autoescape=None means "no escaping", so we have to be sure
# to only pass this kwarg if the user asked for it.
kwargs["autoescape"] = settings["autoescape"]
return template.Loader(template_path, **kwargs)
def flush(self, include_footers=False):
"""Flushes the current output buffer to the network."""
chunk = "".join(self._write_buffer)
self._write_buffer = []
if not self._headers_written:
self._headers_written = True
for transform in self._transforms:
self._status_code, self._headers, chunk = \
transform.transform_first_chunk(
self._status_code, self._headers, chunk, include_footers)
headers = self._generate_headers()
else:
for transform in self._transforms:
chunk = transform.transform_chunk(chunk, include_footers)
headers = ""
# Ignore the chunk and only write the headers for HEAD requests
if self.request.method == "HEAD":
if headers:
self.request.write(headers)
return
if headers or chunk:
self.request.write(headers + chunk)
def notifyFinish(self):
"""Returns a deferred, which is fired when the request is terminated
and the connection is closed.
"""
return self.request.notifyFinish()
def finish(self, chunk=None):
"""Finishes this response, ending the HTTP request."""
if self._finished:
raise RuntimeError("finish() called twice. May be caused "
"by using async operations without the "
"@asynchronous decorator.")
if chunk is not None:
self.write(chunk)
# Automatically support ETags and add the Content-Length header if
# we have not flushed any content yet.
if not self._headers_written:
if (self._status_code == 200 and
self.request.method in ("GET", "HEAD") and
"Etag" not in self._headers):
etag = self.compute_etag()
if etag is not None:
self.set_header("Etag", etag)
inm = self.request.headers.get("If-None-Match")
if inm and inm.find(etag) != -1:
self._write_buffer = []
self.set_status(304)
if self._status_code == 304:
assert not self._write_buffer, "Cannot send body with 304"
self._clear_headers_for_304()
elif "Content-Length" not in self._headers:
content_length = sum(len(part) for part in self._write_buffer)
self.set_header("Content-Length", content_length)
self.flush(include_footers=True)
self.request.finish()
self._log()
self._finished = True
self.on_finish()
def send_error(self, status_code=500, **kwargs):
"""Sends the given HTTP error code to the browser.
If `flush()` has already been called, it is not possible to send
an error, so this method will simply terminate the response.
If output has been written but not yet flushed, it will be discarded
and replaced with the error page.
Override `write_error()` to customize the error page that is returned.
Additional keyword arguments are passed through to `write_error`.
"""
if self._headers_written:
log.msg("Cannot send error response after headers written")
if not self._finished:
self.finish()
return
self.clear()
reason = None
if "exc_info" in kwargs:
e = kwargs["exc_info"][1]
if isinstance(e, HTTPError) and e.reason:
reason = e.reason
elif "exception" in kwargs:
e = kwargs["exception"]
if isinstance(e, HTTPAuthenticationRequired):
args = ",".join(['%s="%s"' % (k, v)
for k, v in e.kwargs.items()])
self.set_header("WWW-Authenticate", "%s %s" %
(e.auth_type, args))
self.set_status(status_code, reason=reason)
try:
self.write_error(status_code, **kwargs)
except Exception, e:
log.msg("Uncaught exception in write_error: " + str(e))
if not self._finished:
self.finish()
def write_error(self, status_code, **kwargs):
"""Override to implement custom error pages.
``write_error`` may call `write`, `render`, `set_header`, etc
to produce output as usual.
If this error was caused by an uncaught exception (including
HTTPError), an ``exc_info`` triple will be available as
``kwargs["exc_info"]``. Note that this exception may not be
the "current" exception for purposes of methods like
``sys.exc_info()`` or ``traceback.format_exc``.
For historical reasons, if a method ``get_error_html`` exists,
it will be used instead of the default ``write_error`` implementation.
``get_error_html`` returned a string instead of producing output
normally, and had different semantics for exception handling.
Users of ``get_error_html`` are encouraged to convert their code
to override ``write_error`` instead.
"""
if hasattr(self, 'get_error_html'):
if 'exc_info' in kwargs:
exc_info = kwargs.pop('exc_info')
kwargs['exception'] = exc_info[1]
try:
# Put the traceback into sys.exc_info()
raise exc_info[0], exc_info[1], exc_info[2]
except Exception:
self.finish(self.get_error_html(status_code, **kwargs))
else:
self.finish(self.get_error_html(status_code, **kwargs))
return
if self.settings.get("debug") and "exc_info" in kwargs:
# in debug mode, try to send a traceback
self.set_header('Content-Type', 'text/plain')
for line in traceback.format_exception(*kwargs["exc_info"]):
self.write(line)
self.finish()
else:
self.finish("<html><title>%(code)d: %(message)s</title>"
"<body>%(code)d: %(message)s</body></html>" %
{"code": status_code, "message": self._reason})
@property
def locale(self):
"""The local for the current session.
Determined by either get_user_locale, which you can override to
set the locale based on, e.g., a user preference stored in a
database, or get_browser_locale, which uses the Accept-Language
header.
"""
if not hasattr(self, "_locale"):
self._locale = self.get_user_locale()
if not self._locale:
self._locale = self.get_browser_locale()
assert self._locale
return self._locale
def get_user_locale(self):
"""Override to determine the locale from the authenticated user.
If None is returned, we fall back to get_browser_locale().
This method should return a cyclone.locale.Locale object,
most likely obtained via a call like cyclone.locale.get("en")
"""
return None
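    # Illustrative override (a sketch, not original code; `prefs` is a made-up
    # attribute used only to show the expected return type):
    #
    #     def get_user_locale(self):
    #         if self.current_user and "locale" in self.current_user.prefs:
    #             return cyclone.locale.get(self.current_user.prefs["locale"])
    #         return None   # fall back to get_browser_locale()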
def get_browser_locale(self, default="en_US"):
"""Determines the user's locale from Accept-Language header.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
"""
if "Accept-Language" in self.request.headers:
languages = self.request.headers["Accept-Language"].split(",")
locales = []
for language in languages:
parts = language.strip().split(";")
if len(parts) > 1 and parts[1].startswith("q="):
try:
score = float(parts[1][2:])
except (ValueError, TypeError):
score = 0.0
else:
score = 1.0
locales.append((parts[0], score))
if locales:
locales.sort(key=lambda pair: pair[1], reverse=True)
codes = [l[0] for l in locales]
return locale.get(*codes)
return locale.get(default)
@property
def current_user(self):
"""The authenticated user for this request.
Determined by either get_current_user, which you can override to
set the user based on, e.g., a cookie. If that method is not
overridden, this method always returns None.
We lazy-load the current user the first time this method is called
and cache the result after that.
"""
if not hasattr(self, "_current_user"):
self._current_user = self.get_current_user()
return self._current_user
def get_current_user(self):
"""Override to determine the current user from, e.g., a cookie."""
return None
def get_login_url(self):
"""Override to customize the login URL based on the request.
By default, we use the 'login_url' application setting.
"""
self.require_setting("login_url", "@cyclone.web.authenticated")
return self.application.settings["login_url"]
def get_template_path(self):
"""Override to customize template path for each handler.
By default, we use the 'template_path' application setting.
Return None to load templates relative to the calling file.
"""
return self.application.settings.get("template_path")
@property
def xsrf_token(self):
"""The XSRF-prevention token for the current user/session.
To prevent cross-site request forgery, we set an '_xsrf' cookie
and include the same '_xsrf' value as an argument with all POST
requests. If the two do not match, we reject the form submission
as a potential forgery.
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
"""
if not hasattr(self, "_xsrf_token"):
token = self.get_cookie(self.xsrf_cookie_name)
if not token:
token = binascii.b2a_hex(uuid.uuid4().bytes)
expires_days = 30 if self.current_user else None
self.set_cookie(self.xsrf_cookie_name, token, expires_days=expires_days)
self._xsrf_token = token
return self._xsrf_token
def check_xsrf_cookie(self):
"""Verifies that the '_xsrf' cookie matches the '_xsrf' argument.
To prevent cross-site request forgery, we set an '_xsrf'
cookie and include the same value as a non-cookie
field with all POST requests. If the two do not match, we
reject the form submission as a potential forgery.
The _xsrf value may be set as either a form field named _xsrf
or in a custom HTTP header named X-XSRFToken or X-CSRFToken
(the latter is accepted for compatibility with Django).
See http://en.wikipedia.org/wiki/Cross-site_request_forgery
Prior to release 1.1.1, this check was ignored if the HTTP header
"X-Requested-With: XMLHTTPRequest" was present. This exception
has been shown to be insecure and has been removed. For more
information please see
http://www.djangoproject.com/weblog/2011/feb/08/security/
http://weblog.rubyonrails.org/2011/2/8/\
csrf-protection-bypass-in-ruby-on-rails
"""
token = (self.get_argument(self.xsrf_cookie_name, None) or
self.request.headers.get("X-Xsrftoken") or
self.request.headers.get("X-Csrftoken"))
if not token:
raise HTTPError(403, "'_xsrf' argument missing from POST")
if self.xsrf_token != token:
raise HTTPError(403, "XSRF cookie does not match POST argument")
def xsrf_form_html(self):
"""An HTML <input/> element to be included with all POST forms.
It defines the _xsrf input value, which we check on all POST
requests to prevent cross-site request forgery. If you have set
the 'xsrf_cookies' application setting, you must include this
HTML within all of your HTML forms.
See check_xsrf_cookie() above for more information.
"""
return '<input type="hidden" name="' + self.xsrf_cookie_name + \
'" value="' + escape.xhtml_escape(self.xsrf_token) + '"/>'
def static_url(self, path, include_host=None):
"""Returns a static URL for the given relative static file path.
This method requires you set the 'static_path' setting in your
application (which specifies the root directory of your static
files).
We append ?v=<signature> to the returned URL, which makes our
static file handler set an infinite expiration header on the
returned content. The signature is based on the content of the
file.
By default this method returns URLs relative to the current
host, but if ``include_host`` is true the URL returned will be
absolute. If this handler has an ``include_host`` attribute,
that value will be used as the default for all `static_url`
calls that do not pass ``include_host`` as a keyword argument.
"""
self.require_setting("static_path", "static_url")
static_handler_class = self.settings.get(
"static_handler_class", StaticFileHandler)
if include_host is None:
include_host = getattr(self, "include_host", False)
if include_host:
base = self.request.protocol + "://" + self.request.host + \
static_handler_class.make_static_url(self.settings, path)
else:
base = static_handler_class.make_static_url(self.settings, path)
return base
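    # Illustrative usage (a sketch; assumes the default static_url_prefix and a
    # file living at <static_path>/css/style.css):
    #
    #     self.static_url("css/style.css")
    #     # -> "/static/css/style.css?v=<content signature>"
    #     self.static_url("css/style.css", include_host=True)
    #     # -> "http://example.com/static/css/style.css?v=<content signature>"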
def async_callback(self, callback, *args, **kwargs):
"""Obsolete - catches exceptions from the wrapped function.
This function is unnecessary since Tornado 1.1.
"""
if callback is None:
return None
if args or kwargs:
callback = functools.partial(callback, *args, **kwargs)
def wrapper(*args, **kwargs):
try:
return callback(*args, **kwargs)
except Exception, e:
if self._headers_written:
log.msg("Exception after headers written: " + e)
else:
self._handle_request_exception(e)
return wrapper
def require_setting(self, name, feature="this feature"):
"""Raises an exception if the given app setting is not defined."""
if not self.application.settings.get(name):
raise Exception("You must define the '%s' setting in your "
"application to use %s" % (name, feature))
def reverse_url(self, name, *args):
"""Alias for `Application.reverse_url`."""
return self.application.reverse_url(name, *args)
def compute_etag(self):
"""Computes the etag header to be used for this request.
May be overridden to provide custom etag implementations,
or may return None to disable cyclone's default etag support.
"""
hasher = hashlib.sha1()
for part in self._write_buffer:
hasher.update(part)
return '"' + hasher.hexdigest() + '"'
def _execute(self, transforms, *args, **kwargs):
"""Executes this request with the given output transforms."""
self._transforms = transforms
try:
if self.request.method not in self.SUPPORTED_METHODS:
raise HTTPError(405)
self.path_args = [self.decode_argument(arg) for arg in args]
self.path_kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.items())
# If XSRF cookies are turned on, reject form submissions without
# the proper cookie
if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
self.application.settings.get("xsrf_cookies"): # is True
if not getattr(self, "no_xsrf", False):
self.check_xsrf_cookie()
defer.maybeDeferred(self.prepare).addCallbacks(
self._execute_handler,
lambda f: self._handle_request_exception(f.value),
callbackArgs=(args, kwargs))
except Exception, e:
self._handle_request_exception(e)
def _deferred_handler(self, function, *args, **kwargs):
try:
result = function(*args, **kwargs)
except:
return defer.fail(failure.Failure(
captureVars=defer.Deferred.debug))
else:
if isinstance(result, defer.Deferred):
return result
elif isinstance(result, types.GeneratorType):
# This may degrade performance a bit, but at least avoid the
# server from breaking when someone call yield without
# decorating their handler with @inlineCallbacks.
log.msg("[warning] %s.%s() returned a generator. "
"Perhaps it should be decorated with "
"@inlineCallbacks." % (self.__class__.__name__,
self.request.method.lower()))
return self._deferred_handler(defer.inlineCallbacks(function),
*args, **kwargs)
elif isinstance(result, failure.Failure):
return defer.fail(result)
else:
return defer.succeed(result)
def _execute_handler(self, r, args, kwargs):
if not self._finished:
args = [self.decode_argument(arg) for arg in args]
kwargs = dict((k, self.decode_argument(v, name=k))
for (k, v) in kwargs.iteritems())
function = getattr(self, self.request.method.lower())
#d = defer.maybeDeferred(function, *args, **kwargs)
d = self._deferred_handler(function, *args, **kwargs)
d.addCallbacks(self._execute_success, self._execute_failure)
self.notifyFinish().addCallback(self.on_connection_close)
def _execute_success(self, ign):
if self._auto_finish and not self._finished:
return self.finish()
def _execute_failure(self, err):
return self._handle_request_exception(err)
def _generate_headers(self):
reason = self._reason
lines = [utf8(self.request.version + " " +
str(self._status_code) +
" " + reason)]
lines.extend([(utf8(n) + ": " + utf8(v)) for n, v in
itertools.chain(self._headers.items(), self._list_headers)])
if hasattr(self, "_new_cookie"):
for cookie in self._new_cookie.values():
lines.append(utf8("Set-Cookie: " + cookie.OutputString(None)))
return "\r\n".join(lines) + "\r\n\r\n"
def _log(self):
"""Logs the current request.
Sort of deprecated since this functionality was moved to the
Application, but left in place for the benefit of existing apps
that have overridden this method.
"""
self.application.log_request(self)
def _request_summary(self):
return self.request.method + " " + self.request.uri + " (" + \
self.request.remote_ip + ")"
def _handle_request_exception(self, e):
try:
# These are normally twisted.python.failure.Failure
if isinstance(e.value, (template.TemplateError,
HTTPError, HTTPAuthenticationRequired)):
e = e.value
except:
pass
if isinstance(e, template.TemplateError):
log.msg(str(e))
self.send_error(500, exception=e)
elif isinstance(e, (HTTPError, HTTPAuthenticationRequired)):
if e.log_message and self.settings.get("debug") is True:
log.msg(str(e))
if e.status_code not in httplib.responses:
log.msg("Bad HTTP status code: " + repr(e.status_code))
e.status_code = 500
self.send_error(e.status_code, exception=e)
else:
log.msg("Uncaught exception\n" + str(e))
if self.settings.get("debug"):
log.msg(repr(self.request))
self.send_error(500, exception=e)
def _ui_module(self, name, module):
def render(*args, **kwargs):
if not hasattr(self, "_active_modules"):
self._active_modules = {}
if name not in self._active_modules:
self._active_modules[name] = module(self)
rendered = self._active_modules[name].render(*args, **kwargs)
return rendered
return render
def _ui_method(self, method):
return lambda *args, **kwargs: method(self, *args, **kwargs)
def _clear_headers_for_304(self):
# 304 responses should not contain entity headers (defined in
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
# not explicitly allowed by
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
headers = ["Allow", "Content-Encoding", "Content-Language",
"Content-Length", "Content-MD5", "Content-Range",
"Content-Type", "Last-Modified"]
for h in headers:
self.clear_header(h)
def asynchronous(method):
"""Wrap request handler methods with this if they are asynchronous.
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call self.finish()
to terminate the HTTP request. Without this decorator, the request is
automatically finished when the get() or post() method returns. ::
from twisted.internet import reactor
class MyRequestHandler(web.RequestHandler):
@web.asynchronous
def get(self):
self.write("Processing your request...")
reactor.callLater(5, self.do_something)
def do_something(self):
self.finish("done!")
It may be used for Comet and similar push techniques.
http://en.wikipedia.org/wiki/Comet_(programming)
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
self._auto_finish = False
return method(self, *args, **kwargs)
return wrapper
def removeslash(method):
"""Use this decorator to remove trailing slashes from the request path.
For example, a request to ``'/foo/'`` would redirect to ``'/foo'`` with
this decorator. Your request handler mapping should use a regular
expression like ``r'/foo/*'`` in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD", "POST", "PUT", "DELETE"):
uri = self.request.path.rstrip("/")
if uri: # don't try to redirect '/' to ''
if self.request.query:
uri = uri + "?" + self.request.query
self.redirect(uri, permanent=True)
return
else:
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
def addslash(method):
"""Use this decorator to add a missing trailing slash to the request path.
For example, a request to '/foo' would redirect to '/foo/' with this
decorator. Your request handler mapping should use a regular expression
like r'/foo/?' in conjunction with using the decorator.
"""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.request.path.endswith("/"):
if self.request.method in ("GET", "HEAD", "POST", "PUT", "DELETE"):
uri = self.request.path + "/"
if self.request.query:
uri = uri + "?" + self.request.query
self.redirect(uri, permanent=True)
return
raise HTTPError(404)
return method(self, *args, **kwargs)
return wrapper
class Application(protocol.ServerFactory):
"""A collection of request handlers that make up a web application.
Instances of this class are callable and can be passed directly to
HTTPServer to serve the application::
application = web.Application([
(r"/", MainPageHandler),
])
reactor.listenTCP(8888, application)
reactor.run()
The constructor for this class takes in a list of URLSpec objects
or (regexp, request_class) tuples. When we receive requests, we
iterate over the list in order and instantiate an instance of the
first request class whose regexp matches the request path.
Each tuple can contain an optional third element, which should be a
dictionary if it is present. That dictionary is passed as keyword
arguments to the contructor of the handler. This pattern is used
for the StaticFileHandler below (note that a StaticFileHandler
can be installed automatically with the static_path setting described
below)::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
We support virtual hosts with the add_handlers method, which takes in
a host regular expression as the first argument::
application.add_handlers(r"www\.myhost\.com", [
(r"/article/([0-9]+)", ArticleHandler),
])
You can serve static files by sending the static_path setting as a
keyword argument. We will serve those files from the /static/ URI
(this is configurable with the static_url_prefix setting),
and we will serve /favicon.ico and /robots.txt from the same directory.
A custom subclass of StaticFileHandler can be specified with the
static_handler_class setting.
.. attribute:: settings
Additonal keyword arguments passed to the constructor are saved in the
`settings` dictionary, and are often referred to in documentation as
"application settings".
"""
protocol = httpserver.HTTPConnection
def __init__(self, handlers=None, default_host="",
transforms=None, **settings):
if transforms is None:
self.transforms = []
if settings.get("gzip"):
self.transforms.append(GZipContentEncoding)
self.transforms.append(ChunkedTransferEncoding)
else:
self.transforms = transforms
self.handlers = []
self.named_handlers = {}
self.default_host = default_host
self.settings = ObjectDict(settings)
self.ui_modules = {"linkify": _linkify,
"xsrf_form_html": _xsrf_form_html,
"Template": TemplateModule}
self.ui_methods = {}
self._load_ui_modules(settings.get("ui_modules", {}))
self._load_ui_methods(settings.get("ui_methods", {}))
if "static_path" in self.settings:
path = self.settings["static_path"]
handlers = list(handlers or [])
static_url_prefix = settings.get("static_url_prefix",
"/static/")
static_handler_class = settings.get("static_handler_class",
StaticFileHandler)
static_handler_args = settings.get("static_handler_args", {})
static_handler_args["path"] = path
for pattern in [re.escape(static_url_prefix) + r"(.*)",
r"/(favicon\.ico)", r"/(robots\.txt)"]:
handlers.insert(0, (pattern, static_handler_class,
static_handler_args))
if handlers:
self.add_handlers(".*$", handlers)
def add_handlers(self, host_pattern, host_handlers):
"""Appends the given handlers to our handler list.
Host patterns are processed sequentially in the order they were
added. All matching patterns will be considered.
"""
if not host_pattern.endswith("$"):
host_pattern += "$"
handlers = []
# The handlers with the wildcard host_pattern are a special
# case - they're added in the constructor but should have lower
# precedence than the more-precise handlers added later.
# If a wildcard handler group exists, it should always be last
# in the list, so insert new groups just before it.
if self.handlers and self.handlers[-1][0].pattern == '.*$':
self.handlers.insert(-1, (re.compile(host_pattern), handlers))
else:
self.handlers.append((re.compile(host_pattern), handlers))
for spec in host_handlers:
if isinstance(spec, types.TupleType):
assert len(spec) in (2, 3)
pattern = spec[0]
handler = spec[1]
if isinstance(handler, types.StringType):
# import the Module and instantiate the class
# Must be a fully qualified name (module.ClassName)
try:
handler = import_object(handler)
except ImportError, e:
reactor.callWhenRunning(log.msg,
"Unable to load handler '%s' for "
"'%s': %s" % (handler, pattern, e))
continue
if len(spec) == 3:
kwargs = spec[2]
else:
kwargs = {}
spec = URLSpec(pattern, handler, kwargs)
handlers.append(spec)
if spec.name:
if spec.name in self.named_handlers:
log.msg("Multiple handlers named %s; "
"replacing previous value" % spec.name)
self.named_handlers[spec.name] = spec
def add_transform(self, transform_class):
"""Adds the given OutputTransform to our transform list."""
self.transforms.append(transform_class)
def _get_host_handlers(self, request):
host = request.host.lower().split(':')[0]
matches = []
for pattern, handlers in self.handlers:
if pattern.match(host):
matches.extend(handlers)
# Look for default host if not behind load balancer (for debugging)
if not matches and "X-Real-Ip" not in request.headers:
for pattern, handlers in self.handlers:
if pattern.match(self.default_host):
matches.extend(handlers)
return matches or None
def _load_ui_methods(self, methods):
if isinstance(methods, types.ModuleType):
self._load_ui_methods(dict((n, getattr(methods, n))
for n in dir(methods)))
elif isinstance(methods, types.ListType):
for m in methods:
self._load_ui_methods(m)
else:
for name, fn in methods.items():
if not name.startswith("_") and hasattr(fn, "__call__") \
and name[0].lower() == name[0]:
self.ui_methods[name] = fn
def _load_ui_modules(self, modules):
if isinstance(modules, types.ModuleType):
self._load_ui_modules(dict((n, getattr(modules, n))
for n in dir(modules)))
elif isinstance(modules, types.ListType):
for m in modules:
self._load_ui_modules(m)
else:
assert isinstance(modules, types.DictType)
for name, cls in modules.items():
try:
if issubclass(cls, UIModule):
self.ui_modules[name] = cls
except TypeError:
pass
def __call__(self, request):
"""Called by HTTPServer to execute the request."""
transforms = [t(request) for t in self.transforms]
handler = None
args = []
kwargs = {}
handlers = self._get_host_handlers(request)
if not handlers:
handler = RedirectHandler(self, request,
url="http://" + self.default_host + "/")
else:
for spec in handlers:
match = spec.regex.match(request.path)
if match:
handler = spec.handler_class(self, request, **spec.kwargs)
if spec.regex.groups:
# None-safe wrapper around url_unescape to handle
# unmatched optional groups correctly
def unquote(s):
if s is None:
return s
return escape.url_unescape(s, encoding=None)
# Pass matched groups to the handler. Since
# match.groups() includes both named and
# unnamed groups,we want to use either groups
# or groupdict but not both.
# Note that args are passed as bytes so the handler can
# decide what encoding to use.
if spec.regex.groupindex:
kwargs = dict((str(k), unquote(v))
for (k, v) in match.groupdict().items())
else:
args = [unquote(s) for s in match.groups()]
break
if not handler:
handler = ErrorHandler(self, request, status_code=404)
# In debug mode, re-compile templates and reload static files on every
# request so you don't need to restart to see changes
if self.settings.get("debug"):
with RequestHandler._template_loader_lock:
for loader in RequestHandler._template_loaders.values():
loader.reset()
StaticFileHandler.reset()
handler._execute(transforms, *args, **kwargs)
return handler
def reverse_url(self, name, *args):
"""Returns a URL path for handler named `name`
The handler must be added to the application as a named URLSpec.
Args will be substituted for capturing groups in the URLSpec regex.
They will be converted to strings if necessary, encoded as utf8,
and url-escaped.
"""
if name in self.named_handlers:
return self.named_handlers[name].reverse(*args)
raise KeyError("%s not found in named urls" % name)
def log_request(self, handler):
"""Writes a completed HTTP request to the logs.
By default writes to the python root logger. To change
this behavior either subclass Application and override this method,
or pass a function in the application settings dictionary as
'log_function'.
"""
if "log_function" in self.settings:
self.settings["log_function"](handler)
return
request_time = 1000.0 * handler.request.request_time()
log.msg("[" + handler.request.protocol + "] " +
str(handler.get_status()) + " " + handler._request_summary() +
" %.2fms" % request_time)
class HTTPError(Exception):
"""An exception that will turn into an HTTP error response.
:arg int status_code: HTTP status code. Must be listed in
`httplib.responses` unless the ``reason`` keyword argument is given.
:arg string log_message: Message to be written to the log for this error
(will not be shown to the user unless the `Application` is in debug
mode). May contain ``%s``-style placeholders, which will be filled
in with remaining positional parameters.
:arg string reason: Keyword-only argument. The HTTP "reason" phrase
to pass in the status line along with ``status_code``. Normally
determined automatically from ``status_code``, but can be used
to use a non-standard numeric code.
"""
def __init__(self, status_code, log_message=None, *args, **kwargs):
self.status_code = status_code
self.log_message = log_message
self.args = args
self.reason = kwargs.get("reason", None)
def __str__(self):
if self.log_message:
return self.log_message % self.args
else:
return self.reason or \
httplib.responses.get(self.status_code, "Unknown")
class HTTPAuthenticationRequired(HTTPError):
"""An exception that will turn into an HTTP 401, Authentication Required.
The arguments are used to compose the ``WWW-Authenticate`` header.
See http://en.wikipedia.org/wiki/Basic_access_authentication for details.
:arg string auth_type: Authentication type (``Basic``, ``Digest``, etc)
:arg string realm: Realm (Usually displayed by the browser)
"""
def __init__(self, log_message=None,
auth_type="Basic", realm="Restricted Access", **kwargs):
self.status_code = 401
self.log_message = log_message
self.auth_type = auth_type
self.kwargs = kwargs
self.kwargs["realm"] = realm
class ErrorHandler(RequestHandler):
"""Generates an error response with status_code for all requests."""
def initialize(self, status_code):
self.set_status(status_code)
def prepare(self):
raise HTTPError(self._status_code)
def check_xsrf_cookie(self):
# POSTs to an ErrorHandler don't actually have side effects,
# so we don't need to check the xsrf token. This allows POSTs
# to the wrong url to return a 404 instead of 403.
pass
class RedirectHandler(RequestHandler):
"""Redirects the client to the given URL for all GET requests.
You should provide the keyword argument "url" to the handler, e.g.::
application = web.Application([
(r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
])
"""
def initialize(self, url, permanent=True):
self._url = url
self._permanent = permanent
def get(self):
self.redirect(self._url, permanent=self._permanent)
class StaticFileHandler(RequestHandler):
"""A simple handler that can serve static content from a directory.
To map a path to this handler for a static data directory /var/www,
you would add a line to your application like::
application = web.Application([
(r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
])
The local root directory of the content should be passed as the "path"
argument to the handler.
To support aggressive browser caching, if the argument "v" is given
with the path, we set an infinite HTTP expiration header. So, if you
want browsers to cache a file indefinitely, send them to, e.g.,
/static/images/myimage.png?v=xxx. Override ``get_cache_time`` method for
more fine-grained cache control.
"""
CACHE_MAX_AGE = 86400 * 365 * 10 # 10 years
_static_hashes = {}
_lock = threading.Lock() # protects _static_hashes
def initialize(self, path, default_filename=None):
self.root = "%s%s" % (os.path.abspath(path), os.path.sep)
self.default_filename = default_filename
@classmethod
def reset(cls):
with cls._lock:
cls._static_hashes = {}
def head(self, path):
self.get(path, include_body=False)
def get(self, path, include_body=True):
path = self.parse_url_path(path)
abspath = os.path.abspath(os.path.join(self.root, path))
# os.path.abspath strips a trailing /
# it needs to be temporarily added back for requests to root/
if not (abspath + os.path.sep).startswith(self.root):
raise HTTPError(403, "%s is not in root static directory", path)
if os.path.isdir(abspath) and self.default_filename is not None:
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
if not self.request.path.endswith("/"):
self.redirect("%s/" % self.request.path)
abspath = os.path.join(abspath, self.default_filename)
if not os.path.exists(abspath):
raise HTTPError(404)
if not os.path.isfile(abspath):
raise HTTPError(403, "%s is not a file", path)
stat_result = os.stat(abspath)
modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
self.set_header("Last-Modified", modified)
mime_type, encoding = mimetypes.guess_type(abspath)
if mime_type:
self.set_header("Content-Type", mime_type)
cache_time = self.get_cache_time(path, modified, mime_type)
if cache_time > 0:
self.set_header("Expires", "%s%s" % (datetime.datetime.utcnow(),
datetime.timedelta(seconds=cache_time)))
self.set_header("Cache-Control", "max-age=%s" % str(cache_time))
self.set_extra_headers(path)
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.get("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
if if_since >= modified:
self.set_status(304)
return
with open(abspath, "rb") as file:
data = file.read()
if include_body:
self.write(data)
else:
assert self.request.method == "HEAD"
self.set_header("Content-Length", len(data))
def set_extra_headers(self, path):
"""For subclass to add extra headers to the response"""
pass
def get_cache_time(self, path, modified, mime_type):
"""Override to customize cache control behavior.
Return a positive number of seconds to trigger aggressive caching or 0
to mark resource as cacheable, only.
By default returns cache expiry of 10 years for resources requested
with "v" argument.
"""
return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
@classmethod
def make_static_url(cls, settings, path):
"""Constructs a versioned url for the given path.
This method may be overridden in subclasses (but note that it is
a class method rather than an instance method).
``settings`` is the `Application.settings` dictionary. ``path``
is the static path being requested. The url returned should be
relative to the current host.
"""
static_url_prefix = settings.get('static_url_prefix', '/static/')
version_hash = cls.get_version(settings, path)
if version_hash:
return "%s%s?v=%s" % (static_url_prefix, path, version_hash)
return "%s%s" % (static_url_prefix, path)
@classmethod
def get_version(cls, settings, path):
"""Generate the version string to be used in static URLs.
This method may be overridden in subclasses (but note that it
is a class method rather than a static method). The default
implementation uses a hash of the file's contents.
``settings`` is the `Application.settings` dictionary and ``path``
is the relative location of the requested asset on the filesystem.
The returned value should be a string, or ``None`` if no version
could be determined.
"""
abs_path = os.path.join(settings["static_path"], path)
with cls._lock:
hashes = cls._static_hashes
if abs_path not in hashes:
try:
f = open(abs_path, "rb")
hashes[abs_path] = hashlib.md5(f.read()).hexdigest()
f.close()
except Exception:
log.msg("Could not open static file %r" % path)
hashes[abs_path] = None
hsh = hashes.get(abs_path)
if hsh:
return hsh[:5]
return None
def parse_url_path(self, url_path):
"""Converts a static URL path into a filesystem path.
``url_path`` is the path component of the URL with
``static_url_prefix`` removed. The return value should be
filesystem path relative to ``static_path``.
"""
if os.path.sep != "/":
url_path = url_path.replace("/", os.path.sep)
return url_path
class FallbackHandler(RequestHandler):
"""A RequestHandler that wraps another HTTP server callback.
Tornado has this to combine RequestHandlers and WSGI handlers, but it's
not supported in cyclone and is just here for compatibily purposes.
"""
def initialize(self, fallback):
self.fallback = fallback
def prepare(self):
self.fallback(self.request)
self._finished = True
class OutputTransform(object):
"""A transform modifies the result of an HTTP request (e.g., GZip encoding)
A new transform instance is created for every request. See the
ChunkedTransferEncoding example below if you want to implement a
new Transform.
"""
def __init__(self, request):
pass
def transform_first_chunk(self, status_code, headers, chunk, finishing):
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
return chunk
class GZipContentEncoding(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
"""
CONTENT_TYPES = set([
"text/plain", "text/html", "text/css", "text/xml",
"application/javascript", "application/x-javascript",
"application/xml", "application/atom+xml",
"text/javascript", "application/json", "application/xhtml+xml"])
MIN_LENGTH = 5
def __init__(self, request):
self._gzipping = request.supports_http_1_1() and \
"gzip" in request.headers.get("Accept-Encoding", [])
def transform_first_chunk(self, status_code, headers, chunk, finishing):
if 'Vary' in headers:
headers['Vary'] += ', Accept-Encoding'
else:
headers['Vary'] = 'Accept-Encoding'
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = (ctype in self.CONTENT_TYPES) and \
(not finishing or len(chunk) >= self.MIN_LENGTH) and \
(finishing or "Content-Length" not in headers) and \
("Content-Encoding" not in headers)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
headers["Content-Length"] = str(len(chunk))
return status_code, headers, chunk
def transform_chunk(self, chunk, finishing):
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
class ChunkedTransferEncoding(OutputTransform):
"""Applies the chunked transfer encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1
"""
def __init__(self, request):
self._chunking = request.supports_http_1_1()
def transform_first_chunk(self, status_code, headers, chunk, finishing):
# 304 responses have no body (not even a zero-length body), and so
# should not have either Content-Length or Transfer-Encoding headers.
if self._chunking and status_code != 304:
# No need to chunk the output if a Content-Length is specified
if "Content-Length" in headers or "Transfer-Encoding" in headers:
self._chunking = False
else:
headers["Transfer-Encoding"] = "chunked"
chunk = self.transform_chunk(chunk, finishing)
return status_code, headers, chunk
def transform_chunk(self, block, finishing):
if self._chunking:
# Don't write out empty chunks because that means END-OF-STREAM
# with chunked encoding
if block:
block = "%s\r\n%s\r\n" % (utf8("%x" % len(block)), block)
if finishing:
block = "%s0\r\n\r\n" % block
return block
def authenticated(method):
"""Decorate methods with this to require that the user be logged in."""
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urlparse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
next_url = self.request.uri
url = "%s?%s" % (url,
urllib.urlencode(dict(next=next_url)))
return self.redirect(url)
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
class UIModule(object):
"""A UI re-usable, modular unit on a page.
UI modules often execute additional queries, and they can include
additional CSS and JavaScript that will be included in the output
page, which is automatically inserted on page render.
"""
def __init__(self, handler):
self.handler = handler
self.request = handler.request
self.ui = handler.ui
self.current_user = handler.current_user
self.locale = handler.locale
def render(self, *args, **kwargs):
"""Overridden in subclasses to return this module's output."""
raise NotImplementedError()
def embedded_javascript(self):
"""Returns a JavaScript string that will be embedded in the page."""
return None
def javascript_files(self):
"""Returns a list of JavaScript files required by this module."""
return None
def embedded_css(self):
"""Returns a CSS string that will be embedded in the page."""
return None
def css_files(self):
"""Returns a list of CSS files required by this module."""
return None
def html_head(self):
"""Returns a CSS string that will be put in the <head/> element"""
return None
def html_body(self):
"""Returns an HTML string that will be put in the <body/> element"""
return None
def render_string(self, path, **kwargs):
"""Renders a template and returns it as a string."""
return self.handler.render_string(path, **kwargs)
class _linkify(UIModule):
def render(self, text, **kwargs):
return escape.linkify(text, **kwargs)
class _xsrf_form_html(UIModule):
def render(self):
return self.handler.xsrf_form_html()
class TemplateModule(UIModule):
"""UIModule that simply renders the given template.
{% module Template("foo.html") %} is similar to {% include "foo.html" %},
but the module version gets its own namespace (with kwargs passed to
Template()) instead of inheriting the outer template's namespace.
Templates rendered through this module also get access to UIModule's
automatic javascript/css features. Simply call set_resources
inside the template and give it keyword arguments corresponding to
the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
Note that these resources are output once per template file, not once
per instantiation of the template, so they must not depend on
any arguments to the template.
"""
def __init__(self, handler):
super(TemplateModule, self).__init__(handler)
# keep resources in both a list and a dict to preserve order
self._resource_list = []
self._resource_dict = {}
def render(self, path, **kwargs):
def set_resources(**kwargs):
if path not in self._resource_dict:
self._resource_list.append(kwargs)
self._resource_dict[path] = kwargs
else:
if self._resource_dict[path] != kwargs:
raise ValueError("set_resources called with different "
"resources for the same template")
return ""
return self.render_string(path, set_resources=set_resources,
**kwargs)
def _get_resources(self, key):
return (r[key] for r in self._resource_list if key in r)
def embedded_javascript(self):
return "\n".join(self._get_resources("embedded_javascript"))
def javascript_files(self):
result = []
for f in self._get_resources("javascript_files"):
if isinstance(f, (unicode, bytes_type)):
result.append(f)
else:
result.extend(f)
return result
def embedded_css(self):
return "\n".join(self._get_resources("embedded_css"))
def css_files(self):
result = []
for f in self._get_resources("css_files"):
if isinstance(f, (unicode, bytes_type)):
result.append(f)
else:
result.extend(f)
return result
def html_head(self):
return "".join(self._get_resources("html_head"))
def html_body(self):
return "".join(self._get_resources("html_body"))
class URLSpec(object):
"""Specifies mappings between URLs and handlers."""
def __init__(self, pattern, handler_class, kwargs=None, name=None):
"""Creates a URLSpec.
Parameters:
pattern: Regular expression to be matched. Any groups in the regex
will be passed in to the handler's get/post/etc methods as
arguments.
handler_class: RequestHandler subclass to be invoked.
kwargs (optional): A dictionary of additional arguments to be passed
to the handler's constructor.
name (optional): A name for this handler. Used by
Application.reverse_url.
"""
if not pattern.endswith('$'):
pattern += '$'
self.regex = re.compile(pattern)
assert len(self.regex.groupindex) in (0, self.regex.groups), \
("groups in url regexes must either be all named or all "
"positional: %r" % self.regex.pattern)
self.handler_class = handler_class
self.kwargs = kwargs or {}
self.name = name
self._path, self._group_count = self._find_groups()
def __repr__(self):
return '%s(%r, %s, kwargs=%r, name=%r)' % \
(self.__class__.__name__, self.regex.pattern,
self.handler_class, self.kwargs, self.name)
def _find_groups(self):
"""Returns a tuple (reverse string, group count) for a url.
For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
would return ('/%s/%s/', 2).
"""
pattern = self.regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
if pattern.endswith('$'):
pattern = pattern[:-1]
if self.regex.groups != pattern.count('('):
# The pattern is too complicated for our simplistic matching,
# so we can't support reversing it.
return (None, None)
pieces = []
for fragment in pattern.split('('):
if ')' in fragment:
paren_loc = fragment.index(')')
if paren_loc >= 0:
pieces.append('%s' + fragment[paren_loc + 1:])
else:
pieces.append(fragment)
return (''.join(pieces), self.regex.groups)
def reverse(self, *args):
assert self._path is not None, \
"Cannot reverse url regex " + self.regex.pattern
assert len(args) == self._group_count, "required number of arguments "\
"not found"
if not len(args):
return self._path
converted_args = []
for a in args:
if not isinstance(a, (unicode_type, bytes_type)):
a = str(a)
converted_args.append(escape.url_escape(utf8(a)))
return self._path % tuple(converted_args)
url = URLSpec
def _time_independent_equals(a, b):
if len(a) != len(b):
return False
result = 0
if isinstance(a[0], types.IntType): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
def create_signed_value(secret, name, value):
timestamp = utf8(str(int(time.time())))
value = base64.b64encode(utf8(value))
signature = _create_signature(secret, name, value, timestamp)
value = "|".join([value, timestamp, signature])
return value
def decode_signed_value(secret, name, value, max_age_days=31):
if not value:
return None
parts = utf8(value).split("|")
if len(parts) != 3:
return None
signature = _create_signature(secret, name, parts[0], parts[1])
if not _time_independent_equals(parts[2], signature):
log.msg("Invalid cookie signature %r" % value)
return None
timestamp = int(parts[1])
if timestamp < time.time() - max_age_days * 86400:
log.msg("Expired cookie %r" % value)
return None
if timestamp > time.time() + 31 * 86400:
# _cookie_signature does not hash a delimiter between the
# parts of the cookie, so an attacker could transfer trailing
# digits from the payload to the timestamp without altering the
# signature. For backwards compatibility, sanity-check timestamp
# here instead of modifying _cookie_signature.
log.msg("Cookie timestamp in future; possible tampering %r" % value)
return None
if parts[1].startswith("0"):
log.msg("Tampered cookie %r" % value)
try:
return base64.b64decode(parts[0])
except Exception:
return None
def _create_signature(secret, *parts):
hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
for part in parts:
hash.update(utf8(part))
return utf8(hash.hexdigest())
| gpl-2.0 | 4,279,697,711,647,361,000 | 38.892443 | 88 | 0.589415 | false | 4.374803 | false | false | false |
larsyencken/cjktools | cjktools/resources/radkdict.py | 1 | 2575 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# radkdict.py
# cjktools
#
"""
Based on the radkfile, a dictionary mapping character to bag of radicals.
"""
import sys
from cjktools import maps
from cjktools.common import get_stream_context, stream_codec
from . import cjkdata
from six import text_type
def _default_stream():
return open(cjkdata.get_resource('radkfile'))
class RadkDict(dict):
"""
Determines which radicals a character contains.
:param istream:
The radkfile to parse.
"""
def __init__(self, istream=None):
"""
"""
with get_stream_context(_default_stream, istream) as istream:
self._parse_radkfile(stream_codec(istream))
def _parse_radkfile(self, line_stream):
"""
Parses the radkfile and populates the current dictionary.
:param line_stream:
A stream yielding the lines in the radkfile to parse.
"""
radical_to_kanji = {}
radical_to_stroke_count = {}
current_radical = None
stroke_count = None
for line in line_stream:
line = line.rstrip()
if line.startswith('#'):
# found a comment line
continue
if line.startswith('$'):
# found a line with a new radical
parts = line.split()
if len(parts) not in (3, 4):
raise Exception('format error parsing radkfile')
dollar, current_radical, stroke_count = parts[:3]
radical_to_stroke_count[current_radical] = int(stroke_count)
continue
# found a line of kanji
kanji = line.strip()
radical_to_kanji.setdefault(current_radical, []).extend(kanji)
self.update(maps.invert_mapping(radical_to_kanji))
maps.map_dict(tuple, self, in_place=True)
self.radical_to_stroke_count = radical_to_stroke_count
self.radical_to_kanji = radical_to_kanji
@classmethod
def get_cached(cls):
""" Returns a memory-cached class instance. """
cached = getattr(cls, '_cached', cls())
cls._cached = cached
return cls._cached
def print_radicals(kanji_list):
""" Print out each kanji and the radicals it contains. """
radical_dict = RadkDict()
for kanji in kanji_list:
kanji = text_type(kanji)
radicals = radical_dict[kanji]
print('%s: ' % kanji, ' '.join(sorted(radicals)))
if __name__ == '__main__':
print_radicals(sys.argv[1:])
| bsd-3-clause | -4,731,615,904,252,097,000 | 24.49505 | 76 | 0.582524 | false | 3.64215 | false | false | false |
Ambuj-UF/ConCat-1.0 | src/Utils/Bio/MaxEntropy.py | 1 | 10435 | # Copyright 2001 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Maximum Entropy code.
Uses Improved Iterative Scaling.
"""
# TODO Define terminology
from __future__ import print_function
from functools import reduce
import sys
# Add path to Bio
sys.path.append('..')
from Bio._py3k import map
import numpy
class MaxEntropy(object):
"""Holds information for a Maximum Entropy classifier.
Members:
classes List of the possible classes of data.
alphas List of the weights for each feature.
feature_fns List of the feature functions.
"""
def __init__(self):
self.classes = []
self.alphas = []
self.feature_fns = []
def calculate(me, observation):
"""calculate(me, observation) -> list of log probs
Calculate the log of the probability for each class. me is a
MaxEntropy object that has been trained. observation is a vector
representing the observed data. The return value is a list of
unnormalized log probabilities for each class.
"""
scores = []
assert len(me.feature_fns) == len(me.alphas)
for klass in me.classes:
lprob = 0.0
for fn, alpha in zip(me.feature_fns, me.alphas):
lprob += fn(observation, klass) * alpha
scores.append(lprob)
return scores
def classify(me, observation):
"""classify(me, observation) -> class
Classify an observation into a class.
"""
scores = calculate(me, observation)
max_score, klass = scores[0], me.classes[0]
for i in range(1, len(scores)):
if scores[i] > max_score:
max_score, klass = scores[i], me.classes[i]
return klass
def _eval_feature_fn(fn, xs, classes):
"""_eval_feature_fn(fn, xs, classes) -> dict of values
Evaluate a feature function on every instance of the training set
and class. fn is a callback function that takes two parameters: a
training instance and a class. Return a dictionary of (training
set index, class index) -> non-zero value. Values of 0 are not
stored in the dictionary.
"""
values = {}
for i in range(len(xs)):
for j in range(len(classes)):
f = fn(xs[i], classes[j])
if f != 0:
values[(i, j)] = f
return values
def _calc_empirical_expects(xs, ys, classes, features):
"""_calc_empirical_expects(xs, ys, classes, features) -> list of expectations
Calculate the expectation of each function from the data. This is
the constraint for the maximum entropy distribution. Return a
list of expectations, parallel to the list of features.
"""
# E[f_i] = SUM_x,y P(x, y) f(x, y)
# = 1/N f(x, y)
class2index = {}
for index, key in enumerate(classes):
class2index[key] = index
ys_i = [class2index[y] for y in ys]
expect = []
N = len(xs)
for feature in features:
s = 0
for i in range(N):
s += feature.get((i, ys_i[i]), 0)
expect.append(float(s) / N)
return expect
def _calc_model_expects(xs, classes, features, alphas):
"""_calc_model_expects(xs, classes, features, alphas) -> list of expectations.
Calculate the expectation of each feature from the model. This is
not used in maximum entropy training, but provides a good function
for debugging.
"""
# SUM_X P(x) SUM_Y P(Y|X) F(X, Y)
# = 1/N SUM_X SUM_Y P(Y|X) F(X, Y)
p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
expects = []
for feature in features:
sum = 0.0
for (i, j), f in feature.items():
sum += p_yx[i][j] * f
expects.append(sum / len(xs))
return expects
def _calc_p_class_given_x(xs, classes, features, alphas):
"""_calc_p_class_given_x(xs, classes, features, alphas) -> matrix
Calculate P(y|x), where y is the class and x is an instance from
the training set. Return a XSxCLASSES matrix of probabilities.
"""
prob_yx = numpy.zeros((len(xs), len(classes)))
# Calculate log P(y, x).
assert len(features) == len(alphas)
for feature, alpha in zip(features, alphas):
for (x, y), f in feature.items():
prob_yx[x][y] += alpha * f
# Take an exponent to get P(y, x)
prob_yx = numpy.exp(prob_yx)
# Divide out the probability over each class, so we get P(y|x).
for i in range(len(xs)):
z = sum(prob_yx[i])
prob_yx[i] = prob_yx[i] / z
return prob_yx
def _calc_f_sharp(N, nclasses, features):
"""_calc_f_sharp(N, nclasses, features) -> matrix of f sharp values."""
# f#(x, y) = SUM_i feature(x, y)
f_sharp = numpy.zeros((N, nclasses))
for feature in features:
for (i, j), f in feature.items():
f_sharp[i][j] += f
return f_sharp
def _iis_solve_delta(N, feature, f_sharp, empirical, prob_yx,
max_newton_iterations, newton_converge):
# Solve delta using Newton's method for:
# SUM_x P(x) * SUM_c P(c|x) f_i(x, c) e^[delta_i * f#(x, c)] = 0
delta = 0.0
iters = 0
while iters < max_newton_iterations: # iterate for Newton's method
f_newton = df_newton = 0.0 # evaluate the function and derivative
for (i, j), f in feature.items():
prod = prob_yx[i][j] * f * numpy.exp(delta * f_sharp[i][j])
f_newton += prod
df_newton += prod * f_sharp[i][j]
f_newton, df_newton = empirical - f_newton / N, -df_newton / N
ratio = f_newton / df_newton
delta -= ratio
if numpy.fabs(ratio) < newton_converge: # converged
break
iters = iters + 1
else:
raise RuntimeError("Newton's method did not converge")
return delta
def _train_iis(xs, classes, features, f_sharp, alphas, e_empirical,
max_newton_iterations, newton_converge):
"""Do one iteration of hill climbing to find better alphas (PRIVATE)."""
# This is a good function to parallelize.
# Pre-calculate P(y|x)
p_yx = _calc_p_class_given_x(xs, classes, features, alphas)
N = len(xs)
newalphas = alphas[:]
for i in range(len(alphas)):
delta = _iis_solve_delta(N, features[i], f_sharp, e_empirical[i], p_yx,
max_newton_iterations, newton_converge)
newalphas[i] += delta
return newalphas
def train(training_set, results, feature_fns, update_fn=None,
max_iis_iterations=10000, iis_converge=1.0e-5,
max_newton_iterations=100, newton_converge=1.0e-10):
"""Train a maximum entropy classifier, returns MaxEntropy object.
Train a maximum entropy classifier on a training set.
training_set is a list of observations. results is a list of the
class assignments for each observation. feature_fns is a list of
the features. These are callback functions that take an
observation and class and return a 1 or 0. update_fn is a
callback function that is called at each training iteration. It is
passed a MaxEntropy object that encapsulates the current state of
the training.
The maximum number of iterations and the convergence criterion for IIS
are given by max_iis_iterations and iis_converge, respectively, while
max_newton_iterations and newton_converge are the maximum number
of iterations and the convergence criterion for Newton's method.
"""
if not training_set:
raise ValueError("No data in the training set.")
if len(training_set) != len(results):
raise ValueError("training_set and results should be parallel lists.")
# Rename variables for convenience.
xs, ys = training_set, results
# Get a list of all the classes that need to be trained.
classes = sorted(set(results))
# Cache values for all features.
features = [_eval_feature_fn(fn, training_set, classes)
for fn in feature_fns]
# Cache values for f#.
f_sharp = _calc_f_sharp(len(training_set), len(classes), features)
# Pre-calculate the empirical expectations of the features.
e_empirical = _calc_empirical_expects(xs, ys, classes, features)
# Now train the alpha parameters to weigh each feature.
alphas = [0.0] * len(features)
iters = 0
while iters < max_iis_iterations:
nalphas = _train_iis(xs, classes, features, f_sharp,
alphas, e_empirical,
max_newton_iterations, newton_converge)
diff = map(lambda x, y: numpy.fabs(x - y), alphas, nalphas)
diff = reduce(lambda x, y: x + y, diff, 0)
alphas = nalphas
me = MaxEntropy()
me.alphas, me.classes, me.feature_fns = alphas, classes, feature_fns
if update_fn is not None:
update_fn(me)
if diff < iis_converge: # converged
break
else:
raise RuntimeError("IIS did not converge")
return me
if __name__ == "__main__":
# Car data from example Naive Bayes Classifier example by Eric Meisner November 22, 2003
# http://www.inf.u-szeged.hu/~ormandi/teaching/mi2/02-naiveBayes-example.pdf
xcar = [
['Red', 'Sports', 'Domestic'],
['Red', 'Sports', 'Domestic'],
['Red', 'Sports', 'Domestic'],
['Yellow', 'Sports', 'Domestic'],
['Yellow', 'Sports', 'Imported'],
['Yellow', 'SUV', 'Imported'],
['Yellow', 'SUV', 'Imported'],
['Yellow', 'SUV', 'Domestic'],
['Red', 'SUV', 'Imported'],
['Red', 'Sports', 'Imported']
]
ycar = [
'Yes',
'No',
'Yes',
'No',
'Yes',
'No',
'Yes',
'No',
'No',
'Yes'
]
# Requires some rules or features
def udf1(ts, cl):
if ts[0] == 'Red':
return 0
else:
return 1
def udf2(ts, cl):
if ts[1] == 'Sports':
return 0
else:
return 1
def udf3(ts, cl):
if ts[2] == 'Domestic':
return 0
else:
return 1
user_functions = [udf1, udf2, udf3] # must be an iterable type
xe = train(xcar, ycar, user_functions)
for xv, yv in zip(xcar, ycar):
xc = classify(xe, xv)
print('Pred: %s gives %s y is %s' % (xv, xc, yv))
| gpl-2.0 | -8,712,417,844,903,718,000 | 30.814024 | 92 | 0.601725 | false | 3.465626 | false | false | false |
jtoppins/beaker | IntegrationTests/src/bkr/inttest/server/selenium/test_job_export_xml.py | 1 | 2025 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from bkr.inttest.server.selenium import WebDriverTestCase
from bkr.inttest import data_setup, get_server_base
from turbogears.database import session
from bkr.server.model import Job
import requests
import lxml.etree
from StringIO import StringIO
class JobExportXML(WebDriverTestCase):
maxDiff = None
def setUp(self):
with session.begin():
self.job_to_export = data_setup.create_completed_job()
self.browser = self.get_browser()
def test_export_xml(self):
b = self.browser
# Make sure the Export button is present in the jobs grid. We can't
# actually click it because it triggers a download, which WebDriver
# can't handle.
b.get(get_server_base() + 'jobs/')
b.find_element_by_name('simplesearch').send_keys(unicode(self.job_to_export.id))
b.find_element_by_name('jobsearch_simple').submit()
b.find_element_by_xpath(
'//tr[normalize-space(string(./td[1]))="%s"]'
'//a[text()="Export"]'
% self.job_to_export.t_id)
# Fetch the exported XML directly.
response = requests.get(get_server_base() +
'to_xml?taskid=%s&pretty=False' % self.job_to_export.t_id)
actual = response.content
with session.begin():
# Expire the job, otherwise the exported job XML (read from the
# Python instance) will have a duration attribute while the export
# from the view will have not since our database stores only seconds
session.expire_all()
job = Job.by_id(self.job_to_export.id)
expected = lxml.etree.tostring(job.to_xml(), pretty_print=True, encoding='utf8')
self.assertMultiLineEqual(expected, actual)
| gpl-2.0 | 621,352,936,695,160,400 | 42.085106 | 92 | 0.65284 | false | 3.894231 | false | false | false |
samueljackson92/NDImage | ndimage/gui/table_model.py | 1 | 1313 | from PyQt4 import QtCore
class DataFrameTableModel(QtCore.QAbstractTableModel):
def __init__(self, data=None, parent=None):
QtCore.QAbstractTableModel.__init__(self, parent=parent)
self.set_data(data)
def rowCount(self, parent):
return self._data.shape[0] if self._data is not None else 0
def columnCount(self, parent):
return self._data.shape[1] if self._data is not None else 0
def data(self, index, role):
if not index.isValid():
return QtCore.QVariant()
elif role != QtCore.Qt.DisplayRole:
return QtCore.QVariant()
value = self._data.iloc[index.row()][index.column()]
return QtCore.QVariant(str(value))
def get_data(self):
return self._data
def set_data(self, data):
self.beginResetModel()
self._data = data
self.endResetModel()
def headerData(self, section, orientation, role=QtCore.Qt.DisplayRole):
if role == QtCore.Qt.DisplayRole and orientation == QtCore.Qt.Horizontal:
return str(self._data.columns[section])
if role == QtCore.Qt.DisplayRole and orientation == QtCore.Qt.Vertical:
return str(self._data.index[section])
return QtCore.QAbstractTableModel.headerData(self, section, orientation, role)
| mit | -1,186,761,553,999,052,300 | 34.486486 | 86 | 0.648134 | false | 3.954819 | false | false | false |
stdlib-js/stdlib | lib/node_modules/@stdlib/math/base/special/maxabs/benchmark/python/benchmark.py | 1 | 2209 | #!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark maxabs."""
from __future__ import print_function
import timeit
NAME = "maxabs"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from math import fabs; from random import random;"
stmt = "y = max(fabs(1000.0*random() - 500.0), fabs(1000.0*random() - 500.0))"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in range(REPEATS):
print("# python::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(REPEATS, REPEATS)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| apache-2.0 | -5,144,522,500,454,437,000 | 21.773196 | 82 | 0.626981 | false | 3.627258 | false | false | false |
henry0312/LightGBM | python-package/lightgbm/plotting.py | 1 | 25113 | # coding: utf-8
"""Plotting library."""
from copy import deepcopy
from io import BytesIO
import numpy as np
from .basic import Booster, _log_warning
from .compat import GRAPHVIZ_INSTALLED, MATPLOTLIB_INSTALLED
from .sklearn import LGBMModel
def _check_not_tuple_of_2_elements(obj, obj_name='obj'):
"""Check object is not tuple or does not have 2 elements."""
if not isinstance(obj, tuple) or len(obj) != 2:
raise TypeError(f"{obj_name} must be a tuple of 2 elements.")
def _float2str(value, precision=None):
    """Format ``value`` as a string, rounding numeric values to ``precision`` decimal places when given."""
    return (f"{value:.{precision}f}"
            if precision is not None and not isinstance(value, str)
            else str(value))
def plot_importance(booster, ax=None, height=0.2,
xlim=None, ylim=None, title='Feature importance',
xlabel='Feature importance', ylabel='Features',
importance_type='split', max_num_features=None,
ignore_zero=True, figsize=None, dpi=None, grid=True,
precision=3, **kwargs):
"""Plot model's feature importances.
Parameters
----------
booster : Booster or LGBMModel
        Booster or LGBMModel instance whose feature importances should be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
height : float, optional (default=0.2)
Bar height, passed to ``ax.barh()``.
    xlim : tuple of 2 elements or None, optional (default=None)
        Tuple passed to ``ax.set_xlim()``.
    ylim : tuple of 2 elements or None, optional (default=None)
        Tuple passed to ``ax.set_ylim()``.
title : string or None, optional (default="Feature importance")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Feature importance")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Features")
Y-axis title label.
If None, title is disabled.
importance_type : string, optional (default="split")
How the importance is calculated.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
max_num_features : int or None, optional (default=None)
Max number of top features displayed on plot.
If None or <1, all features will be displayed.
ignore_zero : bool, optional (default=True)
Whether to ignore features with zero importance.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
grid : bool, optional (default=True)
Whether to add a grid for axes.
precision : int or None, optional (default=3)
Used to restrict the display of floating point values to a certain precision.
**kwargs
Other parameters passed to ``ax.barh()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with model's feature importances.
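    Examples
    --------
    A minimal, self-contained sketch (assumes ``matplotlib`` is installed; the toy data
    and the fitted model ``gbm`` below are illustrative assumptions, not part of this
    function's API):
    >>> import numpy as np
    >>> import lightgbm as lgb
    >>> X = np.random.rand(100, 5)
    >>> y = np.random.rand(100)
    >>> gbm = lgb.LGBMRegressor(n_estimators=10).fit(X, y)
    >>> ax = lgb.plot_importance(gbm, max_num_features=5)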
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib and restart your session to plot importance.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
importance = booster.feature_importance(importance_type=importance_type)
feature_name = booster.feature_name()
if not len(importance):
raise ValueError("Booster's feature_importance is empty.")
tuples = sorted(zip(feature_name, importance), key=lambda x: x[1])
if ignore_zero:
tuples = [x for x in tuples if x[1] > 0]
if max_num_features is not None and max_num_features > 0:
tuples = tuples[-max_num_features:]
labels, values = zip(*tuples)
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ylocs = np.arange(len(values))
ax.barh(ylocs, values, align='center', height=height, **kwargs)
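    # annotate each bar with its importance value (rounded to ``precision`` for 'gain', raw count for 'split')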
for x, y in zip(values, ylocs):
ax.text(x + 1, y,
_float2str(x, precision) if importance_type == 'gain' else x,
va='center')
ax.set_yticks(ylocs)
ax.set_yticklabels(labels)
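    # when no explicit limits are given, pad the axes slightly so bars and annotations are not clipped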
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
xlim = (0, max(values) * 1.1)
ax.set_xlim(xlim)
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
ylim = (-1, len(values))
ax.set_ylim(ylim)
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
def plot_split_value_histogram(booster, feature, bins=None, ax=None, width_coef=0.8,
xlim=None, ylim=None,
title='Split value histogram for feature with @index/name@ @feature@',
xlabel='Feature split value', ylabel='Count',
figsize=None, dpi=None, grid=True, **kwargs):
"""Plot split value histogram for the specified feature of the model.
Parameters
----------
booster : Booster or LGBMModel
        Booster or LGBMModel instance for which the feature split value histogram should be plotted.
feature : int or string
The feature name or index the histogram is plotted for.
If int, interpreted as index.
If string, interpreted as name.
bins : int, string or None, optional (default=None)
The maximum number of bins.
If None, the number of bins equals number of unique split values.
If string, it should be one from the list of the supported values by ``numpy.histogram()`` function.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
width_coef : float, optional (default=0.8)
Coefficient for histogram bar width.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Split value histogram for feature with @index/name@ @feature@")
Axes title.
If None, title is disabled.
@feature@ placeholder can be used, and it will be replaced with the value of ``feature`` parameter.
@index/name@ placeholder can be used,
and it will be replaced with ``index`` word in case of ``int`` type ``feature`` parameter
or ``name`` word in case of ``string`` type ``feature`` parameter.
xlabel : string or None, optional (default="Feature split value")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="Count")
Y-axis title label.
If None, title is disabled.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
grid : bool, optional (default=True)
Whether to add a grid for axes.
**kwargs
Other parameters passed to ``ax.bar()``.
Returns
-------
ax : matplotlib.axes.Axes
The plot with specified model's feature split value histogram.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
else:
raise ImportError('You must install matplotlib and restart your session to plot split value histogram.')
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
hist, bins = booster.get_split_value_histogram(feature=feature, bins=bins, xgboost_style=False)
if np.count_nonzero(hist) == 0:
raise ValueError('Cannot plot split value histogram, '
f'because feature {feature} was not used in splitting')
width = width_coef * (bins[1] - bins[0])
centred = (bins[:-1] + bins[1:]) / 2
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
ax.bar(centred, hist, align='center', width=width, **kwargs)
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
range_result = bins[-1] - bins[0]
xlim = (bins[0] - range_result * 0.2, bins[-1] + range_result * 0.2)
ax.set_xlim(xlim)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
ylim = (0, max(hist) * 1.1)
ax.set_ylim(ylim)
if title is not None:
title = title.replace('@feature@', str(feature))
title = title.replace('@index/name@', ('name' if isinstance(feature, str) else 'index'))
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
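# Example usage (sketch; assumes `gbm` is a trained model and that the feature
# named 'f1' was actually used in at least one split, otherwise a ValueError
# is raised as above):
#
#     ax = plot_split_value_histogram(gbm, feature='f1', bins=30)
#     plt.show()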
def plot_metric(booster, metric=None, dataset_names=None,
ax=None, xlim=None, ylim=None,
title='Metric during training',
xlabel='Iterations', ylabel='auto',
figsize=None, dpi=None, grid=True):
"""Plot one metric during training.
Parameters
----------
booster : dict or LGBMModel
Dictionary returned from ``lightgbm.train()`` or LGBMModel instance.
metric : string or None, optional (default=None)
The metric name to plot.
Only one metric supported because different metrics have various scales.
If None, first metric picked from dictionary (according to hashcode).
dataset_names : list of strings or None, optional (default=None)
List of the dataset names which are used to calculate metric to plot.
If None, all datasets are used.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
xlim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.xlim()``.
ylim : tuple of 2 elements or None, optional (default=None)
Tuple passed to ``ax.ylim()``.
title : string or None, optional (default="Metric during training")
Axes title.
If None, title is disabled.
xlabel : string or None, optional (default="Iterations")
X-axis title label.
If None, title is disabled.
ylabel : string or None, optional (default="auto")
Y-axis title label.
If 'auto', metric name is used.
If None, title is disabled.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
grid : bool, optional (default=True)
Whether to add a grid for axes.
Returns
-------
ax : matplotlib.axes.Axes
The plot with metric's history over the training.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib and restart your session to plot metric.')
if isinstance(booster, LGBMModel):
eval_results = deepcopy(booster.evals_result_)
elif isinstance(booster, dict):
eval_results = deepcopy(booster)
else:
raise TypeError('booster must be dict or LGBMModel.')
num_data = len(eval_results)
if not num_data:
raise ValueError('eval results cannot be empty.')
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
if dataset_names is None:
dataset_names = iter(eval_results.keys())
elif not isinstance(dataset_names, (list, tuple, set)) or not dataset_names:
raise ValueError('dataset_names should be iterable and cannot be empty')
else:
dataset_names = iter(dataset_names)
name = next(dataset_names) # take one as sample
metrics_for_one = eval_results[name]
num_metric = len(metrics_for_one)
if metric is None:
if num_metric > 1:
_log_warning("More than one metric available, picking one to plot.")
metric, results = metrics_for_one.popitem()
else:
if metric not in metrics_for_one:
raise KeyError('No given metric in eval results.')
results = metrics_for_one[metric]
num_iteration, max_result, min_result = len(results), max(results), min(results)
x_ = range(num_iteration)
ax.plot(x_, results, label=name)
for name in dataset_names:
metrics_for_one = eval_results[name]
results = metrics_for_one[metric]
max_result, min_result = max(max(results), max_result), min(min(results), min_result)
ax.plot(x_, results, label=name)
ax.legend(loc='best')
if xlim is not None:
_check_not_tuple_of_2_elements(xlim, 'xlim')
else:
xlim = (0, num_iteration)
ax.set_xlim(xlim)
if ylim is not None:
_check_not_tuple_of_2_elements(ylim, 'ylim')
else:
range_result = max_result - min_result
ylim = (min_result - range_result * 0.2, max_result + range_result * 0.2)
ax.set_ylim(ylim)
if ylabel == 'auto':
ylabel = metric
if title is not None:
ax.set_title(title)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
ax.grid(grid)
return ax
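# Example usage (sketch; assumes `evals_result` is the dict of evaluation
# histories recorded during training, e.g. via the record_evaluation callback,
# and that the 'l2' metric was evaluated on a dataset named 'valid_0'):
#
#     ax = plot_metric(evals_result, metric='l2', dataset_names=['valid_0'])
#     plt.show()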
def _to_graphviz(tree_info, show_info, feature_names, precision=3,
orientation='horizontal', constraints=None, **kwargs):
"""Convert specified tree to graphviz instance.
See:
- https://graphviz.readthedocs.io/en/stable/api.html#digraph
"""
if GRAPHVIZ_INSTALLED:
from graphviz import Digraph
else:
raise ImportError('You must install graphviz and restart your session to plot tree.')
def add(root, total_count, parent=None, decision=None):
"""Recursively add node or edge."""
if 'split_index' in root: # non-leaf
l_dec = 'yes'
r_dec = 'no'
if root['decision_type'] == '<=':
lte_symbol = "≤"
operator = lte_symbol
elif root['decision_type'] == '==':
operator = "="
else:
raise ValueError('Invalid decision type in tree model.')
name = f"split{root['split_index']}"
if feature_names is not None:
label = f"<B>{feature_names[root['split_feature']]}</B> {operator}"
else:
label = f"feature <B>{root['split_feature']}</B> {operator} "
label += f"<B>{_float2str(root['threshold'], precision)}</B>"
for info in ['split_gain', 'internal_value', 'internal_weight', "internal_count", "data_percentage"]:
if info in show_info:
output = info.split('_')[-1]
if info in {'split_gain', 'internal_value', 'internal_weight'}:
label += f"<br/>{_float2str(root[info], precision)} {output}"
elif info == 'internal_count':
label += f"<br/>{output}: {root[info]}"
elif info == "data_percentage":
label += f"<br/>{_float2str(root['internal_count'] / total_count * 100, 2)}% of data"
fillcolor = "white"
style = ""
if constraints:
if constraints[root['split_feature']] == 1:
fillcolor = "#ddffdd" # light green
if constraints[root['split_feature']] == -1:
fillcolor = "#ffdddd" # light red
style = "filled"
label = f"<{label}>"
graph.node(name, label=label, shape="rectangle", style=style, fillcolor=fillcolor)
add(root['left_child'], total_count, name, l_dec)
add(root['right_child'], total_count, name, r_dec)
else: # leaf
name = f"leaf{root['leaf_index']}"
label = f"leaf {root['leaf_index']}: "
label += f"<B>{_float2str(root['leaf_value'], precision)}</B>"
if 'leaf_weight' in show_info:
label += f"<br/>{_float2str(root['leaf_weight'], precision)} weight"
if 'leaf_count' in show_info:
label += f"<br/>count: {root['leaf_count']}"
if "data_percentage" in show_info:
label += f"<br/>{_float2str(root['leaf_count'] / total_count * 100, 2)}% of data"
label = f"<{label}>"
graph.node(name, label=label)
if parent is not None:
graph.edge(parent, name, decision)
graph = Digraph(**kwargs)
rankdir = "LR" if orientation == "horizontal" else "TB"
graph.attr("graph", nodesep="0.05", ranksep="0.3", rankdir=rankdir)
if "internal_count" in tree_info['tree_structure']:
add(tree_info['tree_structure'], tree_info['tree_structure']["internal_count"])
else:
raise Exception("Cannot plot trees with no split")
if constraints:
# "#ddffdd" is light green, "#ffdddd" is light red
legend = """<
<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" CELLPADDING="4">
<TR>
<TD COLSPAN="2"><B>Monotone constraints</B></TD>
</TR>
<TR>
<TD>Increasing</TD>
<TD BGCOLOR="#ddffdd"></TD>
</TR>
<TR>
<TD>Decreasing</TD>
<TD BGCOLOR="#ffdddd"></TD>
</TR>
</TABLE>
>"""
graph.node("legend", label=legend, shape="rectangle", color="white")
return graph
def create_tree_digraph(booster, tree_index=0, show_info=None, precision=3,
orientation='horizontal', **kwargs):
"""Create a digraph representation of specified tree.
Each node in the graph represents a node in the tree.
Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means
"this node splits on the feature named "Column_10", with threshold 875.9".
Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a
leaf node, and the predicted value for records that fall into this node
is 0.422". The number (``2``) is an internal unique identifier and doesn't
have any special meaning.
.. note::
For more information please visit
https://graphviz.readthedocs.io/en/stable/api.html#digraph.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance to be converted.
tree_index : int, optional (default=0)
The index of a target tree to convert.
show_info : list of strings or None, optional (default=None)
What information should be shown in nodes.
- ``'split_gain'`` : gain from adding this split to the model
- ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node
- ``'internal_count'`` : number of records from the training data that fall into this non-leaf node
- ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node
- ``'leaf_count'`` : number of records from the training data that fall into this leaf node
- ``'leaf_weight'`` : total weight (sum of hessian) of all observations that fall into this leaf node
- ``'data_percentage'`` : percentage of training data that fall into this node
precision : int or None, optional (default=3)
Used to restrict the display of floating point values to a certain precision.
orientation : string, optional (default='horizontal')
Orientation of the tree.
Can be 'horizontal' or 'vertical'.
**kwargs
Other parameters passed to ``Digraph`` constructor.
Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters.
Returns
-------
graph : graphviz.Digraph
The digraph representation of specified tree.
"""
if isinstance(booster, LGBMModel):
booster = booster.booster_
elif not isinstance(booster, Booster):
raise TypeError('booster must be Booster or LGBMModel.')
model = booster.dump_model()
tree_infos = model['tree_info']
if 'feature_names' in model:
feature_names = model['feature_names']
else:
feature_names = None
monotone_constraints = model.get('monotone_constraints', None)
if tree_index < len(tree_infos):
tree_info = tree_infos[tree_index]
else:
raise IndexError('tree_index is out of range.')
if show_info is None:
show_info = []
graph = _to_graphviz(tree_info, show_info, feature_names, precision,
orientation, monotone_constraints, **kwargs)
return graph
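# Example usage (sketch; assumes graphviz is installed and `gbm` is a trained
# Booster or LGBMModel):
#
#     graph = create_tree_digraph(gbm, tree_index=0,
#                                 show_info=['split_gain', 'internal_count'])
#     graph.render('tree0')  # writes the source and a rendered file to disk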
def plot_tree(booster, ax=None, tree_index=0, figsize=None, dpi=None,
show_info=None, precision=3, orientation='horizontal', **kwargs):
"""Plot specified tree.
Each node in the graph represents a node in the tree.
Non-leaf nodes have labels like ``Column_10 <= 875.9``, which means
"this node splits on the feature named "Column_10", with threshold 875.9".
Leaf nodes have labels like ``leaf 2: 0.422``, which means "this node is a
leaf node, and the predicted value for records that fall into this node
is 0.422". The number (``2``) is an internal unique identifier and doesn't
have any special meaning.
.. note::
It is preferable to use ``create_tree_digraph()`` because of its lossless quality
and returned objects can be also rendered and displayed directly inside a Jupyter notebook.
Parameters
----------
booster : Booster or LGBMModel
Booster or LGBMModel instance to be plotted.
ax : matplotlib.axes.Axes or None, optional (default=None)
Target axes instance.
If None, new figure and axes will be created.
tree_index : int, optional (default=0)
The index of a target tree to plot.
figsize : tuple of 2 elements or None, optional (default=None)
Figure size.
dpi : int or None, optional (default=None)
Resolution of the figure.
show_info : list of strings or None, optional (default=None)
What information should be shown in nodes.
- ``'split_gain'`` : gain from adding this split to the model
- ``'internal_value'`` : raw predicted value that would be produced by this node if it was a leaf node
- ``'internal_count'`` : number of records from the training data that fall into this non-leaf node
- ``'internal_weight'`` : total weight of all nodes that fall into this non-leaf node
- ``'leaf_count'`` : number of records from the training data that fall into this leaf node
- ``'leaf_weight'`` : total weight (sum of hessian) of all observations that fall into this leaf node
- ``'data_percentage'`` : percentage of training data that fall into this node
precision : int or None, optional (default=3)
Used to restrict the display of floating point values to a certain precision.
orientation : string, optional (default='horizontal')
Orientation of the tree.
Can be 'horizontal' or 'vertical'.
**kwargs
Other parameters passed to ``Digraph`` constructor.
Check https://graphviz.readthedocs.io/en/stable/api.html#digraph for the full list of supported parameters.
Returns
-------
ax : matplotlib.axes.Axes
The plot with single tree.
"""
if MATPLOTLIB_INSTALLED:
import matplotlib.image as image
import matplotlib.pyplot as plt
else:
raise ImportError('You must install matplotlib and restart your session to plot tree.')
if ax is None:
if figsize is not None:
_check_not_tuple_of_2_elements(figsize, 'figsize')
_, ax = plt.subplots(1, 1, figsize=figsize, dpi=dpi)
graph = create_tree_digraph(booster=booster, tree_index=tree_index,
show_info=show_info, precision=precision,
orientation=orientation, **kwargs)
s = BytesIO()
s.write(graph.pipe(format='png'))
s.seek(0)
img = image.imread(s)
ax.imshow(img)
ax.axis('off')
return ax
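# Example usage (sketch; requires both matplotlib and graphviz to be installed):
#
#     ax = plot_tree(gbm, tree_index=0, figsize=(20, 8),
#                    show_info=['internal_value', 'leaf_count'])
#     plt.show()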
| mit | -408,649,031,525,272,200 | 39.309791 | 115 | 0.614781 | false | 3.971691 | false | false | false |
darknightghost/AutoDeployer | cmd_pipe/SubProc.py | 1 | 3581 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
'''
Copyright 2016,王思远 <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import sys
class SubProc:
def __init__(self, path, argv, encoding = 'utf-8'):
self.path = path
self.argv = argv
self.encoding = encoding
#Get file descriptors
self.stdinFd = sys.stdin.fileno()
self.stdoutFd = sys.stdout.fileno()
self.stderrFd = sys.stderr.fileno()
#Create pipe
self.parentIn, self.childStdout = os.pipe()
self.childStdin, self.parentOut = os.pipe()
pid = os.fork()
if pid == 0:
self.is_child()
else:
self.child_id = pid
self.is_parent()
self.buffer = ""
def is_child(self):
os.close(self.parentIn)
os.close(self.parentOut)
os.dup2(self.childStdin, self.stdinFd)
os.dup2(self.childStdout, self.stdoutFd)
os.dup2(self.childStdout, self.stderrFd)
os.execv(self.path, self.argv)
def is_parent(self):
os.close(self.childStdin)
os.close(self.childStdout)
def read(self):
bs = os.read(self.parentIn, 1024)
ret = self.buffer + bs.decode(encoding = self.encoding,
errors = 'ignore')
self.buffer = ""
return ret
    def read_until(self, *strings):
        #Return (text, i): text is everything up to and including the first
        #occurrence of strings[i]; the remainder stays in the buffer.
        for string in strings:
            if string in self.buffer:
                index = self.buffer.index(string)
                ret = self.buffer[: index] + string
                self.buffer = self.buffer[index + len(string): ]
                return ret, strings.index(string)
        str_len = 0
        for string in strings:
            str_len = max(str_len, len(string))
        cmp_str = self.buffer[-str_len :]
        while True:
            bs = os.read(self.parentIn, 1024)
            s = bs.decode(encoding = self.encoding,
                    errors = 'ignore')
            cmp_str += s
            self.buffer += s
            for string in strings:
                if string in cmp_str:
                    index = self.buffer.index(string)
                    ret = self.buffer[: index] + string
                    self.buffer = self.buffer[index + len(string): ]
                    return ret, strings.index(string)
            cmp_str = cmp_str[-str_len :]
def write(self, string):
os.write(self.parentOut, string.encode(encoding = self.encoding,
errors = 'ignore'))
def read_line(self):
return self.read_until('\n')
def close(self):
os.close(self.parentIn)
os.close(self.parentOut)
os.wait()
def __del__(self):
os.close(self.parentIn)
os.close(self.parentOut)
try:
os.kill(self.child_id, 9)
except Exception:
pass
os.wait()
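# Example usage (a minimal sketch, not part of the original module; assumes a
# Unix-like system where /bin/cat exists):
#
#     proc = SubProc('/bin/cat', ['cat'])
#     proc.write('hello\n')
#     line, _ = proc.read_line()
#     proc.close()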
| gpl-3.0 | -127,845,725,472,308,430 | 29.29661 | 74 | 0.567552 | false | 3.919956 | false | false | false |
martinzlocha/mad | portal/forms.py | 1 | 2190 | from django import forms
from django.core.exceptions import ValidationError
from portal.models import Student, Hobby
class SignUpForm(forms.ModelForm):
class Meta:
model = Student
fields = ['name', 'username', 'gender', 'course']
widgets = {
'name': forms.TextInput(attrs={'placeholder': 'Ideally similar to name on FB'}),
'username': forms.TextInput(attrs={'placeholder': 'xy1217'}),
}
def clean_username(self):
username = self.cleaned_data['username']
# ToDo(martinzlocha): Check if the username provided looks valid
return username
class PreferenceForm(forms.ModelForm):
hobbies = forms.ModelMultipleChoiceField(widget=forms.CheckboxSelectMultiple(), required=False,
queryset=Hobby.objects.all())
class Meta:
model = Student
fields = ('party', 'hobbies')
def __init__(self, *args, **kwargs):
super(PreferenceForm, self).__init__(*args, **kwargs)
self.fields['party'].label = "Do you enjoy clubbing/partying/drinking?"
self.fields['hobbies'].label = "What are your hobbies? (Maximum 5 responses)"
def clean_hobbies(self):
hobbies = self.cleaned_data['hobbies']
if len(hobbies) > 5:
raise ValidationError("Maximum of 5 hobbies.")
return hobbies
class PartnerForm(forms.ModelForm):
class Meta:
model = Student
fields = ('partner',)
def __init__(self, *args, **kwargs):
super(PartnerForm, self).__init__(*args, **kwargs)
self.instance = kwargs.pop('instance', None)
self.fields['partner'].help_text = "If you don't have a partner then one will be allocated to you with " \
"similar hobbies."
choice = Student.objects.filter(confirmed=False, child=False).exclude(username__contains=self.instance.username)
self.fields["partner"].queryset = choice
def get_successful_proposal_popup(self):
message = "Proposal has been successfully sent to %s." % self.cleaned_data['partner']
return {'message': message, 'state': 'success'}
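# Example usage in a view (a minimal sketch, not from the original app; assumes
# `request` is a POST request carrying the sign-up fields):
#
#     form = SignUpForm(request.POST)
#     if form.is_valid():
#         student = form.save()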
| mit | -1,180,396,039,648,989,400 | 34.322581 | 120 | 0.620091 | false | 4.163498 | false | false | false |
citiususc/construe | construe/utils/signal_processing/wave_extraction.py | 1 | 8755 | # -*- coding: utf-8 -*-
# pylint: disable-msg=C0103
"""
Created on Thu Oct 17 13:15:52 2013
This module provides the functionality to obtain basic primitive structures,
called "peaks", from a signal fragment and its corresponding simplification
using the Douglas-Peucker algorithm. The process is based on the paper:
"Trahanias: Syntactic pattern recognition of the ECG, 1990".
@author: T. Teijeiro
"""
from .signal_measures import get_peaks
from ..units_helper import (msec2samples as ms2sp, phys2digital as ph2dg,
digital2phys as dg2ph, digital2mm as dg2mm,
samples2mm as sp2mm)
import numpy as np
import math
###############################################################################
# Amplitude and duration thresholds for the waves, extracted from the paper: #
# European Heart Journal: Recommendations for measurement standards in #
# quantitative electrocardiography. (1985) #
###############################################################################
MIN_AMP = ph2dg(0.05)
MIN_DUR = ms2sp(10.)
#Custom threshold, taken as an intuitive reference
MIN_ANGLE = math.pi/4.0
class Wave(object):
"""
This class provides the model of a Peak as is defined in the paper in which
this module is based. We have added an amplitude attribute.
"""
__slots__ = ('pts', 'e', 'amp')
def __init__(self):
#X coordinates for the left bound, peak, and right bound.
self.pts = (0, 0, 0)
#Wave energy
self.e = 0.0
#Wave amplitude
self.amp = 0.0
def __str__(self):
return '{0} - {1} - {2}, e = {3}, amp = {4} mV'.format(self.l,
self.m, self.r, self.e, dg2ph(self.amp))
def __repr__(self):
return str(self)
def __eq__(self, other):
return (type(self) is type(other) and self.e == other.e
and self.amp == other.amp and self.pts == other.pts)
@property
def sign(self):
"""
Obtains whether this Wave is a positive or negative Wave.
"""
return np.sign(self.amp)
@property
def l(self):
"""Returns the left boundary of the wave"""
return self.pts[0]
@property
def r(self):
"""Returns the left boundary of the wave"""
return self.pts[2]
@property
def m(self):
"""Returns the left boundary of the wave"""
return self.pts[1]
@property
def dur(self):
"""Returns the duration of the wave"""
return self.pts[2] - self.pts[0]
def move(self, displacement):
"""
Moves the wave a certain time, by adding the displacement value to
each bound and peak.
"""
self.pts = tuple(p+displacement for p in self.pts)
def extract_waves(signal, points, baseline= None):
"""
Obtains the sequence of *Wave* objects present in a signal fragment, based
on the shape simplification determined by points.
Parameters
----------
signal:
Raw signal fragment.
points:
Indices of the relevant points in the signal, that will be used to
determine the peaks.
Returns
-------
out:
Tuple of *Wave* objects.
"""
if baseline is None or not np.min(signal) <= baseline <= np.max(signal):
baseline = signal[0] - (signal[0]-signal[-1])/2.0
result = []
#Angle between two points
angle = lambda a, b : math.atan(dg2mm(abs(signal[b]-signal[a]))/sp2mm(b-a))
pks = points[get_peaks(signal[points])]
#If there are no peaks, there are no waves.
if len(pks) == 0:
return tuple()
#The limits of the waves will be the baseline level, or an angle decrease.
for i in range(len(pks)):
newpk = Wave()
#The limits of each wave is the next and the prevoius peak.
lb = 0 if i == 0 else pks[i-1]
#Left slope
idx = np.where(points==lb)[0][0]
while (points[idx] < pks[i] and (angle(points[idx], pks[i]) < MIN_ANGLE
or angle(points[idx], points[idx+1]) < MIN_ANGLE)):
idx += 1
#If we stop in the peak, we discard a wave in that peak.
if points[idx] == pks[i]:
continue
lb = points[idx]
#Right slope
rb = points[-1] if i == len(pks)-1 else pks[i+1]
idx = np.where(points==rb)[0][0]
while (points[idx] > pks[i] and (angle(pks[i], points[idx]) < MIN_ANGLE
or angle(points[idx-1], points[idx]) < MIN_ANGLE)):
idx -= 1
if points[idx] == pks[i]:
continue
rb = points[idx]
#Now we have ensured to meet minimum angle requirements. We now check
#duration and amplitude.
newpk.pts = (lb, pks[i], rb)
fref = min if signal[newpk.m] > signal[lb] else max
newpk.amp = signal[newpk.m] - fref(signal[rb], signal[lb])
#We remove peaks not satisfying basic constraints.
if (newpk.dur >= MIN_DUR and abs(newpk.amp) >= MIN_AMP):
result.append(newpk)
#The limits of consecutive waves have to be refined.
_refine_wave_limits(result, signal, baseline)
return tuple(result)
def _refine_wave_limits(waves, signal, baseline):
"""
This auxiliary function checks a sequence of wave objects, join two
consecutive waves if they are very close, and establishing the proper
join point if they overlap.
"""
i = 0
while i < len(waves):
#First we check for overlaps with the precedent wave
if i > 0 and waves[i].l < waves[i-1].r:
#The join point is the point closer to the baseline.
jp = waves[i].l + np.argmin(np.abs(
signal[waves[i].l:waves[i-1].r+1]-baseline))
waves[i].pts = (jp, waves[i].m, waves[i].r)
#And then for overlaps with the next one
if i < len(waves)-1 and waves[i].r > waves[i+1].l:
jp = waves[i+1].l + np.argmin(np.abs(
signal[waves[i+1].l:waves[i].r+1]-baseline))
waves[i].pts = (waves[i].l, waves[i].m, jp)
#Now we recompute amplitude.
fref = min if signal[waves[i].m] > signal[waves[i].l] else max
waves[i].amp = signal[waves[i].m] - fref(signal[waves[i].l],
signal[waves[i].r])
if (abs(waves[i].amp) < MIN_AMP or waves[i].dur < MIN_DUR or
waves[i].l == waves[i].m or waves[i].m == waves[i].r):
waves.pop(i)
else:
waves[i].e = np.sum(np.diff(signal[waves[i].l:waves[i].r+1])**2)
i += 1
#Now we join waves that are very close
for i in range(1, len(waves)):
sep = waves[i].l - waves[i-1].r
if 0 < sep < MIN_DUR:
#We join the waves in the maximum deviation point from the
#baseline.
pk = waves[i-1].r + np.argmax(np.abs(
signal[waves[i-1].r:waves[i].l+1]-baseline))
waves[i-1].pts = (waves[i-1].l, waves[i-1].m, pk)
waves[i].pts = (pk, waves[i].m, waves[i].r)
if __name__ == "__main__":
import matplotlib.pyplot as plt
#Small tests with a real delineated QRS example.
#Example 1: Record 124, beat 0, lead MLII
pts = np.array([ 0, 8, 14, 23, 27, 30, 42])
sig = np.array([837, 841, 854, 874, 893, 910, 924, 931,
935, 925, 902, 874, 840, 821, 821, 842,
880, 929, 982, 1031, 1076, 1122, 1162, 1200,
1229, 1250, 1262, 1263, 1257, 1241, 1218, 1187,
1151, 1109, 1067, 1024, 981, 938, 895, 857,
828, 810, 799])
#Example 2: Record 124, beat 0, lead V4
pts = np.array([ 0, 7, 9, 12, 14, 22])
sig = np.array([ 875., 886., 901., 928., 952., 970., 975., 972.,
955., 921., 868., 811., 758., 725., 717., 733.,
764., 803., 840., 871., 897., 915., 926.])
    #Example 3: Record 113, beat 0
pts = np.array([ 0, 8, 10, 14, 17, 22, 28])
sig = np.array([ 1042., 1046., 1053., 1059., 1066., 1074., 1079.,
1078., 1082., 1080., 1069., 1053., 1031., 1009.,
990., 978., 965., 965., 971., 987., 1011.,
1023., 1032., 1030., 1025., 1027., 1034., 1041.,
1045.])
plt.figure()
plt.plot(sig, '--')
plt.plot(pts, sig[pts], 'bo')
for p in extract_waves(sig, pts):
x = np.array(p.pts)
plt.plot(x, sig[x])
print(str(p))
| agpl-3.0 | 7,635,516,548,468,927,000 | 37.231441 | 79 | 0.531468 | false | 3.276572 | false | false | false |
endreman0/Excalibot | excalibot/cogs/voice.py | 1 | 4864 | import asyncio, discord
from discord.ext.commands import guild_only
from .. import db, log
from ..core import command
class VoiceText(metaclass=command.Cog):
async def on_voice_state_update(self, member, before, after):
if before.channel is after.channel: return # Only care about changing channels
with self.bot.session as session:
give_links = tuple() if after.channel is None else session.get(TextVoiceLink, voice_id=after.channel.id).all()
take_links = tuple() if before.channel is None else session.get(TextVoiceLink, voice_id=before.channel.id).all()
if give_links:
give_roles = tuple(member.guild.get_role(link.role_id) for link in give_links)
await member.add_roles(*give_roles)
if take_links:
take_roles = tuple(member.guild.get_role(link.role_id) for link in take_links)
await member.remove_roles(*take_roles)
@guild_only()
@command.group(name='voicetext', aliases=['vt', 'voice'])
async def base_command(self, ctx):
"""Voice-text bridge, linking voice channels to text channels.
Any number of text channels can be connected to any number of voice channels.
For example, you can link all voice channels to a general voice-text channel, and also link each one to one or more text channels for that specific voice channel."""
await ctx.send('Use `{}help {}` for info'.format(ctx.prefix, ctx.invoked_with))
@base_command.command()
async def link(self, ctx, text: discord.TextChannel, *, voice: discord.VoiceChannel):
"""Link an existing text channel to a voice channel."""
with ctx.session:
link = ctx.session.get(TextVoiceLink, text_id=text.id, voice_id=voice.id).one_or_none()
if link is not None:
return await ctx.send('BAKA! Those channels are already linked!')
role = await self._create_role(ctx.guild, text, voice, 'Voice link requested by {}'.format(ctx.author))
link = ctx.session.add(TextVoiceLink(role_id=role.id, text_id=text.id, voice_id=voice.id))
await ctx.send('Linked {} to "{}"', text.mention, voice.name)
link.example_usage = """
`{prefix}voicetext link #voice-1 Voice 1` - link the text channel #voice-1 with the voice channel "Voice 1", so that users in voice get access to the text channel
"""
@base_command.command()
async def make(self, ctx, *, voice: discord.VoiceChannel):
"""Create a text channel and link it to the given voice channel."""
msg = await ctx.send('Creating text channel for {}', voice.name)
text = await ctx.guild.create_text_channel('voice-' + voice.name.lower().replace(' ', '-'), reason='Voice link requested by {}'.format(ctx.author))
with ctx.session:
role = await self._create_role(ctx.guild, text, voice, 'Voice link requested by {}'.format(ctx.author))
link = ctx.session.add(TextVoiceLink(role_id=role.id, text_id=text.id, voice_id=voice.id))
await ctx.send('Created {} and linked it to "{}"', text.mention, voice.name)
make.example_usage = """
	`{prefix}voicetext make Voice 1` - creates a text channel #voice-1 linked to the voice channel "Voice 1", so that users in voice get access to the text channel
"""
@base_command.command()
async def unlink(self, ctx, text: discord.TextChannel, *, voice: discord.VoiceChannel):
"""Unlinks a voice channel and deletes the corresponding role."""
with ctx.session:
link = ctx.session.get(TextVoiceLink, text_id=text.id, voice_id=voice.id).one_or_none()
if link is None:
return await ctx.send('BAKA! Those channels are not linked!')
role_id, text_id, voice_id = link.role_id, link.text_id, link.voice_id
ctx.session.delete(link)
role = ctx.guild.get_role(role_id)
if role is None:
			await ctx.send('Unlinked {} from "{}"; the linked role no longer exists.', text.mention, voice.name)
else:
await self._delete_role(ctx.guild, role, text, voice, 'Voice unlink requested by {}'.format(ctx.author))
await ctx.send('Unlinked {} from "{}" and deleted the "{}" role.', text.mention, voice.name, role.name)
unlink.example_usage = """
`{prefix}voicetext unlink #voice-1 Voice 1` - unlink the text channel #voice-1 from the voice channel "Voice 1"
"""
async def _create_role(self, guild, text, voice, reason):
role = await guild.create_role(name='Voice {}'.format(voice.name), reason=reason)
await text.set_permissions(guild.default_role, read_messages=False, reason=reason)
await text.set_permissions(role, read_messages=True, reason=reason)
return role
async def _delete_role(self, guild, role, text, voice, reason):
await text.set_permissions(guild.default_role, read_messages=None, reason=reason)
await text.set_permissions(role, overwrite=None, reason=reason)
await role.delete(reason=reason)
class TextVoiceLink(db.DatabaseObject):
__tablename__ = 'voicetext_links'
text_id = db.Column(db.Integer, primary_key=True)
voice_id = db.Column(db.Integer, primary_key=True)
role_id = db.Column(db.Integer, unique=True)
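# Example interaction (sketch based on the example_usage strings above; the
# actual prefix depends on the bot's configuration):
#
#     {prefix}voicetext link #voice-1 Voice 1
#     {prefix}voicetext make Voice 2
#     {prefix}voicetext unlink #voice-1 Voice 1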
| gpl-3.0 | -6,292,609,427,924,506,000 | 52.450549 | 167 | 0.717516 | false | 3.206328 | false | false | false |
yeming233/rally | rally/plugins/openstack/services/storage/cinder_common.py | 1 | 29782 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common.i18n import _
from rally import exceptions
from rally.plugins.openstack.services.image import image
from rally.plugins.openstack.services.storage import block
from rally.task import atomic
from rally.task import utils as bench_utils
CONF = block.CONF
class CinderMixin(object):
def _get_client(self):
return self._clients.cinder(self.version)
def _update_resource(self, resource):
try:
manager = getattr(resource, "manager", None)
if manager:
res = manager.get(resource.id)
else:
if isinstance(resource, block.Volume):
attr = "volumes"
elif isinstance(resource, block.VolumeSnapshot):
attr = "volume_snapshots"
elif isinstance(resource, block.VolumeBackup):
attr = "backups"
res = getattr(self._get_client(), attr).get(resource.id)
except Exception as e:
if getattr(e, "code", getattr(e, "http_status", 400)) == 404:
raise exceptions.GetResourceNotFound(resource=resource)
raise exceptions.GetResourceFailure(resource=resource, err=e)
return res
def _wait_available_volume(self, volume):
return bench_utils.wait_for_status(
volume,
ready_statuses=["available"],
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
def list_volumes(self, detailed=True):
"""List all volumes."""
aname = "cinder_v%s.list_volumes" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volumes.list(detailed)
def get_volume(self, volume_id):
"""Get target volume information."""
aname = "cinder_v%s.get_volume" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volumes.get(volume_id)
def delete_volume(self, volume):
"""Delete target volume."""
aname = "cinder_v%s.delete_volume" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().volumes.delete(volume)
bench_utils.wait_for_status(
volume,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=(CONF.benchmark
.cinder_volume_delete_poll_interval)
)
def extend_volume(self, volume, new_size):
"""Extend the size of the specified volume."""
if isinstance(new_size, dict):
new_size = random.randint(new_size["min"], new_size["max"])
aname = "cinder_v%s.extend_volume" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().volumes.extend(volume, new_size)
return self._wait_available_volume(volume)
def list_snapshots(self, detailed=True):
"""Get a list of all snapshots."""
aname = "cinder_v%s.list_snapshots" % self.version
with atomic.ActionTimer(self, aname):
return (self._get_client()
.volume_snapshots.list(detailed))
def set_metadata(self, volume, sets=10, set_size=3):
"""Set volume metadata.
:param volume: The volume to set metadata on
:param sets: how many operations to perform
:param set_size: number of metadata keys to set in each operation
:returns: A list of keys that were set
"""
key = "cinder_v%s.set_%s_metadatas_%s_times" % (self.version,
set_size,
sets)
with atomic.ActionTimer(self, key):
keys = []
for i in range(sets):
metadata = {}
for j in range(set_size):
key = self.generate_random_name()
keys.append(key)
metadata[key] = self.generate_random_name()
self._get_client().volumes.set_metadata(volume, metadata)
return keys
def delete_metadata(self, volume, keys, deletes=10, delete_size=3):
"""Delete volume metadata keys.
Note that ``len(keys)`` must be greater than or equal to
``deletes * delete_size``.
:param volume: The volume to delete metadata from
:param deletes: how many operations to perform
:param delete_size: number of metadata keys to delete in each operation
:param keys: a list of keys to choose deletion candidates from
"""
if len(keys) < deletes * delete_size:
raise exceptions.InvalidArgumentsException(
"Not enough metadata keys to delete: "
"%(num_keys)s keys, but asked to delete %(num_deletes)s" %
{"num_keys": len(keys),
"num_deletes": deletes * delete_size})
# make a shallow copy of the list of keys so that, when we pop
# from it later, we don't modify the original list.
keys = list(keys)
random.shuffle(keys)
action_name = ("cinder_v%s.delete_%s_metadatas_%s_times"
% (self.version, delete_size, deletes))
with atomic.ActionTimer(self, action_name):
for i in range(deletes):
to_del = keys[i * delete_size:(i + 1) * delete_size]
self._get_client().volumes.delete_metadata(volume, to_del)
def update_readonly_flag(self, volume, read_only):
"""Update the read-only access mode flag of the specified volume.
:param volume: The UUID of the volume to update.
:param read_only: The value to indicate whether to update volume to
read-only access mode.
:returns: A tuple of http Response and body
"""
aname = "cinder_v%s.update_readonly_flag" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volumes.update_readonly_flag(
volume, read_only)
def upload_volume_to_image(self, volume, force=False,
container_format="bare", disk_format="raw"):
"""Upload the given volume to image.
Returns created image.
:param volume: volume object
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
:returns: Returns created image object
"""
aname = "cinder_v%s.upload_volume_to_image" % self.version
with atomic.ActionTimer(self, aname):
resp, img = self._get_client().volumes.upload_to_image(
volume, force, self.generate_random_name(), container_format,
disk_format)
# NOTE (e0ne): upload_to_image changes volume status to uploading
# so we need to wait until it will be available.
volume = self._wait_available_volume(volume)
image_id = img["os-volume_upload_image"]["image_id"]
glance = image.Image(self._clients)
image_inst = glance.get_image(image_id)
image_inst = bench_utils.wait_for_status(
image_inst,
ready_statuses=["active"],
update_resource=glance.get_image,
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=(CONF.benchmark
.glance_image_create_poll_interval)
)
return image_inst
def create_qos(self, specs):
"""Create a qos specs.
:param specs: A dict of key/value pairs to be set
:rtype: :class:'QoSSpecs'
"""
aname = "cinder_v%s.create_qos" % self.version
name = self.generate_random_name()
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.create(name, specs)
def list_qos(self, search_opts=None):
"""Get a list of all qos specs.
:param search_opts: search options
:rtype: list of :class: 'QoSpecs'
"""
aname = "cinder_v%s.list_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.list(search_opts)
def get_qos(self, qos_id):
"""Get a specific qos specs.
:param qos_id: The ID of the :class: 'QoSSpecs' to get
:rtype: :class: 'QoSSpecs'
"""
aname = "cinder_v%s.get_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.get(qos_id)
def set_qos(self, qos_id, set_specs_args):
"""Add/Update keys in qos specs.
:param qos_id: The ID of the :class:`QoSSpecs` to get
:param set_specs_args: A dict of key/value pairs to be set
:rtype: class 'cinderclient.apiclient.base.DictWithMeta'
{"qos_specs": set_specs_args}
"""
aname = "cinder_v%s.set_qos" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().qos_specs.set_keys(qos_id,
set_specs_args)
def qos_associate_type(self, qos_specs, vol_type_id):
"""Associate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param vol_type_id: The volume type id to be associated with
:returns: base on client response return True if the request
has been accepted or not
"""
aname = "cinder_v%s.qos_associate_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().qos_specs.associate(qos_specs,
vol_type_id)
return (tuple_res[0].status_code == 202)
def qos_disassociate_type(self, qos_specs, vol_type_id):
"""Disassociate qos specs from volume type.
:param qos_specs: The qos specs to be disassociated with
:param vol_type_id: The volume type id to be disassociated with
:returns: base on client response return True if the request
has been accepted or not
"""
aname = "cinder_v%s.qos_disassociate_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().qos_specs.disassociate(qos_specs,
vol_type_id)
return (tuple_res[0].status_code == 202)
def delete_snapshot(self, snapshot):
"""Delete the given snapshot.
Returns when the snapshot is actually deleted.
:param snapshot: snapshot object
"""
aname = "cinder_v%s.delete_snapshot" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().volume_snapshots.delete(snapshot)
bench_utils.wait_for_status(
snapshot,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=(CONF.benchmark
.cinder_volume_delete_poll_interval)
)
def delete_backup(self, backup):
"""Delete the given backup.
Returns when the backup is actually deleted.
:param backup: backup instance
"""
aname = "cinder_v%s.delete_backup" % self.version
with atomic.ActionTimer(self, aname):
self._get_client().backups.delete(backup)
bench_utils.wait_for_status(
backup,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=self._update_resource,
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=(CONF.benchmark
.cinder_volume_delete_poll_interval)
)
def restore_backup(self, backup_id, volume_id=None):
"""Restore the given backup.
:param backup_id: The ID of the backup to restore.
:param volume_id: The ID of the volume to restore the backup to.
"""
aname = "cinder_v%s.restore_backup" % self.version
with atomic.ActionTimer(self, aname):
restore = self._get_client().restores.restore(backup_id, volume_id)
restored_volume = self._get_client().volumes.get(restore.volume_id)
return self._wait_available_volume(restored_volume)
def list_backups(self, detailed=True):
"""Return user volume backups list.
:param detailed: True if detailed information about backup
should be listed
"""
aname = "cinder_v%s.list_backups" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().backups.list(detailed)
def list_transfers(self, detailed=True, search_opts=None):
"""Get a list of all volume transfers.
:param detailed: If True, detailed information about transfer
should be listed
:param search_opts: Search options to filter out volume transfers
:returns: list of :class:`VolumeTransfer`
"""
aname = "cinder_v%s.list_transfers" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.list(detailed, search_opts)
def get_volume_type(self, volume_type):
"""get details of volume_type.
:param volume_type: The ID of the :class:`VolumeType` to get
:returns: :class:`VolumeType`
"""
aname = "cinder_v%s.get_volume_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_types.get(volume_type)
def delete_volume_type(self, volume_type):
"""delete a volume type.
:param volume_type: Name or Id of the volume type
:returns: base on client response return True if the request
has been accepted or not
"""
aname = "cinder_v%s.delete_volume_type" % self.version
with atomic.ActionTimer(self, aname):
tuple_res = self._get_client().volume_types.delete(
volume_type)
return (tuple_res[0].status_code == 202)
def set_volume_type_keys(self, volume_type, metadata):
"""Set extra specs on a volume type.
:param volume_type: The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
:returns: extra_specs if the request has been accepted
"""
aname = "cinder_v%s.set_volume_type_keys" % self.version
with atomic.ActionTimer(self, aname):
return volume_type.set_keys(metadata)
def transfer_create(self, volume_id, name=None):
"""Create a volume transfer.
:param name: The name of created transfer
:param volume_id: The ID of the volume to transfer
:rtype: VolumeTransfer
"""
name = name or self.generate_random_name()
aname = "cinder_v%s.transfer_create" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.create(volume_id, name=name)
def transfer_accept(self, transfer_id, auth_key):
"""Accept a volume transfer.
:param transfer_id: The ID of the transfer to accept.
:param auth_key: The auth_key of the transfer.
:rtype: VolumeTransfer
"""
aname = "cinder_v%s.transfer_accept" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().transfers.accept(transfer_id, auth_key)
def create_encryption_type(self, volume_type, specs):
"""Create encryption type for a volume type. Default: admin only.
:param volume_type: the volume type on which to add an encryption type
:param specs: the encryption type specifications to add
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.create_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.create(
volume_type, specs)
def get_encryption_type(self, volume_type):
"""Get the volume encryption type for the specified volume type.
:param volume_type: the volume type to query
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.get_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.get(
volume_type)
def list_encryption_type(self, search_opts=None):
"""List all volume encryption types.
:param search_opts: Options used when search for encryption types
:return: a list of :class: VolumeEncryptionType instances
"""
aname = "cinder_v%s.list_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.list(
search_opts)
def delete_encryption_type(self, volume_type):
"""Delete the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be deleted
"""
aname = "cinder_v%s.delete_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
resp = self._get_client().volume_encryption_types.delete(
volume_type)
if (resp[0].status_code != 202):
raise exceptions.RallyException(
_("EncryptionType Deletion Failed"))
def update_encryption_type(self, volume_type, specs):
"""Update the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be updated
:param specs: the encryption type specifications to update
:return: an instance of :class: VolumeEncryptionType
"""
aname = "cinder_v%s.update_encryption_type" % self.version
with atomic.ActionTimer(self, aname):
return self._get_client().volume_encryption_types.update(
volume_type, specs)
class UnifiedCinderMixin(object):
@staticmethod
def _unify_backup(backup):
return block.VolumeBackup(id=backup.id, name=backup.name,
volume_id=backup.volume_id,
status=backup.status)
@staticmethod
def _unify_transfer(transfer):
auth_key = transfer.auth_key if hasattr(transfer, "auth_key") else None
return block.VolumeTransfer(id=transfer.id, name=transfer.name,
volume_id=transfer.volume_id,
auth_key=auth_key)
@staticmethod
def _unify_qos(qos):
return block.QoSSpecs(id=qos.id, name=qos.name, specs=qos.specs)
@staticmethod
def _unify_encryption_type(encryption_type):
return block.VolumeEncryptionType(
id=encryption_type.encryption_id,
volume_type_id=encryption_type.volume_type_id)
def delete_volume(self, volume):
"""Delete a volume."""
self._impl.delete_volume(volume)
def set_metadata(self, volume, sets=10, set_size=3):
"""Update/Set a volume metadata.
:param volume: The updated/setted volume.
:param sets: how many operations to perform
:param set_size: number of metadata keys to set in each operation
:returns: A list of keys that were set
"""
return self._impl.set_metadata(volume, sets=sets, set_size=set_size)
def delete_metadata(self, volume, keys, deletes=10, delete_size=3):
"""Delete volume metadata keys.
Note that ``len(keys)`` must be greater than or equal to
``deletes * delete_size``.
:param volume: The volume to delete metadata from
:param deletes: how many operations to perform
:param delete_size: number of metadata keys to delete in each operation
:param keys: a list of keys to choose deletion candidates from
"""
        self._impl.delete_metadata(volume, keys=keys, deletes=deletes,
                                   delete_size=delete_size)
def update_readonly_flag(self, volume, read_only):
"""Update the read-only access mode flag of the specified volume.
:param volume: The UUID of the volume to update.
:param read_only: The value to indicate whether to update volume to
read-only access mode.
:returns: A tuple of http Response and body
"""
return self._impl.update_readonly_flag(volume, read_only=read_only)
def upload_volume_to_image(self, volume, force=False,
container_format="bare", disk_format="raw"):
"""Upload the given volume to image.
Returns created image.
:param volume: volume object
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi and iso
:returns: Returns created image object
"""
return self._impl.upload_volume_to_image(
volume, force=force, container_format=container_format,
disk_format=disk_format)
def create_qos(self, specs):
"""Create a qos specs.
:param specs: A dict of key/value pairs to be set
:rtype: :class:'QoSSpecs'
"""
return self._unify_qos(self._impl.create_qos(specs))
def list_qos(self, search_opts=None):
"""Get a list of all qos specs.
:param search_opts: search options
:rtype: list of :class: 'QoSpecs'
"""
return [self._unify_qos(qos)
for qos in self._impl.list_qos(search_opts)]
def get_qos(self, qos_id):
"""Get a specific qos specs.
:param qos_id: The ID of the :class: 'QoSSpecs' to get
:rtype: :class: 'QoSSpecs'
"""
return self._unify_qos(self._impl.get_qos(qos_id))
def set_qos(self, qos, set_specs_args):
"""Add/Update keys in qos specs.
:param qos: The instance of the :class:`QoSSpecs` to set
:param set_specs_args: A dict of key/value pairs to be set
:rtype: :class: 'QoSSpecs'
"""
self._impl.set_qos(qos.id, set_specs_args)
return self._unify_qos(qos)
def qos_associate_type(self, qos_specs, vol_type_id):
"""Associate qos specs from volume type.
:param qos_specs: The qos specs to be associated with
:param vol_type_id: The volume type id to be associated with
"""
self._impl.qos_associate_type(qos_specs, vol_type_id)
return self._unify_qos(qos_specs)
def qos_disassociate_type(self, qos_specs, vol_type_id):
"""Disassociate qos specs from volume type.
:param qos_specs: The qos specs to be disassociated with
:param vol_type_id: The volume type id to be disassociated with
"""
self._impl.qos_disassociate_type(qos_specs, vol_type_id)
return self._unify_qos(qos_specs)
def delete_snapshot(self, snapshot):
"""Delete the given backup.
Returns when the backup is actually deleted.
:param backup: backup instance
"""
self._impl.delete_snapshot(snapshot)
def delete_backup(self, backup):
"""Delete a volume backup."""
self._impl.delete_backup(backup)
def list_backups(self, detailed=True):
"""Return user volume backups list."""
return [self._unify_backup(backup)
for backup in self._impl.list_backups(detailed=detailed)]
def list_transfers(self, detailed=True, search_opts=None):
"""Get a list of all volume transfers.
:param detailed: If True, detailed information about transfer
should be listed
:param search_opts: Search options to filter out volume transfers
:returns: list of :class:`VolumeTransfer`
"""
return [self._unify_transfer(transfer)
for transfer in self._impl.list_transfers(
detailed=detailed, search_opts=search_opts)]
def get_volume_type(self, volume_type):
"""get details of volume_type.
:param volume_type: The ID of the :class:`VolumeType` to get
:returns: :class:`VolumeType`
"""
return self._impl.get_volume_type(volume_type)
def delete_volume_type(self, volume_type):
"""delete a volume type.
:param volume_type: Name or Id of the volume type
:returns: base on client response return True if the request
has been accepted or not
"""
return self._impl.delete_volume_type(volume_type)
def set_volume_type_keys(self, volume_type, metadata):
"""Set extra specs on a volume type.
:param volume_type: The :class:`VolumeType` to set extra spec on
:param metadata: A dict of key/value pairs to be set
:returns: extra_specs if the request has been accepted
"""
return self._impl.set_volume_type_keys(volume_type, metadata)
def transfer_create(self, volume_id, name=None):
"""Creates a volume transfer.
:param name: The name of created transfer
:param volume_id: The ID of the volume to transfer.
:returns: Return the created transfer.
"""
return self._unify_transfer(
self._impl.transfer_create(volume_id, name=name))
def transfer_accept(self, transfer_id, auth_key):
"""Accept a volume transfer.
:param transfer_id: The ID of the transfer to accept.
:param auth_key: The auth_key of the transfer.
:returns: VolumeTransfer
"""
return self._unify_transfer(
self._impl.transfer_accept(transfer_id, auth_key=auth_key))
def create_encryption_type(self, volume_type, specs):
"""Create encryption type for a volume type. Default: admin only.
:param volume_type: the volume type on which to add an encryption type
:param specs: the encryption type specifications to add
:return: an instance of :class: VolumeEncryptionType
"""
return self._unify_encryption_type(
self._impl.create_encryption_type(volume_type, specs=specs))
def get_encryption_type(self, volume_type):
"""Get the volume encryption type for the specified volume type.
:param volume_type: the volume type to query
:return: an instance of :class: VolumeEncryptionType
"""
return self._unify_encryption_type(
self._impl.get_encryption_type(volume_type))
def list_encryption_type(self, search_opts=None):
"""List all volume encryption types.
:param search_opts: Options used when search for encryption types
:return: a list of :class: VolumeEncryptionType instances
"""
return [self._unify_encryption_type(encryption_type)
for encryption_type in self._impl.list_encryption_type(
search_opts=search_opts)]
def delete_encryption_type(self, volume_type):
"""Delete the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be deleted
"""
return self._impl.delete_encryption_type(volume_type)
def update_encryption_type(self, volume_type, specs):
"""Update the encryption type information for the specified volume type.
:param volume_type: the volume type whose encryption type information
must be updated
:param specs: the encryption type specifications to update
:return: an instance of :class: VolumeEncryptionType
"""
return self._impl.update_encryption_type(volume_type, specs=specs)
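# Example usage of the metadata helpers (a sketch; assumes `service` is an
# instance of a concrete class that mixes in CinderMixin and `volume` is an
# existing volume object):
#
#     keys = service.set_metadata(volume, sets=5, set_size=2)   # 10 keys set
#     # len(keys) must be >= deletes * delete_size (here 10 >= 3 * 3)
#     service.delete_metadata(volume, keys, deletes=3, delete_size=3)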
| apache-2.0 | -218,852,773,355,620,600 | 39.853224 | 80 | 0.601874 | false | 4.245474 | false | false | false |
tsauerwein/c2cgeoportal | c2cgeoportal/tests/xmlstr.py | 3 | 5978 | # -*- coding: utf-8 -*-
# Copyright (c) 2013-2014, Camptocamp SA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
getfeature = """
<wfs:GetFeature xmlns:wfs="http://www.opengis.net/wfs" service="WFS" version="1.1.0" xsi:schemaLocation="http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<wfs:Query typeName="feature:grundstueck" srsName="EPSG:2056" xmlns:feature="http://mapserver.gis.umn.edu/mapserver">
<ogc:Filter xmlns:ogc="http://www.opengis.net/ogc">
<ogc:PropertyIsLike matchCase="false" wildCard="*" singleChar="." escapeChar="!">
<ogc:PropertyName>nummer</ogc:PropertyName>
<ogc:Literal>10*</ogc:Literal>
</ogc:PropertyIsLike>
</ogc:Filter>
</wfs:Query>
</wfs:GetFeature>
"""
feature = """
<gml:featureMember>
<ms:grundstueck>
<gml:boundedBy>
<gml:Envelope srsName="EPSG:2056">
<gml:lowerCorner>2626901.051818 1258035.790009</gml:lowerCorner>
<gml:upperCorner>2627050.862856 1258132.841364</gml:upperCorner>
</gml:Envelope>
</gml:boundedBy>
<ms:msGeometry>
<gml:LineString srsName="EPSG:2056">
<gml:posList srsDimension="2">2627033.201116 1258103.390372 2627034.048142 1258105.737388 2627010.821109 1258118.506850 2626985.111074 1258132.841364 2626980.135958 1258123.622322 2626978.010913 1258120.089309 2626966.170890 1258126.005538 2626949.985629 1258108.760552 2626924.919220 1258081.422566 2626910.187979 1258065.386575 2626901.051818 1258054.063564 2626935.224905 1258039.509934 2626956.098017 1258037.068626 2626971.167108 1258036.400415 2627000.949294 1258035.790009 2627018.708458 1258041.255835 2627029.967583 1258047.114753 2627048.056822 1258060.580669 2627050.862856 1258062.337652 2627048.942861 1258064.236700 2627036.107888 1258076.303014 2627023.360917 1258088.497329 2627028.596025 1258096.640354 2627033.201116 1258103.390372 </gml:posList>
</gml:LineString>
</ms:msGeometry>
<ms:gs_id>1676545</ms:gs_id>
<ms:lsn_oid>1510175178</ms:lsn_oid>
<ms:nummer>1071</ms:nummer>
<ms:gueltigkeit>rechtskräftig</ms:gueltigkeit>
<ms:art>Liegenschaft</ms:art>
<ms:gemeinde_id_bfs>2861</ms:gemeinde_id_bfs>
<ms:meta_id>1510</ms:meta_id>
<ms:flaechenmass>8774</ms:flaechenmass>
<ms:nummer_m_deko>1071</ms:nummer_m_deko>
<ms:nbident>BL0200002861</ms:nbident>
<ms:vollstaendigkeit>vollständig</ms:vollstaendigkeit>
<ms:datenherr>Jermann</ms:datenherr>
<ms:mut_nummer>pn18</ms:mut_nummer>
</ms:grundstueck>
</gml:featureMember>
"""
featurecollection_outlimit = """
<wfs:FeatureCollection xmlns:ms="http://mapserver.gis.umn.edu/mapserver" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wfs="http://www.opengis.net/wfs" xsi:schemaLocation="http://mapserver.gis.umn.edu/mapserver http://c2cpc29.camptocamp.com/sbrunner/mapserv?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=feature:grundstueck&OUTPUTFORMAT=text/xml;%20subtype=gml/3.1.1 http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:boundedBy>
<gml:Envelope srsName="EPSG:2056">
<gml:lowerCorner>2595270.118588 1244096.257242</gml:lowerCorner>
<gml:upperCorner>2638409.063753 1267658.751429</gml:upperCorner>
</gml:Envelope>
</gml:boundedBy>
""" + feature * 205 + """
</wfs:FeatureCollection>
"""
featurecollection_inlimit = """
<wfs:FeatureCollection xmlns:ms="http://mapserver.gis.umn.edu/mapserver" xmlns:ogc="http://www.opengis.net/ogc" xmlns:gml="http://www.opengis.net/gml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wfs="http://www.opengis.net/wfs" xsi:schemaLocation="http://mapserver.gis.umn.edu/mapserver http://c2cpc29.camptocamp.com/sbrunner/mapserv?SERVICE=WFS&VERSION=1.1.0&REQUEST=DescribeFeatureType&TYPENAME=feature:grundstueck&OUTPUTFORMAT=text/xml;%20subtype=gml/3.1.1 http://www.opengis.net/wfs http://schemas.opengis.net/wfs/1.1.0/wfs.xsd">
<gml:boundedBy>
<gml:Envelope srsName="EPSG:2056">
<gml:lowerCorner>2595270.118588 1244096.257242</gml:lowerCorner>
<gml:upperCorner>2638409.063753 1267658.751429</gml:upperCorner>
</gml:Envelope>
</gml:boundedBy>
""" + feature * 199 + """
</wfs:FeatureCollection>
"""
| bsd-2-clause | -5,369,876,525,444,830,000 | 60.608247 | 776 | 0.732932 | false | 2.985015 | false | false | false |
ayepezv/GAD_ERP | openerp/__init__.py | 2 | 2641 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" OpenERP core library."""
#----------------------------------------------------------
# Running mode flags (gevent, prefork)
#----------------------------------------------------------
# Is the server running with gevent.
import sys
evented = False
if sys.modules.get("gevent") is not None:
evented = True
# Is the server running in prefork mode (e.g. behind Gunicorn).
# If this is True, the processes have to communicate some events,
# e.g. database update or cache invalidation. Each process has also
# its own copy of the data structure and we don't need to care about
# locks between threads.
multi_process = False
#----------------------------------------------------------
# libc UTC hack
#----------------------------------------------------------
# Make sure the OpenERP server runs in UTC. This is especially necessary
# under Windows as under Linux it seems the real import of time is
# sufficiently deferred so that setting the TZ environment variable
# in openerp.cli.server was working.
import os
os.environ['TZ'] = 'UTC' # Set the timezone...
import time # ... *then* import time.
del os
del time
#----------------------------------------------------------
# Shortcuts
#----------------------------------------------------------
# The hard-coded super-user id (a.k.a. administrator, or root user).
SUPERUSER_ID = 1
def registry(database_name=None):
"""
Return the model registry for the given database, or the database mentioned
on the current thread. If the registry does not exist yet, it is created on
the fly.
"""
if database_name is None:
import threading
database_name = threading.currentThread().dbname
return modules.registry.RegistryManager.get(database_name)
#----------------------------------------------------------
# Imports
#----------------------------------------------------------
import addons
import conf
import loglevels
import modules
import netsvc
import osv
import pooler
import release
import report
import service
import sql_db
import tools
import workflow
#----------------------------------------------------------
# Model classes, fields, api decorators, and translations
#----------------------------------------------------------
from . import models
from . import fields
from . import api
from openerp.tools.translate import _
#----------------------------------------------------------
# Other imports, which may require stuff from above
#----------------------------------------------------------
import cli
import http
| gpl-3.0 | -7,602,556,953,469,250,000 | 31.604938 | 79 | 0.537675 | false | 5.168297 | false | false | false |
bburan/psiexperiment | psi/controller/calibration/chirp_calibration.py | 1 | 1129 | from ..util import acquire
def chirp_power(engine, freq_lb=50, freq_ub=100e3, attenuation=0, vrms=1,
repetitions=32, duration=0.1, iti=0.01):
calibration = FlatCalibration.as_attenuation(vrms=vrms)
ai_fs = engine.hw_ai_channels[0].fs
ao_fs = engine.hw_ao_channels[0].fs
queue = FIFOSignalQueue(ao_fs)
factory = chirp_factory(ao_fs, freq_lb, freq_ub, duration, attenuation,
calibration=calibration)
waveform = generate_waveform(factory, int(duration*ao_fs))
print(waveform)
queue.append(waveform, repetitions, iti)
ao_channel = engine.hw_ao_channels[0]
    output = QueuedEpochOutput(parent=ao_channel, queue=queue,
auto_decrement=True)
epochs = acquire(engine, queue, duration+iti)
def tone_calibration(engine, frequencies, *args, **kwargs):
'''
Single output calibration at a fixed frequency
Returns
-------
sens : dB (V/Pa)
Sensitivity of output in dB (V/Pa).
'''
output_sens = tone_sens(engine, frequencies, *args, **kwargs)[0]
return PointCalibration(frequencies, output_sens)
| mit | -6,651,210,350,675,184,000 | 33.212121 | 75 | 0.643047 | false | 3.30117 | false | false | false |
optikfluffel/lagesonum | lagesonum/bottle_app.py | 1 | 4024 | # coding: utf-8
import sqlite3
import os
import time
import bottle
from bottle import default_app, route, view
from bottle import request
from bottle_utils.i18n import I18NPlugin
#from bottle_utils.i18n import lazy_gettext as _
#todo: refactor so that there is no error in Py3 local deployment and testing
import input_number as ip
from dbhelper import initialize_database
import hashlib
MOD_PATH = os.path.dirname(os.path.abspath(__file__))
DB_PATH = os.path.abspath(os.path.join(MOD_PATH, '..', '..', "lagesonr.db"))
if not os.path.exists(DB_PATH):
initialize_database(DB_PATH)
lagesonrdb = sqlite3.connect(DB_PATH)
#todo: populate list dynamically based on available/selected translations
LANGS = [
('de_DE', 'Deutsch'),
('en_US', 'English'),
]
# ('ar_AR', 'Arab'),
DEFAULT_LOCALE = 'en_US'
@route('/')
@view('start_page')
def index():
"""1.Seite: Helfer steht am LaGeSo und gibt Nummern ein [_____] """
return {'entered': []}
@route('/', method='POST')
@view('start_page')
def do_enter():
numbers = request.forms.get('numbers')
timestamp = time.asctime()
numbers = [num.strip() for num in numbers.split('\n')]
result_num = []
    #todo: refactor fingerprint in extra function for better testing (a hedged sketch follows this view function)
usr_agent = str(request.environ.get('HTTP_USER_AGENT'))
usr_lang = str(request.environ.get('HTTP_ACCEPT_LANGUAGE'))
usr_ip = str(request.remote_addr)
usr_fingerprint = usr_agent + usr_lang + usr_ip
usr_hash = hashlib.md5(usr_fingerprint.encode("utf-8")).hexdigest()
with lagesonrdb as con:
cur = con.cursor()
for num in set(numbers):
if ip.is_valid_number(num) and ip.is_ok_with_db(
num) and ip.is_valid_user():
num = str(num).capitalize()
query = 'SELECT NUMBER FROM NUMBERS WHERE NUMBER="%s" AND FINGERPRINT="%s"' % (num, usr_hash)
if len(list(cur.execute(query))) == 0:
insert = 'INSERT INTO NUMBERS(NUMBER, TIME, PLACE, USER, FINGERPRINT) VALUES ("%s", "%s", "-", ' \
'"-", "%s")' % (num, timestamp, usr_hash)
cur.execute(insert)
result_num.append(num)
else:
result_num.append("ALREADY ENTERED BY - %s - %s - %s: %s" % (usr_ip, usr_agent, usr_lang, num))
#return {'entered': ["already before - by you!"], 'timestamp': timestamp}
else:
result_num.append("INVALID INPUT: %s" % num)
return {'entered': result_num, 'timestamp': timestamp}
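# Hedged sketch (not part of the original app) for the fingerprint TODO above:
# a small helper that builds the same MD5 fingerprint from request data, so it
# can be unit-tested without a live request. The name `compute_fingerprint`
# is an assumption.
def compute_fingerprint(environ, remote_addr):
    usr_agent = str(environ.get('HTTP_USER_AGENT'))
    usr_lang = str(environ.get('HTTP_ACCEPT_LANGUAGE'))
    usr_ip = str(remote_addr)
    return hashlib.md5((usr_agent + usr_lang + usr_ip).encode("utf-8")).hexdigest()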
@route('/query')
@view('query_page')
def query():
return {'result': '-', 'timestamp_first': '-','timestamp_last': '-', 'n': '-'}
@route('/query', method='POST')
@view('query_page')
def do_query():
number = request.forms.get('number')
timestamp_first = '-'
timestamp_last = '-'
n = '0'
if ip.is_valid_number(number) and ip.is_ok_with_db(
number) and ip.is_valid_user():
with lagesonrdb as con:
cur = con.cursor()
number = str(number).capitalize()
query = 'SELECT TIME FROM NUMBERS WHERE NUMBER="%s" ORDER BY TIME' % number
result = list(cur.execute(query))
n = len(result)
if n > 0:
timestamp_first, timestamp_last = result[0][0], result[-1][0]
else:
timestamp_first = 'NOT FOUND'
else:
timestamp_first = 'INVALID INPUT'
return {'result': number, 'timestamp_first': timestamp_first,
'timestamp_last': timestamp_last, 'n': n}
@route('/about')
@view('about')
def about():
pass
@route('/impressum')
@view('impressum')
def impressum():
pass
# findet templates im gleichen Verzeichnis
bottle.TEMPLATE_PATH.append(MOD_PATH)
app = default_app()
application = I18NPlugin(app, langs=LANGS, default_locale=DEFAULT_LOCALE,
domain='messages',
locale_dir=os.path.join(MOD_PATH, 'locales'))
| mit | 4,199,236,689,896,837,600 | 30.193798 | 118 | 0.591451 | false | 3.445205 | false | false | false |
shivam5992/pywordcloud-flask | words.py | 1 | 4499 | '''
Python implementation of HTML wordcloud of words collected from
a website, Paragraph Input or File Upload. Flask Web App implementation
of the same.
Author: Shivam Bansal
Email: [email protected]
Website: www.shivambansal.com
Version: 0.1
'''
from flask import Flask, render_template, request, flash, redirect, url_for
from BeautifulSoup import BeautifulSoup
import urllib, random, re, string, stopwords
app = Flask(__name__)
app.secret_key = 'You will never guess'
'''
Index router function, Receive post request and displays the html wordcloud
'''
@app.route('/', methods = ['GET','POST'])
@app.route('/index', methods = ['GET','POST'])
def index():
if request.method == 'POST':
''' Store post variables '''
url = request.form['urllink']
case = request.form['case']
show_freq = request.form['show_freq']
''' Try to connect with the URL '''
try:
if not url.startswith("http"):
url = "http://" + url
htmltext = urllib.urlopen(url).read()
except:
flash("Cannot connect to the requested url")
return redirect(url_for('startover'))
''' Get all text from the html repsonse '''
soup = BeautifulSoup(htmltext)
texts = soup.findAll(text=True)
visible_texts = filter(visible, texts)
article = ""
for text in visible_texts:
article += text.encode("utf-8")
article = str(article)
article = BeautifulSoup(article, convertEntities=BeautifulSoup.HTML_ENTITIES)
#exclude = set(string.punctuation)
#article = str(article)
#article = ''.join(ch for ch in article if ch not in exclude)
article = str(article).replace("\n"," ")
''' Get top keywords '''
freq = 50
a = getKeywords(article, case, freq)
random.shuffle(a)
b = [x[1] for x in a]
minFreq = min(b)
maxFreq = max(b)
''' Create html span tags and corresponding css '''
span = ""
css = """#box{font-family:'calibri';border:2px solid black;}
#box a{text-decoration : none}
"""
''' Colors for words in wordcloud '''
colors = ['#607ec5','#002a8b','#86a0dc','#4c6db9']
colsize = len(colors)
k = 0
for index,item in enumerate(a):
index += 1
if case == "upper":
tag = str(item[0]).upper()
else:
tag = str(item[0])
if show_freq == "yes":
span += '<a href=#><span class="word'+str(index)+'" id="tag'+str(index)+'"> ' + tag + " (" + str(item[1]) + ") " + " </span></a>\n"
else:
span += '<a href=#><span class="word'+str(index)+'" id="tag'+str(index)+'"> ' + tag + " </span></a>\n"
''' Algorithm to scale sizes'''
freqTag = int(item[1])
fontMax = 5.5
fontMin = 1.5
            frange = fontMax - fontMin
            C = 4
            K = float(freqTag - minFreq)/(maxFreq - minFreq)
size = fontMin + (C*float(K*frange/C))
css += '#tag'+str(index)+'{font-size: '+ str(size) +'em;color: '+colors[int(k%colsize)]+'}\n'
css += '#tag'+str(index)+':hover{color: red}\n'
k += 1
''' Write the HTML and CSS into seperate files '''
f = open('templates/wordcloud.html', 'w')
message = """
<style type="text/css">
""" + css +"""
</style>
<div id='box'>
""" + span + """
</div>
"""
f.write(message)
        f.flush()
        f.close()
return render_template('index.html')
startover()
return render_template('index.html')
'''
Function to get top keywords from an article
'''
def getKeywords(articletext, case, freq):
''' Create the dictionary for output response '''
word_dict = {}
word_list = articletext.lower().split()
filtered_words = word_list
for word in filtered_words:
if word not in stopwords.stopwords and word.isalnum() and not word.isdigit() and not len(word) == 1:
            if word not in word_dict:
                word_dict[word] = 1
            else:
                word_dict[word] += 1
top_words = sorted(word_dict.items(),key=lambda(k,v):(v,k),reverse=True)[0:freq]
''' Return a list of dictionaies, dictionaies contains word and their frequencies '''
top = []
for w in top_words:
top.append(w)
return top
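# Hedged usage sketch, not part of the original app: exercising getKeywords on
# a small in-memory text; the sample string and the case/frequency arguments
# are illustrative only.
def _example_keywords():
    sample = "lageso numbers numbers berlin berlin berlin helpers"
    return getKeywords(sample, "lower", 5)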
'''
Function to reset everthing and startover
'''
@app.route('/startover')
def startover():
f = open("templates/wordcloud.html",'w')
f.write("")
    f.close()
return redirect(url_for('index'))
def visible(element):
if element.parent.name in ['style', 'script', '[document]', 'head', 'title']:
return False
elif re.match('<!--.*-->', str(element)):
return False
return True
'''
Run the Flask Application
'''
if __name__ == '__main__':
app.run(debug = True) | mit | 8,035,192,736,946,009,000 | 25.162791 | 145 | 0.625695 | false | 3.033715 | false | false | false |
steveb/heat | heat/engine/clients/os/neutron/__init__.py | 1 | 6485 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutronclient.common import exceptions
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.v2_0 import client as nc
from oslo_utils import uuidutils
from heat.common import exception
from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client
class NeutronClientPlugin(client_plugin.ClientPlugin):
exceptions_module = exceptions
service_types = [NETWORK] = ['network']
def _create(self):
con = self.context
endpoint_type = self._get_client_option('neutron', 'endpoint_type')
endpoint = self.url_for(service_type=self.NETWORK,
endpoint_type=endpoint_type)
args = {
'auth_url': con.auth_url,
'service_type': self.NETWORK,
'token': self.auth_token,
'endpoint_url': endpoint,
'endpoint_type': endpoint_type,
'ca_cert': self._get_client_option('neutron', 'ca_file'),
'insecure': self._get_client_option('neutron', 'insecure')
}
return nc.Client(**args)
def is_not_found(self, ex):
if isinstance(ex, (exceptions.NotFound,
exceptions.NetworkNotFoundClient,
exceptions.PortNotFoundClient)):
return True
return (isinstance(ex, exceptions.NeutronClientException) and
ex.status_code == 404)
def is_conflict(self, ex):
bad_conflicts = (exceptions.OverQuotaClient,)
return (isinstance(ex, exceptions.Conflict) and
not isinstance(ex, bad_conflicts))
def is_over_limit(self, ex):
if not isinstance(ex, exceptions.NeutronClientException):
return False
return ex.status_code == 413
def is_no_unique(self, ex):
return isinstance(ex, exceptions.NeutronClientNoUniqueMatch)
def is_invalid(self, ex):
return isinstance(ex, exceptions.StateInvalidClient)
def find_resourceid_by_name_or_id(self, resource, name_or_id,
cmd_resource=None):
return self._find_resource_id(self.context.tenant_id,
resource, name_or_id,
cmd_resource)
@os_client.MEMOIZE_FINDER
def _find_resource_id(self, tenant_id,
resource, name_or_id, cmd_resource):
# tenant id in the signature is used for the memoization key,
# that would differentiate similar resource names across tenants.
return neutronV20.find_resourceid_by_name_or_id(
self.client(), resource, name_or_id, cmd_resource=cmd_resource)
@os_client.MEMOIZE_EXTENSIONS
def _list_extensions(self):
extensions = self.client().list_extensions().get('extensions')
return set(extension.get('alias') for extension in extensions)
def has_extension(self, alias):
"""Check if specific extension is present."""
return alias in self._list_extensions()
def _resolve(self, props, key, id_key, key_type):
if props.get(key):
props[id_key] = self.find_resourceid_by_name_or_id(key_type,
props.pop(key))
return props[id_key]
def resolve_pool(self, props, pool_key, pool_id_key):
if props.get(pool_key):
props[pool_id_key] = self.find_resourceid_by_name_or_id(
'pool', props.get(pool_key), cmd_resource='lbaas_pool')
props.pop(pool_key)
return props[pool_id_key]
def resolve_router(self, props, router_key, router_id_key):
return self._resolve(props, router_key, router_id_key, 'router')
def network_id_from_subnet_id(self, subnet_id):
subnet_info = self.client().show_subnet(subnet_id)
return subnet_info['subnet']['network_id']
def check_lb_status(self, lb_id):
lb = self.client().show_loadbalancer(lb_id)['loadbalancer']
status = lb['provisioning_status']
if status == 'ERROR':
raise exception.ResourceInError(resource_status=status)
return status == 'ACTIVE'
def get_qos_policy_id(self, policy):
"""Returns the id of QoS policy.
Args:
policy: ID or name of the policy.
"""
return self.find_resourceid_by_name_or_id(
'policy', policy, cmd_resource='qos_policy')
def get_secgroup_uuids(self, security_groups):
'''Returns a list of security group UUIDs.
Args:
security_groups: List of security group names or UUIDs
'''
seclist = []
all_groups = None
for sg in security_groups:
if uuidutils.is_uuid_like(sg):
seclist.append(sg)
else:
if not all_groups:
response = self.client().list_security_groups()
all_groups = response['security_groups']
same_name_groups = [g for g in all_groups if g['name'] == sg]
groups = [g['id'] for g in same_name_groups]
if len(groups) == 0:
raise exception.EntityNotFound(entity='Resource', name=sg)
elif len(groups) == 1:
seclist.append(groups[0])
else:
# for admin roles, can get the other users'
# securityGroups, so we should match the tenant_id with
# the groups, and return the own one
own_groups = [g['id'] for g in same_name_groups
if g['tenant_id'] == self.context.tenant_id]
if len(own_groups) == 1:
seclist.append(own_groups[0])
else:
raise exception.PhysicalResourceNameAmbiguity(name=sg)
return seclist
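# Hedged usage sketch, not part of the original plugin: how a Heat resource
# handler might resolve user-supplied names through this plugin. `resource`
# is assumed to be a heat.engine.resource.Resource with the neutron client
# plugin registered; 'my-net' and 'web-sg' are placeholder names.
def _example_resolution(resource):
    plugin = resource.client_plugin('neutron')
    net_id = plugin.find_resourceid_by_name_or_id('network', 'my-net')
    sg_ids = plugin.get_secgroup_uuids(['web-sg'])
    return net_id, sg_ids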
| apache-2.0 | -6,405,323,272,617,855,000 | 38.542683 | 78 | 0.588743 | false | 4.14377 | false | false | false |
gazbot/conference-project | models.py | 1 | 5513 | #!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = '[email protected] (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class ConflictException(endpoints.ServiceException):
"""ConflictException -- exception mapped to HTTP 409 response"""
http_status = httplib.CONFLICT
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
conferenceKeysToAttend = ndb.StringProperty(repeated=True)
sessionKeysWishlist = ndb.StringProperty(repeated=True)
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
conferenceKeysToAttend = messages.StringField(4, repeated=True)
sessionKeysWishlist = messages.StringField(5, repeated=True)
class StringMessage(messages.Message):
"""StringMessage-- outbound (single) string message"""
data = messages.StringField(1, required=True)
class BooleanMessage(messages.Message):
"""BooleanMessage-- outbound Boolean value message"""
data = messages.BooleanField(1)
class Conference(ndb.Model):
"""Conference -- Conference object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
organizerUserId = ndb.StringProperty()
topics = ndb.StringProperty(repeated=True)
city = ndb.StringProperty()
startDate = ndb.DateProperty()
month = ndb.IntegerProperty() # TODO: do we need for indexing like Java?
endDate = ndb.DateProperty()
maxAttendees = ndb.IntegerProperty()
seatsAvailable = ndb.IntegerProperty()
class Session(ndb.Model):
"""Session -- Session object"""
name = ndb.StringProperty(required=True)
description = ndb.StringProperty()
highlights = ndb.StringProperty(repeated=True)
startTime = ndb.TimeProperty()
sessionDate = ndb.DateProperty()
typeOfSession = ndb.StringProperty(default='NOT_SPECIFIED')
duration = ndb.IntegerProperty()
speaker = ndb.StringProperty(required=True)
class ConferenceForm(messages.Message):
"""ConferenceForm -- Conference outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
organizerUserId = messages.StringField(3)
topics = messages.StringField(4, repeated=True)
city = messages.StringField(5)
startDate = messages.StringField(6) # DateTimeField()
month = messages.IntegerField(7)
maxAttendees = messages.IntegerField(8)
seatsAvailable = messages.IntegerField(9)
endDate = messages.StringField(10) # DateTimeField()
websafeKey = messages.StringField(11)
organizerDisplayName = messages.StringField(12)
class SessionForm(messages.Message):
"""SessionForm -- Session outbound form message"""
name = messages.StringField(1)
description = messages.StringField(2)
highlights = messages.StringField(3, repeated=True)
startTime = messages.StringField(4)
sessionDate = messages.StringField(5)
typeOfSession = messages.EnumField('TypeOfSession', 6)
speaker = messages.StringField(7)
websafeKey = messages.StringField(8)
duration = messages.IntegerField(9)
class ConferenceForms(messages.Message):
"""ConferenceForms -- multiple Conference outbound form message"""
items = messages.MessageField(ConferenceForm, 1, repeated=True)
class SessionForms(messages.Message):
"""SessionForms -- multiple Session outbound form message"""
items = messages.MessageField(SessionForm, 1, repeated=True)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
class TypeOfSession(messages.Enum):
"""TypeOfSession -- session type enumeration value"""
NOT_SPECIFIED = 1
LECTURE = 2
KEYNOTE = 3
WORKSHOP = 4
FORUM = 5
class ConferenceQueryForm(messages.Message):
"""ConferenceQueryForm -- Conference query inbound form message"""
field = messages.StringField(1)
operator = messages.StringField(2)
value = messages.StringField(3)
class ConferenceQueryForms(messages.Message):
"""ConferenceQueryForms -- multiple ConferenceQueryForm inbound form message"""
filters = messages.MessageField(ConferenceQueryForm, 1, repeated=True) | apache-2.0 | -3,578,478,729,146,258,400 | 33.037037 | 87 | 0.6579 | false | 4.059647 | false | false | false |
sensbio/sensbiotk | examples/scripts/expe_prima.py | 1 | 4757 |
# -*- coding: utf-8 -*-
"""
Reconstruction angles example comparison
"""
import numpy as np
from sensbiotk.algorithms import martin_ahrs
from sensbiotk.algorithms.basic import find_static_periods
from sensbiotk.io.iofox import load_foxcsvfile
from sensbiotk.io.ahrs import save_ahrs_csvfile
import sensbiotk.calib.calib as calib
from sensbiotk.transforms3d import quaternions as nq
from sensbiotk.transforms3d.eulerangles import quat2euler
from sensbiotk.transforms3d.quaternions import quat2mat
from visual import *
import scipy.io
import matplotlib.pyplot as plt
DATACALIBFILE = "data/calib01_imu.csv"
CALIBFILE= "data/calib_imu.txt"
DATAFILE = "data/expe02_imu.csv"
ANGLEFILE = "data/angle02_imu.csv"
def plot_quat(title, timu, qw, qx, qy, qz):
""" Plot quaternion
"""
plt.figure()
plt.title(title+" Quaternion")
plt.plot(timu, qw)
plt.plot(timu, qx)
plt.plot(timu, qy)
plt.plot(timu, qz)
plt.legend(('qw', 'qx', 'qy', 'qz'))
return
def plot_euler(title, time, phi, theta, psi):
""" Plot euler angles
"""
plt.figure()
plt.title(title+" Euler angles")
plt.plot(time, phi*180/np.pi)
plt.plot(time, theta*180/np.pi)
plt.plot(time, psi*180/np.pi)
plt.legend(('e_x', 'e_y', 'e_z'))
return
def calib_param(compute = True):
""" Load or compute calibration parameters
"""
if compute == True :
[params_acc, params_mag, params_gyr] = \
calib.compute(imuNumber=5 ,filepath=DATACALIBFILE, param = 3)
calib.save_param(CALIBFILE,
params_acc, params_mag, params_gyr, comments="Expe Prima")
else:
[params_acc, params_mag, params_gyr] = \
calib.load_param(CALIBFILE)
return [params_acc, params_mag, params_gyr]
def normalize_data(data, param_calib):
""" normalize_data
"""
scale = param_calib[1:4,:]
bias = param_calib[0,:]
data_n = np.transpose(np.dot(scale,np.transpose((data-np.transpose(bias)))))
return data_n
def run_example():
""" run example : "martin"
"""
# Compute (True) or load (False
[params_acc, params_mag, params_gyr] = calib_param(compute = False)
# Load the recording data
[time_sens, accx, accy, accz, mx, my, mz, gyrx, gyry, gyrz] = \
load_foxcsvfile(DATAFILE)
# Find motionless begin periods
freqs = 200
start, end = find_static_periods(gyrz, 2 * np.pi/180, 3*freqs)
static_duration = time_sens[end[0]] - time_sens[start[0]]
    print "Static periods found (first start index, count):", start[0], len(end)
if static_duration < 5.0 :
print "Warning: static duration too low"
time_imu = time_sens
acc_imu = np.column_stack([accx, accy, accz])
mag_imu = np.column_stack([mx, my, mz])
gyr_imu = np.column_stack([gyrx, gyry, gyrz])
# Init output
quat = np.zeros((len(acc_imu),4))
euler = np.zeros((len(acc_imu),3))
observer = martin_ahrs.martin_ahrs()
quat_offset = [1, 0, 0, 0]
# Initialization loop
for i in range(0, end[0]):
# Applies the Scale and Offset to data
acc_imu[i,:] = normalize_data(acc_imu[i,:], params_acc)
mag_imu[i,:] = normalize_data(mag_imu[i,:], params_mag)
gyr_imu[i,:] = normalize_data(gyr_imu[i,:], params_gyr)
# Filter call
if i == 0:
quat[0]=observer.init_observer(np.hstack([acc_imu[0,:],
mag_imu[0,:], gyr_imu[0,:]]))
else:
quat[i]=observer.update(np.hstack([acc_imu[i,:],
mag_imu[i,:], gyr_imu[i,:]]), 0.005)
    quat_offset = nq.conjugate(quat[end[0]-1])
print "Quaternion init", quat_offset
# Computation loop
for i in range(end[0], len(acc_imu)):
# Applies the Scale and Offset to data
acc_imu[i,:] = normalize_data(acc_imu[i,:], params_acc)
mag_imu[i,:] = normalize_data(mag_imu[i,:], params_mag)
gyr_imu[i,:] = normalize_data(gyr_imu[i,:], params_gyr)
# Filter call
quat[i]=observer.update(np.hstack([acc_imu[i,:],
mag_imu[i,:], gyr_imu[i,:]]), 0.005)
quat[i] = nq.mult(quat_offset, quat[i])
euler[i]=quat2euler(quat[i])
# Plot results
plot_quat("Expe Prima ", time_imu,\
quat[:,0], quat[:,1], quat[:,2], quat[:,3])
plot_euler("Expe Prima ", time_imu,\
euler[:,2], euler[:,1], euler[:,0])
# Save results
save_ahrs_csvfile(ANGLEFILE, time_imu, quat, euler)
if __name__ == '__main__':
run_example()
plt.show()
| gpl-3.0 | -5,332,932,333,264,223,000 | 31.737589 | 83 | 0.570107 | false | 2.971268 | false | false | false |
Spandex-at-Exeter/demography_database | app/matrix_functions.py | 1 | 14553 | from models import Permission, Role, User, IUCNStatus, OrganismType, GrowthFormRaunkiaer, ReproductiveRepetition, \
DicotMonoc, AngioGymno, SpandExGrowthType, SourceType, Database, Purpose, MissingData, ContentEmail, Ecoregion, Continent, InvasiveStatusStudy, InvasiveStatusElsewhere, StageTypeClass, \
TransitionType, MatrixComposition, StartSeason, EndSeason, StudiedSex, Captivity, Species, Taxonomy, PurposeEndangered, PurposeWeed, Trait, \
Publication, AuthorContact, AdditionalSource, Population, Stage, StageType, Treatment, \
MatrixStage, MatrixValue, Matrix, Interval, Fixed, Small, CensusTiming, Institute, Status, Version, ChangeLogger
import numpy as np
#mat_str = "[0 0 2.81;0.5 0 0;0 0.45 0.45]" # for testing
def as_array(mat_str):
# input: matlab format matrix
# output:
try:
mat_str = mat_str[1:(len(mat_str)-1)].replace(";"," ").split()
mat_str = [float(i) for i in mat_str]
mat_str = np.array(mat_str)
order = int(np.sqrt(len(mat_str)))
shape = (order,order)
try:
mat_str = mat_str.reshape(shape)
return(mat_str)
except ValueError:
return("NA")
except:
return("NA")
def calc_lambda(matA):
matA = as_array(matA)
# input: matrix in string matlab format
# output: float
    if not isinstance(matA, str):
w, v = np.linalg.eig(matA)
return float(max(w))
else:
return(None)
def calc_surv_issue(matU):
matU = as_array(matU)
# input: matrix in string matlab format
# output: float
    if not isinstance(matU, str):
column_sums = [sum([row[i] for row in matU]) for i in range(0,len(matU[0]))]
return max(column_sums)
else:
return(None)
def is_matrix_irreducible(matA):
matA = as_array(matA)
# input: matrix in string matlab format
# output: 0 or 1
    if not isinstance(matA, str):
order = np.shape(matA)[0]
I = np.matrix(np.identity(order))
IplusA = I + matA
powermatrix = np.linalg.matrix_power(IplusA, (order - 1))
minval = powermatrix.min()
if minval > 0:
return(1)
else:
return(0)
else:
return(None)
def is_matrix_primitive(matA):
matA = as_array(matA)
# input: matrix in string matlab format
# output: 0 or 1
    if not isinstance(matA, str):
order = np.shape(matA)[0]
powermatrix = np.linalg.matrix_power(matA,((order ** 2) - (2 * order) + 2))
minval = powermatrix.min()
if minval > 0:
return(1)
else:
return(0)
else:
return(None)
def is_matrix_ergodic(matA):
matA = as_array(matA)
# input: matrix in string matlab format
# output: 0 or 1
    if not isinstance(matA, str):
digits = 12
order = np.shape(matA)[0]
lw, lv = np.linalg.eig(np.transpose(matA))
lmax = lw.tolist().index(max(lw))
v = lv[:,lmax]
Rev = abs(np.real(v))
Rev = np.round(Rev,decimals = digits)
if min(Rev) > 0:
return(1)
else:
return(0)
else:
return(None)
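# Hedged usage sketch, not part of the original module: running the checks
# above on a small MATLAB-style matrix string; the matrix values are
# illustrative only.
def _example_matrix_checks():
    mat_a = "[0 2;0.5 0.1]"
    return {"lambda": calc_lambda(mat_a),
            "surv_issue": calc_surv_issue(mat_a),
            "irreducible": is_matrix_irreducible(mat_a),
            "primitive": is_matrix_primitive(mat_a),
            "ergodic": is_matrix_ergodic(mat_a)}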
###### Some functions to create summary statistics on the front-end
####Structure of functions
###a. unreleased and incomplete
###b. unreleased and complete aka. ready for release
###c. released and complete
###d. released but missing stuff
##Each of these 4 categories is split into 3 subsections (all; compadre; comadre)
#Each of these 3 subsections is split into 3 sections (species;populations;matrices)
##### a. unreleased and incomplete (amber) ######
###Note these won't work yet until database is related to the Population.model which it isn't atm
## All ##
# #Species
# def all_species_unreleased():
# all_species_unreleased = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_unreleased
# #Populations
# def all_populations_unreleased():
# all_populations_unreleased = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_unreleased
# #Matrices
# def all_matrices_unreleased():
# all_matrices_unreleased = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_unreleased
## COMPADRE ##
#Species.join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
#def compadre_species_unreleased():
# compadre_species_unreleased = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Amber").join.(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").join(Population).join(Population.database).filter(Database.database_name=="Unreleased").count()
# return compadre_species_unreleased
#Populations
#Matrices
## COMADRE ##
#Species
#def comadre_species_unreleased():
# comadre_species_unreleased = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Amber").join.(Species.taxonomy).filter(Taxonomy.kingdom == "Animalia").join(Population).join(Population.database).filter(Database.database_name=="Unreleased").count()
# return comadre_species_unreleased
#Populations
#Matrices
# ##### b. unreleased and complete aka. ready for release (green) ######
# ## All ##
# #Species
# def all_species_unreleased_complete():
# all_species_unreleased_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_unreleased_complete
# #Populations
# def all_populations_unreleased_complete():
# all_populations_unreleased_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_unreleased_complete
# #Matrices
# def all_matrices_unreleased_complete():
# all_matrices_unreleased_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_unreleased_complete
## COMPADRE ##
#Species
#Populations
#Matrices
## COMADRE ##
#Species
# #Populations
# #Matrices
# ###c. released and complete
# ## ALL ##
# #Species
# def all_species_released_complete():
# all_species_released_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_released_complete
# #Populations
# def all_populations_released_complete():
# all_populations_released_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_released_complete
# #Matrices
# def all_matrices_released_complete():
# all_matrices_released_complete = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_released_complete
# ## COMPADRE ## - when new versions of COMPADRE come out, these will need new versions added to get an accurate summary
# #Species
# def all_species_released_compadre():
# # all_species_released_2 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="3.2.1").count()
# # all_species_released_3 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="4.0.1").count()
# # all_species_released_4 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="3.0.0").count()
# all_species_released_compadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_released_compadre
# #Populations
# def all_populations_released_compadre():
# all_populations_released_compadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_released_compadre
# #Matrices
# def all_matrices_released_compadre():
# # all_matrices_released_2 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="3.2.1").count()
# # all_matrices_released_3 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="4.0.1").count()
# # all_matrices_released_4 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="3.0.0").count()
# all_matrices_released_compadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_released_compadre
# ## COMADRE ##
# #Species
# def all_species_released_comadre():
# # all_species_released_5 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="2.0.1").count()
# # all_species_released_6 = Species.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="1.0.0").count()
# all_species_released_comadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_species_released_comadre
# #Populations
# def all_populations_released_comadre():
# # all_populations_released_5 = Population.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Database).filter(Database.database_name=="2.0.1").count()
# # all_populations_released_6 = Population.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Database).filter(Database.database_name=="1.0.0").count()
# all_populations_released_comadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_populations_released_comadre
# #Matrices
# def all_matrices_released_comadre():
# # all_matrices_released_5 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="2.0.1").count()
# # all_matrices_released_6 = Matrix.query.join(Version).join(Version.statuses).filter(Status.status_name=="Green").join(Population).join(Population.database).filter(Database.database_name=="1.0.0").count()
# all_matrices_released_comadre = Population.query.join(Database).filter(Database.database_master_version=="X").count()
# return all_matrices_released_comadre
# ###d. released but missing stuff
## ALL ##
#Species
#Populations
#Matrices
## COMPADRE ##
#Species
# #Populations
# #Matrices
# ## COMADRE ##
# #Species
# #Populations
# ######Admin Use Only#######
# ###Count function for admin areas - Total sums###
# def all_matrices():
# all_matrices_count = Matrix.query.count()
# return all_matrices_count
# ##All_populations
# def all_pops():
# all_pops_count = Population.query.count()
# return all_pops_count
# ##All_species
# def all_species():
# all_species = Species.query.count()
# return all_species
# ##All. matrices in compadre (plants only)
# def count_plants():
# count_plants = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# return count_plants
# ##All. matrices in comadre (animalia only)
# def count_comadre():
# count_comadre = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Animalia").count()
# return count_comadre
# ##No. matrices in compadre (plants, fungi and algae)
# def count_compadre():
# count_fungi = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# count_chromista = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Chromista").count()
# count_chromalveolata = Matrix.query.join(Matrix.population).join(Population.species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Chromalveolata").count()
# count_compadre = count_plants() + count_fungi + count_chromista + count_chromalveolata
# return count_compadre
# ##No. populations in compadre (plants only)
# def count_plants_pop():
# count_plants_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# return count_plants_pop
# ##No. populations in compadre (plants, fungi and algae)
# def count_compadre_pop():
# count_chromista_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Chromista").count()
# count_chromalveolta_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Chromalveolata").count()
# count_fungi_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Fungi").count()
# count_compadre_pop = count_plants_pop() + count_chromalveolta_pop + count_chromista_pop + count_fungi_pop
# return count_compadre_pop
# ##No. populations in comadre (animalia only)
# def count_comadre_pop():
# count_comadre_pop = Population.query.join(Species).join(Species.taxonomy).filter(Taxonomy.kingdom == "Animalia").count()
# return count_comadre_pop
# ##No. compadre species inc. fungi, algae, etc. admin
# def species_compadre_count():
# species_chromista_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Chromista").count()
# species_chromalveolta_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Chromalveolta").count()
# species_fungi_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Fungi").count()
# species_plant_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# species_compadre_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Plantae").count()
# return species_compadre_count
# ##No. comadre species admin
# def species_comadre_count():
# species_comadre_count = Species.query.join(Taxonomy).filter(Taxonomy.kingdom == "Animalia").count()
# return species_comadre_count
| mit | 9,208,999,180,805,464,000 | 42.056213 | 279 | 0.698413 | false | 3.228977 | false | false | false |
thepian/theapps | theapps/supervisor/sites.py | 1 | 1074 | from django.conf import settings
class SiteManager(object):
def __init__(self):
self.cur = None
# Map of Site instances
self.sites = {}
def get_current(self):
if not self.cur:
self.cur = Site()
return self.cur
def get_default_site(self):
return self.get_site('www.' + settings.DOMAINS[0])
def get_site(self,host):
if host in self.sites:
return self.sites[host]
#TODO consider domain redirection rules
site = Site()
site.domain = host
site.base_domain = settings.DOMAINS[0].startswith(".") and settings.DOMAINS[0] or "."+settings.DOMAINS[0]
for d in settings.DOMAINS:
            if host.endswith(d):
                site.base_domain = d
site.name = settings.SITE_TITLE
self.sites[host] = site
return site
class Site(object):
domain = "www.thepia.com"
name = "Thepia Site"
objects = SiteManager()
def __repr__(self):
return self.domain+":"+self.name
| gpl-3.0 | 8,910,944,919,843,968,000 | 25.85 | 113 | 0.555866 | false | 3.948529 | false | false | false |
boldfield/s3-encryption | s3_encryption/crypto.py | 1 | 1913 | from Crypto import Random
from Crypto.Cipher import AES as pyAES
import codecs
class AES(object):
def __init__(self):
self.key = None
self._mode = None
self.iv = None
@staticmethod
def str_to_bytes(data):
t = type(b''.decode('utf-8'))
if isinstance(data, t):
return codecs.encode(data, 'utf-8')
return data
def encrypt(self, data):
if self.iv is None:
cipher = pyAES.new(self.key, self.mode)
else:
cipher = pyAES.new(self.key, self.mode, self.iv)
return cipher.encrypt(pad_data(AES.str_to_bytes(data)))
def decrypt(self, data):
if self.iv is None:
cipher = pyAES.new(self.key, self.mode)
else:
cipher = pyAES.new(self.key, self.mode, self.iv)
return unpad_data(cipher.decrypt(AES.str_to_bytes(data)))
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, mode):
m = 'MODE_{}'.format(mode.upper()) if not mode.startswith('MODE') else mode
self._mode = getattr(pyAES, m)
def aes_cipher(key=None, iv=None, mode=None):
aes = AES()
aes.iv = iv if iv else None
aes.mode = mode if mode else None
aes.key = key if key else None
return aes
def aes_encrypt(key, data, mode='ECB', iv=None):
aes = AES()
aes.mode = mode
aes.iv = iv
aes.key = key
return aes.encrypt(data)
def aes_decrypt(key, data, mode='ECB', iv=None):
aes = AES()
aes.mode = mode
aes.iv = iv
aes.key = key
return aes.decrypt(data)
def aes_iv():
return Random.new().read(pyAES.block_size)
def aes_key():
return Random.new().read(pyAES.block_size)
pad_data = lambda s: s + (pyAES.block_size - len(s) % pyAES.block_size) * AES.str_to_bytes(chr(pyAES.block_size - len(s) % pyAES.block_size))
unpad_data = lambda s: s[0:-ord(s[len(s)-1:])]
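# Hedged usage sketch, not part of the original module: a CBC round-trip with
# the helpers above; the plaintext is illustrative only.
def _example_roundtrip():
    key = aes_key()
    iv = aes_iv()
    ciphertext = aes_encrypt(key, 'attribute-value', mode='CBC', iv=iv)
    return aes_decrypt(key, ciphertext, mode='CBC', iv=iv)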
| bsd-3-clause | -4,556,593,097,893,904,400 | 23.844156 | 141 | 0.591218 | false | 3.080515 | false | false | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/azure_reachability_report_parameters.py | 1 | 2096 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureReachabilityReportParameters(Model):
"""Geographic and time constraints for Azure reachability report.
:param provider_location:
:type provider_location:
~azure.mgmt.network.v2017_11_01.models.AzureReachabilityReportLocation
:param providers: List of Internet service providers.
:type providers: list[str]
:param azure_locations: Optional Azure regions to scope the query to.
:type azure_locations: list[str]
:param start_time: The start time for the Azure reachability report.
:type start_time: datetime
:param end_time: The end time for the Azure reachability report.
:type end_time: datetime
"""
_validation = {
'provider_location': {'required': True},
'start_time': {'required': True},
'end_time': {'required': True},
}
_attribute_map = {
'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'},
'providers': {'key': 'providers', 'type': '[str]'},
'azure_locations': {'key': 'azureLocations', 'type': '[str]'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
}
def __init__(self, provider_location, start_time, end_time, providers=None, azure_locations=None):
super(AzureReachabilityReportParameters, self).__init__()
self.provider_location = provider_location
self.providers = providers
self.azure_locations = azure_locations
self.start_time = start_time
self.end_time = end_time
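# Hedged construction sketch, not part of the generated code: building a
# report query for a fixed window. The sibling-module import path and the
# AzureReachabilityReportLocation arguments are assumptions; provider and
# region names are placeholders.
def _example_parameters():
    from datetime import datetime
    from .azure_reachability_report_location import AzureReachabilityReportLocation
    location = AzureReachabilityReportLocation(country='United States')
    return AzureReachabilityReportParameters(
        provider_location=location,
        start_time=datetime(2017, 9, 7),
        end_time=datetime(2017, 9, 10),
        providers=['Comcast'],
        azure_locations=['West US'])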
| mit | -2,620,773,727,717,192,700 | 40.098039 | 102 | 0.623569 | false | 4.268839 | false | false | false |
pwarren/AGDeviceControl | agdevicecontrol/thirdparty/site-packages/darwin/phidgets/servomotor.py | 1 | 3317 | # This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _servomotor
def _swig_setattr(self,class_type,name,value):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
self.__dict__[name] = value
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
class PhidgetServoMotor(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, PhidgetServoMotor, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, PhidgetServoMotor, name)
def __repr__(self):
return "<C PhidgetServoMotor instance at %s>" % (self.this,)
__swig_setmethods__["min_pulse"] = _servomotor.PhidgetServoMotor_min_pulse_set
__swig_getmethods__["min_pulse"] = _servomotor.PhidgetServoMotor_min_pulse_get
if _newclass:min_pulse = property(_servomotor.PhidgetServoMotor_min_pulse_get, _servomotor.PhidgetServoMotor_min_pulse_set)
__swig_setmethods__["max_pulse"] = _servomotor.PhidgetServoMotor_max_pulse_set
__swig_getmethods__["max_pulse"] = _servomotor.PhidgetServoMotor_max_pulse_get
if _newclass:max_pulse = property(_servomotor.PhidgetServoMotor_max_pulse_get, _servomotor.PhidgetServoMotor_max_pulse_set)
__swig_setmethods__["factor"] = _servomotor.PhidgetServoMotor_factor_set
__swig_getmethods__["factor"] = _servomotor.PhidgetServoMotor_factor_get
if _newclass:factor = property(_servomotor.PhidgetServoMotor_factor_get, _servomotor.PhidgetServoMotor_factor_set)
__swig_setmethods__["position"] = _servomotor.PhidgetServoMotor_position_set
__swig_getmethods__["position"] = _servomotor.PhidgetServoMotor_position_get
if _newclass:position = property(_servomotor.PhidgetServoMotor_position_get, _servomotor.PhidgetServoMotor_position_set)
def __init__(self, *args):
_swig_setattr(self, PhidgetServoMotor, 'this', _servomotor.new_PhidgetServoMotor(*args))
_swig_setattr(self, PhidgetServoMotor, 'thisown', 1)
def __del__(self, destroy=_servomotor.delete_PhidgetServoMotor):
try:
if self.thisown: destroy(self)
except: pass
class PhidgetServoMotorPtr(PhidgetServoMotor):
def __init__(self, this):
_swig_setattr(self, PhidgetServoMotor, 'this', this)
if not hasattr(self,"thisown"): _swig_setattr(self, PhidgetServoMotor, 'thisown', 0)
_swig_setattr(self, PhidgetServoMotor,self.__class__,PhidgetServoMotor)
_servomotor.PhidgetServoMotor_swigregister(PhidgetServoMotorPtr)
phidget_reset_PhidgetServoMotor = _servomotor.phidget_reset_PhidgetServoMotor
phidget_servomotor_set_parameters = _servomotor.phidget_servomotor_set_parameters
| gpl-2.0 | 6,873,438,704,202,684,000 | 45.71831 | 127 | 0.708472 | false | 3.25835 | false | false | false |
brosner/django-sqlalchemy | django_sqlalchemy/management/sql.py | 1 | 1402 | from django.db.models.loading import get_models
from django.core.management.sql import custom_sql_for_model
from sqlalchemy import create_engine
from django_sqlalchemy.backend import metadata, session
def reset(engine, app):
metadata.drop_all(engine, tables=_get_tables_for_app(app))
session.commit()
def create(engine, app):
metadata.create_all(engine, tables=_get_tables_for_app(app))
session.commit()
def _get_tables_for_app(app):
tables = []
for model in get_models(app):
tables.append(model.__table__)
tables.extend([f.__table__ for f in model._meta.local_many_to_many])
return tables
def process_custom_sql(models, verbosity):
    # TODO: complete this
    # install custom sql for the specified models
    cursor = connection.cursor()
    for model in models:
        app_name = model._meta.app_label
        custom_sql = custom_sql_for_model(model)
if custom_sql:
if verbosity >= 1:
print "Installing custom SQL for %s.%s model" % (app_name, model._meta.object_name)
try:
for sql in custom_sql:
cursor.execute(sql)
except Exception, e:
sys.stderr.write("Failed to install custom SQL for %s.%s model: %s" % \
(app_name, model._meta.object_name, e))
transaction.rollback_unless_managed()
else:
transaction.commit_unless_managed()
| bsd-3-clause | -8,844,244,939,070,740,000 | 36.891892 | 99 | 0.616976 | false | 3.949296 | false | false | false |
TarasLevelUp/asynqp | src/asynqp/message.py | 1 | 8788 | import json
from collections import OrderedDict
from datetime import datetime
from io import BytesIO
from . import amqptypes
from . import serialisation
class Message(object):
"""
An AMQP Basic message.
Some of the constructor parameters are ignored by the AMQP broker and are provided
just for the convenience of user applications. They are marked "for applications"
in the list below.
:param body: :func:`bytes` , :class:`str` or :class:`dict` representing the body of the message.
Strings will be encoded according to the content_encoding parameter;
dicts will be converted to a string using JSON.
:param dict headers: a dictionary of message headers
:param str content_type: MIME content type
(defaults to 'application/json' if :code:`body` is a :class:`dict`,
or 'application/octet-stream' otherwise)
:param str content_encoding: MIME encoding (defaults to 'utf-8')
:param int delivery_mode: 1 for non-persistent, 2 for persistent
:param int priority: message priority - integer between 0 and 9
:param str correlation_id: correlation id of the message *(for applications)*
:param str reply_to: reply-to address *(for applications)*
:param str expiration: expiration specification *(for applications)*
:param str message_id: unique id of the message *(for applications)*
:param datetime.datetime timestamp: :class:`~datetime.datetime` of when the message was sent
(default: :meth:`datetime.now() <datetime.datetime.now>`)
:param str type: message type *(for applications)*
:param str user_id: ID of the user sending the message *(for applications)*
:param str app_id: ID of the application sending the message *(for applications)*
Attributes are the same as the constructor parameters.
"""
property_types = OrderedDict(
[("content_type", amqptypes.ShortStr),
("content_encoding", amqptypes.ShortStr),
("headers", amqptypes.Table),
("delivery_mode", amqptypes.Octet),
("priority", amqptypes.Octet),
("correlation_id", amqptypes.ShortStr),
("reply_to", amqptypes.ShortStr),
("expiration", amqptypes.ShortStr),
("message_id", amqptypes.ShortStr),
("timestamp", amqptypes.Timestamp),
("type", amqptypes.ShortStr),
("user_id", amqptypes.ShortStr),
("app_id", amqptypes.ShortStr)]
)
def __init__(self, body, *,
headers=None, content_type=None,
content_encoding=None, delivery_mode=None,
priority=None, correlation_id=None,
reply_to=None, expiration=None,
message_id=None, timestamp=None,
type=None, user_id=None,
app_id=None):
if content_encoding is None:
content_encoding = 'utf-8'
if isinstance(body, dict):
body = json.dumps(body)
if content_type is None:
content_type = 'application/json'
elif content_type is None:
content_type = 'application/octet-stream'
if isinstance(body, bytes):
self.body = body
else:
self.body = body.encode(content_encoding)
timestamp = timestamp if timestamp is not None else datetime.now()
self._properties = OrderedDict()
for name, amqptype in self.property_types.items():
value = locals()[name]
if value is not None:
value = amqptype(value)
self._properties[name] = value
def __eq__(self, other):
return (self.body == other.body
and self._properties == other._properties)
def __getattr__(self, name):
try:
return self._properties[name]
except KeyError as e:
raise AttributeError from e
def __setattr__(self, name, value):
amqptype = self.property_types.get(name)
if amqptype is not None:
self._properties[name] = value if isinstance(value, amqptype) else amqptype(value)
return
super().__setattr__(name, value)
def json(self):
"""
Parse the message body as JSON.
:return: the parsed JSON.
"""
return json.loads(self.body.decode(self.content_encoding))
class IncomingMessage(Message):
"""
A message that has been delivered to the client.
Subclass of :class:`Message`.
    .. attribute:: delivery_tag
The *delivery tag* assigned to this message by the AMQP broker.
    .. attribute:: exchange_name
The name of the exchange to which the message was originally published.
    .. attribute:: routing_key
The routing key under which the message was originally published.
"""
def __init__(self, *args, sender, delivery_tag, exchange_name, routing_key, **kwargs):
super().__init__(*args, **kwargs)
self.sender = sender
self.delivery_tag = delivery_tag
self.exchange_name = exchange_name
self.routing_key = routing_key
def ack(self):
"""
Acknowledge the message.
"""
self.sender.send_BasicAck(self.delivery_tag)
def reject(self, *, requeue=True):
"""
Reject the message.
:keyword bool requeue: if true, the broker will attempt to requeue the
message and deliver it to an alternate consumer.
"""
self.sender.send_BasicReject(self.delivery_tag, requeue)
def get_header_payload(message, class_id):
return ContentHeaderPayload(class_id, len(message.body), list(message._properties.values()))
# NB: the total frame size will be 8 bytes larger than frame_body_size
def get_frame_payloads(message, frame_body_size):
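    # e.g. a body of b'abcdef' with frame_body_size=4 is split into
    # [b'abcd', b'ef']; an empty body yields no frames.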
frames = []
remaining = message.body
while remaining:
frame = remaining[:frame_body_size]
remaining = remaining[frame_body_size:]
frames.append(frame)
return frames
class ContentHeaderPayload(object):
synchronous = True
def __init__(self, class_id, body_length, properties):
self.class_id = class_id
self.body_length = body_length
self.properties = properties
def __eq__(self, other):
return (self.class_id == other.class_id
and self.body_length == other.body_length
and self.properties == other.properties)
def write(self, stream):
stream.write(serialisation.pack_unsigned_short(self.class_id))
stream.write(serialisation.pack_unsigned_short(0)) # weight
stream.write(serialisation.pack_unsigned_long_long(self.body_length))
bytesio = BytesIO()
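        # Each of the 13 basic properties owns one bit of a 16-bit flags word,
        # assigned from bit 15 (content_type) downwards; e.g. a message with only
        # content_type and delivery_mode set produces property_flags == 0x9000.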
property_flags = 0
bitshift = 15
for val in self.properties:
if val is not None:
property_flags |= (1 << bitshift)
val.write(bytesio)
bitshift -= 1
stream.write(serialisation.pack_unsigned_short(property_flags))
stream.write(bytesio.getvalue())
@classmethod
def read(cls, raw):
bytesio = BytesIO(raw)
class_id = serialisation.read_unsigned_short(bytesio)
weight = serialisation.read_unsigned_short(bytesio)
assert weight == 0
body_length = serialisation.read_unsigned_long_long(bytesio)
property_flags_short = serialisation.read_unsigned_short(bytesio)
properties = []
for i, amqptype in enumerate(Message.property_types.values()):
            pos = 15 - i  # We started from `content_type` which has pos==15
if property_flags_short & (1 << pos):
properties.append(amqptype.read(bytesio))
else:
properties.append(None)
return cls(class_id, body_length, properties)
class MessageBuilder(object):
def __init__(self, sender, delivery_tag, redelivered, exchange_name, routing_key, consumer_tag=None):
self.sender = sender
self.delivery_tag = delivery_tag
self.body = b''
self.consumer_tag = consumer_tag
self.exchange_name = exchange_name
self.routing_key = routing_key
def set_header(self, header):
self.body_length = header.body_length
self.properties = {}
for name, prop in zip(IncomingMessage.property_types, header.properties):
self.properties[name] = prop
def add_body_chunk(self, chunk):
self.body += chunk
def done(self):
return len(self.body) == self.body_length
def build(self):
return IncomingMessage(
self.body,
sender=self.sender,
delivery_tag=self.delivery_tag,
exchange_name=self.exchange_name,
routing_key=self.routing_key,
**self.properties)
| mit | 9,189,294,022,764,453,000 | 34.152 | 105 | 0.624033 | false | 4.198758 | false | false | false |
porksmash/swarfarm | bestiary/models.py | 1 | 75620 | from collections import OrderedDict
from functools import partial
from math import floor, ceil
from operator import is_not
from django.contrib.auth.models import User
from django.contrib.postgres.fields import ArrayField
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.utils.safestring import mark_safe
from django.utils.text import slugify
class Monster(models.Model):
ELEMENT_PURE = 'pure'
ELEMENT_FIRE = 'fire'
ELEMENT_WIND = 'wind'
ELEMENT_WATER = 'water'
ELEMENT_LIGHT = 'light'
ELEMENT_DARK = 'dark'
TYPE_ATTACK = 'attack'
TYPE_HP = 'hp'
TYPE_SUPPORT = 'support'
TYPE_DEFENSE = 'defense'
TYPE_MATERIAL = 'material'
TYPE_NONE = 'none'
ELEMENT_CHOICES = (
(ELEMENT_PURE, 'Pure'),
(ELEMENT_FIRE, 'Fire'),
(ELEMENT_WIND, 'Wind'),
(ELEMENT_WATER, 'Water'),
(ELEMENT_LIGHT, 'Light'),
(ELEMENT_DARK, 'Dark'),
)
TYPE_CHOICES = (
(TYPE_NONE, 'None'),
(TYPE_ATTACK, 'Attack'),
(TYPE_HP, 'HP'),
(TYPE_SUPPORT, 'Support'),
(TYPE_DEFENSE, 'Defense'),
(TYPE_MATERIAL, 'Material'),
)
STAR_CHOICES = (
(1, mark_safe('1<span class="glyphicon glyphicon-star"></span>')),
(2, mark_safe('2<span class="glyphicon glyphicon-star"></span>')),
(3, mark_safe('3<span class="glyphicon glyphicon-star"></span>')),
(4, mark_safe('4<span class="glyphicon glyphicon-star"></span>')),
(5, mark_safe('5<span class="glyphicon glyphicon-star"></span>')),
(6, mark_safe('6<span class="glyphicon glyphicon-star"></span>')),
)
name = models.CharField(max_length=40)
com2us_id = models.IntegerField(blank=True, null=True, help_text='ID given in game data files')
family_id = models.IntegerField(blank=True, null=True, help_text='Identifier that matches same family monsters')
image_filename = models.CharField(max_length=250, null=True, blank=True)
element = models.CharField(max_length=6, choices=ELEMENT_CHOICES, default=ELEMENT_FIRE)
archetype = models.CharField(max_length=10, choices=TYPE_CHOICES, default=TYPE_ATTACK)
base_stars = models.IntegerField(choices=STAR_CHOICES, help_text='Default stars a monster is summoned at')
obtainable = models.BooleanField(default=True, help_text='Is available for players to acquire')
can_awaken = models.BooleanField(default=True, help_text='Has an awakened form')
is_awakened = models.BooleanField(default=False, help_text='Is the awakened form')
awaken_bonus = models.TextField(blank=True, help_text='Bonus given upon awakening')
skills = models.ManyToManyField('Skill', blank=True)
skill_ups_to_max = models.IntegerField(null=True, blank=True, help_text='Number of skill-ups required to max all skills')
leader_skill = models.ForeignKey('LeaderSkill', on_delete=models.SET_NULL, null=True, blank=True)
# 1-star lvl 1 values from data source
raw_hp = models.IntegerField(null=True, blank=True, help_text='HP value from game data files')
raw_attack = models.IntegerField(null=True, blank=True, help_text='ATK value from game data files')
raw_defense = models.IntegerField(null=True, blank=True, help_text='DEF value from game data files')
# Base-star lvl MAX values as seen in-game
base_hp = models.IntegerField(null=True, blank=True, help_text='HP at base_stars lvl 1')
base_attack = models.IntegerField(null=True, blank=True, help_text='ATK at base_stars lvl 1')
base_defense = models.IntegerField(null=True, blank=True, help_text='DEF at base_stars lvl 1')
# 6-star lvl MAX values
max_lvl_hp = models.IntegerField(null=True, blank=True, help_text='HP at 6-stars lvl 40')
max_lvl_attack = models.IntegerField(null=True, blank=True, help_text='ATK at 6-stars lvl 40')
max_lvl_defense = models.IntegerField(null=True, blank=True, help_text='DEF at 6-stars lvl 40')
speed = models.IntegerField(null=True, blank=True)
crit_rate = models.IntegerField(null=True, blank=True)
crit_damage = models.IntegerField(null=True, blank=True)
resistance = models.IntegerField(null=True, blank=True)
accuracy = models.IntegerField(null=True, blank=True)
# Homunculus monster fields
homunculus = models.BooleanField(default=False)
craft_materials = models.ManyToManyField('CraftMaterial', through='MonsterCraftCost')
craft_cost = models.IntegerField(null=True, blank=True, help_text='Mana cost to craft this monster')
# Unicorn fields
transforms_into = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='+', help_text='Monster which this monster can transform into during battle')
awakens_from = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='+', help_text='Unawakened form of this monster')
awakens_to = models.ForeignKey('self', on_delete=models.SET_NULL, null=True, blank=True, related_name='+', help_text='Awakened form of this monster')
awaken_mats_fire_low = models.IntegerField(blank=True, default=0)
awaken_mats_fire_mid = models.IntegerField(blank=True, default=0)
awaken_mats_fire_high = models.IntegerField(blank=True, default=0)
awaken_mats_water_low = models.IntegerField(blank=True, default=0)
awaken_mats_water_mid = models.IntegerField(blank=True, default=0)
awaken_mats_water_high = models.IntegerField(blank=True, default=0)
awaken_mats_wind_low = models.IntegerField(blank=True, default=0)
awaken_mats_wind_mid = models.IntegerField(blank=True, default=0)
awaken_mats_wind_high = models.IntegerField(blank=True, default=0)
awaken_mats_light_low = models.IntegerField(blank=True, default=0)
awaken_mats_light_mid = models.IntegerField(blank=True, default=0)
awaken_mats_light_high = models.IntegerField(blank=True, default=0)
awaken_mats_dark_low = models.IntegerField(blank=True, default=0)
awaken_mats_dark_mid = models.IntegerField(blank=True, default=0)
awaken_mats_dark_high = models.IntegerField(blank=True, default=0)
awaken_mats_magic_low = models.IntegerField(blank=True, default=0)
awaken_mats_magic_mid = models.IntegerField(blank=True, default=0)
awaken_mats_magic_high = models.IntegerField(blank=True, default=0)
source = models.ManyToManyField('Source', blank=True, help_text='Where this monster can be acquired from')
farmable = models.BooleanField(default=False, help_text='Monster can be acquired easily without luck')
fusion_food = models.BooleanField(default=False, help_text='Monster is used as a fusion ingredient')
bestiary_slug = models.SlugField(max_length=255, editable=False, null=True)
def image_url(self):
if self.image_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/monsters/' + self.image_filename))
else:
return 'No Image'
def max_level_from_stars(self, stars=None):
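        # 10 + 5 per star: 1* -> 15, 2* -> 20, 3* -> 25, 4* -> 30, 5* -> 35, 6* -> 40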
if stars:
return 10 + stars * 5
else:
return 10 + self.base_stars * 5
def get_stats(self):
from collections import OrderedDict
start_grade = self.base_stars
stats_list = OrderedDict()
if self.is_awakened and self.base_stars > 1:
start_grade -= 1
for grade in range(start_grade, 7):
max_level = self.max_level_from_stars(grade)
# Add the actual calculated stats
stats_list[str(grade)] = {
'HP': self.actual_hp(grade, max_level),
'ATK': self.actual_attack(grade, max_level),
'DEF': self.actual_defense(grade, max_level),
}
return stats_list
def actual_hp(self, grade, level):
# Check that base stat exists first
if not self.raw_hp:
return None
else:
return self._calculate_actual_stat(self.raw_hp, grade, level) * 15
    def actual_attack(self, grade, level):
# Check that base stat exists first
if not self.raw_attack:
return None
else:
return self._calculate_actual_stat(self.raw_attack, grade, level)
    def actual_defense(self, grade, level):
# Check that base stat exists first
if not self.raw_defense:
return None
else:
return self._calculate_actual_stat(self.raw_defense, grade, level)
@staticmethod
def _calculate_actual_stat(stat, grade, level):
# Magic multipliers taken from summoner's war wikia calculator. Used to calculate stats for lvl 1 and lvl MAX
magic_multipliers = [
{'1': 1.0, 'max': 1.9958},
{'1': 1.5966, 'max': 3.03050646},
{'1': 2.4242774, 'max': 4.364426603},
{'1': 3.4914444, 'max': 5.941390935},
{'1': 4.7529032, 'max': 8.072330795},
{'1': 6.4582449, 'max': 10.97901633},
]
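        # Worked example: a raw stat of 100 at grade 3 works out to
        # round(100 * 2.4242774) = 242 at level 1 and round(100 * 4.364426603) = 436
        # at that grade's max level (25); levels in between follow the exponential
        # curve fitted below.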
max_lvl = 10 + grade * 5
stat_lvl_1 = round(stat * magic_multipliers[grade - 1]['1'], 0)
stat_lvl_max = round(stat * magic_multipliers[grade - 1]['max'], 0)
if level == 1:
return int(stat_lvl_1)
elif level == max_lvl:
return int(stat_lvl_max)
else:
# Use exponential function in format value=ae^(bx)
# a=stat_lvl_1*e^(-b)
from math import log, exp
b_coeff = log(stat_lvl_max / stat_lvl_1) / (max_lvl - 1)
return int(round((stat_lvl_1 * exp(-b_coeff)) * exp(b_coeff * level)))
def monster_family(self):
should_be_shown = Q(obtainable=True) | Q(transforms_into__isnull=False)
family = Monster.objects.filter(family_id=self.family_id).filter(should_be_shown).order_by('element', 'is_awakened')
return [
family.filter(element=Monster.ELEMENT_FIRE).first(),
family.filter(element=Monster.ELEMENT_WATER).first(),
family.filter(element=Monster.ELEMENT_WIND).first(),
family.filter(element=Monster.ELEMENT_LIGHT).first(),
family.filter(element=Monster.ELEMENT_DARK).first(),
]
def all_skill_effects(self):
return SkillEffect.objects.filter(pk__in=self.skills.exclude(skill_effect=None).values_list('skill_effect', flat=True))
def get_awakening_materials(self):
mats = OrderedDict()
mats['magic'] = OrderedDict()
mats['magic']['low'] = self.awaken_mats_magic_low
mats['magic']['mid'] = self.awaken_mats_magic_mid
mats['magic']['high'] = self.awaken_mats_magic_high
mats['fire'] = OrderedDict()
mats['fire']['low'] = self.awaken_mats_fire_low
mats['fire']['mid'] = self.awaken_mats_fire_mid
mats['fire']['high'] = self.awaken_mats_fire_high
mats['water'] = OrderedDict()
mats['water']['low'] = self.awaken_mats_water_low
mats['water']['mid'] = self.awaken_mats_water_mid
mats['water']['high'] = self.awaken_mats_water_high
mats['wind'] = OrderedDict()
mats['wind']['low'] = self.awaken_mats_wind_low
mats['wind']['mid'] = self.awaken_mats_wind_mid
mats['wind']['high'] = self.awaken_mats_wind_high
mats['light'] = OrderedDict()
mats['light']['low'] = self.awaken_mats_light_low
mats['light']['mid'] = self.awaken_mats_light_mid
mats['light']['high'] = self.awaken_mats_light_high
mats['dark'] = OrderedDict()
mats['dark']['low'] = self.awaken_mats_dark_low
mats['dark']['mid'] = self.awaken_mats_dark_mid
mats['dark']['high'] = self.awaken_mats_dark_high
return mats
def clean(self):
# Update null values
if self.awaken_mats_fire_high is None:
self.awaken_mats_fire_high = 0
if self.awaken_mats_fire_mid is None:
self.awaken_mats_fire_mid = 0
if self.awaken_mats_fire_low is None:
self.awaken_mats_fire_low = 0
if self.awaken_mats_water_high is None:
self.awaken_mats_water_high = 0
if self.awaken_mats_water_mid is None:
self.awaken_mats_water_mid = 0
if self.awaken_mats_water_low is None:
self.awaken_mats_water_low = 0
if self.awaken_mats_wind_high is None:
self.awaken_mats_wind_high = 0
if self.awaken_mats_wind_mid is None:
self.awaken_mats_wind_mid = 0
if self.awaken_mats_wind_low is None:
self.awaken_mats_wind_low = 0
if self.awaken_mats_light_high is None:
self.awaken_mats_light_high = 0
if self.awaken_mats_light_mid is None:
self.awaken_mats_light_mid = 0
if self.awaken_mats_light_low is None:
self.awaken_mats_light_low = 0
if self.awaken_mats_dark_high is None:
self.awaken_mats_dark_high = 0
if self.awaken_mats_dark_mid is None:
self.awaken_mats_dark_mid = 0
if self.awaken_mats_dark_low is None:
self.awaken_mats_dark_low = 0
if self.awaken_mats_magic_high is None:
self.awaken_mats_magic_high = 0
if self.awaken_mats_magic_mid is None:
self.awaken_mats_magic_mid = 0
if self.awaken_mats_magic_low is None:
self.awaken_mats_magic_low = 0
super(Monster, self).clean()
def save(self, *args, **kwargs):
# Update null values
if self.awaken_mats_fire_high is None:
self.awaken_mats_fire_high = 0
if self.awaken_mats_fire_mid is None:
self.awaken_mats_fire_mid = 0
if self.awaken_mats_fire_low is None:
self.awaken_mats_fire_low = 0
if self.awaken_mats_water_high is None:
self.awaken_mats_water_high = 0
if self.awaken_mats_water_mid is None:
self.awaken_mats_water_mid = 0
if self.awaken_mats_water_low is None:
self.awaken_mats_water_low = 0
if self.awaken_mats_wind_high is None:
self.awaken_mats_wind_high = 0
if self.awaken_mats_wind_mid is None:
self.awaken_mats_wind_mid = 0
if self.awaken_mats_wind_low is None:
self.awaken_mats_wind_low = 0
if self.awaken_mats_light_high is None:
self.awaken_mats_light_high = 0
if self.awaken_mats_light_mid is None:
self.awaken_mats_light_mid = 0
if self.awaken_mats_light_low is None:
self.awaken_mats_light_low = 0
if self.awaken_mats_dark_high is None:
self.awaken_mats_dark_high = 0
if self.awaken_mats_dark_mid is None:
self.awaken_mats_dark_mid = 0
if self.awaken_mats_dark_low is None:
self.awaken_mats_dark_low = 0
if self.awaken_mats_magic_high is None:
self.awaken_mats_magic_high = 0
if self.awaken_mats_magic_mid is None:
self.awaken_mats_magic_mid = 0
if self.awaken_mats_magic_low is None:
self.awaken_mats_magic_low = 0
if self.raw_hp:
self.base_hp = self._calculate_actual_stat(
self.raw_hp,
self.base_stars,
self.max_level_from_stars(self.base_stars)
) * 15
self.max_lvl_hp = self.actual_hp(6, 40)
if self.raw_attack:
self.base_attack = self._calculate_actual_stat(
self.raw_attack,
self.base_stars,
self.max_level_from_stars(self.base_stars)
)
self.max_lvl_attack = self.actual_attack(6, 40)
if self.raw_defense:
self.base_defense = self._calculate_actual_stat(
self.raw_defense,
self.base_stars,
self.max_level_from_stars(self.base_stars)
)
self.max_lvl_defense = self.actual_defense(6, 40)
if self.is_awakened and self.awakens_from:
self.bestiary_slug = self.awakens_from.bestiary_slug
else:
if self.awakens_to is not None:
self.bestiary_slug = slugify(" ".join([str(self.com2us_id), self.element, self.name, self.awakens_to.name]))
else:
self.bestiary_slug = slugify(" ".join([str(self.com2us_id), self.element, self.name]))
# Pull info from unawakened version of this monster. This copying of data is one directional only
if self.awakens_from:
# Copy awaken bonus from unawakened version
if self.is_awakened and self.awakens_from.awaken_bonus:
self.awaken_bonus = self.awakens_from.awaken_bonus
super(Monster, self).save(*args, **kwargs)
# Automatically set awakens from/to relationship if none exists
if self.awakens_from and self.awakens_from.awakens_to is not self:
self.awakens_from.awakens_to = self
self.awakens_from.save()
elif self.awakens_to and self.awakens_to.awakens_from is not self:
self.awakens_to.awakens_from = self
self.awakens_to.save()
class Meta:
ordering = ['name', 'element']
def __str__(self):
if self.is_awakened:
return self.name
else:
return self.name + ' (' + self.element.capitalize() + ')'
class Skill(models.Model):
name = models.CharField(max_length=40)
com2us_id = models.IntegerField(blank=True, null=True, help_text='ID given in game data files')
description = models.TextField()
slot = models.IntegerField(default=1, help_text='Which button position the skill is in during battle')
skill_effect = models.ManyToManyField('SkillEffect', blank=True)
effect = models.ManyToManyField('SkillEffect', through='SkillEffectDetail', blank=True, related_name='effect', help_text='Detailed skill effect information')
cooltime = models.IntegerField(null=True, blank=True, help_text='Number of turns until skill can be used again')
hits = models.IntegerField(default=1, help_text='Number of times this skill hits an enemy')
aoe = models.BooleanField(default=False, help_text='Skill affects all enemies or allies')
passive = models.BooleanField(default=False, help_text='Skill activates automatically')
max_level = models.IntegerField()
    level_progress_description = models.TextField(null=True, blank=True, help_text='Description of the bonus given at each skill level')
icon_filename = models.CharField(max_length=100, null=True, blank=True)
multiplier_formula = models.TextField(null=True, blank=True, help_text='Parsed multiplier formula')
multiplier_formula_raw = models.CharField(max_length=150, null=True, blank=True, help_text='Multiplier formula given in game data files')
scaling_stats = models.ManyToManyField('ScalingStat', blank=True, help_text='Monster stats which this skill scales on')
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/skills/' + self.icon_filename))
else:
return 'No Image'
def level_progress_description_list(self):
return self.level_progress_description.splitlines()
def __str__(self):
if self.name:
name = self.name
else:
name = ''
if self.icon_filename:
icon = ' - ' + self.icon_filename
else:
icon = ''
if self.com2us_id:
com2us_id = ' - ' + str(self.com2us_id)
else:
com2us_id = ''
return name + com2us_id + icon
class Meta:
ordering = ['slot', 'name']
verbose_name = 'Skill'
verbose_name_plural = 'Skills'
class LeaderSkill(models.Model):
ATTRIBUTE_HP = 1
ATTRIBUTE_ATK = 2
ATTRIBUTE_DEF = 3
ATTRIBUTE_SPD = 4
ATTRIBUTE_CRIT_RATE = 5
ATTRIBUTE_RESIST = 6
ATTRIBUTE_ACCURACY = 7
ATTRIBUTE_CRIT_DMG = 8
ATTRIBUTE_CHOICES = (
(ATTRIBUTE_HP, 'HP'),
(ATTRIBUTE_ATK, 'Attack Power'),
(ATTRIBUTE_DEF, 'Defense'),
(ATTRIBUTE_SPD, 'Attack Speed'),
(ATTRIBUTE_CRIT_RATE, 'Critical Rate'),
(ATTRIBUTE_RESIST, 'Resistance'),
(ATTRIBUTE_ACCURACY, 'Accuracy'),
(ATTRIBUTE_CRIT_DMG, 'Critical DMG'),
)
AREA_GENERAL = 1
AREA_DUNGEON = 2
AREA_ELEMENT = 3
AREA_ARENA = 4
AREA_GUILD = 5
AREA_CHOICES = (
(AREA_GENERAL, 'General'),
(AREA_DUNGEON, 'Dungeon'),
(AREA_ELEMENT, 'Element'),
(AREA_ARENA, 'Arena'),
(AREA_GUILD, 'Guild'),
)
attribute = models.IntegerField(choices=ATTRIBUTE_CHOICES, help_text='Monster stat which is granted the bonus')
amount = models.IntegerField(help_text='Amount of bonus granted')
area = models.IntegerField(choices=AREA_CHOICES, default=AREA_GENERAL, help_text='Where this leader skill has an effect')
element = models.CharField(max_length=6, null=True, blank=True, choices=Monster.ELEMENT_CHOICES, help_text='Element of monster which this leader skill applies to')
def skill_string(self):
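        # e.g. attribute=ATK, amount=33, area=Element (water) renders as:
        # "Increase the Attack Power of ally monsters with Water attribute by 33%"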
if self.area == self.AREA_DUNGEON:
condition = 'in the Dungeons '
elif self.area == self.AREA_ARENA:
condition = 'in the Arena '
elif self.area == self.AREA_GUILD:
condition = 'in Guild Content '
elif self.area == self.AREA_ELEMENT:
condition = 'with {} attribute '.format(self.get_element_display())
else:
condition = ''
return "Increase the {0} of ally monsters {1}by {2}%".format(self.get_attribute_display(), condition, self.amount)
def icon_filename(self):
if self.area == self.AREA_ELEMENT:
suffix = '_{}'.format(self.get_element_display())
elif self.area == self.AREA_GENERAL:
suffix = ''
else:
suffix = '_{}'.format(self.get_area_display())
return 'leader_skill_{0}{1}.png'.format(self.get_attribute_display().replace(' ', '_'), suffix)
def image_url(self):
return mark_safe('<img src="{}" height="42" width="42"/>'.format(
static('herders/images/skills/leader/' + self.icon_filename())
))
def __str__(self):
if self.area == self.AREA_ELEMENT:
condition = ' {}'.format(self.get_element_display())
elif self.area == self.AREA_GENERAL:
condition = ''
else:
condition = ' {}'.format(self.get_area_display())
return self.get_attribute_display() + ' ' + str(self.amount) + '%' + condition
class Meta:
ordering = ['attribute', 'amount', 'element']
verbose_name = 'Leader Skill'
verbose_name_plural = 'Leader Skills'
class SkillEffectBuffsManager(models.Manager):
def get_queryset(self):
return super(SkillEffectBuffsManager, self).get_queryset().values_list('pk', 'icon_filename').filter(is_buff=True).exclude(icon_filename='')
class SkillEffectDebuffsManager(models.Manager):
def get_queryset(self):
return super(SkillEffectDebuffsManager, self).get_queryset().values_list('pk', 'icon_filename').filter(is_buff=False).exclude(icon_filename='')
class SkillEffectOtherManager(models.Manager):
def get_queryset(self):
return super(SkillEffectOtherManager, self).get_queryset().filter(icon_filename='')
class SkillEffect(models.Model):
is_buff = models.BooleanField(default=True, help_text='Effect is beneficial to affected monster')
name = models.CharField(max_length=40)
description = models.TextField()
icon_filename = models.CharField(max_length=100, blank=True, default='')
objects = models.Manager()
class Meta:
ordering = ['name']
verbose_name = 'Skill Effect'
verbose_name_plural = 'Skill Effects'
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/buffs/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class SkillEffectDetail(models.Model):
skill = models.ForeignKey(Skill, on_delete=models.CASCADE)
effect = models.ForeignKey(SkillEffect, on_delete=models.CASCADE)
aoe = models.BooleanField(default=False, help_text='Effect applies to entire friendly or enemy group')
single_target = models.BooleanField(default=False, help_text='Effect applies to a single monster')
self_effect = models.BooleanField(default=False, help_text='Effect applies to the monster using the skill')
    chance = models.IntegerField(null=True, blank=True, help_text='Chance of effect occurring per hit')
on_crit = models.BooleanField(default=False)
on_death = models.BooleanField(default=False)
random = models.BooleanField(default=False, help_text='Skill effect applies randomly to the target')
quantity = models.IntegerField(null=True, blank=True, help_text='Number of items this effect affects on the target')
all = models.BooleanField(default=False, help_text='This effect affects all items on the target')
self_hp = models.BooleanField(default=False, help_text="Amount of this effect is based on casting monster's HP")
target_hp = models.BooleanField(default=False, help_text="Amount of this effect is based on target monster's HP")
damage = models.BooleanField(default=False, help_text='Amount of this effect is based on damage dealt')
note = models.TextField(blank=True, null=True, help_text="Explain anything else that doesn't fit in other fields")
class ScalingStat(models.Model):
stat = models.CharField(max_length=20)
com2us_desc = models.CharField(max_length=30, null=True, blank=True)
description = models.TextField(null=True, blank=True)
def __str__(self):
return self.stat
class Meta:
ordering = ['stat',]
verbose_name = 'Scaling Stat'
verbose_name_plural = 'Scaling Stats'
class HomunculusSkill(models.Model):
skill = models.ForeignKey(Skill, on_delete=models.CASCADE)
monsters = models.ManyToManyField(Monster)
craft_materials = models.ManyToManyField('CraftMaterial', through='HomunculusSkillCraftCost', help_text='Crafting materials required to purchase')
mana_cost = models.IntegerField(default=0, help_text='Cost to purchase')
prerequisites = models.ManyToManyField(Skill, blank=True, related_name='homunculus_prereq', help_text='Skills which must be acquired first')
def __str__(self):
return '{} ({})'.format(self.skill, self.skill.com2us_id)
class Source(models.Model):
name = models.CharField(max_length=100)
description = models.TextField(null=True, blank=True)
icon_filename = models.CharField(max_length=100, null=True, blank=True)
farmable_source = models.BooleanField(default=False)
meta_order = models.IntegerField(db_index=True, default=0)
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/icons/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class Meta:
ordering = ['meta_order', 'icon_filename', 'name']
class Fusion(models.Model):
product = models.ForeignKey('Monster', on_delete=models.CASCADE, related_name='product')
stars = models.IntegerField()
cost = models.IntegerField()
ingredients = models.ManyToManyField('Monster')
meta_order = models.IntegerField(db_index=True, default=0)
def __str__(self):
return str(self.product) + ' Fusion'
class Meta:
ordering = ['meta_order']
def sub_fusion_available(self):
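        # True when the unawakened form of any ingredient is itself the product
        # of another fusion (i.e. a fusion chain).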
return Fusion.objects.filter(product__in=self.ingredients.values_list('awakens_from__pk', flat=True)).exists()
def total_awakening_cost(self, owned_ingredients=None):
cost = {
'magic': {
'low': 0,
'mid': 0,
'high': 0,
},
'fire': {
'low': 0,
'mid': 0,
'high': 0,
},
'water': {
'low': 0,
'mid': 0,
'high': 0,
},
'wind': {
'low': 0,
'mid': 0,
'high': 0,
},
'light': {
'low': 0,
'mid': 0,
'high': 0,
},
'dark': {
'low': 0,
'mid': 0,
'high': 0,
},
}
if owned_ingredients:
qs = self.ingredients.exclude(pk__in=[o.monster.pk for o in owned_ingredients])
else:
qs = self.ingredients.all()
for ingredient in qs:
if ingredient.awakens_from:
cost['magic']['low'] += ingredient.awakens_from.awaken_mats_magic_low
cost['magic']['mid'] += ingredient.awakens_from.awaken_mats_magic_mid
cost['magic']['high'] += ingredient.awakens_from.awaken_mats_magic_high
cost['fire']['low'] += ingredient.awakens_from.awaken_mats_fire_low
cost['fire']['mid'] += ingredient.awakens_from.awaken_mats_fire_mid
cost['fire']['high'] += ingredient.awakens_from.awaken_mats_fire_high
cost['water']['low'] += ingredient.awakens_from.awaken_mats_water_low
cost['water']['mid'] += ingredient.awakens_from.awaken_mats_water_mid
cost['water']['high'] += ingredient.awakens_from.awaken_mats_water_high
cost['wind']['low'] += ingredient.awakens_from.awaken_mats_wind_low
cost['wind']['mid'] += ingredient.awakens_from.awaken_mats_wind_mid
cost['wind']['high'] += ingredient.awakens_from.awaken_mats_wind_high
cost['light']['low'] += ingredient.awakens_from.awaken_mats_light_low
cost['light']['mid'] += ingredient.awakens_from.awaken_mats_light_mid
cost['light']['high'] += ingredient.awakens_from.awaken_mats_light_high
cost['dark']['low'] += ingredient.awakens_from.awaken_mats_dark_low
cost['dark']['mid'] += ingredient.awakens_from.awaken_mats_dark_mid
cost['dark']['high'] += ingredient.awakens_from.awaken_mats_dark_high
return cost
class Building(models.Model):
AREA_GENERAL = 0
AREA_GUILD = 1
AREA_CHOICES = [
(AREA_GENERAL, 'Everywhere'),
(AREA_GUILD, 'Guild Content'),
]
STAT_HP = 0
STAT_ATK = 1
STAT_DEF = 2
STAT_SPD = 3
STAT_CRIT_RATE_PCT = 4
STAT_CRIT_DMG_PCT = 5
STAT_RESIST_PCT = 6
STAT_ACCURACY_PCT = 7
MAX_ENERGY = 8
MANA_STONE_STORAGE = 9
MANA_STONE_PRODUCTION = 10
ENERGY_PRODUCTION = 11
ARCANE_TOWER_ATK = 12
ARCANE_TOWER_SPD = 13
STAT_CHOICES = [
(STAT_HP, 'HP'),
(STAT_ATK, 'ATK'),
(STAT_DEF, 'DEF'),
(STAT_SPD, 'SPD'),
(STAT_CRIT_RATE_PCT, 'CRI Rate'),
(STAT_CRIT_DMG_PCT, 'CRI Dmg'),
(STAT_RESIST_PCT, 'Resistance'),
(STAT_ACCURACY_PCT, 'Accuracy'),
(MAX_ENERGY, 'Max. Energy'),
(MANA_STONE_STORAGE, 'Mana Stone Storage'),
(MANA_STONE_PRODUCTION, 'Mana Stone Production Rate'),
(ENERGY_PRODUCTION, 'Energy Production Rate'),
(ARCANE_TOWER_ATK, 'Arcane Tower ATK'),
(ARCANE_TOWER_SPD, 'Arcane Tower SPD'),
]
PERCENT_STATS = [
STAT_HP,
STAT_ATK,
STAT_DEF,
STAT_SPD,
STAT_CRIT_RATE_PCT,
STAT_CRIT_DMG_PCT,
STAT_RESIST_PCT,
STAT_ACCURACY_PCT,
MANA_STONE_PRODUCTION,
ENERGY_PRODUCTION,
ARCANE_TOWER_ATK,
ARCANE_TOWER_SPD,
]
com2us_id = models.IntegerField()
name = models.CharField(max_length=30)
max_level = models.IntegerField()
area = models.IntegerField(choices=AREA_CHOICES, null=True, blank=True)
affected_stat = models.IntegerField(choices=STAT_CHOICES, null=True, blank=True)
element = models.CharField(max_length=6, choices=Monster.ELEMENT_CHOICES, blank=True, null=True)
stat_bonus = ArrayField(models.IntegerField(blank=True, null=True))
upgrade_cost = ArrayField(models.IntegerField(blank=True, null=True))
description = models.TextField(null=True, blank=True)
icon_filename = models.CharField(max_length=100, null=True, blank=True)
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/buildings/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class CraftMaterial(models.Model):
com2us_id = models.IntegerField()
name = models.CharField(max_length=40)
icon_filename = models.CharField(max_length=100, null=True, blank=True)
sell_value = models.IntegerField(blank=True, null=True)
source = models.ManyToManyField(Source, blank=True)
def image_url(self):
if self.icon_filename:
return mark_safe('<img src="%s" height="42" width="42"/>' % static('herders/images/crafts/' + self.icon_filename))
else:
return 'No Image'
def __str__(self):
return self.name
class MonsterCraftCost(models.Model):
monster = models.ForeignKey(Monster, on_delete=models.CASCADE)
craft = models.ForeignKey(CraftMaterial, on_delete=models.CASCADE)
quantity = models.IntegerField()
def __str__(self):
return '{} - qty. {}'.format(self.craft.name, self.quantity)
class HomunculusSkillCraftCost(models.Model):
skill = models.ForeignKey(HomunculusSkill, on_delete=models.CASCADE)
craft = models.ForeignKey(CraftMaterial, on_delete=models.CASCADE)
quantity = models.IntegerField()
def __str__(self):
return '{} - qty. {}'.format(self.craft.name, self.quantity)
class RuneObjectBase:
# Provides basic rune related constants
TYPE_ENERGY = 1
TYPE_FATAL = 2
TYPE_BLADE = 3
TYPE_RAGE = 4
TYPE_SWIFT = 5
TYPE_FOCUS = 6
TYPE_GUARD = 7
TYPE_ENDURE = 8
TYPE_VIOLENT = 9
TYPE_WILL = 10
TYPE_NEMESIS = 11
TYPE_SHIELD = 12
TYPE_REVENGE = 13
TYPE_DESPAIR = 14
TYPE_VAMPIRE = 15
TYPE_DESTROY = 16
TYPE_FIGHT = 17
TYPE_DETERMINATION = 18
TYPE_ENHANCE = 19
TYPE_ACCURACY = 20
TYPE_TOLERANCE = 21
TYPE_CHOICES = (
(TYPE_ENERGY, 'Energy'),
(TYPE_FATAL, 'Fatal'),
(TYPE_BLADE, 'Blade'),
(TYPE_RAGE, 'Rage'),
(TYPE_SWIFT, 'Swift'),
(TYPE_FOCUS, 'Focus'),
(TYPE_GUARD, 'Guard'),
(TYPE_ENDURE, 'Endure'),
(TYPE_VIOLENT, 'Violent'),
(TYPE_WILL, 'Will'),
(TYPE_NEMESIS, 'Nemesis'),
(TYPE_SHIELD, 'Shield'),
(TYPE_REVENGE, 'Revenge'),
(TYPE_DESPAIR, 'Despair'),
(TYPE_VAMPIRE, 'Vampire'),
(TYPE_DESTROY, 'Destroy'),
(TYPE_FIGHT, 'Fight'),
(TYPE_DETERMINATION, 'Determination'),
(TYPE_ENHANCE, 'Enhance'),
(TYPE_ACCURACY, 'Accuracy'),
(TYPE_TOLERANCE, 'Tolerance'),
)
STAR_CHOICES = (
(1, 1),
(2, 2),
(3, 3),
(4, 4),
(5, 5),
(6, 6),
)
STAT_HP = 1
STAT_HP_PCT = 2
STAT_ATK = 3
STAT_ATK_PCT = 4
STAT_DEF = 5
STAT_DEF_PCT = 6
STAT_SPD = 7
STAT_CRIT_RATE_PCT = 8
STAT_CRIT_DMG_PCT = 9
STAT_RESIST_PCT = 10
STAT_ACCURACY_PCT = 11
# Used for selecting type of stat in form
STAT_CHOICES = (
(STAT_HP, 'HP'),
(STAT_HP_PCT, 'HP %'),
(STAT_ATK, 'ATK'),
(STAT_ATK_PCT, 'ATK %'),
(STAT_DEF, 'DEF'),
(STAT_DEF_PCT, 'DEF %'),
(STAT_SPD, 'SPD'),
(STAT_CRIT_RATE_PCT, 'CRI Rate %'),
(STAT_CRIT_DMG_PCT, 'CRI Dmg %'),
(STAT_RESIST_PCT, 'Resistance %'),
(STAT_ACCURACY_PCT, 'Accuracy %'),
)
# The STAT_DISPLAY is used to construct rune values for display as 'HP: 5%' rather than 'HP %: 5' using
# the built in get_FOO_display() functions
STAT_DISPLAY = {
STAT_HP: 'HP',
STAT_HP_PCT: 'HP',
STAT_ATK: 'ATK',
STAT_ATK_PCT: 'ATK',
STAT_DEF: 'DEF',
STAT_DEF_PCT: 'DEF',
STAT_SPD: 'SPD',
STAT_CRIT_RATE_PCT: 'CRI Rate',
STAT_CRIT_DMG_PCT: 'CRI Dmg',
STAT_RESIST_PCT: 'Resistance',
STAT_ACCURACY_PCT: 'Accuracy',
}
PERCENT_STATS = [
STAT_HP_PCT,
STAT_ATK_PCT,
STAT_DEF_PCT,
STAT_CRIT_RATE_PCT,
STAT_CRIT_DMG_PCT,
STAT_RESIST_PCT,
STAT_ACCURACY_PCT,
]
FLAT_STATS = [
STAT_HP,
STAT_ATK,
STAT_DEF,
STAT_SPD,
]
QUALITY_NORMAL = 0
QUALITY_MAGIC = 1
QUALITY_RARE = 2
QUALITY_HERO = 3
QUALITY_LEGEND = 4
QUALITY_CHOICES = (
(QUALITY_NORMAL, 'Normal'),
(QUALITY_MAGIC, 'Magic'),
(QUALITY_RARE, 'Rare'),
(QUALITY_HERO, 'Hero'),
(QUALITY_LEGEND, 'Legend'),
)
class Rune(models.Model, RuneObjectBase):
MAIN_STAT_VALUES = {
# [stat][stars][level]: value
RuneObjectBase.STAT_HP: {
1: [40, 85, 130, 175, 220, 265, 310, 355, 400, 445, 490, 535, 580, 625, 670, 804],
2: [70, 130, 190, 250, 310, 370, 430, 490, 550, 610, 670, 730, 790, 850, 910, 1092],
3: [100, 175, 250, 325, 400, 475, 550, 625, 700, 775, 850, 925, 1000, 1075, 1150, 1380],
4: [160, 250, 340, 430, 520, 610, 700, 790, 880, 970, 1060, 1150, 1240, 1330, 1420, 1704],
5: [270, 375, 480, 585, 690, 795, 900, 1005, 1110, 1215, 1320, 1425, 1530, 1635, 1740, 2088],
6: [360, 480, 600, 720, 840, 960, 1080, 1200, 1320, 1440, 1560, 1680, 1800, 1920, 2040, 2448],
},
RuneObjectBase.STAT_HP_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 27, 29, 31, 33, 36, 43],
5: [8, 10, 12, 15, 17, 20, 22, 24, 27, 29, 32, 34, 37, 40, 43, 51],
6: [11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 63],
},
RuneObjectBase.STAT_ATK: {
1: [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 54],
2: [5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 73],
3: [7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 77, 92],
4: [10, 16, 22, 28, 34, 40, 46, 52, 58, 64, 70, 76, 82, 88, 94, 112],
5: [15, 22, 29, 36, 43, 50, 57, 64, 71, 78, 85, 92, 99, 106, 113, 135],
6: [22, 30, 38, 46, 54, 62, 70, 78, 86, 94, 102, 110, 118, 126, 134, 160],
},
RuneObjectBase.STAT_ATK_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 27, 29, 31, 33, 36, 43],
5: [8, 10, 12, 15, 17, 20, 22, 24, 27, 29, 32, 34, 37, 40, 43, 51],
6: [11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 63],
},
RuneObjectBase.STAT_DEF: {
1: [3, 6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 54],
2: [5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 73],
3: [7, 12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 77, 92],
4: [10, 16, 22, 28, 34, 40, 46, 52, 58, 64, 70, 76, 82, 88, 94, 112],
5: [15, 22, 29, 36, 43, 50, 57, 64, 71, 78, 85, 92, 99, 106, 113, 135],
6: [22, 30, 38, 46, 54, 62, 70, 78, 86, 94, 102, 110, 118, 126, 134, 160],
},
RuneObjectBase.STAT_DEF_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [5, 7, 9, 11, 13, 16, 18, 20, 22, 24, 27, 29, 31, 33, 36, 43],
5: [8, 10, 12, 15, 17, 20, 22, 24, 27, 29, 32, 34, 37, 40, 43, 51],
6: [11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 63],
},
RuneObjectBase.STAT_SPD: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [3, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 19, 21, 25],
4: [4, 5, 7, 8, 10, 11, 13, 14, 16, 17, 19, 20, 22, 23, 25, 30],
5: [5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 39],
6: [7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 42],
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 37],
4: [4, 6, 8, 11, 13, 15, 17, 19, 22, 24, 26, 28, 30, 33, 35, 41],
5: [5, 7, 10, 12, 15, 17, 19, 22, 24, 27, 29, 31, 34, 36, 39, 47],
6: [7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40, 43, 46, 49, 58],
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
1: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
2: [3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 37],
3: [4, 6, 9, 11, 13, 16, 18, 20, 22, 25, 27, 29, 32, 34, 36, 43],
4: [6, 9, 12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 57],
5: [8, 11, 15, 18, 21, 25, 28, 31, 34, 38, 41, 44, 48, 51, 54, 65],
6: [11, 15, 19, 23, 27, 31, 35, 39, 43, 47, 51, 55, 59, 63, 67, 80],
},
RuneObjectBase.STAT_RESIST_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [6, 8, 10, 13, 15, 17, 19, 21, 24, 26, 28, 30, 32, 35, 37, 44],
5: [9, 11, 14, 16, 19, 21, 23, 26, 28, 31, 33, 35, 38, 40, 43, 51],
6: [12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 64],
},
RuneObjectBase.STAT_ACCURACY_PCT: {
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18],
2: [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 19],
3: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 38],
4: [6, 8, 10, 13, 15, 17, 19, 21, 24, 26, 28, 30, 32, 35, 37, 44],
5: [9, 11, 14, 16, 19, 21, 23, 26, 28, 31, 33, 35, 38, 40, 43, 51],
6: [12, 15, 18, 21, 24, 27, 30, 33, 36, 39, 42, 45, 48, 51, 54, 64],
},
}
MAIN_STATS_BY_SLOT = {
1: [
RuneObjectBase.STAT_ATK,
],
2: [
RuneObjectBase.STAT_ATK,
RuneObjectBase.STAT_ATK_PCT,
RuneObjectBase.STAT_DEF,
RuneObjectBase.STAT_DEF_PCT,
RuneObjectBase.STAT_HP,
RuneObjectBase.STAT_HP_PCT,
RuneObjectBase.STAT_SPD,
],
3: [
RuneObjectBase.STAT_DEF,
],
4: [
RuneObjectBase.STAT_ATK,
RuneObjectBase.STAT_ATK_PCT,
RuneObjectBase.STAT_DEF,
RuneObjectBase.STAT_DEF_PCT,
RuneObjectBase.STAT_HP,
RuneObjectBase.STAT_HP_PCT,
RuneObjectBase.STAT_CRIT_RATE_PCT,
RuneObjectBase.STAT_CRIT_DMG_PCT,
],
5: [
RuneObjectBase.STAT_HP,
],
6: [
RuneObjectBase.STAT_ATK,
RuneObjectBase.STAT_ATK_PCT,
RuneObjectBase.STAT_DEF,
RuneObjectBase.STAT_DEF_PCT,
RuneObjectBase.STAT_HP,
RuneObjectBase.STAT_HP_PCT,
RuneObjectBase.STAT_RESIST_PCT,
RuneObjectBase.STAT_ACCURACY_PCT,
]
}
SUBSTAT_INCREMENTS = {
# [stat][stars]: value
RuneObjectBase.STAT_HP: {
1: 60,
2: 105,
3: 165,
4: 225,
5: 300,
6: 375,
},
RuneObjectBase.STAT_HP_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
RuneObjectBase.STAT_ATK: {
1: 4,
2: 5,
3: 8,
4: 10,
5: 15,
6: 20,
},
RuneObjectBase.STAT_ATK_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
RuneObjectBase.STAT_DEF: {
1: 4,
2: 5,
3: 8,
4: 10,
5: 15,
6: 20,
},
RuneObjectBase.STAT_DEF_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
RuneObjectBase.STAT_SPD: {
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
1: 2,
2: 3,
3: 4,
4: 5,
5: 6,
6: 7,
},
RuneObjectBase.STAT_RESIST_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
RuneObjectBase.STAT_ACCURACY_PCT: {
1: 2,
2: 3,
3: 5,
4: 6,
5: 7,
6: 8,
},
}
INNATE_STAT_TITLES = {
RuneObjectBase.STAT_HP: 'Strong',
RuneObjectBase.STAT_HP_PCT: 'Tenacious',
RuneObjectBase.STAT_ATK: 'Ferocious',
RuneObjectBase.STAT_ATK_PCT: 'Powerful',
RuneObjectBase.STAT_DEF: 'Sturdy',
RuneObjectBase.STAT_DEF_PCT: 'Durable',
RuneObjectBase.STAT_SPD: 'Quick',
RuneObjectBase.STAT_CRIT_RATE_PCT: 'Mortal',
RuneObjectBase.STAT_CRIT_DMG_PCT: 'Cruel',
RuneObjectBase.STAT_RESIST_PCT: 'Resistant',
RuneObjectBase.STAT_ACCURACY_PCT: 'Intricate',
}
RUNE_SET_COUNT_REQUIREMENTS = {
RuneObjectBase.TYPE_ENERGY: 2,
RuneObjectBase.TYPE_FATAL: 4,
RuneObjectBase.TYPE_BLADE: 2,
RuneObjectBase.TYPE_RAGE: 4,
RuneObjectBase.TYPE_SWIFT: 4,
RuneObjectBase.TYPE_FOCUS: 2,
RuneObjectBase.TYPE_GUARD: 2,
RuneObjectBase.TYPE_ENDURE: 2,
RuneObjectBase.TYPE_VIOLENT: 4,
RuneObjectBase.TYPE_WILL: 2,
RuneObjectBase.TYPE_NEMESIS: 2,
RuneObjectBase.TYPE_SHIELD: 2,
RuneObjectBase.TYPE_REVENGE: 2,
RuneObjectBase.TYPE_DESPAIR: 4,
RuneObjectBase.TYPE_VAMPIRE: 4,
RuneObjectBase.TYPE_DESTROY: 2,
RuneObjectBase.TYPE_FIGHT: 2,
RuneObjectBase.TYPE_DETERMINATION: 2,
RuneObjectBase.TYPE_ENHANCE: 2,
RuneObjectBase.TYPE_ACCURACY: 2,
RuneObjectBase.TYPE_TOLERANCE: 2,
}
RUNE_SET_BONUSES = {
RuneObjectBase.TYPE_ENERGY: {
'count': 2,
'stat': RuneObjectBase.STAT_HP_PCT,
'value': 15.0,
'team': False,
'description': '2 Set: HP +15%',
},
RuneObjectBase.TYPE_FATAL: {
'count': 4,
'stat': RuneObjectBase.STAT_ATK_PCT,
'value': 35.0,
'team': False,
'description': '4 Set: Attack Power +35%',
},
RuneObjectBase.TYPE_BLADE: {
'count': 2,
'stat': RuneObjectBase.STAT_CRIT_RATE_PCT,
'value': 12.0,
'team': False,
'description': '2 Set: Critical Rate +12%',
},
RuneObjectBase.TYPE_RAGE: {
'count': 4,
'stat': RuneObjectBase.STAT_CRIT_DMG_PCT,
'value': 40.0,
'team': False,
'description': '4 Set: Critical Damage +40%',
},
RuneObjectBase.TYPE_SWIFT: {
'count': 4,
'stat': RuneObjectBase.STAT_SPD,
'value': 25.0,
'team': False,
'description': '4 Set: Attack Speed +25%',
},
RuneObjectBase.TYPE_FOCUS: {
'count': 2,
'stat': RuneObjectBase.STAT_ACCURACY_PCT,
'value': 20.0,
'team': False,
'description': '2 Set: Accuracy +20%',
},
RuneObjectBase.TYPE_GUARD: {
'count': 2,
'stat': RuneObjectBase.STAT_DEF_PCT,
'value': 15.0,
'team': False,
'description': '2 Set: Defense +15%',
},
RuneObjectBase.TYPE_ENDURE: {
'count': 2,
'stat': RuneObjectBase.STAT_RESIST_PCT,
'value': 20.0,
'team': False,
'description': '2 Set: Resistance +20%',
},
RuneObjectBase.TYPE_VIOLENT: {
'count': 4,
'stat': None,
'value': None,
'team': False,
'description': '4 Set: Get Extra Turn +22%',
},
RuneObjectBase.TYPE_WILL: {
'count': 2,
'stat': None,
'value': None,
'team': False,
'description': '2 Set: Immunity +1 turn',
},
RuneObjectBase.TYPE_NEMESIS: {
'count': 2,
'stat': None,
'value': None,
'team': False,
'description': '2 Set: ATK Gauge +4% (for every 7% HP lost)',
},
RuneObjectBase.TYPE_SHIELD: {
'count': 2,
'stat': None,
'value': None,
'team': True,
'description': '2 Set: Ally Shield 3 turns (15% of HP)',
},
RuneObjectBase.TYPE_REVENGE: {
'count': 2,
'stat': None,
'value': None,
'team': False,
'description': '2 Set: Counterattack +15%',
},
RuneObjectBase.TYPE_DESPAIR: {
'count': 4,
'stat': None,
'value': None,
'team': False,
'description': '4 Set: Stun Rate +25%',
},
RuneObjectBase.TYPE_VAMPIRE: {
'count': 4,
'stat': None,
'value': None,
'team': False,
'description': '4 Set: Life Drain +35%',
},
RuneObjectBase.TYPE_DESTROY: {
'count': 2,
'stat': None,
'value': None,
'team': False,
'description': "2 Set: 30% of the damage dealt will reduce up to 4% of the enemy's Max HP",
},
RuneObjectBase.TYPE_FIGHT: {
'count': 2,
'stat': RuneObjectBase.STAT_ATK,
'value': 7.0,
'team': True,
'description': '2 Set: Increase the Attack Power of all allies by 7%',
},
RuneObjectBase.TYPE_DETERMINATION: {
'count': 2,
'stat': RuneObjectBase.STAT_DEF,
'value': 7.0,
'team': True,
'description': '2 Set: Increase the Defense of all allies by 7%',
},
RuneObjectBase.TYPE_ENHANCE: {
'count': 2,
'stat': RuneObjectBase.STAT_HP,
'value': 7.0,
'team': True,
'description': '2 Set: Increase the HP of all allies by 7%',
},
RuneObjectBase.TYPE_ACCURACY: {
'count': 2,
'stat': RuneObjectBase.STAT_ACCURACY_PCT,
'value': 10.0,
'team': True,
'description': '2 Set: Increase the Accuracy of all allies by 10%',
},
RuneObjectBase.TYPE_TOLERANCE: {
'count': 2,
'stat': RuneObjectBase.STAT_RESIST_PCT,
'value': 10.0,
'team': True,
'description': '2 Set: Increase the Resistance of all allies by 10%',
},
}
type = models.IntegerField(choices=RuneObjectBase.TYPE_CHOICES)
stars = models.IntegerField()
level = models.IntegerField()
slot = models.IntegerField()
quality = models.IntegerField(default=0, choices=RuneObjectBase.QUALITY_CHOICES)
original_quality = models.IntegerField(choices=RuneObjectBase.QUALITY_CHOICES, blank=True, null=True)
value = models.IntegerField(blank=True, null=True)
main_stat = models.IntegerField(choices=RuneObjectBase.STAT_CHOICES)
main_stat_value = models.IntegerField()
innate_stat = models.IntegerField(choices=RuneObjectBase.STAT_CHOICES, null=True, blank=True)
innate_stat_value = models.IntegerField(null=True, blank=True)
substats = ArrayField(
models.IntegerField(choices=RuneObjectBase.STAT_CHOICES, null=True, blank=True),
size=4,
default=list,
)
substat_values = ArrayField(
models.IntegerField(blank=True, null=True),
size=4,
default=list,
)
# The following fields exist purely to allow easier filtering and are updated on model save
has_hp = models.BooleanField(default=False)
has_atk = models.BooleanField(default=False)
has_def = models.BooleanField(default=False)
has_crit_rate = models.BooleanField(default=False)
has_crit_dmg = models.BooleanField(default=False)
has_speed = models.BooleanField(default=False)
has_resist = models.BooleanField(default=False)
has_accuracy = models.BooleanField(default=False)
efficiency = models.FloatField(blank=True, null=True)
max_efficiency = models.FloatField(blank=True, null=True)
substat_upgrades_remaining = models.IntegerField(blank=True, null=True)
class Meta:
abstract = True
def get_main_stat_rune_display(self):
return RuneObjectBase.STAT_DISPLAY.get(self.main_stat, '')
def get_innate_stat_rune_display(self):
return RuneObjectBase.STAT_DISPLAY.get(self.innate_stat, '')
def get_substat_rune_display(self, idx):
if len(self.substats) > idx:
return RuneObjectBase.STAT_DISPLAY.get(self.substats[idx], '')
else:
return ''
def get_stat(self, stat_type, sub_stats_only=False):
if self.main_stat == stat_type and not sub_stats_only:
return self.main_stat_value
elif self.innate_stat == stat_type and not sub_stats_only:
return self.innate_stat_value
else:
for idx, substat in enumerate(self.substats):
if substat == stat_type:
return self.substat_values[idx]
return 0
@property
def substat_upgrades_received(self):
return int(floor(min(self.level, 12) / 3) + 1)
def get_efficiency(self):
# https://www.youtube.com/watch?v=SBWeptNNbYc
# All runes are compared against max stat values for perfect 6* runes.
# Main stat efficiency
running_sum = float(self.MAIN_STAT_VALUES[self.main_stat][self.stars][15]) / float(self.MAIN_STAT_VALUES[self.main_stat][6][15])
# Substat efficiencies
if self.innate_stat is not None:
running_sum += self.innate_stat_value / float(self.SUBSTAT_INCREMENTS[self.innate_stat][6] * 5)
for substat, value in zip(self.substats, self.substat_values):
running_sum += value / float(self.SUBSTAT_INCREMENTS[substat][6] * 5)
return running_sum / 2.8 * 100
def update_fields(self):
# Set filterable fields
rune_stat_types = [self.main_stat, self.innate_stat] + self.substats
self.has_hp = any([i for i in rune_stat_types if i in [self.STAT_HP, self.STAT_HP_PCT]])
self.has_atk = any([i for i in rune_stat_types if i in [self.STAT_ATK, self.STAT_ATK_PCT]])
self.has_def = any([i for i in rune_stat_types if i in [self.STAT_DEF, self.STAT_DEF_PCT]])
self.has_crit_rate = self.STAT_CRIT_RATE_PCT in rune_stat_types
self.has_crit_dmg = self.STAT_CRIT_DMG_PCT in rune_stat_types
self.has_speed = self.STAT_SPD in rune_stat_types
self.has_resist = self.STAT_RESIST_PCT in rune_stat_types
self.has_accuracy = self.STAT_ACCURACY_PCT in rune_stat_types
self.quality = len([substat for substat in self.substats if substat])
self.substat_upgrades_remaining = 5 - self.substat_upgrades_received
self.efficiency = self.get_efficiency()
self.max_efficiency = self.efficiency + max(ceil((12 - self.level) / 3.0), 0) * 0.2 / 2.8 * 100
# Cap stat values to appropriate value
# Very old runes can have different values, but never higher than the cap
if self.main_stat_value:
self.main_stat_value = min(self.MAIN_STAT_VALUES[self.main_stat][self.stars][15], self.main_stat_value)
else:
self.main_stat_value = self.MAIN_STAT_VALUES[self.main_stat][self.stars][self.level]
if self.innate_stat and self.innate_stat_value > self.SUBSTAT_INCREMENTS[self.innate_stat][self.stars]:
self.innate_stat_value = self.SUBSTAT_INCREMENTS[self.innate_stat][self.stars]
for idx, substat in enumerate(self.substats):
max_sub_value = self.SUBSTAT_INCREMENTS[substat][self.stars] * self.substat_upgrades_received
if self.substat_values[idx] > max_sub_value:
self.substat_values[idx] = max_sub_value
def clean(self):
# Check slot, level, etc for valid ranges
if self.level is None or self.level < 0 or self.level > 15:
raise ValidationError({
'level': ValidationError(
'Level must be 0 through 15.',
code='invalid_rune_level',
)
})
if self.stars is None or (self.stars < 1 or self.stars > 6):
raise ValidationError({
'stars': ValidationError(
'Stars must be between 1 and 6.',
code='invalid_rune_stars',
)
})
if self.slot is not None:
if self.slot < 1 or self.slot > 6:
raise ValidationError({
'slot': ValidationError(
'Slot must be 1 through 6.',
code='invalid_rune_slot',
)
})
# Do slot vs stat check
if self.main_stat not in self.MAIN_STATS_BY_SLOT[self.slot]:
raise ValidationError({
'main_stat': ValidationError(
'Unacceptable stat for slot %(slot)s. Must be %(valid_stats)s.',
params={
'slot': self.slot,
'valid_stats': ', '.join([RuneObjectBase.STAT_CHOICES[stat - 1][1] for stat in self.MAIN_STATS_BY_SLOT[self.slot]])
},
code='invalid_rune_main_stat'
),
})
# Check that the same stat type was not used multiple times
stat_list = list(filter(
partial(is_not, None),
[self.main_stat, self.innate_stat] + self.substats
))
if len(stat_list) != len(set(stat_list)):
raise ValidationError(
'All stats and substats must be unique.',
code='duplicate_stats'
)
# Check if stat type was specified that it has value > 0
if self.main_stat_value is None:
raise ValidationError({
'main_stat_value': ValidationError(
'Missing main stat value.',
code='main_stat_missing_value',
)
})
max_main_stat_value = self.MAIN_STAT_VALUES[self.main_stat][self.stars][self.level]
if self.main_stat_value > max_main_stat_value:
raise ValidationError(
                f'Main stat value for {self.get_main_stat_display()} at {self.stars}* lv. {self.level} must be less than or equal to {max_main_stat_value}',
code='main_stat_value_invalid',
)
if self.innate_stat is not None:
if self.innate_stat_value is None or self.innate_stat_value <= 0:
raise ValidationError({
'innate_stat_value': ValidationError(
'Must be greater than 0.',
code='invalid_rune_innate_stat_value'
)
})
max_sub_value = self.SUBSTAT_INCREMENTS[self.innate_stat][self.stars]
if self.innate_stat_value > max_sub_value:
raise ValidationError({
'innate_stat_value': ValidationError(
'Must be less than or equal to ' + str(max_sub_value) + '.',
code='invalid_rune_innate_stat_value'
)
})
for substat, value in zip(self.substats, self.substat_values):
if value is None or value <= 0:
raise ValidationError({
                    'substat_values': ValidationError(
'Must be greater than 0.',
                        code='invalid_rune_substat_values'
)
})
max_sub_value = self.SUBSTAT_INCREMENTS[substat][self.stars] * self.substat_upgrades_received
if value > max_sub_value:
raise ValidationError({
                    'substat_values': ValidationError(
'Must be less than or equal to ' + str(max_sub_value) + '.',
                        code='invalid_rune_substat_value'
)
})
class RuneCraft(RuneObjectBase):
CRAFT_GRINDSTONE = 0
CRAFT_ENCHANT_GEM = 1
CRAFT_IMMEMORIAL_GRINDSTONE = 2
CRAFT_IMMEMORIAL_GEM = 3
CRAFT_CHOICES = (
(CRAFT_GRINDSTONE, 'Grindstone'),
(CRAFT_ENCHANT_GEM, 'Enchant Gem'),
(CRAFT_IMMEMORIAL_GRINDSTONE, 'Immemorial Grindstone'),
(CRAFT_IMMEMORIAL_GEM, 'Immemorial Gem'),
)
CRAFT_ENCHANT_GEMS = [
CRAFT_ENCHANT_GEM,
CRAFT_IMMEMORIAL_GEM,
]
CRAFT_GRINDSTONES = [
CRAFT_GRINDSTONE,
CRAFT_IMMEMORIAL_GRINDSTONE,
]
# Type > Stat > Quality > Min/Max
CRAFT_VALUE_RANGES = {
CRAFT_GRINDSTONE: {
RuneObjectBase.STAT_HP: {
RuneObjectBase.QUALITY_NORMAL: {'min': 80, 'max': 120},
RuneObjectBase.QUALITY_MAGIC: {'min': 100, 'max': 200},
RuneObjectBase.QUALITY_RARE: {'min': 180, 'max': 250},
RuneObjectBase.QUALITY_HERO: {'min': 230, 'max': 450},
RuneObjectBase.QUALITY_LEGEND: {'min': 430, 'max': 550},
},
RuneObjectBase.STAT_HP_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 10},
},
RuneObjectBase.STAT_ATK: {
RuneObjectBase.QUALITY_NORMAL: {'min': 4, 'max': 8},
RuneObjectBase.QUALITY_MAGIC: {'min': 6, 'max': 12},
RuneObjectBase.QUALITY_RARE: {'min': 10, 'max': 18},
RuneObjectBase.QUALITY_HERO: {'min': 12, 'max': 22},
RuneObjectBase.QUALITY_LEGEND: {'min': 18, 'max': 30},
},
RuneObjectBase.STAT_ATK_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 10},
},
RuneObjectBase.STAT_DEF: {
RuneObjectBase.QUALITY_NORMAL: {'min': 4, 'max': 8},
RuneObjectBase.QUALITY_MAGIC: {'min': 6, 'max': 12},
RuneObjectBase.QUALITY_RARE: {'min': 10, 'max': 18},
RuneObjectBase.QUALITY_HERO: {'min': 12, 'max': 22},
RuneObjectBase.QUALITY_LEGEND: {'min': 18, 'max': 30},
},
RuneObjectBase.STAT_DEF_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 5, 'max': 10},
},
RuneObjectBase.STAT_SPD: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_MAGIC: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 3},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 4},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 5},
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 2},
RuneObjectBase.QUALITY_MAGIC: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 6},
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 7},
},
RuneObjectBase.STAT_RESIST_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 8},
},
RuneObjectBase.STAT_ACCURACY_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 2, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 4, 'max': 8},
},
},
CRAFT_ENCHANT_GEM: {
RuneObjectBase.STAT_HP: {
RuneObjectBase.QUALITY_NORMAL: {'min': 100, 'max': 150},
RuneObjectBase.QUALITY_MAGIC: {'min': 130, 'max': 220},
RuneObjectBase.QUALITY_RARE: {'min': 200, 'max': 310},
RuneObjectBase.QUALITY_HERO: {'min': 290, 'max': 420},
RuneObjectBase.QUALITY_LEGEND: {'min': 400, 'max': 580},
},
RuneObjectBase.STAT_HP_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 13},
},
RuneObjectBase.STAT_ATK: {
RuneObjectBase.QUALITY_NORMAL: {'min': 8, 'max': 12},
RuneObjectBase.QUALITY_MAGIC: {'min': 10, 'max': 16},
RuneObjectBase.QUALITY_RARE: {'min': 15, 'max': 23},
RuneObjectBase.QUALITY_HERO: {'min': 20, 'max': 30},
RuneObjectBase.QUALITY_LEGEND: {'min': 28, 'max': 40},
},
RuneObjectBase.STAT_ATK_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 13},
},
RuneObjectBase.STAT_DEF: {
RuneObjectBase.QUALITY_NORMAL: {'min': 8, 'max': 12},
RuneObjectBase.QUALITY_MAGIC: {'min': 10, 'max': 16},
RuneObjectBase.QUALITY_RARE: {'min': 15, 'max': 23},
RuneObjectBase.QUALITY_HERO: {'min': 20, 'max': 30},
RuneObjectBase.QUALITY_LEGEND: {'min': 28, 'max': 40},
},
RuneObjectBase.STAT_DEF_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 7},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 9},
RuneObjectBase.QUALITY_HERO: {'min': 7, 'max': 11},
RuneObjectBase.QUALITY_LEGEND: {'min': 9, 'max': 13},
},
RuneObjectBase.STAT_SPD: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_LEGEND: {'min': 7, 'max': 10},
},
RuneObjectBase.STAT_CRIT_RATE_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 1, 'max': 3},
RuneObjectBase.QUALITY_MAGIC: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_RARE: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_HERO: {'min': 4, 'max': 7},
RuneObjectBase.QUALITY_LEGEND: {'min': 6, 'max': 9},
},
RuneObjectBase.STAT_CRIT_DMG_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 5},
RuneObjectBase.QUALITY_RARE: {'min': 4, 'max': 6},
RuneObjectBase.QUALITY_HERO: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_LEGEND: {'min': 7, 'max': 10},
},
RuneObjectBase.STAT_RESIST_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 6, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 8, 'max': 11},
},
RuneObjectBase.STAT_ACCURACY_PCT: {
RuneObjectBase.QUALITY_NORMAL: {'min': 2, 'max': 4},
RuneObjectBase.QUALITY_MAGIC: {'min': 3, 'max': 6},
RuneObjectBase.QUALITY_RARE: {'min': 5, 'max': 8},
RuneObjectBase.QUALITY_HERO: {'min': 6, 'max': 9},
RuneObjectBase.QUALITY_LEGEND: {'min': 8, 'max': 11},
},
}
}
CRAFT_VALUE_RANGES[CRAFT_IMMEMORIAL_GEM] = CRAFT_VALUE_RANGES[CRAFT_ENCHANT_GEM]
CRAFT_VALUE_RANGES[CRAFT_IMMEMORIAL_GRINDSTONE] = CRAFT_VALUE_RANGES[CRAFT_GRINDSTONE]
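    # Illustrative lookup (values taken from the table above):
    #   CRAFT_VALUE_RANGES[CRAFT_GRINDSTONE][RuneObjectBase.STAT_HP][RuneObjectBase.QUALITY_LEGEND]
    #   -> {'min': 430, 'max': 550}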
class Dungeon(models.Model):
CATEGORY_SCENARIO = 0
CATEGORY_RUNE_DUNGEON = 1
CATEGORY_ESSENCE_DUNGEON = 2
CATEGORY_OTHER_DUNGEON = 3
CATEGORY_RAID = 4
CATEGORY_HALL_OF_HEROES = 5
CATEGORY_CHOICES = [
(CATEGORY_SCENARIO, 'Scenarios'),
(CATEGORY_RUNE_DUNGEON, 'Rune Dungeons'),
(CATEGORY_ESSENCE_DUNGEON, 'Elemental Dungeons'),
(CATEGORY_OTHER_DUNGEON, 'Other Dungeons'),
(CATEGORY_RAID, 'Raids'),
(CATEGORY_HALL_OF_HEROES, 'Hall of Heroes'),
]
id = models.IntegerField(primary_key=True)
name = models.CharField(max_length=100)
max_floors = models.IntegerField(default=10)
slug = models.SlugField(blank=True, null=True)
category = models.IntegerField(choices=CATEGORY_CHOICES, blank=True, null=True)
# TODO: Remove following fields when Level model is fully utilized everywhere: energy_cost, xp, monster_slots
# For the following fields:
# Outer array index is difficulty (normal, hard, hell). Inner array index is the stage/floor
# Example: Hell B2 is dungeon.energy_cost[RunLog.DIFFICULTY_HELL][1]
energy_cost = ArrayField(ArrayField(models.IntegerField(blank=True, null=True)), blank=True, null=True)
xp = ArrayField(ArrayField(models.IntegerField(blank=True, null=True)), blank=True, null=True)
monster_slots = ArrayField(ArrayField(models.IntegerField(blank=True, null=True)), blank=True, null=True)
class Meta:
ordering = ['id', ]
def __str__(self):
return self.name
def save(self, *args, **kwargs):
self.slug = slugify(self.name)
super(Dungeon, self).save(*args, **kwargs)
class Level(models.Model):
DIFFICULTY_NORMAL = 1
DIFFICULTY_HARD = 2
DIFFICULTY_HELL = 3
DIFFICULTY_CHOICES = (
(DIFFICULTY_NORMAL, 'Normal'),
(DIFFICULTY_HARD, 'Hard'),
(DIFFICULTY_HELL, 'Hell'),
)
dungeon = models.ForeignKey(Dungeon, on_delete=models.CASCADE)
floor = models.IntegerField()
difficulty = models.IntegerField(choices=DIFFICULTY_CHOICES, blank=True, null=True)
energy_cost = models.IntegerField(blank=True, null=True, help_text='Energy cost to start a run')
xp = models.IntegerField(blank=True, null=True, help_text='XP gained by fully clearing the level')
frontline_slots = models.IntegerField(
default=5,
help_text='Serves as general slots if dungeon does not have front/back lines'
)
backline_slots = models.IntegerField(blank=True, null=True, help_text='Leave null for normal dungeons')
max_slots = models.IntegerField(
blank=True,
null=True,
help_text='Maximum monsters combined front/backline. Not required if backline not specified.'
)
class Meta:
ordering = ('difficulty', 'floor')
unique_together = ('dungeon', 'floor', 'difficulty')
def __str__(self):
return f'{self.dungeon_id} {self.floor} - {self.get_difficulty_display()}'
class GuideBase(models.Model):
short_text = models.TextField(blank=True, default='')
long_text = models.TextField(blank=True, default='')
last_updated = models.DateTimeField(auto_now=True)
edited_by = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL, editable=False)
class Meta:
abstract = True
class MonsterGuide(GuideBase):
monster = models.OneToOneField(Monster, on_delete=models.CASCADE)
def __str__(self):
return f'Monster Guide - {self.monster}'
class Meta:
ordering = ['monster__name']
| apache-2.0 | -5,660,210,679,202,290,000 | 39.546917 | 188 | 0.562391 | false | 3.259061 | false | false | false |
akrause2014/dispel4py | dispel4py/new/mpi_process_test.py | 1 | 1115 | # Copyright (c) The University of Edinburgh 2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dispel4py.new.mpi_process import process
from dispel4py.workflow_graph import WorkflowGraph
from dispel4py.examples.graph_testing.testing_PEs import TestProducer, TestOneInOneOut
from mpi4py import MPI
comm=MPI.COMM_WORLD
rank=comm.Get_rank()
size=comm.Get_size()
prod = TestProducer()
cons1 = TestOneInOneOut()
cons2 = TestOneInOneOut()
graph = WorkflowGraph()
graph.connect(prod, 'output', cons1, 'input')
graph.connect(cons1, 'output', cons2, 'input')
process(graph, { prod : [ {}, {}, {} ] } )
| apache-2.0 | 5,902,654,700,553,839,000 | 32.787879 | 86 | 0.747982 | false | 3.506289 | true | false | false |
dbdd4us/compose | compose/utils.py | 1 | 3235 | from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
import hashlib
import json
import json.decoder
import logging
import ntpath
import six
from .errors import StreamParseError
json_decoder = json.JSONDecoder()
log = logging.getLogger(__name__)
def get_output_stream(stream):
if six.PY3:
return stream
return codecs.getwriter('utf-8')(stream)
def stream_as_text(stream):
"""Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once docker-py returns text streams instead
of byte streams.
"""
for data in stream:
if not isinstance(data, six.text_type):
data = data.decode('utf-8', 'replace')
yield data
def line_splitter(buffer, separator=u'\n'):
index = buffer.find(six.text_type(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = six.text_type('')
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
log.error(
'Compose tried decoding the following data chunk, but failed:'
'\n%s' % repr(buffered)
)
raise StreamParseError(e)
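# Illustrative note: with the default line_splitter, chunks are re-joined and
# re-split on newlines, keeping each trailing separator, e.g.
#   list(split_buffer(iter(['ab\ncd', 'ef\n']))) == ['ab\n', 'cdef\n']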
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
return obj, rest
except ValueError:
return None
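# Illustrative note: json_splitter('{"a": 1} {"b":') == ({'a': 1}, '{"b":'),
# while a buffer holding no complete object (e.g. '{"a":') returns None.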
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def json_hash(obj):
dump = json.dumps(obj, sort_keys=True, separators=(',', ':'))
h = hashlib.sha256()
h.update(dump.encode('utf8'))
return h.hexdigest()
def microseconds_from_time_nano(time_nano):
return int(time_nano % 1000000000 / 1000)
def build_string_dict(source_dict):
return dict((k, str(v if v is not None else '')) for k, v in source_dict.items())
def splitdrive(path):
if len(path) == 0:
return ('', '')
if path[0] in ['.', '\\', '/', '~']:
return ('', path)
return ntpath.splitdrive(path)
| apache-2.0 | -2,486,481,074,851,938,300 | 26.184874 | 85 | 0.636785 | false | 3.998764 | false | false | false |
javierwilson/forocacao | forocacao/app/png.py | 1 | 4177 | # -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont
import textwrap
from unidecode import unidecode
from reportlab.graphics import renderPM
from reportlab.graphics.barcode import code128
from reportlab.graphics.barcode import createBarcodeDrawing
from reportlab.graphics.barcode import createBarcodeImageInMemory
from reportlab.graphics.shapes import Drawing
from django.conf import settings
def get_barcode(value, width, humanReadable = True):
#value = value.encode('ascii', 'ignore')
value = unidecode(value)
barcode = createBarcodeDrawing('Code128', value = value, humanReadable = humanReadable, fontSize = 8)
drawing_width = width
barcode_scale = drawing_width / barcode.width
drawing_height = barcode.height * barcode_scale
drawing = Drawing(drawing_width, drawing_height)
drawing.scale(barcode_scale, barcode_scale)
drawing.add(barcode, name='barcode')
return drawing
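# Illustrative usage sketch (not part of the original module; values are made
# up): the returned drawing can be rasterised with reportlab, e.g.
#   barcode = get_barcode('123: John Doe', 386)
#   barcode_image = renderPM.drawToPIL(barcode)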
def createPNG(participant, where):
event = participant.event
badge_size_x = event.badge_size_x or 390
badge_size_y = event.badge_size_y or 260
badge_color = event.badge_color or "#FFFFFF"
image_file = settings.MEDIA_ROOT + '/gafete390x260.png'
img = Image.open(image_file)
#img = Image.new('RGBA', (badge_size_x, badge_size_y), badge_color)
draw = ImageDraw.Draw(img)
draw.rectangle(((0,0),(badge_size_x-1, badge_size_y-1)), outline = "black")
if (len(participant.last_name) + len(participant.first_name) > 20):
last_name = participant.last_name.partition(' ')[0] if len(participant.last_name) > 12 else participant.last_name
first_name = participant.first_name.partition(' ')[0] if len(participant.first_name) >= 12 else participant.first_name
else:
last_name = participant.last_name
first_name = participant.first_name
match = {
'event': event.name,
#'name': "%s %s" % (participant.first_name, participant.last_name ),
#'name': "%s %s" % (participant.first_name.partition(' ')[0], participant.last_name.partition(' ')[0]),
'name': "%s %s" % (first_name, last_name),
'first_name': participant.first_name,
'last_name': participant.last_name,
'profession': participant.profession,
'organization': participant.organization,
'country': participant.country.name,
'type': participant.type,
'email': participant.email,
}
for field in event.eventbadge_set.all():
x = field.x
y = field.y
size = field.size
if field.field == 'logo':
if participant.event.logo:
logo = Image.open(participant.event.logo.file.file)
logo.thumbnail((size,size))
img.paste(logo, (x,y))
elif field.field == 'photo':
if participant.photo:
photo = Image.open(participant.photo)
photo.thumbnail((size,size))
img.paste(photo, (x,y))
else:
if field.field == 'text':
content = field.format
else:
content = match[field.field]
fnt = ImageFont.truetype(field.font.filename, size)
color = field.color
text = ("%s") % (content)
textsize = draw.textsize(text, font=fnt)
if textsize[0]+x < badge_size_x:
draw.text((x,y), ("%s") % (content), font=fnt, fill=color)
else:
# calculate maximum size in characters
max_chars = (badge_size_x-(x*2)) * len(text) / textsize[0]
lines = textwrap.fill(text, max_chars).splitlines()
tmp = y
for line in lines:
draw.text((x,y), line, font=fnt, fill=color)
y += size
y = tmp
# FIXME: NO barcode
#short_full_name = "%s: %s" % (participant.id, participant.short_full_name())
#barcode = get_barcode(short_full_name, badge_size_x-4)
#barcode_image = renderPM.drawToPIL(barcode)
#img.paste(barcode_image, (0+2, badge_size_y-70))
img.save(where, "PNG")
| bsd-3-clause | -7,824,533,436,109,494,000 | 38.40566 | 126 | 0.603304 | false | 3.689929 | false | false | false |
mhahn/stacker | stacker/lookups/registry.py | 1 | 1745 | from ..exceptions import UnknownLookupType
from ..util import load_object_from_string
from .handlers import output
from .handlers import kms
from .handlers import xref
from .handlers import file as file_handler
LOOKUP_HANDLERS = {}
DEFAULT_LOOKUP = output.TYPE_NAME
def register_lookup_handler(lookup_type, handler_or_path):
"""Register a lookup handler.
Args:
lookup_type (str): Name to register the handler under
handler_or_path (OneOf[func, str]): a function or a path to a handler
"""
handler = handler_or_path
if isinstance(handler_or_path, basestring):
handler = load_object_from_string(handler_or_path)
LOOKUP_HANDLERS[lookup_type] = handler
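# Illustrative note: a handler can be registered either as a callable or as a
# dotted import path resolved by load_object_from_string, e.g.
#   register_lookup_handler('custom', 'mypackage.lookups.custom.handler')
# where 'mypackage.lookups.custom.handler' is a hypothetical path, not part of
# stacker itself.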
def resolve_lookups(lookups, context, provider):
"""Resolve a set of lookups.
Args:
lookups (list of :class:`stacker.lookups.Lookup`): a list of stacker
lookups to resolve
context (:class:`stacker.context.Context`): stacker context
provider (:class:`stacker.provider.base.BaseProvider`): subclass of the
base provider
Returns:
dict: dict of Lookup -> resolved value
"""
resolved_lookups = {}
for lookup in lookups:
try:
handler = LOOKUP_HANDLERS[lookup.type]
except KeyError:
raise UnknownLookupType(lookup)
resolved_lookups[lookup] = handler(
value=lookup.input,
context=context,
provider=provider,
)
return resolved_lookups
register_lookup_handler(output.TYPE_NAME, output.handler)
register_lookup_handler(kms.TYPE_NAME, kms.handler)
register_lookup_handler(xref.TYPE_NAME, xref.handler)
register_lookup_handler(file_handler.TYPE_NAME, file_handler.handler)
| bsd-2-clause | -1,027,917,854,892,513,400 | 29.614035 | 79 | 0.679083 | false | 4.011494 | false | false | false |
charlesfleche/charlesfleche.net | fabfile.py | 1 | 3663 | from fabric.api import *
import fabric.contrib.project as project
import http.server
import os
import shutil
import sys
import socketserver
# Local path configuration (can be absolute or relative to fabfile)
env.deploy_path = 'output'
DEPLOY_PATH = env.deploy_path
# Remote server configuration
production = '[email protected]'
dest_path = '/var/www/charlesfleche.net'
nginx_site_path = '/etc/nginx/sites-available/charlesfleche.net'
icons_root = 'themes/charlesfleche/static'
css_root = 'themes/charlesfleche/static/css'
# Rackspace Cloud Files configuration settings
env.cloudfiles_username = 'my_rackspace_username'
env.cloudfiles_api_key = 'my_rackspace_api_key'
env.cloudfiles_container = 'my_cloudfiles_container'
# Github Pages configuration
env.github_pages_branch = "gh-pages"
# Port for `serve`
PORT = 8000
def goaccess():
"""Create goaccess realtime web report"""
    local('''ssh {} 'tail -n +1 -f /var/log/nginx/blog.access.log' | goaccess -o /tmp/report.html --log-format=COMBINED --real-time-html --geoip-database GeoLite2-Country.mmdb -a -'''.format(production))
def clean():
"""Remove generated files"""
if os.path.isdir(DEPLOY_PATH):
shutil.rmtree(DEPLOY_PATH)
os.makedirs(DEPLOY_PATH)
def build():
"""Build local version of site"""
local('pelican -s pelicanconf.py')
def build_icons():
"""Build icons"""
local('inkscape -z -e /tmp/favicon.png -w 64 -h 64 logo.svg')
local('cp logo.svg {}'.format(icons_root))
local('convert /tmp/favicon.png {}/favicon.ico'.format(icons_root))
local('inkscape -z -e {}/icon.png -w 192 -h 192 logo.svg'.format(icons_root))
local('inkscape -z -e {}/tile.png -w 558 -h 558 logo.svg'.format(icons_root))
local('inkscape -z -e {}/tile-wide.png -w 558 -h 270 --export-area=-5:0:15:10 logo.svg'.format(icons_root))
def copy_fonts():
'''Copy icomoon fonts to theme folder'''
local('cp icomoon/style.css {}/fonts.css'.format(css_root))
local('cp -r icomoon/fonts {}'.format(css_root))
def rebuild():
"""`build` with the delete switch"""
local('pelican -d -s pelicanconf.py')
def regenerate():
"""Automatically regenerate site upon file modification"""
local('pelican -r -s pelicanconf.py')
def serve():
"""Serve site at http://localhost:8000/"""
os.chdir(env.deploy_path)
with http.server.HTTPServer(("", PORT), http.server.SimpleHTTPRequestHandler) as httpd:
print("Serving at port", PORT)
httpd.serve_forever()
def reserve():
"""`build`, then `serve`"""
build()
serve()
def preview():
"""Build production version of site"""
local('pelican -s publishconf.py')
def cf_upload():
"""Publish to Rackspace Cloud Files"""
rebuild()
with lcd(DEPLOY_PATH):
local('swift -v -A https://auth.api.rackspacecloud.com/v1.0 '
'-U {cloudfiles_username} '
'-K {cloudfiles_api_key} '
'upload -c {cloudfiles_container} .'.format(**env))
@hosts(production)
def publish():
"""Publish to production via rsync"""
local('pelican -s publishconf.py')
project.rsync_project(
remote_dir=dest_path,
exclude=['.DS_Store', 'Articles', '.webassets-cache'],
local_dir=DEPLOY_PATH.rstrip('/') + '/',
delete=True,
extra_opts='-c',
)
@hosts(production)
def publish_nginx():
put('nginx.site', nginx_site_path, use_sudo=True)
@hosts(production)
def reload_nginx():
sudo('sudo systemctl reload nginx')
def gh_pages():
"""Publish to GitHub Pages"""
rebuild()
local("ghp-import -b {github_pages_branch} {deploy_path} -p".format(**env))
| mit | -9,078,609,407,775,656,000 | 30.577586 | 223 | 0.663118 | false | 3.273458 | false | false | false |
cylc/cylc | cylc/flow/broadcast_report.py | 1 | 3589 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Provide a function to report modification to broadcast settings."""
BAD_OPTIONS_FMT = "\n --%s=%s"
BAD_OPTIONS_TITLE = "No broadcast to cancel/clear for these options:"
BAD_OPTIONS_TITLE_SET = ("Rejected broadcast: settings are not"
" compatible with the suite")
CHANGE_FMT = "\n%(change)s [%(namespace)s.%(point)s] %(key)s=%(value)s"
CHANGE_PREFIX_CANCEL = "-"
CHANGE_PREFIX_SET = "+"
CHANGE_TITLE_CANCEL = "Broadcast cancelled:"
CHANGE_TITLE_SET = "Broadcast set:"
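# Illustrative note (example values only): a modified setting
# ('1', 'foo', {'environment': {'FOO': 'bar'}}) is rendered via CHANGE_FMT as
# '\n+ [foo.1] [environment]FOO=bar' by the helpers below.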
def get_broadcast_bad_options_report(bad_options, is_set=False):
"""Return a string to report bad options for broadcast cancel/clear."""
if not bad_options:
return None
if is_set:
msg = BAD_OPTIONS_TITLE_SET
else:
msg = BAD_OPTIONS_TITLE
for key, values in sorted(bad_options.items()):
for value in values:
            if isinstance(value, (tuple, list)):
                value_str = ""
                # use a separate list so the outer "values" is not rebound
                parts = list(value)
                while parts:
                    val = parts.pop(0)
                    if parts:
                        value_str += "[" + val + "]"
                    else:
                        value_str += val
else:
value_str = value
msg += BAD_OPTIONS_FMT % (key, value_str)
return msg
def get_broadcast_change_iter(modified_settings, is_cancel=False):
"""Return an iterator of broadcast changes.
Each broadcast change is a dict with keys:
change, point, namespace, key, value
"""
if not modified_settings:
return
if is_cancel:
change = CHANGE_PREFIX_CANCEL
else:
change = CHANGE_PREFIX_SET
for modified_setting in sorted(modified_settings,
key=lambda x: (x[0], x[1])):
# sorted by (point, namespace)
point, namespace, setting = modified_setting
value = setting
keys_str = ""
while isinstance(value, dict):
key, value = list(value.items())[0]
if isinstance(value, dict):
keys_str += "[" + key + "]"
else:
keys_str += key
yield {
"change": change,
"point": point,
"namespace": namespace,
"key": keys_str,
"value": str(value)}
def get_broadcast_change_report(modified_settings, is_cancel=False):
"""Return a string for reporting modification to broadcast settings."""
if not modified_settings:
return ""
if is_cancel:
msg = CHANGE_TITLE_CANCEL
else:
msg = CHANGE_TITLE_SET
for broadcast_change in get_broadcast_change_iter(
modified_settings, is_cancel):
msg += CHANGE_FMT % broadcast_change
return msg
| gpl-3.0 | -5,435,023,536,567,714,000 | 35.252525 | 75 | 0.593201 | false | 4.173256 | false | false | false |
dceoy/fractus | fract/model/ewma.py | 1 | 1996 | #!/usr/bin/env python
import logging
import numpy as np
from .sieve import LRFeatureSieve
class Ewma(object):
def __init__(self, config_dict):
self.__logger = logging.getLogger(__name__)
self.__alpha = config_dict['model']['ewma']['alpha']
self.__sigma_band = config_dict['model']['ewma']['sigma_band']
self.__lrfs = LRFeatureSieve(
type=config_dict['feature']['type'],
weight_decay=config_dict['model']['ewma']['alpha']
)
def detect_signal(self, history_dict, pos=None):
best_f = self.__lrfs.extract_best_feature(history_dict=history_dict)
sig_dict = self._ewm_stats(series=best_f['series'])
if sig_dict['ewmbb'][0] > 0:
sig_act = 'buy'
elif sig_dict['ewmbb'][1] < 0:
sig_act = 'sell'
elif pos and (
(pos['side'] == 'buy' and sig_dict['ewma'] < 0) or
(pos['side'] == 'sell' and sig_dict['ewma'] > 0)):
sig_act = 'close'
else:
sig_act = None
sig_log_str = '{:^40}|'.format(
'{0:>3}[{1:>3}]:{2:>9}{3:>18}'.format(
self.__lrfs.code, best_f['granularity_str'],
'{:.1g}'.format(sig_dict['ewma']),
np.array2string(
sig_dict['ewmbb'],
formatter={'float_kind': lambda f: '{:.1g}'.format(f)}
)
)
)
return {
'sig_act': sig_act, 'sig_log_str': sig_log_str,
'sig_ewma': sig_dict['ewma'], 'sig_ewmbbl': sig_dict['ewmbb'][0],
'sig_ewmbbu': sig_dict['ewmbb'][1]
}
def _ewm_stats(self, series):
ewm = series.ewm(alpha=self.__alpha)
ewma = ewm.mean().iloc[-1]
self.__logger.debug('ewma: {}'.format(ewma))
ewm_bollinger_band = (
np.array([-1, 1]) * ewm.std().iloc[-1] * self.__sigma_band
) + ewma
return {'ewma': ewma, 'ewmbb': ewm_bollinger_band}
| mit | -7,870,564,357,281,349,000 | 35.962963 | 77 | 0.491483 | false | 3.214171 | false | false | false |
tmm/django-username-email | cuser/admin.py | 1 | 1437 | from django.contrib import admin
from django.contrib.auth.admin import GroupAdmin as BaseGroupAdmin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group as StockGroup
from django.utils.translation import gettext_lazy as _
from cuser.forms import UserChangeForm, UserCreationForm
from cuser.models import CUser, Group
from cuser.settings import CUSER_SETTINGS
@admin.register(CUser)
class UserAdmin(BaseUserAdmin):
add_form_template = 'admin/cuser/cuser/add_form.html'
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2'),
}),
)
form = UserChangeForm
add_form = UserCreationForm
list_display = ('email', 'first_name', 'last_name', 'is_staff')
search_fields = ('email', 'first_name', 'last_name')
ordering = ('email',)
if CUSER_SETTINGS['register_proxy_auth_group_model']:
admin.site.unregister(StockGroup)
@admin.register(Group)
class GroupAdmin(BaseGroupAdmin):
pass
| mit | 9,063,590,292,572,750,000 | 34.925 | 79 | 0.637439 | false | 3.781579 | false | false | false |
anthonyfok/frescobaldi | frescobaldi_app/docbrowser/browser.py | 1 | 10953 | # This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The browser widget for the help browser.
"""
import os
from PyQt5.QtCore import QSettings, Qt, QUrl
from PyQt5.QtGui import QKeySequence
from PyQt5.QtPrintSupport import QPrintDialog, QPrinter
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import QWebPage, QWebView
from PyQt5.QtWidgets import QComboBox, QMenu, QToolBar, QVBoxLayout, QWidget
import app
import icons
import helpers
import widgets.lineedit
import lilypondinfo
import lilydoc.manager
import lilydoc.network
import textformats
class Browser(QWidget):
"""LilyPond documentation browser widget."""
def __init__(self, dockwidget):
super(Browser, self).__init__(dockwidget)
layout = QVBoxLayout(spacing=0)
layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(layout)
self.toolbar = tb = QToolBar()
self.webview = QWebView(contextMenuPolicy=Qt.CustomContextMenu)
self.chooser = QComboBox(sizeAdjustPolicy=QComboBox.AdjustToContents)
self.search = SearchEntry(maximumWidth=200)
layout.addWidget(self.toolbar)
layout.addWidget(self.webview)
ac = dockwidget.actionCollection
ac.help_back.triggered.connect(self.webview.back)
ac.help_forward.triggered.connect(self.webview.forward)
ac.help_home.triggered.connect(self.showHomePage)
ac.help_print.triggered.connect(self.slotPrint)
self.webview.page().setNetworkAccessManager(lilydoc.network.accessmanager())
self.webview.page().setLinkDelegationPolicy(QWebPage.DelegateAllLinks)
self.webview.page().linkClicked.connect(self.openUrl)
self.webview.page().setForwardUnsupportedContent(True)
self.webview.page().unsupportedContent.connect(self.slotUnsupported)
self.webview.urlChanged.connect(self.slotUrlChanged)
self.webview.customContextMenuRequested.connect(self.slotShowContextMenu)
tb.addAction(ac.help_back)
tb.addAction(ac.help_forward)
tb.addSeparator()
tb.addAction(ac.help_home)
tb.addAction(ac.help_print)
tb.addSeparator()
tb.addWidget(self.chooser)
tb.addWidget(self.search)
self.chooser.activated[int].connect(self.showHomePage)
self.search.textEdited.connect(self.slotSearchChanged)
self.search.returnPressed.connect(self.slotSearchReturnPressed)
dockwidget.mainwindow().iconSizeChanged.connect(self.updateToolBarSettings)
dockwidget.mainwindow().toolButtonStyleChanged.connect(self.updateToolBarSettings)
app.settingsChanged.connect(self.readSettings)
self.readSettings()
self.loadDocumentation()
self.showInitialPage()
app.settingsChanged.connect(self.loadDocumentation)
app.translateUI(self)
def readSettings(self):
s = QSettings()
s.beginGroup("documentation")
ws = self.webview.page().settings()
family = s.value("fontfamily", self.font().family(), str)
size = s.value("fontsize", 16, int)
ws.setFontFamily(QWebSettings.StandardFont, family)
ws.setFontSize(QWebSettings.DefaultFontSize, size)
fixed = textformats.formatData('editor').font
ws.setFontFamily(QWebSettings.FixedFont, fixed.family())
ws.setFontSize(QWebSettings.DefaultFixedFontSize, fixed.pointSizeF() * 96 / 72)
def keyPressEvent(self, ev):
if ev.text() == "/":
self.search.setFocus()
else:
super(Browser, self).keyPressEvent(ev)
def translateUI(self):
try:
self.search.setPlaceholderText(_("Search..."))
except AttributeError:
pass # not in Qt 4.6
def showInitialPage(self):
"""Shows the preferred start page.
If a local documentation instance already has a suitable version,
just loads it. Otherwise connects to the allLoaded signal, that is
emitted when all the documentation instances have loaded their version
information and then shows the start page (if another page wasn't yet
loaded).
"""
if self.webview.url().isEmpty():
docs = lilydoc.manager.docs()
version = lilypondinfo.preferred().version()
index = -1
if version:
for num, doc in enumerate(docs):
if doc.version() is not None and doc.version() >= version:
index = num # a suitable documentation is found
break
if index == -1:
# nothing found (or LilyPond version not available),
# wait for loading or show the most recent version
if not lilydoc.manager.loaded():
lilydoc.manager.allLoaded.connect(self.showInitialPage)
return
index = len(docs) - 1
self.chooser.setCurrentIndex(index)
self.showHomePage()
def loadDocumentation(self):
"""Puts the available documentation instances in the combobox."""
i = self.chooser.currentIndex()
self.chooser.clear()
for doc in lilydoc.manager.docs():
v = doc.versionString()
if doc.isLocal():
t = _("(local)")
else:
t = _("({hostname})").format(hostname=doc.url().host())
self.chooser.addItem("{0} {1}".format(v or _("<unknown>"), t))
self.chooser.setCurrentIndex(i)
if not lilydoc.manager.loaded():
lilydoc.manager.allLoaded.connect(self.loadDocumentation, -1)
return
def updateToolBarSettings(self):
mainwin = self.parentWidget().mainwindow()
self.toolbar.setIconSize(mainwin.iconSize())
self.toolbar.setToolButtonStyle(mainwin.toolButtonStyle())
def showManual(self):
"""Invoked when the user presses F1."""
self.slotHomeFrescobaldi() # TEMP
def slotUrlChanged(self):
ac = self.parentWidget().actionCollection
ac.help_back.setEnabled(self.webview.history().canGoBack())
ac.help_forward.setEnabled(self.webview.history().canGoForward())
def openUrl(self, url):
if url.path().endswith(('.ily', '.lyi', '.ly')):
self.sourceViewer().showReply(lilydoc.network.get(url))
else:
self.webview.load(url)
def slotUnsupported(self, reply):
helpers.openUrl(reply.url())
def slotSearchChanged(self):
text = self.search.text()
if not text.startswith(':'):
self.webview.page().findText(text, QWebPage.FindWrapsAroundDocument)
def slotSearchReturnPressed(self):
text = self.search.text()
if not text.startswith(':'):
self.slotSearchChanged()
else:
pass # TODO: implement full doc search
def sourceViewer(self):
try:
return self._sourceviewer
except AttributeError:
from . import sourceviewer
self._sourceviewer = sourceviewer.SourceViewer(self)
return self._sourceviewer
def showHomePage(self):
"""Shows the homepage of the LilyPond documentation."""
i = self.chooser.currentIndex()
if i < 0:
i = 0
doc = lilydoc.manager.docs()[i]
url = doc.home()
if doc.isLocal():
path = url.toLocalFile()
langs = lilydoc.network.langs()
if langs:
for lang in langs:
if os.path.exists(path + '.' + lang + '.html'):
path += '.' + lang
break
url = QUrl.fromLocalFile(path + '.html')
self.webview.load(url)
def slotPrint(self):
printer = QPrinter()
dlg = QPrintDialog(printer, self)
dlg.setWindowTitle(app.caption(_("Print")))
if dlg.exec_():
self.webview.print_(printer)
def slotShowContextMenu(self, pos):
hit = self.webview.page().currentFrame().hitTestContent(pos)
menu = QMenu()
if hit.linkUrl().isValid():
a = self.webview.pageAction(QWebPage.CopyLinkToClipboard)
a.setIcon(icons.get("edit-copy"))
a.setText(_("Copy &Link"))
menu.addAction(a)
menu.addSeparator()
a = menu.addAction(icons.get("window-new"), _("Open Link in &New Window"))
a.triggered.connect((lambda url: lambda: self.slotNewWindow(url))(hit.linkUrl()))
else:
if hit.isContentSelected():
a = self.webview.pageAction(QWebPage.Copy)
a.setIcon(icons.get("edit-copy"))
a.setText(_("&Copy"))
menu.addAction(a)
menu.addSeparator()
a = menu.addAction(icons.get("window-new"), _("Open Document in &New Window"))
a.triggered.connect((lambda url: lambda: self.slotNewWindow(url))(self.webview.url()))
if menu.actions():
menu.exec_(self.webview.mapToGlobal(pos))
def slotNewWindow(self, url):
helpers.openUrl(url)
class SearchEntry(widgets.lineedit.LineEdit):
"""A line edit that clears itself when ESC is pressed."""
def keyPressEvent(self, ev):
if ev.key() == Qt.Key_Escape:
if self.text():
self.clear()
else:
webview = self.parentWidget().parentWidget().webview
webview.setFocus()
webview.page().findText(None)
elif any(ev.matches(key) for key in (
QKeySequence.MoveToNextLine, QKeySequence.MoveToPreviousLine,
QKeySequence.MoveToNextPage, QKeySequence.MoveToPreviousPage,
)):
webview = self.parentWidget().parentWidget().webview
webview.keyPressEvent(ev)
else:
super(SearchEntry, self).keyPressEvent(ev)
| gpl-2.0 | 8,974,451,822,156,147,000 | 37.978648 | 98 | 0.619739 | false | 4.136329 | false | false | false |
ftrimble/route-grower | pyroute/compress/compress.py | 1 | 4419 | #!/usr/bin/python
#----------------------------------------------------------------
#
#------------------------------------------------------
# Usage:
#
#------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------
import sys
import os
from xml.sax import make_parser, handler
import xml
from struct import *
class BinaryOsm(handler.ContentHandler):
def __init__(self):
pass
def encode(self, filename, output):
self.nextKID = 3
self.nextVID = 1
self.tags = {}
self.values = {}
if(not os.path.exists(filename)):
print "No such data file %s" % filename
return
try:
self.out = open(output, "wb")
parser = make_parser()
parser.setContentHandler(self)
parser.parse(filename)
self.out.write("X")
self.out.close()
except xml.sax._exceptions.SAXParseException:
print "Error loading %s" % filename
def startElement(self, name, attrs):
"""Handle XML elements"""
if(name =='node'):
self.meta = { \
'id':int(attrs.get('id')),
'lon':float(attrs.get('lat')),
'lat':float(attrs.get('lon'))
}
self.tags = {}
elif(name == 'way'):
self.meta = {'id':int(attrs.get('id'))}
self.tags = {}
self.waynodes = []
elif(name == 'relation'):
self.tags = {}
elif name == 'nd':
"""Nodes within a way -- add them to a list"""
self.waynodes.append(int(attrs.get('ref')))
elif name == 'tag':
"""Tags - store them in a hash"""
k,v = (attrs.get('k'), attrs.get('v'))
if not k in ('created_by'):
self.tags[k] = v
def endElement(self, name):
"""Handle ways in the OSM data"""
writeTags = False
if(name =='node'):
data = 'N' + pack("L", self.meta['id']) + self.encodeLL(self.meta['lat'], self.meta['lon'])
self.out.write(data)
writeTags = True
elif(name == 'way'):
data = 'W' + pack("L", self.meta['id'])
self.out.write(data)
self.out.write(pack('H', len(self.waynodes)))
for n in self.waynodes:
self.out.write(pack('L', n))
writeTags = True
if(writeTags):
n = len(self.tags.keys())
if(n > 255):
# TODO:
print "Error: more than 255 tags on an item"
return
self.out.write(pack('B', n))
for k,v in self.tags.items():
self.encodeTag(k, False, k)
volatile = k in ('name','ref','ncn_ref','note','notes','description','ele','time','url','website','postal_code','image','source_ref','source:ref','source:name','source_ref:name',"FIXME","fixme","place_numbers")
self.encodeTag(v,volatile,k)
def encodeTag(self,text,volatile,key):
text = text.encode('utf8')
if(not volatile):
try:
ID = self.values[text]
self.out.write(pack('H', ID))
except KeyError:
if(self.nextKID >= 65535):
# TODO
print "Error: too many stored tags!"
sys.exit()
print "%d: %s %s" % (self.nextKID, key,text)
self.values[text] = self.nextKID
self.out.write(pack('HHB', 1, self.nextKID, len(text)))
self.out.write(text)
self.nextKID = self.nextKID + 1
else:
self.out.write(pack('HB', 0, len(text)))
self.out.write(text)
#print "Storing simple %s" % (text)
def encodeLL(self,lat,lon):
pLat = (lat + 90.0) / 180.0
pLon = (lon + 180.0) / 360.0
iLat = self.encodeP(pLat)
iLon = self.encodeP(pLon)
return(pack("II", iLat, iLon))
def encodeP(self,p):
i = int(p * 4294967296.0)
return(i)
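  # Illustrative note: coordinates are stored as unsigned 32-bit fractions of
  # their full range, e.g. lat=0.0, lon=0.0 gives pLat == pLon == 0.5 and
  # encodeP(0.5) == 2147483648.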
# Parse the supplied OSM file
if __name__ == "__main__":
print "Loading data..."
Binary = BinaryOsm()
Binary.encode(sys.argv[1], sys.argv[2])
| apache-2.0 | 2,230,958,692,004,118,500 | 30.564286 | 218 | 0.562118 | false | 3.48227 | false | false | false |
tp81/openmicroscopy | components/tools/OmeroWeb/omeroweb/webclient/show.py | 1 | 29181 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2014 University of Dundee & Open Microscopy Environment.
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Generic functionality for handling particular links and "showing" objects
in the OMERO.web tree view.
"""
import omero
import re
from omero.rtypes import rint, rlong
from django.core.urlresolvers import reverse
from copy import deepcopy
class IncorrectMenuError(Exception):
"""Exception to signal that we are on the wrong menu."""
def __init__(self, uri):
"""
Constructs a new Exception instance.
@param uri URI to redirect to.
@type uri String
"""
super(Exception, self).__init__()
self.uri = uri
class Show(object):
"""
This object is used by most of the top-level pages. The "show" and
"path" query strings are used by this object to both direct OMERO.web to
the correct locations in the hierarchy and select the correct objects
in that hierarchy.
"""
# List of prefixes that are at the top level of the tree
TOP_LEVEL_PREFIXES = ('project', 'screen')
# List of supported object types
SUPPORTED_OBJECT_TYPES = (
'project', 'dataset', 'image', 'screen', 'plate', 'tag',
'acquisition', 'run', 'well'
)
# Regular expression which declares the format for a "path" used either
# in the "path" or "show" query string. No modifications should be made
# to this regex without corresponding unit tests in
# "tests/unit/test_show.py".
PATH_REGEX = re.compile(
r'(?P<object_type>\w+)\.?(?P<key>\w+)?[-=](?P<value>[^\|]*)\|?'
)
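    # Illustrative "show"/"path" values accepted by PATH_REGEX (ids assumed):
    #   "project.id-51", "image-3|image-4", "well.name-A1", "run=2"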
# Regular expression for matching Well names
WELL_REGEX = re.compile(
        r'^(?:(?P<alpha_row>[a-zA-Z]+)(?P<digit_column>\d+))|'
        r'(?:(?P<digit_row>\d+)(?P<alpha_column>[a-zA-Z]+))$'
)
def __init__(self, conn, request, menu):
"""
Constructs a Show instance. The instance will not be fully
initialised until the first retrieval of the L{Show.first_selected}
property.
@param conn OMERO gateway.
@type conn L{omero.gateway.BlitzGateway}
@param request Django HTTP request.
@type request L{django.http.HttpRequest}
@param menu Literal representing the current menu we are on.
@type menu String
"""
# The list of "paths" ("type-id") we have been requested to
# show/select in the user interface. May be modified if one or
# more of the elements is not in the tree. This is currently the
# case for all Screen-Plate-Well hierarchy elements below Plate
# (Well for example).
self._initially_select = list()
# The nodes of the tree that will be initially open based on the
# nodes that are initially selected.
self._initially_open = None
# The owner of the node closest to the root of the tree from the
# list of initially open nodes.
self._initially_open_owner = None
# First selected node from the requested initially open "paths"
# that is first loaded on first retrieval of the "first_selected"
# property.
self._first_selected = None
self.conn = conn
self.request = request
self.menu = menu
path = self.request.REQUEST.get('path', '').split('|')[-1]
self._add_if_supported(path)
show = self.request.REQUEST.get('show', '')
for path in show.split('|'):
self._add_if_supported(path)
def _add_if_supported(self, path):
"""Adds a path to the initially selected list if it is supported."""
m = self.PATH_REGEX.match(path)
if m is None:
return
object_type = m.group('object_type')
key = m.group('key')
value = m.group('value')
if key is None:
key = 'id'
if object_type in self.SUPPORTED_OBJECT_TYPES:
# 'run' is an alternative for 'acquisition'
object_type = object_type.replace('run', 'acquisition')
self._initially_select.append(
'%s.%s-%s' % (object_type, key, value)
)
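            # Illustrative note: "run-3" is normalised to "acquisition.id-3",
            # while unsupported types (e.g. "foo-1") are silently dropped.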
def _load_tag(self, attributes):
"""
Loads a Tag based on a certain set of attributes from the server.
@param attributes Set of attributes to filter on.
@type attributes L{dict}
"""
# Tags have an "Annotation" suffix added to the object name so
# need to be loaded differently.
return next(self.conn.getObjects(
"TagAnnotation", attributes=attributes
))
def get_well_row_column(self, well):
"""
Retrieves a tuple of row and column as L{int} for a given Well name
("A1" or "1A") string.
@param well Well name string to retrieve the row and column tuple for.
@type well L{str}
"""
m = self.WELL_REGEX.match(well)
if m is None:
return None
# We are using an algorithm that expects alpha columns and digit
# rows (like a spreadsheet). is_reversed will be True if those
# conditions are not met, signifying that the row and column
# calculated needs to be reversed before returning.
is_reversed = False
if m.group('alpha_row') is not None:
a = m.group('alpha_row').upper()
b = m.group('digit_column')
is_reversed = True
else:
a = m.group('alpha_column').upper()
b = m.group('digit_row')
# Convert base26 column string to number. Adapted from XlsxWriter:
# * https://github.com/jmcnamara/XlsxWriter
# * xlsxwriter/utility.py
n = 0
column = 0
for character in reversed(a):
column += (ord(character) - ord('A') + 1) * (26 ** n)
n += 1
# Convert 1-index to zero-index
row = int(b) - 1
column -= 1
if is_reversed:
return column, row
return row, column
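        # Illustrative conversions: "A1" -> (0, 0), "H12" -> (7, 11),
        # "12H" -> (11, 7); names that do not match WELL_REGEX return None.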
def _load_well(self, attributes):
"""
Loads a Well based on a certain set of attributes from the server.
@param attributes Set of attributes to filter on.
@type attributes L{dict}
"""
if 'id' in attributes:
return self.conn.getObject('Well', attributes=attributes)
if 'name' in attributes:
row, column = self.get_well_row_column(attributes['name'])
path = self.request.REQUEST.get('path', '')
for m in self.PATH_REGEX.finditer(path):
object_type = m.group('object_type')
# May have 'run' here rather than 'acquisition' because
# the path will not have been validated and replaced.
if object_type not in ('plate', 'run', 'acquisition'):
continue
# 'run' is an alternative for 'acquisition'
object_type = object_type.replace('run', 'acquisition')
# Try and load the potential parent first
key = m.group('key')
value = m.group('value')
if key is None:
key = 'id'
if key == 'id':
value = long(value)
parent_attributes = {key: value}
parent, = self.conn.getObjects(
object_type, attributes=parent_attributes
)
# Now use the parent to try and locate the Well
query_service = self.conn.getQueryService()
params = omero.sys.ParametersI()
params.map['row'] = rint(row)
params.map['column'] = rint(column)
params.addId(parent.id)
if object_type == 'plate':
db_row, = query_service.projection(
'select w.id from Well as w '
'where w.row = :row and w.column = :column '
'and w.plate.id = :id', params, self.conn.SERVICE_OPTS
)
if object_type == 'acquisition':
db_row, = query_service.projection(
'select distinct w.id from Well as w '
'join w.wellSamples as ws '
'where w.row = :row and w.column = :column '
'and ws.plateAcquisition.id = :id',
params, self.conn.SERVICE_OPTS
)
well_id, = db_row
return self.conn.getObject(
'Well', well_id.val
)
def _load_first_selected(self, first_obj, attributes):
"""
Loads the first selected object from the server. Will raise
L{IncorrectMenuError} if the initialized menu was incorrect for
the loaded objects.
@param first_obj Type of the first selected object.
@type first_obj String
@param attributes Set of attributes to filter on.
@type attributes L{dict}
"""
first_selected = None
if first_obj == "tag":
first_selected = self._load_tag(attributes)
elif first_obj == "well":
first_selected = self._load_well(attributes)
else:
# All other objects can be loaded by type and attributes.
first_selected, = self.conn.getObjects(
first_obj, attributes=attributes
)
if first_obj == "well":
# Wells aren't in the tree, so we need to look up the parent
well_sample = first_selected.getWellSample()
parent_node = None
parent_type = None
# It's possible that the Well that we've been requested to show
# has no fields (WellSample instances). In that case the Plate
# will be used but we don't have much choice.
if well_sample is not None:
parent_node = well_sample.getPlateAcquisition()
parent_type = "acquisition"
if parent_node is None:
# No WellSample for this well, try and retrieve the
# PlateAcquisition from the parent Plate.
plate = first_selected.getParent()
try:
parent_node, = plate.listPlateAcquisitions()
parent_type = "acquisition"
except ValueError:
# No PlateAcquisition for this well, use Plate instead
parent_node = plate
parent_type = "plate"
# Tree hierarchy open to first selected "real" object available
# in the tree.
self._initially_open = [
"%s-%s" % (parent_type, parent_node.getId()),
"%s-%s" % (first_obj, first_selected.getId())
]
first_selected = parent_node
self._initially_select = self._initially_open[:]
else:
# Tree hierarchy open to first selected object.
self._initially_open = [
'%s-%s' % (first_obj, first_selected.getId())
]
# support for multiple objects selected by ID,
# E.g. show=image-1|image-2
if 'id' in attributes.keys() and len(self._initially_select) > 1:
# 'image.id-1' -> 'image-1'
self._initially_select = [
i.replace(".id", "") for i in self._initially_select]
else:
# Only select a single object
self._initially_select = self._initially_open[:]
self._initially_open_owner = first_selected.details.owner.id.val
return first_selected
def _find_first_selected(self):
"""Finds the first selected object."""
if len(self._initially_select) == 0:
return None
# tree hierarchy open to first selected object
m = self.PATH_REGEX.match(self._initially_select[0])
if m is None:
return None
first_obj = m.group('object_type')
# if we're showing a tag, make sure we're on the tags page...
if first_obj == "tag" and self.menu != "usertags":
# redirect to usertags/?show=tag-123
raise IncorrectMenuError(
reverse(viewname="load_template", args=['usertags']) +
"?show=" + self._initially_select[0].replace(".id", "")
)
first_selected = None
try:
key = m.group('key')
value = m.group('value')
if key == 'id':
value = long(value)
attributes = {key: value}
# Set context to 'cross-group'
self.conn.SERVICE_OPTS.setOmeroGroup('-1')
first_selected = self._load_first_selected(first_obj, attributes)
except:
pass
if first_obj not in self.TOP_LEVEL_PREFIXES:
# Need to see if first item has parents
if first_selected is not None:
for p in first_selected.getAncestry():
if first_obj == "tag":
# Parents of tags must be tags (no OMERO_CLASS)
self._initially_open.insert(0, "tag-%s" % p.getId())
else:
self._initially_open.insert(
0, "%s-%s" % (p.OMERO_CLASS.lower(), p.getId())
)
self._initially_open_owner = p.details.owner.id.val
m = self.PATH_REGEX.match(self._initially_open[0])
if m.group('object_type') == 'image':
self._initially_open.insert(0, "orphaned-0")
return first_selected
@property
def first_selected(self):
"""
Retrieves the first selected object. The first time this method is
invoked on the instance the actual retrieval is performed. All other
invocations retrieve the same instance without server interaction.
Will raise L{IncorrectMenuError} if the initialized menu was
incorrect for the loaded objects.
"""
if self._first_selected is None:
self._first_selected = self._find_first_selected()
return self._first_selected
@property
def initially_select(self):
"""
Retrieves the list of "paths" ("type-id") we have been requested to
show/select in the user interface. May be different than we were
first initialised with due to certain nodes of the Screen-Plate-Well
hierachy not being present in the tree. Should not be invoked until
after first retrieval of the L{Show.first_selected} property.
"""
return self._initially_select
@property
def initially_open(self):
"""
Retrieves the nodes of the tree that will be initially open based on
the nodes that are initially selected. Should not be invoked until
after first retrieval of the L{Show.first_selected} property.
"""
return self._initially_open
@property
def initially_open_owner(self):
"""
Retrieves the owner of the node closest to the root of the tree from
the list of initially open nodes. Should not be invoked until
after first retrieval of the L{Show.first_selected} property.
"""
return self._initially_open_owner
def paths_to_object(conn, experimenter_id=None, project_id=None,
dataset_id=None, image_id=None, screen_id=None,
plate_id=None, acquisition_id=None, well_id=None,
group_id=None):
# Set any of the parameters present and find the lowest type to find
# If path components are specified for incompatible paths, e.g. a dataset
# id and a screen id then the following priority is enforced for the
# object to find:
# image->dataset->project->well->acquisition->plate->screen->experimenter
# Note on wells:
# Selecting a 'well' is really for selecting well_sample paths
# if a well is specified on its own, we return all the well_sample paths
# than match
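    # Illustrative result (ids assumed): paths_to_object(conn, image_id=3119)
    # could return [[{'type': 'experimenter', 'id': 3},
    #                {'type': 'project', 'id': 51},
    #                {'type': 'dataset', 'id': 251},
    #                {'type': 'image', 'id': 3119}]]
    # for an image that sits in a single project/dataset hierarchy.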
params = omero.sys.ParametersI()
service_opts = deepcopy(conn.SERVICE_OPTS)
lowest_type = None
if experimenter_id is not None:
params.add('eid', rlong(experimenter_id))
lowest_type = 'experimenter'
if screen_id is not None:
params.add('sid', rlong(screen_id))
lowest_type = 'screen'
if plate_id is not None:
params.add('plid', rlong(plate_id))
lowest_type = 'plate'
if acquisition_id is not None:
params.add('aid', rlong(acquisition_id))
lowest_type = 'acquisition'
if well_id is not None:
params.add('wid', rlong(well_id))
lowest_type = 'well'
if project_id is not None:
params.add('pid', rlong(project_id))
lowest_type = 'project'
if dataset_id is not None:
params.add('did', rlong(dataset_id))
lowest_type = 'dataset'
if image_id is not None:
params.add('iid', rlong(image_id))
lowest_type = 'image'
# If none of these parameters are set then there is nothing to find
if lowest_type is None:
return []
if group_id is not None:
service_opts.setOmeroGroup(group_id)
qs = conn.getQueryService()
# Hierarchies for this object
paths = []
# It is probably possible to write a more generic query instead
# of special casing each type, but it will be less readable and
# maintainable than these
if lowest_type == 'image':
q = '''
select coalesce(powner.id, downer.id, iowner.id),
pdlink.parent.id,
dilink.parent.id,
image.id
from Image image
left outer join image.details.owner iowner
left outer join image.datasetLinks dilink
left outer join dilink.parent.details.owner downer
left outer join dilink.parent.projectLinks pdlink
left outer join pdlink.parent.details.owner powner
where image.id = :iid
'''
where_clause = []
if dataset_id is not None:
where_clause.append('dilink.parent.id = :did')
if project_id is not None:
where_clause.append('pdlink.parent.id = :pid')
if experimenter_id is not None:
where_clause.append(
'coalesce(powner.id, downer.id, iowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
q += '''
order by coalesce(powner.id, downer.id, iowner.id),
pdlink.parent.id,
dilink.parent.id,
image.id
'''
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->project->dataset->image
if e[1] is not None:
path.append({
'type': 'project',
'id': e[1].val
})
# If it is experimenter->dataset->image or
# experimenter->project->dataset->image
if e[2] is not None:
path.append({
'type': 'dataset',
'id': e[2].val
})
# If it is orphaned->image
if e[2] is None:
path.append({
'type': 'orphaned',
'id': e[0].val
})
# Image always present
path.append({
'type': 'image',
'id': e[3].val
})
paths.append(path)
elif lowest_type == 'dataset':
q = '''
select coalesce(powner.id, downer.id),
pdlink.parent.id,
dataset.id
from Dataset dataset
left outer join dataset.details.owner downer
left outer join dataset.projectLinks pdlink
left outer join pdlink.parent.details.owner powner
where dataset.id = :did
'''
where_clause = []
if project_id is not None:
where_clause.append('pdlink.parent.id = :pid')
if experimenter_id is not None:
where_clause.append('coalesce(powner.id, downer.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->project->dataset
if e[1] is not None:
path.append({
'type': 'project',
'id': e[1].val
})
# Dataset always present
path.append({
'type': 'dataset',
'id': e[2].val
})
paths.append(path)
elif lowest_type == 'project':
q = '''
select project.details.owner.id,
project.id
from Project project
where project.id = :pid
'''
for e in qs.projection(q, params, service_opts):
path = []
# Always experimenter->project
path.append({
'type': 'experimenter',
'id': e[0].val
})
path.append({
'type': 'project',
'id': e[1].val
})
paths.append(path)
# This is basically the same as WellSample except that it is not
# restricted by a particular WellSample id
# May not have acquisition (load plate from well)
# We don't need to load the wellsample (not in tree)
elif lowest_type == 'well':
q = '''
select coalesce(sowner.id, plowner.id, aowner.id, wsowner.id),
slink.parent.id,
plate.id,
acquisition.id
from WellSample wellsample
left outer join wellsample.details.owner wsowner
left outer join wellsample.plateAcquisition acquisition
left outer join wellsample.details.owner aowner
join wellsample.well well
left outer join well.plate plate
left outer join plate.details.owner plowner
left outer join plate.screenLinks slink
left outer join slink.parent.details.owner sowner
where wellsample.well.id = :wid
'''
where_clause = []
if acquisition_id is not None:
where_clause.append('acquisition.id = :aid')
if plate_id is not None:
where_clause.append('plate.id = :plid')
if screen_id is not None:
where_clause.append('slink.parent.id = :sid')
if experimenter_id is not None:
where_clause.append(
                'coalesce(sowner.id, plowner.id, aowner.id, wsowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->screen->plate->acquisition->wellsample
if e[1] is not None:
path.append({
'type': 'screen',
'id': e[1].val
})
            # Plate should always be present
path.append({
'type': 'plate',
'id': e[2].val
})
# Acquisition not present if plate created via API (not imported)
if e[3] is not None:
path.append({
'type': 'acquisition',
'id': e[3].val
})
paths.append(path)
elif lowest_type == 'acquisition':
q = '''
select coalesce(sowner.id, plowner.id, aowner.id),
slink.parent.id,
plate.id,
acquisition.id
from PlateAcquisition acquisition
left outer join acquisition.details.owner aowner
left outer join acquisition.plate plate
left outer join plate.details.owner plowner
left outer join plate.screenLinks slink
left outer join slink.parent.details.owner sowner
where acquisition.id = :aid
'''
where_clause = []
if plate_id is not None:
where_clause.append('plate.id = :plid')
if screen_id is not None:
where_clause.append('slink.parent.id = :sid')
if experimenter_id is not None:
where_clause.append(
'coalesce(sowner.id, plowner.id, aowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->screen->plate->acquisition
if e[1] is not None:
path.append({
'type': 'screen',
'id': e[1].val
})
# If it is experimenter->plate->acquisition or
# experimenter->screen->plate->acquisition
if e[2] is not None:
path.append({
'type': 'plate',
'id': e[2].val
})
# Acquisition always present
path.append({
'type': 'acquisition',
'id': e[3].val
})
paths.append(path)
elif lowest_type == 'plate':
q = '''
select coalesce(sowner.id, plowner.id),
splink.parent.id,
plate.id
from Plate plate
left outer join plate.details.owner sowner
left outer join plate.screenLinks splink
left outer join splink.parent.details.owner plowner
where plate.id = :plid
'''
where_clause = []
if screen_id is not None:
where_clause.append('splink.parent.id = :sid')
if experimenter_id is not None:
where_clause.append('coalesce(sowner.id, plowner.id) = :eid')
if len(where_clause) > 0:
q += ' and ' + ' and '.join(where_clause)
for e in qs.projection(q, params, service_opts):
path = []
# Experimenter is always found
path.append({
'type': 'experimenter',
'id': e[0].val
})
# If it is experimenter->screen->plate
if e[1] is not None:
path.append({
'type': 'screen',
'id': e[1].val
})
# Plate always present
path.append({
'type': 'plate',
'id': e[2].val
})
paths.append(path)
elif lowest_type == 'screen':
q = '''
select screen.details.owner.id,
screen.id
from Screen screen
where screen.id = :sid
'''
for e in qs.projection(q, params, service_opts):
path = []
# Always experimenter->screen
path.append({
'type': 'experimenter',
'id': e[0].val
})
path.append({
'type': 'screen',
'id': e[1].val
})
paths.append(path)
elif lowest_type == 'experimenter':
path = []
# No query required here as this is the highest level container
path.append({
'type': 'experimenter',
'id': experimenter_id
})
paths.append(path)
return paths
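# Illustrative usage sketch (ids below are hypothetical; `conn` is assumed to
# be an existing BlitzGateway connection):
#
#     paths_to_object(conn, image_id=123)
#     # -> [[{'type': 'experimenter', 'id': 5},
#     #      {'type': 'project', 'id': 2},
#     #      {'type': 'dataset', 'id': 7},
#     #      {'type': 'image', 'id': 123}]]
#
# An orphaned image would instead get an 'orphaned' entry in place of the
# project/dataset pair, as built in the image branch above.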
| gpl-2.0 | -3,020,381,144,042,495,000 | 35.204715 | 79 | 0.533532 | false | 4.256272 | false | false | false |
DomNomNom/Cardo | Cardo.py | 1 | 4736 |
import sys, select # for timing out input
'''
This is the overall idea of the game's main logic:
stack = [] # last value is the top of the stack
while not gameOver:
if stack is empty:
stack = [ nextEvent() ]
# stack now has at least one element
top = stack[-1]
imminent(top) # this may modify the stack
if stack and stack[-1] == top: # nothing else wants to precede us
top.apply() # this may modify gameOver
'''
def imminent(event):
# see whether there are any cards that want to be used here
pass
def stackAppend(event):
global stack
stack.append(event)
# ====== Events ======
class Event(object):
def apply(self):
pass
class GameStarted(Event):
pass
class TurnStart(Event):
def __init__(self, player):
self.player = player
def apply(self):
global currentPlayer
currentPlayer = self.player
        print "It is now {0}'s turn.".format(self.player)
class TurnEnd(Event):
def __init__(self, player):
self.player = player
def apply(self):
global currentPlayer
currentPlayer = None
class PlayerTimeOut(Event):
def __init__(self, player):
self.player = player
def apply(self):
print str(self.player) + ' timed out.'
class PlayerWin(Event):
def __init__(self, player):
self.player = player
def apply(self):
global winner
winner = self.player
stackAppend(GameOver()) # That's right, we don't even directly set it here
class PlayerDrawCard(Event):
def __init__(self, player):
self.player = player
def apply(self):
pass # TODO: cards
class UnitTakeDamadge(Event):
def __init__(self, *args):
self.unit, self.damadge = args
def apply(self):
stackAppend(UnitHealthChanged(self.unit, -self.damadge))
class UnitHealthChanged(Event):
def __init__(self, *args):
self.unit, self.change = args
def apply(self):
self.unit.health += self.change
if self.unit.health <= 0:
self.unit.onNoHealth()
class UnitNoHealth(Event):
def __init__(self, unit):
self.unit = unit
def apply(self):
self.unit.die()
class GameOver(Event):
def apply(self):
global gameOver
gameOver = True
print 'game over man, game over!'
# ====== Units ======
# A unit is anything that has health and dies when its health goes to or below zero
class Unit(object):
def __init__(self):
self.health = 0
def onNoHealth(self):
stackAppend(UnitNoHealth(self))
def die(self):
print str(self) + ' died.'
pass
class Player(Unit):
def __init__(self, name):
self.name = name
self.health = 30
def __str__(self):
return '{0}(health:{1})'.format(self.name, self.health)
def die(self):
stackAppend(GameOver())
# returns an Event within a finite time
def playerControl(player):
timeout = 10
print "You have {0} seconds to answer!".format(timeout)
# TODO: allow for multiple choices
# select allows for a timout
# stackoverflow.com/questions/1335507/
inputStreams, ignored, ignored2 = select.select([sys.stdin], [], [], timeout)
if (inputStreams):
playerInput = sys.stdin.readline()
print "echo: ", playerInput
# TODO: actually use the playerInput
else:
yield PlayerTimeOut(player)
# a infinite iterator returning Events in finite time
def makeGameEvents():
global gameOver, players
yield GameStarted()
while True:
for player in players:
yield TurnStart(player)
yield UnitTakeDamadge(player, 10)
yield PlayerDrawCard(player)
for event in playerControl(player):
yield event
yield TurnEnd(player)
# global variables
stack = []
currentPlayer = None
winner = None
gameEvents = makeGameEvents()
gameOver = False
players = [ Player('Player 1'), Player('Player 2')]
while not gameOver:
# safeguard for cards interrupting each other
if len(stack) > 9000:
stack = []
print 'the stack is too large. moving on to the next event'
if not stack:
stack = [ gameEvents.next() ]
# stack now has at least one element
top = stack[-1]
# print 'processing event: ' + str(top)
imminent(top) # this may modify the stack
if stack and stack[-1] == top: # nothing else wants to precede us
stack.pop()
top.apply() # this may modify gameOver
print str(winner) + ' wins!'
| gpl-3.0 | 6,443,879,362,117,278,000 | 22.919192 | 85 | 0.614231 | false | 3.797915 | false | false | false |
andrewjrobinson/FreeCAD_sf_master | src/Mod/Draft/DraftTrackers.py | 1 | 34739 | #***************************************************************************
#* *
#* Copyright (c) 2011 *
#* Yorik van Havre <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="FreeCAD Draft Trackers"
__author__ = "Yorik van Havre"
__url__ = "http://www.freecadweb.org"
import FreeCAD,FreeCADGui,math,Draft, DraftVecUtils
from FreeCAD import Vector
from pivy import coin
class Tracker:
"A generic Draft Tracker, to be used by other specific trackers"
def __init__(self,dotted=False,scolor=None,swidth=None,children=[],ontop=False):
global Part, DraftGeomUtils
import Part, DraftGeomUtils
self.ontop = ontop
color = coin.SoBaseColor()
color.rgb = scolor or FreeCADGui.draftToolBar.getDefaultColor("ui")
drawstyle = coin.SoDrawStyle()
if swidth:
drawstyle.lineWidth = swidth
if dotted:
drawstyle.style = coin.SoDrawStyle.LINES
drawstyle.lineWeight = 3
drawstyle.linePattern = 0x0f0f #0xaa
node = coin.SoSeparator()
for c in [drawstyle, color] + children:
node.addChild(c)
self.switch = coin.SoSwitch() # this is the on/off switch
self.switch.addChild(node)
self.switch.whichChild = -1
self.Visible = False
from DraftGui import todo
todo.delay(self._insertSwitch, self.switch)
def finalize(self):
from DraftGui import todo
todo.delay(self._removeSwitch, self.switch)
self.switch = None
def _insertSwitch(self, switch):
'''insert self.switch into the scene graph. Must not be called
from an event handler (or other scene graph traversal).'''
sg=Draft.get3DView().getSceneGraph()
if self.ontop:
sg.insertChild(switch,0)
else:
sg.addChild(switch)
def _removeSwitch(self, switch):
'''remove self.switch from the scene graph. As with _insertSwitch,
must not be called during scene graph traversal).'''
sg=Draft.get3DView().getSceneGraph()
sg.removeChild(switch)
def on(self):
self.switch.whichChild = 0
self.Visible = True
def off(self):
self.switch.whichChild = -1
self.Visible = False
def lowerTracker(self):
'''lowers the tracker to the bottom of the scenegraph, so
it doesn't obscure the other objects'''
if self.switch:
sg=Draft.get3DView().getSceneGraph()
sg.removeChild(self.switch)
sg.addChild(self.switch)
def raiseTracker(self):
'''raises the tracker to the top of the scenegraph, so
it obscures the other objects'''
if self.switch:
sg=Draft.get3DView().getSceneGraph()
sg.removeChild(self.switch)
sg.insertChild(self.switch,0)
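# The subclasses below all follow the same pattern: build the coin nodes and
# hand them to Tracker.__init__. A minimal (hypothetical, unused) tracker
# would look like:
#
#     class pointTracker(Tracker):
#         "example tracker showing a single point"
#         def __init__(self):
#             self.coords = coin.SoCoordinate3()
#             self.coords.point.setValue((0,0,0))
#             Tracker.__init__(self,children=[self.coords,coin.SoPointSet()])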
class snapTracker(Tracker):
"A Snap Mark tracker, used by tools that support snapping"
def __init__(self):
color = coin.SoBaseColor()
color.rgb = FreeCADGui.draftToolBar.getDefaultColor("snap")
self.marker = coin.SoMarkerSet() # this is the marker symbol
self.marker.markerIndex = coin.SoMarkerSet.CIRCLE_FILLED_9_9
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValue((0,0,0))
node = coin.SoAnnotation()
node.addChild(self.coords)
node.addChild(color)
node.addChild(self.marker)
Tracker.__init__(self,children=[node])
def setMarker(self,style):
if (style == "square"):
self.marker.markerIndex = coin.SoMarkerSet.DIAMOND_FILLED_9_9
elif (style == "circle"):
self.marker.markerIndex = coin.SoMarkerSet.CIRCLE_LINE_9_9
elif (style == "quad"):
self.marker.markerIndex = coin.SoMarkerSet.SQUARE_FILLED_9_9
elif (style == "empty"):
self.marker.markerIndex = coin.SoMarkerSet.SQUARE_LINE_9_9
else:
self.marker.markerIndex = coin.SoMarkerSet.CIRCLE_FILLED_9_9
def setCoords(self,point):
self.coords.point.setValue((point.x,point.y,point.z))
class lineTracker(Tracker):
"A Line tracker, used by the tools that need to draw temporary lines"
def __init__(self,dotted=False,scolor=None,swidth=None):
line = coin.SoLineSet()
line.numVertices.setValue(2)
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValues(0,2,[[0,0,0],[1,0,0]])
Tracker.__init__(self,dotted,scolor,swidth,[self.coords,line])
def p1(self,point=None):
"sets or gets the first point of the line"
if point:
self.coords.point.set1Value(0,point.x,point.y,point.z)
else:
return Vector(self.coords.point.getValues()[0].getValue())
def p2(self,point=None):
"sets or gets the second point of the line"
if point:
self.coords.point.set1Value(1,point.x,point.y,point.z)
else:
return Vector(self.coords.point.getValues()[-1].getValue())
def getLength(self):
"returns the length of the line"
p1 = Vector(self.coords.point.getValues()[0].getValue())
p2 = Vector(self.coords.point.getValues()[-1].getValue())
return (p2.sub(p1)).Length
class rectangleTracker(Tracker):
"A Rectangle tracker, used by the rectangle tool"
def __init__(self,dotted=False,scolor=None,swidth=None,face=False):
self.origin = Vector(0,0,0)
line = coin.SoLineSet()
line.numVertices.setValue(5)
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValues(0,50,[[0,0,0],[2,0,0],[2,2,0],[0,2,0],[0,0,0]])
if face:
m1 = coin.SoMaterial()
m1.transparency.setValue(0.5)
m1.diffuseColor.setValue([0.5,0.5,1.0])
f = coin.SoIndexedFaceSet()
f.coordIndex.setValues([0,1,2,3])
Tracker.__init__(self,dotted,scolor,swidth,[self.coords,line,m1,f])
else:
Tracker.__init__(self,dotted,scolor,swidth,[self.coords,line])
self.u = FreeCAD.DraftWorkingPlane.u
self.v = FreeCAD.DraftWorkingPlane.v
def setorigin(self,point):
"sets the base point of the rectangle"
self.coords.point.set1Value(0,point.x,point.y,point.z)
self.coords.point.set1Value(4,point.x,point.y,point.z)
self.origin = point
def update(self,point):
"sets the opposite (diagonal) point of the rectangle"
diagonal = point.sub(self.origin)
inpoint1 = self.origin.add(DraftVecUtils.project(diagonal,self.v))
inpoint2 = self.origin.add(DraftVecUtils.project(diagonal,self.u))
self.coords.point.set1Value(1,inpoint1.x,inpoint1.y,inpoint1.z)
self.coords.point.set1Value(2,point.x,point.y,point.z)
self.coords.point.set1Value(3,inpoint2.x,inpoint2.y,inpoint2.z)
def setPlane(self,u,v=None):
'''sets given (u,v) vectors as working plane. You can give only u
and v will be deduced automatically given current workplane'''
self.u = u
if v:
self.v = v
else:
norm = FreeCAD.DraftWorkingPlane.u.cross(FreeCAD.DraftWorkingPlane.v)
self.v = self.u.cross(norm)
def p1(self,point=None):
"sets or gets the base point of the rectangle"
if point:
self.setorigin(point)
else:
return Vector(self.coords.point.getValues()[0].getValue())
def p2(self):
"gets the second point (on u axis) of the rectangle"
return Vector(self.coords.point.getValues()[3].getValue())
def p3(self,point=None):
"sets or gets the opposite (diagonal) point of the rectangle"
if point:
self.update(point)
else:
return Vector(self.coords.point.getValues()[2].getValue())
def p4(self):
"gets the fourth point (on v axis) of the rectangle"
return Vector(self.coords.point.getValues()[1].getValue())
def getSize(self):
"returns (length,width) of the rectangle"
p1 = Vector(self.coords.point.getValues()[0].getValue())
p2 = Vector(self.coords.point.getValues()[2].getValue())
diag = p2.sub(p1)
return ((DraftVecUtils.project(diag,self.u)).Length,(DraftVecUtils.project(diag,self.v)).Length)
def getNormal(self):
"returns the normal of the rectangle"
return (self.u.cross(self.v)).normalize()
class dimTracker(Tracker):
"A Dimension tracker, used by the dimension tool"
def __init__(self,dotted=False,scolor=None,swidth=None):
line = coin.SoLineSet()
line.numVertices.setValue(4)
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValues(0,4,[[0,0,0],[0,0,0],[0,0,0],[0,0,0]])
Tracker.__init__(self,dotted,scolor,swidth,[self.coords,line])
self.p1 = self.p2 = self.p3 = None
def update(self,pts):
if not pts:
return
elif len(pts) == 1:
self.p3 = pts[0]
else:
self.p1 = pts[0]
self.p2 = pts[1]
if len(pts) > 2:
self.p3 = pts[2]
self.calc()
def calc(self):
import Part
if (self.p1 != None) and (self.p2 != None):
points = [DraftVecUtils.tup(self.p1,True),DraftVecUtils.tup(self.p2,True),\
DraftVecUtils.tup(self.p1,True),DraftVecUtils.tup(self.p2,True)]
if self.p3 != None:
p1 = self.p1
p4 = self.p2
if DraftVecUtils.equals(p1,p4):
proj = None
else:
base = Part.Line(p1,p4).toShape()
proj = DraftGeomUtils.findDistance(self.p3,base)
if not proj:
p2 = p1
p3 = p4
else:
p2 = p1.add(proj.negative())
p3 = p4.add(proj.negative())
points = [DraftVecUtils.tup(p1),DraftVecUtils.tup(p2),DraftVecUtils.tup(p3),DraftVecUtils.tup(p4)]
self.coords.point.setValues(0,4,points)
class bsplineTracker(Tracker):
"A bspline tracker"
def __init__(self,dotted=False,scolor=None,swidth=None,points = []):
self.bspline = None
self.points = points
self.trans = coin.SoTransform()
self.sep = coin.SoSeparator()
self.recompute()
Tracker.__init__(self,dotted,scolor,swidth,[self.trans,self.sep])
def update(self, points):
self.points = points
self.recompute()
def recompute(self):
if (len(self.points) >= 2):
if self.bspline: self.sep.removeChild(self.bspline)
self.bspline = None
c = Part.BSplineCurve()
# DNC: allows to close the curve by placing ends close to each other
if ( len(self.points) >= 3 ) and ( (self.points[0] - self.points[-1]).Length < Draft.tolerance() ):
# YVH: Added a try to bypass some hazardous situations
try:
c.interpolate(self.points[:-1], True)
except:
pass
elif self.points:
try:
c.interpolate(self.points, False)
except:
pass
c = c.toShape()
buf=c.writeInventor(2,0.01)
#fp=open("spline.iv","w")
#fp.write(buf)
#fp.close()
try:
ivin = coin.SoInput()
ivin.setBuffer(buf)
ivob = coin.SoDB.readAll(ivin)
except:
# workaround for pivy SoInput.setBuffer() bug
import re
buf = buf.replace("\n","")
pts = re.findall("point \[(.*?)\]",buf)[0]
pts = pts.split(",")
pc = []
for p in pts:
v = p.strip().split()
pc.append([float(v[0]),float(v[1]),float(v[2])])
coords = coin.SoCoordinate3()
coords.point.setValues(0,len(pc),pc)
line = coin.SoLineSet()
line.numVertices.setValue(-1)
self.bspline = coin.SoSeparator()
self.bspline.addChild(coords)
self.bspline.addChild(line)
self.sep.addChild(self.bspline)
else:
if ivob and ivob.getNumChildren() > 1:
self.bspline = ivob.getChild(1).getChild(0)
self.bspline.removeChild(self.bspline.getChild(0))
self.bspline.removeChild(self.bspline.getChild(0))
self.sep.addChild(self.bspline)
else:
FreeCAD.Console.PrintWarning("bsplineTracker.recompute() failed to read-in Inventor string\n")
class arcTracker(Tracker):
"An arc tracker"
def __init__(self,dotted=False,scolor=None,swidth=None,start=0,end=math.pi*2):
self.circle = None
self.startangle = math.degrees(start)
self.endangle = math.degrees(end)
self.trans = coin.SoTransform()
self.trans.translation.setValue([0,0,0])
self.sep = coin.SoSeparator()
self.recompute()
Tracker.__init__(self,dotted,scolor,swidth,[self.trans, self.sep])
def setCenter(self,cen):
"sets the center point"
self.trans.translation.setValue([cen.x,cen.y,cen.z])
def setRadius(self,rad):
"sets the radius"
self.trans.scaleFactor.setValue([rad,rad,rad])
def getRadius(self):
"returns the current radius"
return self.trans.scaleFactor.getValue()[0]
def setStartAngle(self,ang):
"sets the start angle"
self.startangle = math.degrees(ang)
self.recompute()
def setEndAngle(self,ang):
"sets the end angle"
self.endangle = math.degrees(ang)
self.recompute()
def getAngle(self,pt):
"returns the angle of a given vector"
c = self.trans.translation.getValue()
center = Vector(c[0],c[1],c[2])
base = FreeCAD.DraftWorkingPlane.u
rad = pt.sub(center)
return(DraftVecUtils.angle(rad,base,FreeCAD.DraftWorkingPlane.axis))
def getAngles(self):
"returns the start and end angles"
return(self.startangle,self.endangle)
def setStartPoint(self,pt):
"sets the start angle from a point"
self.setStartAngle(-self.getAngle(pt))
def setEndPoint(self,pt):
"sets the end angle from a point"
self.setEndAngle(self.getAngle(pt))
def setApertureAngle(self,ang):
"sets the end angle by giving the aperture angle"
ap = math.degrees(ang)
self.endangle = self.startangle + ap
self.recompute()
def recompute(self):
import Part,re
if self.circle: self.sep.removeChild(self.circle)
self.circle = None
if self.endangle < self.startangle:
c = Part.makeCircle(1,Vector(0,0,0),FreeCAD.DraftWorkingPlane.axis,self.endangle,self.startangle)
else:
c = Part.makeCircle(1,Vector(0,0,0),FreeCAD.DraftWorkingPlane.axis,self.startangle,self.endangle)
buf=c.writeInventor(2,0.01)
try:
ivin = coin.SoInput()
ivin.setBuffer(buf)
ivob = coin.SoDB.readAll(ivin)
except:
# workaround for pivy SoInput.setBuffer() bug
buf = buf.replace("\n","")
pts = re.findall("point \[(.*?)\]",buf)[0]
pts = pts.split(",")
pc = []
for p in pts:
v = p.strip().split()
pc.append([float(v[0]),float(v[1]),float(v[2])])
coords = coin.SoCoordinate3()
coords.point.setValues(0,len(pc),pc)
line = coin.SoLineSet()
line.numVertices.setValue(-1)
self.circle = coin.SoSeparator()
self.circle.addChild(coords)
self.circle.addChild(line)
self.sep.addChild(self.circle)
else:
if ivob and ivob.getNumChildren() > 1:
self.circle = ivob.getChild(1).getChild(0)
self.circle.removeChild(self.circle.getChild(0))
self.circle.removeChild(self.circle.getChild(0))
self.sep.addChild(self.circle)
else:
FreeCAD.Console.PrintWarning("arcTracker.recompute() failed to read-in Inventor string\n")
class ghostTracker(Tracker):
    '''A Ghost tracker that allows copying whole object representations.
You can pass it an object or a list of objects, or a shape.'''
def __init__(self,sel):
self.trans = coin.SoTransform()
self.trans.translation.setValue([0,0,0])
self.children = [self.trans]
rootsep = coin.SoSeparator()
if not isinstance(sel,list):
sel = [sel]
for obj in sel:
rootsep.addChild(self.getNode(obj))
self.children.append(rootsep)
Tracker.__init__(self,children=self.children)
def update(self,obj):
"recreates the ghost from a new object"
obj.ViewObject.show()
self.finalize()
        self.sep = self.getNode(obj)
Tracker.__init__(self,children=[self.sep])
self.on()
obj.ViewObject.hide()
def move(self,delta):
"moves the ghost to a given position, relative from its start position"
self.trans.translation.setValue([delta.x,delta.y,delta.z])
def rotate(self,axis,angle):
"rotates the ghost of a given angle"
self.trans.rotation.setValue(coin.SbVec3f(DraftVecUtils.tup(axis)),angle)
def center(self,point):
"sets the rotation/scale center of the ghost"
self.trans.center.setValue(point.x,point.y,point.z)
def scale(self,delta):
"scales the ghost by the given factor"
self.trans.scaleFactor.setValue([delta.x,delta.y,delta.z])
def getNode(self,obj):
"returns a coin node representing the given object"
if isinstance(obj,Part.Shape):
return self.getNodeLight(obj)
elif obj.isDerivedFrom("Part::Feature"):
return self.getNodeFull(obj)
else:
return self.getNodeFull(obj)
    def getNodeFull(self,obj):
"gets a coin node which is a full copy of the current representation"
sep = coin.SoSeparator()
try:
sep.addChild(obj.ViewObject.RootNode.copy())
except:
pass
return sep
def getNodeLight(self,shape):
"extract a lighter version directly from a shape"
# very error-prone, will be obsoleted ASAP
sep = coin.SoSeparator()
try:
inputstr = coin.SoInput()
inputstr.setBuffer(shape.writeInventor())
coinobj = coin.SoDB.readAll(inputstr)
# only add wireframe or full node?
sep.addChild(coinobj.getChildren()[1])
# sep.addChild(coinobj)
except:
print "Error retrieving coin node"
return sep
class editTracker(Tracker):
"A node edit tracker"
def __init__(self,pos=Vector(0,0,0),name="None",idx=0,objcol=None):
color = coin.SoBaseColor()
if objcol:
color.rgb = objcol[:3]
else:
color.rgb = FreeCADGui.draftToolBar.getDefaultColor("snap")
self.marker = coin.SoMarkerSet() # this is the marker symbol
self.marker.markerIndex = coin.SoMarkerSet.SQUARE_FILLED_9_9
self.coords = coin.SoCoordinate3() # this is the coordinate
self.coords.point.setValue((pos.x,pos.y,pos.z))
selnode = coin.SoType.fromName("SoFCSelection").createInstance()
selnode.documentName.setValue(FreeCAD.ActiveDocument.Name)
selnode.objectName.setValue(name)
selnode.subElementName.setValue("EditNode"+str(idx))
node = coin.SoAnnotation()
selnode.addChild(self.coords)
selnode.addChild(color)
selnode.addChild(self.marker)
node.addChild(selnode)
Tracker.__init__(self,children=[node],ontop=True)
self.on()
def set(self,pos):
self.coords.point.setValue((pos.x,pos.y,pos.z))
def get(self):
p = self.coords.point.getValues()[0]
return Vector(p[0],p[1],p[2])
def move(self,delta):
self.set(self.get().add(delta))
class PlaneTracker(Tracker):
"A working plane tracker"
def __init__(self):
# getting screen distance
p1 = Draft.get3DView().getPoint((100,100))
p2 = Draft.get3DView().getPoint((110,100))
bl = (p2.sub(p1)).Length * (Draft.getParam("snapRange",5)/2)
pick = coin.SoPickStyle()
pick.style.setValue(coin.SoPickStyle.UNPICKABLE)
self.trans = coin.SoTransform()
self.trans.translation.setValue([0,0,0])
m1 = coin.SoMaterial()
m1.transparency.setValue(0.8)
m1.diffuseColor.setValue([0.4,0.4,0.6])
c1 = coin.SoCoordinate3()
c1.point.setValues([[-bl,-bl,0],[bl,-bl,0],[bl,bl,0],[-bl,bl,0]])
f = coin.SoIndexedFaceSet()
f.coordIndex.setValues([0,1,2,3])
m2 = coin.SoMaterial()
m2.transparency.setValue(0.7)
m2.diffuseColor.setValue([0.2,0.2,0.3])
c2 = coin.SoCoordinate3()
c2.point.setValues([[0,bl,0],[0,0,0],[bl,0,0],[-.05*bl,.95*bl,0],[0,bl,0],
[.05*bl,.95*bl,0],[.95*bl,.05*bl,0],[bl,0,0],[.95*bl,-.05*bl,0]])
l = coin.SoLineSet()
l.numVertices.setValues([3,3,3])
s = coin.SoSeparator()
s.addChild(pick)
s.addChild(self.trans)
s.addChild(m1)
s.addChild(c1)
s.addChild(f)
s.addChild(m2)
s.addChild(c2)
s.addChild(l)
Tracker.__init__(self,children=[s])
def set(self,pos=None):
if pos:
Q = FreeCAD.DraftWorkingPlane.getRotation().Rotation.Q
else:
plm = FreeCAD.DraftWorkingPlane.getPlacement()
Q = plm.Rotation.Q
pos = plm.Base
self.trans.translation.setValue([pos.x,pos.y,pos.z])
self.trans.rotation.setValue([Q[0],Q[1],Q[2],Q[3]])
self.on()
class wireTracker(Tracker):
"A wire tracker"
def __init__(self,wire):
self.line = coin.SoLineSet()
self.closed = DraftGeomUtils.isReallyClosed(wire)
if self.closed:
self.line.numVertices.setValue(len(wire.Vertexes)+1)
else:
self.line.numVertices.setValue(len(wire.Vertexes))
self.coords = coin.SoCoordinate3()
self.update(wire)
Tracker.__init__(self,children=[self.coords,self.line])
def update(self,wire,forceclosed=False):
if wire:
if self.closed or forceclosed:
self.line.numVertices.setValue(len(wire.Vertexes)+1)
else:
self.line.numVertices.setValue(len(wire.Vertexes))
for i in range(len(wire.Vertexes)):
p=wire.Vertexes[i].Point
self.coords.point.set1Value(i,[p.x,p.y,p.z])
if self.closed or forceclosed:
t = len(wire.Vertexes)
p = wire.Vertexes[0].Point
self.coords.point.set1Value(t,[p.x,p.y,p.z])
class gridTracker(Tracker):
"A grid tracker"
def __init__(self):
# self.space = 1
self.space = Draft.getParam("gridSpacing",1)
# self.mainlines = 10
self.mainlines = Draft.getParam("gridEvery",10)
self.numlines = 100
col = [0.2,0.2,0.3]
pick = coin.SoPickStyle()
pick.style.setValue(coin.SoPickStyle.UNPICKABLE)
self.trans = coin.SoTransform()
self.trans.translation.setValue([0,0,0])
bound = (self.numlines/2)*self.space
pts = []
mpts = []
apts = []
for i in range(self.numlines+1):
curr = -bound + i*self.space
z = 0
if i/float(self.mainlines) == i/self.mainlines:
if round(curr,4) == 0:
apts.extend([[-bound,curr,z],[bound,curr,z]])
apts.extend([[curr,-bound,z],[curr,bound,z]])
else:
mpts.extend([[-bound,curr,z],[bound,curr,z]])
mpts.extend([[curr,-bound,z],[curr,bound,z]])
else:
pts.extend([[-bound,curr,z],[bound,curr,z]])
pts.extend([[curr,-bound,z],[curr,bound,z]])
idx = []
midx = []
aidx = []
for p in range(0,len(pts),2):
idx.append(2)
for mp in range(0,len(mpts),2):
midx.append(2)
for ap in range(0,len(apts),2):
aidx.append(2)
mat1 = coin.SoMaterial()
mat1.transparency.setValue(0.7)
mat1.diffuseColor.setValue(col)
self.coords1 = coin.SoCoordinate3()
self.coords1.point.setValues(pts)
lines1 = coin.SoLineSet()
lines1.numVertices.setValues(idx)
mat2 = coin.SoMaterial()
mat2.transparency.setValue(0.3)
mat2.diffuseColor.setValue(col)
self.coords2 = coin.SoCoordinate3()
self.coords2.point.setValues(mpts)
lines2 = coin.SoLineSet()
lines2.numVertices.setValues(midx)
mat3 = coin.SoMaterial()
mat3.transparency.setValue(0)
mat3.diffuseColor.setValue(col)
self.coords3 = coin.SoCoordinate3()
self.coords3.point.setValues(apts)
lines3 = coin.SoLineSet()
lines3.numVertices.setValues(aidx)
s = coin.SoSeparator()
s.addChild(pick)
s.addChild(self.trans)
s.addChild(mat1)
s.addChild(self.coords1)
s.addChild(lines1)
s.addChild(mat2)
s.addChild(self.coords2)
s.addChild(lines2)
s.addChild(mat3)
s.addChild(self.coords3)
s.addChild(lines3)
Tracker.__init__(self,children=[s])
self.update()
def update(self):
bound = (self.numlines/2)*self.space
pts = []
mpts = []
for i in range(self.numlines+1):
curr = -bound + i*self.space
if i/float(self.mainlines) == i/self.mainlines:
mpts.extend([[-bound,curr,0],[bound,curr,0]])
mpts.extend([[curr,-bound,0],[curr,bound,0]])
else:
pts.extend([[-bound,curr,0],[bound,curr,0]])
pts.extend([[curr,-bound,0],[curr,bound,0]])
self.coords1.point.setValues(pts)
self.coords2.point.setValues(mpts)
def setSpacing(self,space):
self.space = space
self.update()
def setMainlines(self,ml):
self.mainlines = ml
self.update()
def set(self):
Q = FreeCAD.DraftWorkingPlane.getRotation().Rotation.Q
P = FreeCAD.DraftWorkingPlane.position
self.trans.rotation.setValue([Q[0],Q[1],Q[2],Q[3]])
self.trans.translation.setValue([P.x,P.y,P.z])
self.on()
def getClosestNode(self,point):
"returns the closest node from the given point"
# get the 2D coords.
# point = FreeCAD.DraftWorkingPlane.projectPoint(point)
pt = FreeCAD.DraftWorkingPlane.getLocalCoords(point)
pu = (round(pt.x/self.space,0))*self.space
pv = (round(pt.y/self.space,0))*self.space
pt = FreeCAD.DraftWorkingPlane.getGlobalCoords(Vector(pu,pv,0))
return pt
class boxTracker(Tracker):
"A box tracker, can be based on a line object"
def __init__(self,line=None,width=0.1,height=1):
self.trans = coin.SoTransform()
m = coin.SoMaterial()
m.transparency.setValue(0.8)
m.diffuseColor.setValue([0.4,0.4,0.6])
self.cube = coin.SoCube()
self.cube.height.setValue(width)
self.cube.depth.setValue(height)
self.baseline = None
if line:
self.baseline = line
self.update()
Tracker.__init__(self,children=[self.trans,m,self.cube])
def update(self,line=None,normal=None):
import WorkingPlane, DraftGeomUtils
if not normal:
normal = FreeCAD.DraftWorkingPlane.axis
if line:
if isinstance(line,list):
bp = line[0]
lvec = line[1].sub(line[0])
else:
lvec = DraftGeomUtils.vec(line.Shape.Edges[0])
bp = line.Shape.Edges[0].Vertexes[0].Point
elif self.baseline:
lvec = DraftGeomUtils.vec(self.baseline.Shape.Edges[0])
bp = self.baseline.Shape.Edges[0].Vertexes[0].Point
else:
return
right = lvec.cross(normal)
self.cube.width.setValue(lvec.Length)
p = WorkingPlane.getPlacementFromPoints([bp,bp.add(lvec),bp.add(right)])
if p:
self.trans.rotation.setValue(p.Rotation.Q)
bp = bp.add(lvec.multiply(0.5))
bp = bp.add(DraftVecUtils.scaleTo(normal,self.cube.depth.getValue()/2))
self.pos(bp)
def setRotation(self,rot):
self.trans.rotation.setValue(rot.Q)
def pos(self,p):
self.trans.translation.setValue(DraftVecUtils.tup(p))
def width(self,w=None):
if w:
self.cube.height.setValue(w)
else:
return self.cube.height.getValue()
def length(self,l=None):
if l:
self.cube.width.setValue(l)
else:
return self.cube.width.getValue()
def height(self,h=None):
if h:
self.cube.depth.setValue(h)
self.update()
else:
return self.cube.depth.getValue()
class radiusTracker(Tracker):
"A tracker that displays a transparent sphere to inicate a radius"
def __init__(self,position=FreeCAD.Vector(0,0,0),radius=1):
self.trans = coin.SoTransform()
self.trans.translation.setValue([position.x,position.y,position.z])
m = coin.SoMaterial()
m.transparency.setValue(0.9)
m.diffuseColor.setValue([0,1,0])
self.sphere = coin.SoSphere()
self.sphere.radius.setValue(radius)
self.baseline = None
Tracker.__init__(self,children=[self.trans,m,self.sphere])
def update(self,arg1,arg2=None):
if isinstance(arg1,FreeCAD.Vector):
self.trans.translation.setValue([arg1.x,arg1.y,arg1.z])
else:
self.sphere.radius.setValue(arg1)
if arg2 != None:
if isinstance(arg2,FreeCAD.Vector):
self.trans.translation.setValue([arg2.x,arg2.y,arg2.z])
else:
self.sphere.radius.setValue(arg2)
class archDimTracker(Tracker):
"A wrapper around a Sketcher dim"
def __init__(self,p1=FreeCAD.Vector(0,0,0),p2=FreeCAD.Vector(1,0,0),mode=1):
import SketcherGui
self.dimnode = coin.SoType.fromName("SoDatumLabel").createInstance()
p1node = coin.SbVec3f([p1.x,p1.y,p1.z])
p2node = coin.SbVec3f([p2.x,p2.y,p2.z])
self.dimnode.pnts.setValues([p1node,p2node])
self.dimnode.lineWidth = 1
color = FreeCADGui.draftToolBar.getDefaultColor("snap")
self.dimnode.textColor.setValue(coin.SbVec3f(color))
self.setString()
self.setMode(mode)
Tracker.__init__(self,children=[self.dimnode])
def setString(self,text=None):
"sets the dim string to the given value or auto value"
self.dimnode.param1.setValue(.5)
p1 = Vector(self.dimnode.pnts.getValues()[0].getValue())
p2 = Vector(self.dimnode.pnts.getValues()[-1].getValue())
m = self.dimnode.datumtype.getValue()
if m == 2:
self.Distance = (DraftVecUtils.project(p2.sub(p1),Vector(1,0,0))).Length
elif m == 3:
self.Distance = (DraftVecUtils.project(p2.sub(p1),Vector(0,1,0))).Length
else:
self.Distance = (p2.sub(p1)).Length
if not text:
text = Draft.getParam("dimPrecision",2)
text = "%."+str(text)+"f"
text = (text % self.Distance)
self.dimnode.string.setValue(text)
def setMode(self,mode=1):
"""sets the mode: 0 = without lines (a simple mark), 1 =
aligned (default), 2 = horizontal, 3 = vertical."""
self.dimnode.datumtype.setValue(mode)
def p1(self,point=None):
"sets or gets the first point of the dim"
if point:
self.dimnode.pnts.set1Value(0,point.x,point.y,point.z)
self.setString()
else:
return Vector(self.dimnode.pnts.getValues()[0].getValue())
def p2(self,point=None):
"sets or gets the second point of the dim"
if point:
self.dimnode.pnts.set1Value(1,point.x,point.y,point.z)
self.setString()
else:
return Vector(self.dimnode.pnts.getValues()[-1].getValue())
| lgpl-2.1 | -1,483,052,015,789,805,300 | 37.945067 | 114 | 0.567259 | false | 3.630748 | false | false | false |
CiscoSystems/os-sqe | lab/scenarios/servers_from_snapshot_scenario.py | 1 | 1179 | from lab.test_case_worker import TestCaseWorker
class ServersFromSnapshotScenario(TestCaseWorker):
ARG_MANDATORY_N_SERVERS = 'n_servers'
ARG_MANDATORY_UPTIME = 'uptime'
def check_arguments(self):
assert self.n_servers >= 1
assert self.uptime > 10
@property
def n_servers(self):
return self.args[self.ARG_MANDATORY_N_SERVERS]
@property
def uptime(self):
return self.args[self.ARG_MANDATORY_UPTIME]
def setup_worker(self):
pass
def loop_worker(self):
import time
from lab.cloud.cloud_server import CloudServer
self.log(self.STATUS_SERVER_CREATING + ' n=' + str(self.n_servers))
flavor = self.cloud.flavors[0]
image = self.cloud.images[0]
keypair = self.cloud.keypairs[0]
self.servers = CloudServer.create(how_many=self.n_servers, flavor=flavor.name, image=image.name, on_nets=[], key=keypair.name, timeout=self.timeout, cloud=self.cloud)
self.log('Waiting 30 sec to settle servers...')
time.sleep(30)
self.log(self.STATUS_SERVER_CREATED)
if str(self.uptime) != 'forever':
time.sleep(self.uptime)
| apache-2.0 | 719,308,940,948,195,300 | 30.864865 | 174 | 0.648855 | false | 3.447368 | false | false | false |
kreatorkodi/repository.torrentbr | plugin.video.youtube/resources/lib/youtube_plugin/kodion/impl/xbmc/xbmc_context_ui.py | 1 | 5105 | __author__ = 'bromix'
from six import string_types
import xbmc
import xbmcgui
from ..abstract_context_ui import AbstractContextUI
from .xbmc_progress_dialog import XbmcProgressDialog
from .xbmc_progress_dialog_bg import XbmcProgressDialogBG
from ... import constants
from ... import utils
class XbmcContextUI(AbstractContextUI):
def __init__(self, xbmc_addon, context):
AbstractContextUI.__init__(self)
self._xbmc_addon = xbmc_addon
self._context = context
self._view_mode = None
def create_progress_dialog(self, heading, text=None, background=False):
if background and self._context.get_system_version().get_version() > (12, 3):
return XbmcProgressDialogBG(heading, text)
return XbmcProgressDialog(heading, text)
def set_view_mode(self, view_mode):
if isinstance(view_mode, string_types):
view_mode = self._context.get_settings().get_int(constants.setting.VIEW_X % view_mode, 50)
self._view_mode = view_mode
def get_view_mode(self):
if self._view_mode is not None:
return self._view_mode
return self._context.get_settings().get_int(constants.setting.VIEW_DEFAULT, 50)
def get_skin_id(self):
return xbmc.getSkinDir()
def on_keyboard_input(self, title, default='', hidden=False):
# fallback for Frodo
if self._context.get_system_version().get_version() <= (12, 3):
keyboard = xbmc.Keyboard(default, title, hidden)
keyboard.doModal()
if keyboard.isConfirmed() and keyboard.getText():
text = utils.to_unicode(keyboard.getText())
return True, text
else:
return False, u''
# Starting with Gotham (13.X > ...)
dialog = xbmcgui.Dialog()
result = dialog.input(title, utils.to_unicode(default), type=xbmcgui.INPUT_ALPHANUM)
if result:
text = utils.to_unicode(result)
return True, text
return False, u''
def on_numeric_input(self, title, default=''):
dialog = xbmcgui.Dialog()
result = dialog.input(title, str(default), type=xbmcgui.INPUT_NUMERIC)
if result:
return True, int(result)
return False, None
def on_yes_no_input(self, title, text, nolabel='', yeslabel=''):
dialog = xbmcgui.Dialog()
return dialog.yesno(title, text, nolabel=nolabel, yeslabel=yeslabel)
def on_ok(self, title, text):
dialog = xbmcgui.Dialog()
return dialog.ok(title, text)
def on_remove_content(self, content_name):
text = self._context.localize(constants.localize.REMOVE_CONTENT) % utils.to_unicode(content_name)
return self.on_yes_no_input(self._context.localize(constants.localize.CONFIRM_REMOVE), text)
def on_delete_content(self, content_name):
text = self._context.localize(constants.localize.DELETE_CONTENT) % utils.to_unicode(content_name)
return self.on_yes_no_input(self._context.localize(constants.localize.CONFIRM_DELETE), text)
def on_select(self, title, items=[]):
_dict = {}
_items = []
i = 0
for item in items:
if isinstance(item, tuple):
_dict[i] = item[1]
_items.append(item[0])
else:
_dict[i] = i
_items.append(item)
i += 1
dialog = xbmcgui.Dialog()
result = dialog.select(title, _items)
return _dict.get(result, -1)
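    # Illustrative call (labels and values are made up): given
    #   on_select('Pick one', [('Label A', 'value-a'), 'Plain item'])
    # choosing the first entry returns 'value-a', choosing the second returns
    # its index 1, and cancelling the dialog returns -1.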
def show_notification(self, message, header='', image_uri='', time_milliseconds=5000):
_header = header
if not _header:
_header = self._context.get_name()
_header = utils.to_utf8(_header)
_image = image_uri
if not _image:
_image = self._context.get_icon()
_message = utils.to_utf8(message)
try:
_message = _message.replace(',', ' ')
_message = _message.replace('\n', ' ')
except TypeError:
_message = _message.replace(b',', b' ')
_message = _message.replace(b'\n', b' ')
_message = utils.to_unicode(_message)
_header = utils.to_unicode(_header)
xbmc.executebuiltin("Notification(%s, %s, %d, %s)" % (_header, _message, time_milliseconds, _image))
def open_settings(self):
self._xbmc_addon.openSettings()
@staticmethod
def refresh_container():
xbmc.executebuiltin("Container.Refresh")
@staticmethod
def set_home_window_property(property_id, value):
property_id = 'plugin.video.youtube-' + property_id
xbmcgui.Window(10000).setProperty(property_id, value)
@staticmethod
def get_home_window_property(property_id):
property_id = 'plugin.video.youtube-' + property_id
return xbmcgui.Window(10000).getProperty(property_id) or None
@staticmethod
def clear_home_window_property(property_id):
property_id = 'plugin.video.youtube-' + property_id
xbmcgui.Window(10000).clearProperty(property_id)
| gpl-2.0 | 5,859,240,278,206,949,000 | 33.261745 | 108 | 0.610382 | false | 3.849925 | false | false | false |
fernan9/LANGEBIO-Internship | BLAST_rbh_run.py | 1 | 3987 | #! /usr/bin/env python
""" DESCRIPTION
"""
import glob, sys, csv
from tabulate import tabulate
from Bio.Blast.Applications import NcbiblastpCommandline
"""--- FUNCTIONS ---"""
def carga_csv(file_name):
""" creates a list of lists with a csv file """
tabla = []
cr = csv.reader(open(file_name,"rb"))
for row in cr:
tabla.append(row)
return tabla
def crea_comparacion(tabla_ref, estructura = 'star', comparacion = 'bi'):
""" creates comparisons lists (code) depending on arguments """
lista = []
tabla = list(tabla_ref)
if estructura == 'star':
nodo = tabla.pop()
for organismo in tabla:
lista.append([nodo[1],organismo[1]])
if comparacion == 'bi':
lista.append([organismo[1], nodo[1]])
else:
comps = estructura.split(',')
for comp in comps:
pareja = comp.split('-')
query = tabla[int(pareja[0])][1]
db = tabla[int(pareja[1])][1]
lista.append([query, db])
if comparacion == 'bi':
lista.append([db, query])
return lista
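# Example (hypothetical data): with three organisms indexed 0-2 in the CSV,
# crea_comparacion(tabla, estructura='0-1,0-2', comparacion='uni') returns
# [[code0, code1], [code0, code2]] (query code, database code), while the
# default 'bi' direction also appends the reversed pairs.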
def imprime_comparacion(listas):
""" prints the comparison as a readable format"""
print 'COMPARISONS\n-----------\n'
for lista in listas:
print lista[0] + ' --> ' + lista[1]
print '\n'
def imprime_referencia(claves):
""" prints the comparison as a readable format"""
print 'REFERENCE\n---------'
n = 0
for key, val in claves.items():
print n, '. ', key, '\t', val
n=n+1
print '\n'
def crea_diccionario(tabla):
""" creates a dictionary of code:organism"""
diccionario={}
for row in tabla:
diccionario[row[1]]=row[0]
return diccionario
"""--- PROGRAM BODY ---"""
print '----------------\nBLAST EVALUATION\n----------------'
blast_eval = 1e-05
comparison_list = []
# load csv file
nombre_csv = raw_input('Please enter the CSV file name: ')
organismos = carga_csv(nombre_csv)
referencia = crea_diccionario(organismos)
comparison_list = crea_comparacion(organismos)
# present csv data
print '\nCSV data\n--------'
print tabulate(organismos, headers=["Organism","Code", "Genome File", "Database folder"]) + '\n'
# present options: blast parameters, comparison parameters, run
while 1:
imprime_referencia(referencia)
imprime_comparacion(comparison_list)
print 'CHOOSE AN OPTION\n----------------\n1) Comparisons\n2) Run\n3) Quit'
user_in = raw_input('Option: ')
if user_in == '1':
imprime_referencia(referencia)
print ('Please enter the comparisons using the organism index.\n' +
'Format: "-" between indices; "," between comparisons; no spaces.\n')
nueva_comparacion = raw_input('Comparisons: ')
print 'Choose "bi" for bidirectional or "uni" for unidirectional; no quotation marks.'
tipo_comparacion = raw_input('Direction: ')
comparison_list = crea_comparacion(organismos, nueva_comparacion, tipo_comparacion)
elif user_in == '2':
blast_eval = raw_input('\nPlease write the desired E value for BLAST runs; 1e-5 suggested.\nE_value: ')
        print '\nBLAST+ commands to be run...\n'
break
elif user_in == '3': quit()
else: print ('Incorrect option, try again.\n')
# create commands for comparisons
comandos = []
for pair in comparison_list:
nombre = referencia[pair[0]].split()
comandos.append([(nombre[0]+'_'+nombre[1]+'.faa'), ('db_'+pair[1]+'/db_'+pair[1]), (pair[0]+'_'+pair[1]+'.xml')])
print tabulate(comandos, headers=["Genome file","Database", "Product file"]) + '\n'
raw_input('Press ENTER to continue')
# run commands, inform data created
for comando in comandos:
blastp_cline = NcbiblastpCommandline(query=comando[0], db=comando[1], evalue=blast_eval ,outfmt=5, out=comando[2])
    print 'File ' + comando[2] + ' is currently in progress...'
stdout, stderr = blastp_cline()
print 'WORK COMPLETED\n--------------'
| gpl-2.0 | -2,226,153,138,521,786,600 | 34.598214 | 118 | 0.610986 | false | 3.236201 | false | false | false |
brayden2544/Mystuff-final | account/views/reset_password.py | 1 | 1223 | from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from account import models as amod
from . import templater
import datetime
def process_request(request):
    '''Validates the emailed password-reset link and lets the user set a new password'''
user = amod.User.objects.get(email=request.urlparams[0])
key = request.urlparams[1]
now = datetime.datetime.utcnow()
exp_date = user.password_reset_date.replace(tzinfo=None)
if key != user.password_reset_key or now > exp_date:
return HttpResponseRedirect('/account/password_reset_invalid')
form = PasswordForm()
if request.method == 'POST':
form = PasswordForm(request.POST)
if form.is_valid():
password = form.cleaned_data['password']
print(user)
print(password)
user.set_password(password)
user.save()
return HttpResponseRedirect('/account/password_reset/')
tvars = {
'form': form,
}
return templater.render_to_response(request, 'reset_password.html', tvars)
class PasswordForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput)
| apache-2.0 | -6,399,470,654,394,678,000 | 30.358974 | 77 | 0.670482 | false | 4.063123 | false | false | false |
sam-roth/Keypad | keypad/qt/qt_util.py | 1 | 2494 |
from PyQt4.Qt import *
from collections import namedtuple
import contextlib
from ..core.key import SimpleKeySequence
from ..core.color import Color
from ..abstract.textview import KeyEvent
from ..core.responder import Responder
from .. import api
import abc
import math
def set_tab_order(parent, widgets):
for first, second in zip(widgets, widgets[1:]):
parent.setTabOrder(first, second)
def qsizef_ceil(size):
'''
Converts a QSizeF to a QSize, rounding up.
    :type size: PyQt4.Qt.QSizeF
'''
return QSize(
math.ceil(size.width()),
math.ceil(size.height())
)
class CloseEvent(object):
def __init__(self):
self.is_intercepted = False
def intercept(self):
self.is_intercepted = True
def marshal_key_event(event):
return KeyEvent(
key=SimpleKeySequence(
modifiers=event.modifiers() & ~Qt.KeypadModifier,
keycode=event.key()
),
text=event.text().replace('\r', '\n')
)
def to_q_key_sequence(key_seq):
return QKeySequence(key_seq.keycode | key_seq.modifiers)
def to_q_color(color):
if isinstance(color, QColor):
return color
r,g,b,a = Color.from_hex(color)
return QColor.fromRgb(r,g,b,a)
@contextlib.contextmanager
def ending(painter):
try:
yield painter
finally:
painter.end()
@contextlib.contextmanager
def restoring(painter):
try:
painter.save()
yield painter
finally:
painter.restore()
def qcolor_marshaller(attrname):
def fget(self):
        # QColor::name() only gives an HTML-style hex string like #AABBCC (no
        # alpha), so rebuild the Color from the QColor's RGBA components.
color = getattr(self, attrname)
return Color.from_rgb(color.red(),
color.green(),
color.blue(),
color.alpha())
def fset(self, value):
setattr(self, attrname, to_q_color(value))
return property(fget, fset)
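# Typical (hypothetical) use: expose a QColor attribute as a keypad Color
# property on some wrapper class:
#
#     class Brush(object):
#         def __init__(self):
#             self._qcolor = QColor(255, 0, 0)
#         color = qcolor_marshaller('_qcolor')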
class ABCWithQtMeta(pyqtWrapperType, abc.ABCMeta):
pass
class AutoresponderMixin:
@property
def next_responders(self):
pw = self.parentWidget()
while pw is not None and not isinstance(pw, Responder):
pw = pw.parentWidget()
if pw is not None and isinstance(pw, Responder):
return [pw] + super().next_responders
else:
return super().next_responders
class Autoresponder(AutoresponderMixin, Responder):
pass
| gpl-3.0 | -4,094,260,488,308,843,500 | 20.135593 | 71 | 0.615477 | false | 3.778788 | false | false | false |
nextoa/comb | comb/slot.py | 1 | 2027 | # -*- coding: utf-8 -*-
class Slot(object):
"""
    To use comb, you should create a python module file. We call it a *slot*.
    A legal slot must be named 'Slot' in your module file and it must contain at least these four methods:
    * `initialize`
      initialize resources, e.g. a database handle
    * `__enter__`
      get the next data to process; you can fetch one or more items.
    * `slot`
      user custom code
    * `__exit__`
      called when the slot has finished
"""
def __init__(self, combd):
"""Don't override this method unless what you're doing.
"""
self.threads_num = combd.threads_num
self.sleep = combd.sleep
self.sleep_max = combd.sleep_max
self.debug = combd.debug
self.combd = combd
self.initialize()
def initialize(self):
"""Hook for subclass initialization.
        This block is executed before the threads are initialized.
Example::
class UserSlot(Slot):
def initialize(self):
self.threads_num = 10
def slot(self, result):
...
"""
pass
def __enter__(self):
"""You **MUST** return False when no data to do.
The return value will be used in `Slot.slot`
"""
print("You should override __enter__ method by subclass")
return False
def __exit__(self, exc_type, exc_val, exc_tb):
"""When slot done, will call this method.
"""
print("You should override __exit__ method by subclass")
pass
def slot(self, msg):
"""
        Add your custom code here.
For example, look at:
* `comb.demo.list`
* `comb.demo.mongo`
* `comb.demo.redis`
"""
pass
# @staticmethod
# def options():
# """
    #     replace this method if you want to add user options
# :return:
# """
# return ()
# pass
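# A minimal slot module might look like the sketch below (illustrative only;
# comb looks for a class literally named ``Slot`` in the user module):
#
#     from comb.slot import Slot as BaseSlot
#
#     class Slot(BaseSlot):
#         def initialize(self):
#             self.data = [1, 2, 3]
#         def __enter__(self):
#             return self.data.pop() if self.data else False
#         def slot(self, msg):
#             print(msg)
#         def __exit__(self, exc_type, exc_val, exc_tb):
#             pass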
| mit | -5,057,463,884,052,601,000 | 19.474747 | 102 | 0.51258 | false | 4.406522 | false | false | false |
4ndreas/ROSCoffeButler | src/arbotix_python/src/arbotix_python/diff_controller.py | 1 | 10710 | #!/usr/bin/env python
"""
diff_controller.py - controller for a differential drive
Copyright (c) 2010-2011 Vanadium Labs LLC. All right reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Vanadium Labs LLC nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import rospy
from math import sin,cos,pi
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Twist
from nav_msgs.msg import Odometry
from diagnostic_msgs.msg import *
from tf.broadcaster import TransformBroadcaster
from ax12 import *
from controllers import *
from struct import unpack
class DiffController(Controller):
""" Controller to handle movement & odometry feedback for a differential
drive mobile base. """
def __init__(self, device, name):
Controller.__init__(self, device, name)
self.pause = True
self.last_cmd = rospy.Time.now()
# parameters: rates and geometry
self.rate = rospy.get_param('~controllers/'+name+'/rate',10.0)
self.timeout = rospy.get_param('~controllers/'+name+'/timeout',1.0)
self.t_delta = rospy.Duration(1.0/self.rate)
self.t_next = rospy.Time.now() + self.t_delta
self.ticks_meter = float(rospy.get_param('~controllers/'+name+'/ticks_meter'))
self.base_width = float(rospy.get_param('~controllers/'+name+'/base_width'))
self.base_frame_id = rospy.get_param('~controllers/'+name+'/base_frame_id', 'base_link')
self.odom_frame_id = rospy.get_param('~controllers/'+name+'/odom_frame_id', 'odom')
# parameters: PID
self.Kp = rospy.get_param('~controllers/'+name+'/Kp', 5)
self.Kd = rospy.get_param('~controllers/'+name+'/Kd', 1)
self.Ki = rospy.get_param('~controllers/'+name+'/Ki', 0)
self.Ko = rospy.get_param('~controllers/'+name+'/Ko', 50)
# parameters: acceleration
self.accel_limit = rospy.get_param('~controllers/'+name+'/accel_limit', 0.1)
self.max_accel = int(self.accel_limit*self.ticks_meter/self.rate)
# output for joint states publisher
self.joint_names = ["base_l_wheel_joint","base_r_wheel_joint"]
self.joint_positions = [0,0]
self.joint_velocities = [0,0]
# internal data
self.v_left = 0 # current setpoint velocity
self.v_right = 0
self.v_des_left = 0 # cmd_vel setpoint
self.v_des_right = 0
self.enc_left = None # encoder readings
self.enc_right = None
self.x = 0 # position in xy plane
self.y = 0
self.th = 0
self.dx = 0 # speeds in x/rotation
self.dr = 0
self.then = rospy.Time.now() # time for determining dx/dy
# subscriptions
rospy.Subscriber("cmd_vel", Twist, self.cmdVelCb)
self.odomPub = rospy.Publisher("odom", Odometry, queue_size=5)
self.odomBroadcaster = TransformBroadcaster()
rospy.loginfo("Started DiffController ("+name+"). Geometry: " + str(self.base_width) + "m wide, " + str(self.ticks_meter) + " ticks/m.")
def startup(self):
if not self.fake:
self.setup(self.Kp,self.Kd,self.Ki,self.Ko)
def update(self):
now = rospy.Time.now()
if now > self.t_next:
elapsed = now - self.then
self.then = now
elapsed = elapsed.to_sec()
if self.fake:
x = cos(self.th)*self.dx*elapsed
y = -sin(self.th)*self.dx*elapsed
self.x += cos(self.th)*self.dx*elapsed
self.y += sin(self.th)*self.dx*elapsed
self.th += self.dr*elapsed
else:
# read encoders
try:
left, right = self.status()
except Exception as e:
rospy.logerr("Could not update encoders: " + str(e))
return
rospy.logdebug("Encoders: " + str(left) +","+ str(right))
# calculate odometry
if self.enc_left == None:
d_left = 0
d_right = 0
else:
d_left = (left - self.enc_left)/self.ticks_meter
d_right = (right - self.enc_right)/self.ticks_meter
self.enc_left = left
self.enc_right = right
d = (d_left+d_right)/2
th = (d_right-d_left)/self.base_width
self.dx = d / elapsed
self.dr = th / elapsed
if (d != 0):
x = cos(th)*d
y = -sin(th)*d
self.x = self.x + (cos(self.th)*x - sin(self.th)*y)
self.y = self.y + (sin(self.th)*x + cos(self.th)*y)
if (th != 0):
self.th = self.th + th
# publish or perish
quaternion = Quaternion()
quaternion.x = 0.0
quaternion.y = 0.0
quaternion.z = sin(self.th/2)
quaternion.w = cos(self.th/2)
self.odomBroadcaster.sendTransform(
(self.x, self.y, 0),
(quaternion.x, quaternion.y, quaternion.z, quaternion.w),
rospy.Time.now(),
self.base_frame_id,
self.odom_frame_id
)
odom = Odometry()
odom.header.stamp = now
odom.header.frame_id = self.odom_frame_id
odom.pose.pose.position.x = self.x
odom.pose.pose.position.y = self.y
odom.pose.pose.position.z = 0
odom.pose.pose.orientation = quaternion
odom.child_frame_id = self.base_frame_id
odom.twist.twist.linear.x = self.dx
odom.twist.twist.linear.y = 0
odom.twist.twist.angular.z = self.dr
self.odomPub.publish(odom)
if now > (self.last_cmd + rospy.Duration(self.timeout)):
self.v_des_left = 0
self.v_des_right = 0
# update motors
if not self.fake:
if self.v_left < self.v_des_left:
self.v_left += self.max_accel
if self.v_left > self.v_des_left:
self.v_left = self.v_des_left
else:
self.v_left -= self.max_accel
if self.v_left < self.v_des_left:
self.v_left = self.v_des_left
if self.v_right < self.v_des_right:
self.v_right += self.max_accel
if self.v_right > self.v_des_right:
self.v_right = self.v_des_right
else:
self.v_right -= self.max_accel
if self.v_right < self.v_des_right:
self.v_right = self.v_des_right
self.write(self.v_left, self.v_right)
self.t_next = now + self.t_delta
def shutdown(self):
if not self.fake:
self.write(0,0)
def cmdVelCb(self,req):
""" Handle movement requests. """
self.last_cmd = rospy.Time.now()
if self.fake:
self.dx = req.linear.x # m/s
self.dr = req.angular.z # rad/s
else:
# set motor speeds in ticks per 1/30s
self.v_des_left = int( ((req.linear.x - (req.angular.z * self.base_width/2.0)) * self.ticks_meter) / 30.0)
self.v_des_right = int( ((req.linear.x + (req.angular.z * self.base_width/2.0)) * self.ticks_meter) / 30.0)
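# Worked example (hypothetical numbers, annotation only): with
# base_width = 0.26 m, ticks_meter = 4100 and a request of
# linear.x = 0.2 m/s, angular.z = 0, each wheel must move at 0.2 m/s,
# i.e. 0.2 * 4100 = 820 ticks/s, so v_des_left = v_des_right =
# int(820 / 30) = 27 ticks per 1/30 s frame.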
def getDiagnostics(self):
""" Get a diagnostics status. """
msg = DiagnosticStatus()
msg.name = self.name
msg.level = DiagnosticStatus.OK
msg.message = "OK"
if not self.fake:
msg.values.append(KeyValue("Left", str(self.enc_left)))
msg.values.append(KeyValue("Right", str(self.enc_right)))
msg.values.append(KeyValue("dX", str(self.dx)))
msg.values.append(KeyValue("dR", str(self.dr)))
return msg
###
### Controller Specification:
###
### setup: Kp, Kd, Ki, Ko (all unsigned char)
###
### write: left_speed, right_speed (2-byte signed, ticks per frame)
###
### status: left_enc, right_enc (4-byte signed)
###
def setup(self, kp, kd, ki, ko):
success = self.device.execute(253, AX_CONTROL_SETUP, [10, kp, kd, ki, ko])
def write(self, left, right):
""" Send a closed-loop speed. Base PID loop runs at 30Hz, these values
are therefore in ticks per 1/30 second. """
left = left&0xffff
right = right&0xffff
success = self.device.execute(253, AX_CONTROL_WRITE, [10, left%256, left>>8, right%256, right>>8])
def status(self):
""" read 32-bit (signed) encoder values. """
values = self.device.execute(253, AX_CONTROL_STAT, [10])
left_values = "".join([chr(k) for k in values[0:4] ])
right_values = "".join([chr(k) for k in values[4:] ])
try:
left = unpack('=l',left_values)[0]
right = unpack('=l',right_values)[0]
return [left, right]
except Exception:
return None
| mit | 5,647,903,849,542,350,000 | 40.034483 | 144 | 0.556489 | false | 3.752628 | false | false | false |
juancarlospaco/css-html-js-minify | css_html_js_minify/js_minifier.py | 1 | 6476 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""JavaScript Minifier functions for CSS-HTML-JS-Minify."""
import re
from io import StringIO # pure-Python StringIO supports unicode.
from .css_minifier import condense_semicolons
__all__ = ('js_minify', )
def remove_commented_lines(js):
"""Force remove commented out lines from Javascript."""
result = ""
for line in js.splitlines():
line = re.sub(r"/\*.*\*/", "", line) # (/*COMMENT */)
line = re.sub(r"//.*", "", line) # (//COMMENT)
result += '\n'+line
return result
def simple_replacer_js(js):
"""Force strip simple replacements from Javascript."""
return condense_semicolons(js.replace("debugger;", ";").replace(
";}", "}").replace("; ", ";").replace(" ;", ";").rstrip("\n;"))
def js_minify_keep_comments(js):
"""Return a minified version of the Javascript string."""
ins, outs = StringIO(js), StringIO()
JavascriptMinify(ins, outs).minify()
return force_single_line_js(outs.getvalue())
def force_single_line_js(js):
"""Force Javascript to a single line, even if need to add semicolon."""
return ";".join(js.splitlines()) if len(js.splitlines()) > 1 else js
class JavascriptMinify(object):
"""Minify an input stream of Javascript, writing to an output stream."""
def __init__(self, instream=None, outstream=None):
"""Init class."""
self.ins, self.outs = instream, outstream
def minify(self, instream=None, outstream=None):
"""Minify Javascript using StringIO."""
if instream and outstream:
self.ins, self.outs = instream, outstream
write, read = self.outs.write, self.ins.read
space_strings = ("abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$\\")
starters, enders = '{[(+-', '}])+-"\''
newlinestart_strings = starters + space_strings
newlineend_strings = enders + space_strings
do_newline, do_space = False, False
doing_single_comment, doing_multi_comment = False, False
previous_before_comment, in_quote = '', ''
in_re, quote_buf = False, []
previous = read(1)
next1 = read(1)
if previous == '/':
if next1 == '/':
doing_single_comment = True
elif next1 == '*':
doing_multi_comment = True
else:
write(previous)
elif not previous:
return
elif previous >= '!':
if previous in "'\"":
in_quote = previous
write(previous)
previous_non_space = previous
else:
previous_non_space = ' '
if not next1:
return
while True:
next2 = read(1)
if not next2:
last = next1.strip()
conditional_1 = (doing_single_comment or doing_multi_comment)
if not conditional_1 and last not in ('', '/'):
write(last)
break
if doing_multi_comment:
if next1 == '*' and next2 == '/':
doing_multi_comment = False
next2 = read(1)
elif doing_single_comment:
if next1 in '\r\n':
doing_single_comment = False
while next2 in '\r\n':
next2 = read(1)
if not next2:
break
if previous_before_comment in ')}]':
do_newline = True
elif previous_before_comment in space_strings:
write('\n')
elif in_quote:
quote_buf.append(next1)
if next1 == in_quote:
numslashes = 0
for c in reversed(quote_buf[:-1]):
if c != '\\':
break
else:
numslashes += 1
if numslashes % 2 == 0:
in_quote = ''
write(''.join(quote_buf))
elif next1 in '\r\n':
conditional_2 = previous_non_space in newlineend_strings
if conditional_2 or previous_non_space > '~':
while 1:
if next2 < '!':
next2 = read(1)
if not next2:
break
else:
conditional_3 = next2 in newlinestart_strings
if conditional_3 or next2 > '~' or next2 == '/':
do_newline = True
break
elif next1 < '!' and not in_re:
conditional_4 = next2 in space_strings or next2 > '~'
conditional_5 = previous_non_space in space_strings
conditional_6 = previous_non_space > '~'
if (conditional_5 or conditional_6) and (conditional_4):
do_space = True
elif next1 == '/':
if in_re:
if previous != '\\':
in_re = False
write('/')
elif next2 == '/':
doing_single_comment = True
previous_before_comment = previous_non_space
elif next2 == '*':
doing_multi_comment = True
else:
in_re = previous_non_space in '(,=:[?!&|'
write('/')
else:
if do_space:
do_space = False
write(' ')
if do_newline:
write('\n')
do_newline = False
write(next1)
if not in_re and next1 in "'\"":
in_quote = next1
quote_buf = []
previous = next1
next1 = next2
if previous >= '!':
previous_non_space = previous
def js_minify(js):
"""Minify a JavaScript string."""
print("""Future JavaScript support is orphan and not supported!.
If you want to make ES6,ES7 work feel free to send pull requests.""")
js = remove_commented_lines(js)
js = js_minify_keep_comments(js)
return js.strip()
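# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; not part of the upstream
# module). The sample input is made up and the output in the trailing comment
# is approximate.
if __name__ == '__main__':  # pragma: no cover
    _sample = "// a line comment\nvar answer = 42;  /* inline */\nanswer += 1 ;"
    print(js_minify(_sample))  # roughly: var answer = 42;answer += 1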
| lgpl-3.0 | 1,878,974,442,023,061,800 | 35.178771 | 79 | 0.468345 | false | 4.566996 | false | false | false |
garibaldu/boundary-seekers | Boundary Hunter Ideas/TensorFlow/RBF-BH-Network.py | 1 | 6690 | import tensorflow as tf
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import random
import math
np.random.seed(1234)
random.seed(1234)
plt.switch_backend("TkAgg")
def plotScatter(points, color):
xs = [x[0] for x in points]
ys = [y[1] for y in points]
plt.scatter(xs, ys, c=color)
def plot_weights(weights, center, color):
plot_centroid(center)
n = np.array([weights[0] * center[0] + weights[1] * center[1],
-weights[0],
-weights[1]])
bias = -1 * n[0]/n[2]
Xcoef = -1 * n[1]/n[2]
plt.plot([-1.0, 1.0], [-1*Xcoef + bias, Xcoef + bias], '{}-'.format(color))
print("B: " + str(bias))
print("XCoef: " + str(Xcoef))
def plot_centroid(centroid):
plt.plot(centroid[0], centroid[1], markersize=10, marker='x', color='g', mew=5)
def plot_incorrect(point):
plt.plot(point[0], point[1], markersize=5, marker='x', color='r', mew=5)
def generateChevronData():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x >= y and x <= -y:
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_split_data():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if x < 25 and x > -25 :
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def generate_clumps():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, int(totalPoints/2.0)):
x = random.randint(xBounds[0], 0)
y = random.randint(yBounds[0], 0)
if -x - 30 < y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
for i in range(0, int(totalPoints/2.0)):
x = random.randint(0, xBounds[1])
y = random.randint(0, yBounds[1])
if -x + 30 > y:
points.append([x/50.0,y/50.0])
targets.append(1.0)
else:
points.append([x/50.0,y/50.0])
targets.append(0.0)
return np.array(points), np.array(targets)
def generate_rectangle_data():
xBounds = [-50, 50]
yBounds = [-50, 50]
totalPoints = 100
points = []
targets = []
for i in range(0, totalPoints):
x = random.randint(xBounds[0], xBounds[1])
y = random.randint(yBounds[0], yBounds[1])
if np.abs(x) < 15 and np.abs(y) < 15 :
points.append([x/50.0,y/50.0])
targets.append(0.0)
else:
points.append([x/50.0,y/50.0])
targets.append(1.0)
return np.array(points), np.array(targets)
def sigmoid(phi):
return 1.0/(1.0 + tf.exp(-phi))
points, out = generate_rectangle_data()#generateChevronData()#generate_clumps()#generate_split_data()#
in_size = 2
out_size = 1
num_centroids = 1
num_outputs = 1
inputs = tf.placeholder('float64', [in_size])
targets = tf.placeholder('float64', [out_size])
centroids = tf.Variable(np.random.uniform(low=-1.0, high=1.0, size=(num_centroids, in_size)))
betas = tf.Variable(np.repeat(1.0, num_centroids))
hidden_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_centroids, in_size)))
output_weights = tf.Variable(np.random.uniform(low=-0.5, high=0.5, size=(num_outputs, num_centroids + 1)))
input_by_plane = lambda x: tf.subtract(inputs, x)
transformed_by_points = tf.map_fn(input_by_plane, centroids)
# Perform computation
prob = tf.reduce_sum(tf.multiply(transformed_by_points, hidden_weights), 1)
square_diff = lambda c: tf.reduce_sum(tf.pow(tf.subtract(inputs, c), 2.0))
g = tf.exp(-1.0 * tf.multiply(betas, tf.map_fn(square_diff, centroids)))
hidden_out = sigmoid(tf.multiply(g, prob))#tf.add(0.5 * (1 - g), tf.multiply(g, prob))
#gated = tf.multiply(g, prob)
#hidden_out = sigmoid(gated)
hidden_out_prime = tf.concat([[1.0], hidden_out], 0)
output = sigmoid(tf.matmul(tf.transpose(tf.expand_dims(hidden_out_prime, 1)), tf.transpose(output_weights)))
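# Shape walk-through (annotation, not in the original source):
# centroids: (num_centroids, in_size); transformed_by_points: the input minus
# each centroid, same shape; prob: (num_centroids,) projection of each shifted
# input onto its boundary weights; g: (num_centroids,) RBF gate per centroid;
# hidden_out_prime: (num_centroids + 1,) with a constant 1.0 prepended as a
# bias input; output: (1, num_outputs) after the final sigmoid.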
errors = tf.pow(tf.subtract(tf.expand_dims(targets, 1), output), 2.0)
error = tf.reduce_sum(errors)
train_op = tf.train.GradientDescentOptimizer(0.01).minimize(error)
clip_op_betas = tf.assign(betas, tf.clip_by_value(betas, 0, np.infty))
model = tf.global_variables_initializer()
with tf.Session() as session:
session.run(model)
for e in range(10000):
for d in range(len(points)):
session.run(train_op, feed_dict={inputs: points[d], targets: [out[d]]})
session.run(clip_op_betas)
if e % 10 == 0:
err = 0
for d in range(len(points)):
err += session.run(error, feed_dict={inputs: points[d], targets: [out[d]]})
#print(session.run(prob, feed_dict={inputs: points[d], targets: [out[d]]}))
#print(session.run(g, feed_dict={inputs: points[d], targets: [out[d]]}))
print(err)
print(session.run(betas))
incorrect = []
for d in range(len(points)):
o = session.run(output, feed_dict={inputs: points[d], targets: [out[d]]})
if not int(round(o[0,0])) == out[d]:
incorrect.append(points[d])
centroids = session.run(centroids)
betas = session.run(betas)
boundarys = session.run(hidden_weights)
# Plot points on graph
c1 = []
c2 = []
for i in range(0, len(points)):
if out[i] == 0:
c1.append(points[i])
else:
c2.append(points[i])
print("Type 0: ", len(c1))
print("Type 1: ", len(c2))
plotScatter(c1,'y')
plotScatter(c2, 'b')
for centroid in centroids:
plot_centroid(centroid)
for i in range(len(boundarys)):
plot_weights(boundarys[i], centroids[i], 'g')
#for plane in boundarys:
# plot_weights(boundarys, 'g')
for point in incorrect:
plot_incorrect(point)
#plot_weights(final_gate, 'g')
plt.gca().set_aspect('equal')
plt.xlim(xmin=-1.5, xmax=1.5)
plt.ylim(ymin=-1.5, ymax=1.5)
plt.show()
| mit | -5,844,633,788,650,325,000 | 26.991632 | 108 | 0.582511 | false | 2.932924 | false | false | false |
hamdyaea/Daylight-GNU-Linux | Version4/usr/share/daylight/PySpeaking-GUI/speak.py | 1 | 1687 | # Developer : Hamdy Abou El Anein
import os
import sys
from easygui import *
print("IMPORTANT\n\nThis software works only if google_speech is installed on the system. To install it, please go to this link: https://pypi.python.org/pypi/google_speech/\n\n")
def language():
global lang
msg = "Which language do you want PySpeaking to speak?"
title = "PySpeaking-GUI"
choices = ["English", "French", "German", "Spanish","Japanese","Chinese","Italian","Arabic", "Russian"]
choice = choicebox(msg, title, choices)
if choice == "English":
lang = ' en '
textToSpeak()
elif choice == "French":
lang = ' fr '
textToSpeak()
elif choice == "German":
lang = ' de '
textToSpeak()
elif choice == "Spanish":
lang = ' es '
textToSpeak()
elif choice == "Japanese":
lang = ' ja '
textToSpeak()
elif choice == "Chinese":
lang = ' zh-CN '
textToSpeak()
elif choice == "Italian":
lang = ' it '
textToSpeak()
elif choice == "Arabic":
lang = ' ar '
textToSpeak()
elif choice == "Russian":
lang = ' ru '
textToSpeak()
else:
sys.exit(0)
def textToSpeak():
global fieldValues
msg = "Enter the text to speak"
title = "Enter the text to speak"
fieldNames = ["Text to speak"]
fieldValues = []
fieldValues = multenterbox(msg, title, fieldNames)
fieldValues[0]
speak()
def speak():
global lang, fieldValues
textValue = "google_speech -l" +str(lang) +str(" \"")+str(fieldValues[0].replace("'","\'"))+str("\"")
os.system(textValue)
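# Example of the command this builds (illustrative, assuming English was
# selected and the entered text is "Hello world"):
#   google_speech -l en "Hello world"
# Note that the text is passed through a shell, so quotes or backticks in the
# user input are not really escaped by the replace() above.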
language() | gpl-3.0 | 3,731,881,394,284,948,500 | 24.575758 | 177 | 0.578542 | false | 3.421907 | false | false | false |
dubvulture/pyku | pyku/digit_classifier.py | 1 | 4575 | # coding=utf-8
import os
import cv2
import numpy as np
from .utils import DSIZE, TRAIN_DATA
class DigitClassifier(object):
@staticmethod
def _feature(image):
"""
It's faster but still accurate enough with DSIZE = 14.
~0.9983 precision and recall
:param image:
:return: raw pixels as feature vector
"""
image = cv2.resize(image, None, fx=DSIZE/28, fy=DSIZE/28,
interpolation=cv2.INTER_LINEAR)
ret = image.astype(np.float32) / 255
return ret.ravel()
@staticmethod
def _zoning(image):
"""
It works better with DSIZE = 28
~0.9967 precision and recall
:param image:
:return: #pixels/area ratio of each 7x7-pixel zone (a 4x4 grid, 16 zones) as feature vector
"""
zones = []
for i in range(0, 28, 7):
for j in range(0, 28, 7):
roi = image[i:i+7, j:j+7]
val = (np.sum(roi)/255) / 49.
zones.append(val)
return np.array(zones, np.float32)
def __init__(self,
saved_model=None,
train_folder=None,
feature=_feature.__func__):
"""
:param saved_model: optional saved train set and labels as .npz
:param train_folder: optional custom train data to process
:param feature: feature function - compatible with saved_model
"""
self.feature = feature
if train_folder is not None:
self.train_set, self.train_labels, self.model = \
self.create_model(train_folder)
else:
if cv2.__version__[0] == '2':
self.model = cv2.KNearest()
else:
self.model = cv2.ml.KNearest_create()
if saved_model is None:
saved_model = TRAIN_DATA+'raw_pixel_data.npz'
with np.load(saved_model) as data:
self.train_set = data['train_set']
self.train_labels = data['train_labels']
if cv2.__version__[0] == '2':
self.model.train(self.train_set, self.train_labels)
else:
self.model.train(self.train_set, cv2.ml.ROW_SAMPLE,
self.train_labels)
def create_model(self, train_folder):
"""
Return the training set, its labels and the trained model
:param train_folder: folder where to retrieve data
:return: (train_set, train_labels, trained_model)
"""
digits = []
labels = []
for n in range(1, 10):
folder = train_folder + str(n)
samples = [pic for pic in os.listdir(folder)
if os.path.isfile(os.path.join(folder, pic))]
for sample in samples:
image = cv2.imread(os.path.join(folder, sample))
# Expecting black on white
image = 255 - cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, image = cv2.threshold(image, 0, 255,
cv2.THRESH_BINARY + cv2.THRESH_OTSU)
feat = self.feature(image)
digits.append(feat)
labels.append(n)
digits = np.array(digits, np.float32)
labels = np.array(labels, np.float32)
if cv2.__version__[0] == '2':
model = cv2.KNearest()
model.train(digits, labels)
else:
model = cv2.ml.KNearest_create()
model.train(digits, cv2.ml.ROW_SAMPLE, labels)
return digits, labels, model
def classify(self, image):
"""
Given a 28x28 image, returns an array representing the 2 highest
probable prediction
:param image:
:return: array of 2 highest prob-digit tuples
"""
if cv2.__version__[0] == '2':
res = self.model.find_nearest(np.array([self.feature(image)]), k=11)
else:
res = self.model.findNearest(np.array([self.feature(image)]), k=11)
hist = np.histogram(res[2], bins=9, range=(1, 10), normed=True)[0]
zipped = sorted(zip(hist, np.arange(1, 10)), reverse=True)
return np.array(zipped[:2])
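# Illustrative call (hypothetical values): for a black-on-white digit image
# `img`, ``clf.classify(img)`` might return ``array([[0.6, 3.], [0.2, 8.]])``,
# i.e. about 60% of the 11 neighbours voted for "3" and 20% for "8".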
def save_training(self, filename):
"""
Save traning set and labels of current model
:param filename: filename of new data.npz, it will be saved in 'train/'
"""
np.savez(os.path.join(TRAIN_DATA, filename),
train_set=self.train_set,
train_labels=self.train_labels) | gpl-3.0 | -8,652,264,273,431,171,000 | 35.903226 | 80 | 0.532022 | false | 3.893617 | false | false | false |
darrencheng0817/AlgorithmLearning | Python/leetcode/MergeKLists.py | 1 | 1442 |
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
from heapq import heappush, heappop, heapreplace, heapify
h=[]
res=ListNode(0)
p=res
h = [(n.val, n) for n in lists if n]
heapify(h)
while h:
value,minNode=h[0]
p.next=minNode
if not minNode.next:
heappop(h)
else:
heapreplace(h,(minNode.next.val,minNode.next))
p=p.next
return res.next
def mergeKLists2(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
from heapq import heappush, heappop, heapreplace, heapify
h=[]
res=ListNode(0)
p=res
for n in lists:
if n:
h.append((n.val,n))
heapify(h)
while h:
value,minNode=heappop(h)
p.next=minNode
if minNode.next:
heappush(h, (minNode.next.val,minNode.next))
p=p.next
return res.next
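# Both variants do a k-way merge with a min-heap keyed on node value:
# O(N log k) time and O(k) extra space for N total nodes across k lists.
# Caveat (left as-is): when two nodes hold equal values the tuple comparison
# falls back to comparing ListNode objects, which raises TypeError on
# Python 3; pushing (val, index, node) tuples would avoid that.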
so=Solution()
l1=ListNode(3)
l1.next=ListNode(5)
l1.next.next=ListNode(6)
l2=ListNode(7)
l2.next=ListNode(9)
input=[l1,l2]
res=so.mergeKLists2(input)
while res:
print(res.val)
res=res.next
| mit | -99,459,296,128,431,540 | 22.639344 | 65 | 0.511789 | false | 3.284738 | false | false | false |
captainsafia/agate | agate/columns/base.py | 1 | 3431 | #!/usr/bin/env python
from collections import Mapping, Sequence
try:
from collections import OrderedDict
except ImportError: #pragma: no cover
from ordereddict import OrderedDict
import six
from agate.exceptions import ColumnDoesNotExistError
from agate.utils import memoize
class ColumnMapping(Mapping):
"""
Proxy access to :class:`Column` instances for :class:`.Table`.
:param table: :class:`.Table`.
"""
def __init__(self, table):
self._table = table
def __getitem__(self, k):
try:
i = self._table._column_names.index(k)
except ValueError:
raise ColumnDoesNotExistError(k)
return self._table._get_column(i)
def __iter__(self):
return ColumnIterator(self._table)
@memoize
def __len__(self):
return len(self._table._column_names)
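# Illustrative access pattern (assumes an agate Table exposing this mapping
# as ``table.columns``): ``table.columns['price']`` returns the Column proxy,
# and unknown names raise ColumnDoesNotExistError.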
class ColumnIterator(six.Iterator):
"""
Iterator over :class:`Column` instances within a :class:`.Table`.
:param table: :class:`.Table`.
"""
def __init__(self, table):
self._table = table
self._i = 0
def __next__(self):
try:
self._table._column_names[self._i]
except IndexError:
raise StopIteration
column = self._table._get_column(self._i)
self._i += 1
return column
class Column(Sequence):
"""
Proxy access to column data. Instances of :class:`Column` should
not be constructed directly. They are created by :class:`.Table`
instances.
:param table: The table that contains this column.
:param index: The index of this column in the table.
"""
def __init__(self, table, index):
self._table = table
self._index = index
def __unicode__(self):
data = self.get_data()
sample = ', '.join(six.text_type(d) for d in data[:5])
if len(data) > 5:
sample = '%s, ...' % sample
sample = '(%s)' % sample
return '<agate.columns.%s: %s>' % (self.__class__.__name__, sample)
def __str__(self):
return str(self.__unicode__())
def __getitem__(self, j):
return self.get_data()[j]
@memoize
def __len__(self):
return len(self.get_data())
def __eq__(self, other):
"""
Ensure equality test with lists works.
"""
return self.get_data() == other
def __ne__(self, other):
"""
Ensure inequality test with lists works.
"""
return not self.__eq__(other)
@memoize
def get_data(self):
"""
Get the data contained in this column as a :class:`tuple`.
"""
return tuple(r[self._index] for r in self._table._data)
@memoize
def get_data_without_nulls(self):
"""
Get the data contained in this column with any null values removed.
"""
return tuple(d for d in self.get_data() if d is not None)
@memoize
def get_data_sorted(self):
"""
Get the data contained in this column sorted.
"""
return sorted(self.get_data())
@memoize
def has_nulls(self):
"""
Returns `True` if this column contains null values.
"""
return None in self.get_data()
def aggregate(self, aggregation):
"""
Apply a :class:`.Aggregation` to this column and return the result.
"""
return aggregation.run(self)
| mit | -8,266,430,875,048,023,000 | 23.507143 | 75 | 0.568347 | false | 4.099164 | false | false | false |
Pulgama/supriya | supriya/nonrealtime/Moment.py | 1 | 1816 | from supriya.nonrealtime.SessionObject import SessionObject
class Moment(SessionObject):
"""
A moment-in-time referencing a singleton non-realtime state.
::
>>> import supriya.nonrealtime
>>> session = supriya.nonrealtime.Session()
>>> moment = session.at(10.5)
"""
### CLASS VARIABLES ###
__documentation_section__ = "Session Objects"
__slots__ = ("_offset", "_propagate", "_session", "_state")
### INITIALIZER ###
def __init__(self, session, offset, state, propagate=True):
SessionObject.__init__(self, session)
self._offset = offset
self._state = state
self._propagate = bool(propagate)
### SPECIAL METHODS ###
def __enter__(self):
self.session.active_moments.append(self)
if self.propagate:
self.session._apply_transitions(self.state.offset)
return self
def __eq__(self, expr):
if not isinstance(expr, type(self)):
return False
if expr.session is not self.session:
return False
return expr.offset == self.offset
def __exit__(self, exc_type, exc_value, traceback):
self.session.active_moments.pop()
if self.propagate:
self.session._apply_transitions(self.state.offset)
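# Typical use (sketch, not from upstream docs): the moment is a context
# manager, so edits at a given offset read as
#
#     with session.at(10.5):
#         ...  # operations recorded at offset 10.5
#
# entering and leaving the block both re-apply the state transitions when
# ``propagate`` is set.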
def __lt__(self, expr):
if not isinstance(expr, type(self)) or expr.session is not self.session:
raise ValueError(expr)
return self.offset < expr.offset
def __repr__(self):
return "<{} @{!r}>".format(type(self).__name__, self.offset)
### PUBLIC PROPERTIES ###
@property
def offset(self):
return self._offset
@property
def propagate(self):
return self._propagate
@property
def state(self):
return self._state
| mit | -4,129,431,815,755,473,400 | 24.942857 | 80 | 0.590859 | false | 4.117914 | false | false | false |
dennisss/sympy | sympy/functions/special/tests/test_error_functions.py | 2 | 24070 | from sympy import (
symbols, expand, expand_func, nan, oo, Float, conjugate, diff,
re, im, Abs, O, factorial, exp_polar, polar_lift, gruntz, limit,
Symbol, I, integrate, S,
sqrt, sin, cos, sinh, cosh, exp, log, pi, EulerGamma,
erf, erfc, erfi, erf2, erfinv, erfcinv, erf2inv,
gamma, uppergamma, loggamma,
Ei, expint, E1, li, Li, Si, Ci, Shi, Chi,
fresnels, fresnelc,
hyper, meijerg)
from sympy.functions.special.error_functions import _erfs, _eis
from sympy.core.function import ArgumentIndexError
from sympy.utilities.pytest import raises
x, y, z = symbols('x,y,z')
w = Symbol("w", real=True)
n = Symbol("n", integer=True)
def test_erf():
assert erf(nan) == nan
assert erf(oo) == 1
assert erf(-oo) == -1
assert erf(0) == 0
assert erf(I*oo) == oo*I
assert erf(-I*oo) == -oo*I
assert erf(-2) == -erf(2)
assert erf(-x*y) == -erf(x*y)
assert erf(-x - y) == -erf(x + y)
assert erf(erfinv(x)) == x
assert erf(erfcinv(x)) == 1 - x
assert erf(erf2inv(0, x)) == x
assert erf(erf2inv(0, erf(erfcinv(1 - erf(erfinv(x)))))) == x
assert erf(I).is_real is False
assert erf(0).is_real is True
assert conjugate(erf(z)) == erf(conjugate(z))
assert erf(x).as_leading_term(x) == 2*x/sqrt(pi)
assert erf(1/x).as_leading_term(x) == erf(1/x)
assert erf(z).rewrite('uppergamma') == sqrt(z**2)*erf(sqrt(z**2))/z
assert erf(z).rewrite('erfc') == S.One - erfc(z)
assert erf(z).rewrite('erfi') == -I*erfi(I*z)
assert erf(z).rewrite('fresnels') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erf(z).rewrite('fresnelc') == (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erf(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
assert erf(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
assert erf(z).rewrite('expint') == sqrt(z**2)/z - z*expint(S.Half, z**2)/sqrt(S.Pi)
assert limit(exp(x)*exp(x**2)*(erf(x + 1/exp(x)) - erf(x)), x, oo) == \
2/sqrt(pi)
assert limit((1 - erf(z))*exp(z**2)*z, z, oo) == 1/sqrt(pi)
assert limit((1 - erf(x))*exp(x**2)*sqrt(pi)*x, x, oo) == 1
assert limit(((1 - erf(x))*exp(x**2)*sqrt(pi)*x - 1)*2*x**2, x, oo) == -1
assert erf(x).as_real_imag() == \
((erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
I*(erf(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
erf(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
raises(ArgumentIndexError, lambda: erf(x).fdiff(2))
def test_erf_series():
assert erf(x).series(x, 0, 7) == 2*x/sqrt(pi) - \
2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erf_evalf():
assert abs( erf(Float(2.0)) - 0.995322265 ) < 1E-8 # XXX
def test__erfs():
assert _erfs(z).diff(z) == -2/sqrt(S.Pi) + 2*z*_erfs(z)
assert _erfs(1/z).series(z) == \
z/sqrt(pi) - z**3/(2*sqrt(pi)) + 3*z**5/(4*sqrt(pi)) + O(z**6)
assert expand(erf(z).rewrite('tractable').diff(z).rewrite('intractable')) \
== erf(z).diff(z)
assert _erfs(z).rewrite("intractable") == (-erf(z) + 1)*exp(z**2)
def test_erfc():
assert erfc(nan) == nan
assert erfc(oo) == 0
assert erfc(-oo) == 2
assert erfc(0) == 1
assert erfc(I*oo) == -oo*I
assert erfc(-I*oo) == oo*I
assert erfc(-x) == S(2) - erfc(x)
assert erfc(erfcinv(x)) == x
assert erfc(I).is_real is False
assert erfc(0).is_real is True
assert conjugate(erfc(z)) == erfc(conjugate(z))
assert erfc(x).as_leading_term(x) == S.One
assert erfc(1/x).as_leading_term(x) == erfc(1/x)
assert erfc(z).rewrite('erf') == 1 - erf(z)
assert erfc(z).rewrite('erfi') == 1 + I*erfi(I*z)
assert erfc(z).rewrite('fresnels') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erfc(z).rewrite('fresnelc') == 1 - (1 + I)*(fresnelc(z*(1 - I)/sqrt(pi)) -
I*fresnels(z*(1 - I)/sqrt(pi)))
assert erfc(z).rewrite('hyper') == 1 - 2*z*hyper([S.Half], [3*S.Half], -z**2)/sqrt(pi)
assert erfc(z).rewrite('meijerg') == 1 - z*meijerg([S.Half], [], [0], [-S.Half], z**2)/sqrt(pi)
assert erfc(z).rewrite('uppergamma') == 1 - sqrt(z**2)*erf(sqrt(z**2))/z
assert erfc(z).rewrite('expint') == S.One - sqrt(z**2)/z + z*expint(S.Half, z**2)/sqrt(S.Pi)
assert erfc(x).as_real_imag() == \
((erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
I*(erfc(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
erfc(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
raises(ArgumentIndexError, lambda: erfc(x).fdiff(2))
def test_erfc_series():
assert erfc(x).series(x, 0, 7) == 1 - 2*x/sqrt(pi) + \
2*x**3/3/sqrt(pi) - x**5/5/sqrt(pi) + O(x**7)
def test_erfc_evalf():
assert abs( erfc(Float(2.0)) - 0.00467773 ) < 1E-8 # XXX
def test_erfi():
assert erfi(nan) == nan
assert erfi(oo) == S.Infinity
assert erfi(-oo) == S.NegativeInfinity
assert erfi(0) == S.Zero
assert erfi(I*oo) == I
assert erfi(-I*oo) == -I
assert erfi(-x) == -erfi(x)
assert erfi(I*erfinv(x)) == I*x
assert erfi(I*erfcinv(x)) == I*(1 - x)
assert erfi(I*erf2inv(0, x)) == I*x
assert erfi(I).is_real is False
assert erfi(0).is_real is True
assert conjugate(erfi(z)) == erfi(conjugate(z))
assert erfi(z).rewrite('erf') == -I*erf(I*z)
assert erfi(z).rewrite('erfc') == I*erfc(I*z) - I
assert erfi(z).rewrite('fresnels') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
I*fresnels(z*(1 + I)/sqrt(pi)))
assert erfi(z).rewrite('fresnelc') == (1 - I)*(fresnelc(z*(1 + I)/sqrt(pi)) -
I*fresnels(z*(1 + I)/sqrt(pi)))
assert erfi(z).rewrite('hyper') == 2*z*hyper([S.Half], [3*S.Half], z**2)/sqrt(pi)
assert erfi(z).rewrite('meijerg') == z*meijerg([S.Half], [], [0], [-S.Half], -z**2)/sqrt(pi)
assert erfi(z).rewrite('uppergamma') == (sqrt(-z**2)/z*(uppergamma(S.Half,
-z**2)/sqrt(S.Pi) - S.One))
assert erfi(z).rewrite('expint') == sqrt(-z**2)/z - z*expint(S.Half, -z**2)/sqrt(S.Pi)
assert erfi(x).as_real_imag() == \
((erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x)))/2 +
erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))/2,
I*(erfi(re(x) - I*re(x)*Abs(im(x))/Abs(re(x))) -
erfi(re(x) + I*re(x)*Abs(im(x))/Abs(re(x)))) *
re(x)*Abs(im(x))/(2*im(x)*Abs(re(x)))))
raises(ArgumentIndexError, lambda: erfi(x).fdiff(2))
def test_erfi_series():
assert erfi(x).series(x, 0, 7) == 2*x/sqrt(pi) + \
2*x**3/3/sqrt(pi) + x**5/5/sqrt(pi) + O(x**7)
def test_erfi_evalf():
assert abs( erfi(Float(2.0)) - 18.5648024145756 ) < 1E-13 # XXX
def test_erf2():
assert erf2(0, 0) == S.Zero
assert erf2(x, x) == S.Zero
assert erf2(nan, 0) == nan
assert erf2(-oo, y) == erf(y) + 1
assert erf2( oo, y) == erf(y) - 1
assert erf2( x, oo) == 1 - erf(x)
assert erf2( x,-oo) == -1 - erf(x)
assert erf2(x, erf2inv(x, y)) == y
assert erf2(-x, -y) == -erf2(x,y)
assert erf2(-x, y) == erf(y) + erf(x)
assert erf2( x, -y) == -erf(y) - erf(x)
assert erf2(x, y).rewrite('fresnels') == erf(y).rewrite(fresnels)-erf(x).rewrite(fresnels)
assert erf2(x, y).rewrite('fresnelc') == erf(y).rewrite(fresnelc)-erf(x).rewrite(fresnelc)
assert erf2(x, y).rewrite('hyper') == erf(y).rewrite(hyper)-erf(x).rewrite(hyper)
assert erf2(x, y).rewrite('meijerg') == erf(y).rewrite(meijerg)-erf(x).rewrite(meijerg)
assert erf2(x, y).rewrite('uppergamma') == erf(y).rewrite(uppergamma) - erf(x).rewrite(uppergamma)
assert erf2(x, y).rewrite('expint') == erf(y).rewrite(expint)-erf(x).rewrite(expint)
assert erf2(I, 0).is_real is False
assert erf2(0, 0).is_real is True
#assert conjugate(erf2(x, y)) == erf2(conjugate(x), conjugate(y))
assert erf2(x, y).rewrite('erf') == erf(y) - erf(x)
assert erf2(x, y).rewrite('erfc') == erfc(x) - erfc(y)
assert erf2(x, y).rewrite('erfi') == I*(erfi(I*x) - erfi(I*y))
raises(ArgumentIndexError, lambda: erfi(x).fdiff(3))
def test_erfinv():
assert erfinv(0) == 0
assert erfinv(1) == S.Infinity
assert erfinv(nan) == S.NaN
assert erfinv(erf(w)) == w
assert erfinv(erf(-w)) == -w
assert erfinv(x).diff() == sqrt(pi)*exp(erfinv(x)**2)/2
assert erfinv(z).rewrite('erfcinv') == erfcinv(1-z)
def test_erfinv_evalf():
assert abs( erfinv(Float(0.2)) - 0.179143454621292 ) < 1E-13
def test_erfcinv():
assert erfcinv(1) == 0
assert erfcinv(0) == S.Infinity
assert erfcinv(nan) == S.NaN
assert erfcinv(x).diff() == -sqrt(pi)*exp(erfcinv(x)**2)/2
assert erfcinv(z).rewrite('erfinv') == erfinv(1-z)
def test_erf2inv():
assert erf2inv(0, 0) == S.Zero
assert erf2inv(0, 1) == S.Infinity
assert erf2inv(1, 0) == S.One
assert erf2inv(0, y) == erfinv(y)
assert erf2inv(oo,y) == erfcinv(-y)
assert erf2inv(x, y).diff(x) == exp(-x**2 + erf2inv(x, y)**2)
assert erf2inv(x, y).diff(y) == sqrt(pi)*exp(erf2inv(x, y)**2)/2
# NOTE we multiply by exp_polar(I*pi) and need this to be on the principal
# branch, hence take x in the lower half plane (d=0).
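# Summary of the ad-hoc helpers below (wording added for clarity): mytn
# asserts expr2 == expr3 and checks expr1 against expr2 numerically at random
# values of the free symbols; mytd asserts expr1.diff(x) == expr2 and verifies
# the derivative numerically; tn_branch compares the jump of func across the
# negative real axis computed with exp_polar arguments against the jump
# computed with small imaginary offsets.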
def mytn(expr1, expr2, expr3, x, d=0):
from sympy.utilities.randtest import test_numerically, random_complex_number
subs = {}
for a in expr1.free_symbols:
if a != x:
subs[a] = random_complex_number()
return expr2 == expr3 and test_numerically(expr1.subs(subs),
expr2.subs(subs), x, d=d)
def mytd(expr1, expr2, x):
from sympy.utilities.randtest import test_derivative_numerically, \
random_complex_number
subs = {}
for a in expr1.free_symbols:
if a != x:
subs[a] = random_complex_number()
return expr1.diff(x) == expr2 and test_derivative_numerically(expr1.subs(subs), x)
def tn_branch(func, s=None):
from sympy import I, pi, exp_polar
from random import uniform
def fn(x):
if s is None:
return func(x)
return func(s, x)
c = uniform(1, 5)
expr = fn(c*exp_polar(I*pi)) - fn(c*exp_polar(-I*pi))
eps = 1e-15
expr2 = fn(-c + eps*I) - fn(-c - eps*I)
return abs(expr.n() - expr2.n()).n() < 1e-10
def test_ei():
pos = Symbol('p', positive=True)
neg = Symbol('n', negative=True)
assert Ei(-pos) == Ei(polar_lift(-1)*pos) - I*pi
assert Ei(neg) == Ei(polar_lift(neg)) - I*pi
assert tn_branch(Ei)
assert mytd(Ei(x), exp(x)/x, x)
assert mytn(Ei(x), Ei(x).rewrite(uppergamma),
-uppergamma(0, x*polar_lift(-1)) - I*pi, x)
assert mytn(Ei(x), Ei(x).rewrite(expint),
-expint(1, x*polar_lift(-1)) - I*pi, x)
assert Ei(x).rewrite(expint).rewrite(Ei) == Ei(x)
assert Ei(x*exp_polar(2*I*pi)) == Ei(x) + 2*I*pi
assert Ei(x*exp_polar(-2*I*pi)) == Ei(x) - 2*I*pi
assert mytn(Ei(x), Ei(x).rewrite(Shi), Chi(x) + Shi(x), x)
assert mytn(Ei(x*polar_lift(I)), Ei(x*polar_lift(I)).rewrite(Si),
Ci(x) + I*Si(x) + I*pi/2, x)
assert Ei(log(x)).rewrite(li) == li(x)
assert Ei(2*log(x)).rewrite(li) == li(x**2)
assert gruntz(Ei(x+exp(-x))*exp(-x)*x, x, oo) == 1
assert Ei(x).series(x) == EulerGamma + log(x) + x + x**2/4 + \
x**3/18 + x**4/96 + x**5/600 + O(x**6)
def test_expint():
assert mytn(expint(x, y), expint(x, y).rewrite(uppergamma),
y**(x - 1)*uppergamma(1 - x, y), x)
assert mytd(
expint(x, y), -y**(x - 1)*meijerg([], [1, 1], [0, 0, 1 - x], [], y), x)
assert mytd(expint(x, y), -expint(x - 1, y), y)
assert mytn(expint(1, x), expint(1, x).rewrite(Ei),
-Ei(x*polar_lift(-1)) + I*pi, x)
assert expint(-4, x) == exp(-x)/x + 4*exp(-x)/x**2 + 12*exp(-x)/x**3 \
+ 24*exp(-x)/x**4 + 24*exp(-x)/x**5
assert expint(-S(3)/2, x) == \
exp(-x)/x + 3*exp(-x)/(2*x**2) - 3*sqrt(pi)*erf(sqrt(x))/(4*x**S('5/2')) \
+ 3*sqrt(pi)/(4*x**S('5/2'))
assert tn_branch(expint, 1)
assert tn_branch(expint, 2)
assert tn_branch(expint, 3)
assert tn_branch(expint, 1.7)
assert tn_branch(expint, pi)
assert expint(y, x*exp_polar(2*I*pi)) == \
x**(y - 1)*(exp(2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
assert expint(y, x*exp_polar(-2*I*pi)) == \
x**(y - 1)*(exp(-2*I*pi*y) - 1)*gamma(-y + 1) + expint(y, x)
assert expint(2, x*exp_polar(2*I*pi)) == 2*I*pi*x + expint(2, x)
assert expint(2, x*exp_polar(-2*I*pi)) == -2*I*pi*x + expint(2, x)
assert expint(1, x).rewrite(Ei).rewrite(expint) == expint(1, x)
assert mytn(E1(x), E1(x).rewrite(Shi), Shi(x) - Chi(x), x)
assert mytn(E1(polar_lift(I)*x), E1(polar_lift(I)*x).rewrite(Si),
-Ci(x) + I*Si(x) - I*pi/2, x)
assert mytn(expint(2, x), expint(2, x).rewrite(Ei).rewrite(expint),
-x*E1(x) + exp(-x), x)
assert mytn(expint(3, x), expint(3, x).rewrite(Ei).rewrite(expint),
x**2*E1(x)/2 + (1 - x)*exp(-x)/2, x)
assert expint(S(3)/2, z).nseries(z) == \
2 + 2*z - z**2/3 + z**3/15 - z**4/84 + z**5/540 - \
2*sqrt(pi)*sqrt(z) + O(z**6)
assert E1(z).series(z) == -EulerGamma - log(z) + z - \
z**2/4 + z**3/18 - z**4/96 + z**5/600 + O(z**6)
assert expint(4, z).series(z) == S(1)/3 - z/2 + z**2/2 + \
z**3*(log(z)/6 - S(11)/36 + EulerGamma/6) - z**4/24 + \
z**5/240 + O(z**6)
def test__eis():
assert _eis(z).diff(z) == -_eis(z) + 1/z
assert _eis(1/z).series(z) == \
z + z**2 + 2*z**3 + 6*z**4 + 24*z**5 + O(z**6)
assert Ei(z).rewrite('tractable') == exp(z)*_eis(z)
assert li(z).rewrite('tractable') == z*_eis(log(z))
assert _eis(z).rewrite('intractable') == exp(-z)*Ei(z)
assert expand(li(z).rewrite('tractable').diff(z).rewrite('intractable')) \
== li(z).diff(z)
assert expand(Ei(z).rewrite('tractable').diff(z).rewrite('intractable')) \
== Ei(z).diff(z)
assert _eis(z).series(z, n=3) == EulerGamma + log(z) + z*(-log(z) - \
EulerGamma + 1) + z**2*(log(z)/2 - S(3)/4 + EulerGamma/2) + O(z**3*log(z))
def tn_arg(func):
def test(arg, e1, e2):
from random import uniform
v = uniform(1, 5)
v1 = func(arg*x).subs(x, v).n()
v2 = func(e1*v + e2*1e-15).n()
return abs(v1 - v2).n() < 1e-10
return test(exp_polar(I*pi/2), I, 1) and \
test(exp_polar(-I*pi/2), -I, 1) and \
test(exp_polar(I*pi), -1, I) and \
test(exp_polar(-I*pi), -1, -I)
def test_li():
z = Symbol("z")
zr = Symbol("z", real=True)
zp = Symbol("z", positive=True)
zn = Symbol("z", negative=True)
assert li(0) == 0
assert li(1) == -oo
assert li(oo) == oo
assert isinstance(li(z), li)
assert diff(li(z), z) == 1/log(z)
assert conjugate(li(z)) == li(conjugate(z))
assert conjugate(li(-zr)) == li(-zr)
assert conjugate(li(-zp)) == conjugate(li(-zp))
assert conjugate(li(zn)) == conjugate(li(zn))
assert li(z).rewrite(Li) == Li(z) + li(2)
assert li(z).rewrite(Ei) == Ei(log(z))
assert li(z).rewrite(uppergamma) == (-log(1/log(z))/2 - log(-log(z)) +
log(log(z))/2 - expint(1, -log(z)))
assert li(z).rewrite(Si) == (-log(I*log(z)) - log(1/log(z))/2 +
log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
assert li(z).rewrite(Ci) == (-log(I*log(z)) - log(1/log(z))/2 +
log(log(z))/2 + Ci(I*log(z)) + Shi(log(z)))
assert li(z).rewrite(Shi) == (-log(1/log(z))/2 + log(log(z))/2 +
Chi(log(z)) - Shi(log(z)))
assert li(z).rewrite(Chi) == (-log(1/log(z))/2 + log(log(z))/2 +
Chi(log(z)) - Shi(log(z)))
assert li(z).rewrite(hyper) ==(log(z)*hyper((1, 1), (2, 2), log(z)) -
log(1/log(z))/2 + log(log(z))/2 + EulerGamma)
assert li(z).rewrite(meijerg) == (-log(1/log(z))/2 - log(-log(z)) + log(log(z))/2 -
meijerg(((), (1,)), ((0, 0), ()), -log(z)))
assert gruntz(1/li(z), z, oo) == 0
def test_Li():
assert Li(2) == 0
assert Li(oo) == oo
assert isinstance(Li(z), Li)
assert diff(Li(z), z) == 1/log(z)
assert gruntz(1/Li(z), z, oo) == 0
assert Li(z).rewrite(li) == li(z) - li(2)
def test_si():
assert Si(I*x) == I*Shi(x)
assert Shi(I*x) == I*Si(x)
assert Si(-I*x) == -I*Shi(x)
assert Shi(-I*x) == -I*Si(x)
assert Si(-x) == -Si(x)
assert Shi(-x) == -Shi(x)
assert Si(exp_polar(2*pi*I)*x) == Si(x)
assert Si(exp_polar(-2*pi*I)*x) == Si(x)
assert Shi(exp_polar(2*pi*I)*x) == Shi(x)
assert Shi(exp_polar(-2*pi*I)*x) == Shi(x)
assert Si(oo) == pi/2
assert Si(-oo) == -pi/2
assert Shi(oo) == oo
assert Shi(-oo) == -oo
assert mytd(Si(x), sin(x)/x, x)
assert mytd(Shi(x), sinh(x)/x, x)
assert mytn(Si(x), Si(x).rewrite(Ei),
-I*(-Ei(x*exp_polar(-I*pi/2))/2
+ Ei(x*exp_polar(I*pi/2))/2 - I*pi) + pi/2, x)
assert mytn(Si(x), Si(x).rewrite(expint),
-I*(-expint(1, x*exp_polar(-I*pi/2))/2 +
expint(1, x*exp_polar(I*pi/2))/2) + pi/2, x)
assert mytn(Shi(x), Shi(x).rewrite(Ei),
Ei(x)/2 - Ei(x*exp_polar(I*pi))/2 + I*pi/2, x)
assert mytn(Shi(x), Shi(x).rewrite(expint),
expint(1, x)/2 - expint(1, x*exp_polar(I*pi))/2 - I*pi/2, x)
assert tn_arg(Si)
assert tn_arg(Shi)
assert Si(x).nseries(x, n=8) == \
x - x**3/18 + x**5/600 - x**7/35280 + O(x**9)
assert Shi(x).nseries(x, n=8) == \
x + x**3/18 + x**5/600 + x**7/35280 + O(x**9)
assert Si(sin(x)).nseries(x, n=5) == x - 2*x**3/9 + 17*x**5/450 + O(x**6)
assert Si(x).nseries(x, 1, n=3) == \
Si(1) + (x - 1)*sin(1) + (x - 1)**2*(-sin(1)/2 + cos(1)/2) + O((x - 1)**3, (x, 1))
def test_ci():
m1 = exp_polar(I*pi)
m1_ = exp_polar(-I*pi)
pI = exp_polar(I*pi/2)
mI = exp_polar(-I*pi/2)
assert Ci(m1*x) == Ci(x) + I*pi
assert Ci(m1_*x) == Ci(x) - I*pi
assert Ci(pI*x) == Chi(x) + I*pi/2
assert Ci(mI*x) == Chi(x) - I*pi/2
assert Chi(m1*x) == Chi(x) + I*pi
assert Chi(m1_*x) == Chi(x) - I*pi
assert Chi(pI*x) == Ci(x) + I*pi/2
assert Chi(mI*x) == Ci(x) - I*pi/2
assert Ci(exp_polar(2*I*pi)*x) == Ci(x) + 2*I*pi
assert Chi(exp_polar(-2*I*pi)*x) == Chi(x) - 2*I*pi
assert Chi(exp_polar(2*I*pi)*x) == Chi(x) + 2*I*pi
assert Ci(exp_polar(-2*I*pi)*x) == Ci(x) - 2*I*pi
assert Ci(oo) == 0
assert Ci(-oo) == I*pi
assert Chi(oo) == oo
assert Chi(-oo) == oo
assert mytd(Ci(x), cos(x)/x, x)
assert mytd(Chi(x), cosh(x)/x, x)
assert mytn(Ci(x), Ci(x).rewrite(Ei),
Ei(x*exp_polar(-I*pi/2))/2 + Ei(x*exp_polar(I*pi/2))/2, x)
assert mytn(Chi(x), Chi(x).rewrite(Ei),
Ei(x)/2 + Ei(x*exp_polar(I*pi))/2 - I*pi/2, x)
assert tn_arg(Ci)
assert tn_arg(Chi)
from sympy import O, EulerGamma, log, limit
assert Ci(x).nseries(x, n=4) == \
EulerGamma + log(x) - x**2/4 + x**4/96 + O(x**5)
assert Chi(x).nseries(x, n=4) == \
EulerGamma + log(x) + x**2/4 + x**4/96 + O(x**5)
assert limit(log(x) - Ci(2*x), x, 0) == -log(2) - EulerGamma
def test_fresnel():
assert fresnels(0) == 0
assert fresnels(oo) == S.Half
assert fresnels(-oo) == -S.Half
assert fresnels(z) == fresnels(z)
assert fresnels(-z) == -fresnels(z)
assert fresnels(I*z) == -I*fresnels(z)
assert fresnels(-I*z) == I*fresnels(z)
assert conjugate(fresnels(z)) == fresnels(conjugate(z))
assert fresnels(z).diff(z) == sin(pi*z**2/2)
assert fresnels(z).rewrite(erf) == (S.One + I)/4 * (
erf((S.One + I)/2*sqrt(pi)*z) - I*erf((S.One - I)/2*sqrt(pi)*z))
assert fresnels(z).rewrite(hyper) == \
pi*z**3/6 * hyper([S(3)/4], [S(3)/2, S(7)/4], -pi**2*z**4/16)
assert fresnels(z).series(z, n=15) == \
pi*z**3/6 - pi**3*z**7/336 + pi**5*z**11/42240 + O(z**15)
assert fresnels(w).is_real is True
assert fresnels(z).as_real_imag() == \
((fresnels(re(z) - I*re(z)*Abs(im(z))/Abs(re(z)))/2 +
fresnels(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))/2,
I*(fresnels(re(z) - I*re(z)*Abs(im(z))/Abs(re(z))) -
fresnels(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))) *
re(z)*Abs(im(z))/(2*im(z)*Abs(re(z)))))
assert fresnels(2 + 3*I).as_real_imag() == (
fresnels(2 + 3*I)/2 + fresnels(2 - 3*I)/2,
I*(fresnels(2 - 3*I) - fresnels(2 + 3*I))/2
)
assert expand_func(integrate(fresnels(z), z)) == \
z*fresnels(z) + cos(pi*z**2/2)/pi
assert fresnels(z).rewrite(meijerg) == sqrt(2)*pi*z**(S(9)/4) * \
meijerg(((), (1,)), ((S(3)/4,),
(S(1)/4, 0)), -pi**2*z**4/16)/(2*(-z)**(S(3)/4)*(z**2)**(S(3)/4))
assert fresnelc(0) == 0
assert fresnelc(oo) == S.Half
assert fresnelc(-oo) == -S.Half
assert fresnelc(z) == fresnelc(z)
assert fresnelc(-z) == -fresnelc(z)
assert fresnelc(I*z) == I*fresnelc(z)
assert fresnelc(-I*z) == -I*fresnelc(z)
assert conjugate(fresnelc(z)) == fresnelc(conjugate(z))
assert fresnelc(z).diff(z) == cos(pi*z**2/2)
assert fresnelc(z).rewrite(erf) == (S.One - I)/4 * (
erf((S.One + I)/2*sqrt(pi)*z) + I*erf((S.One - I)/2*sqrt(pi)*z))
assert fresnelc(z).rewrite(hyper) == \
z * hyper([S.One/4], [S.One/2, S(5)/4], -pi**2*z**4/16)
assert fresnelc(z).series(z, n=15) == \
z - pi**2*z**5/40 + pi**4*z**9/3456 - pi**6*z**13/599040 + O(z**15)
# issue 6510
assert fresnels(z).series(z, S.Infinity) == \
(-1/(pi**2*z**3) + O(z**(-6), (z, oo)))*sin(pi*z**2/2) + \
(3/(pi**3*z**5) - 1/(pi*z) + O(z**(-6), (z, oo)))*cos(pi*z**2/2) + S.Half
assert fresnelc(z).series(z, S.Infinity) == \
(-1/(pi**2*z**3) + O(z**(-6), (z, oo)))*cos(pi*z**2/2) + \
(-3/(pi**3*z**5) + 1/(pi*z) + O(z**(-6), (z, oo)))*sin(pi*z**2/2) + S.Half
assert fresnels(1/z).series(z) == \
(-z**3/pi**2 + O(z**6))*sin(pi/(2*z**2)) + (-z/pi + 3*z**5/pi**3 + \
O(z**6))*cos(pi/(2*z**2)) + S.Half
assert fresnelc(1/z).series(z) == \
(-z**3/pi**2 + O(z**6))*cos(pi/(2*z**2)) + (z/pi - 3*z**5/pi**3 + \
O(z**6))*sin(pi/(2*z**2)) + S.Half
assert fresnelc(w).is_real is True
assert fresnelc(z).as_real_imag() == \
((fresnelc(re(z) - I*re(z)*Abs(im(z))/Abs(re(z)))/2 +
fresnelc(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))/2,
I*(fresnelc(re(z) - I*re(z)*Abs(im(z))/Abs(re(z))) -
fresnelc(re(z) + I*re(z)*Abs(im(z))/Abs(re(z)))) *
re(z)*Abs(im(z))/(2*im(z)*Abs(re(z)))))
assert fresnelc(2 + 3*I).as_real_imag() == (
fresnelc(2 - 3*I)/2 + fresnelc(2 + 3*I)/2,
I*(fresnelc(2 - 3*I) - fresnelc(2 + 3*I))/2
)
assert expand_func(integrate(fresnelc(z), z)) == \
z*fresnelc(z) - sin(pi*z**2/2)/pi
assert fresnelc(z).rewrite(meijerg) == sqrt(2)*pi*z**(S(3)/4) * \
meijerg(((), (1,)), ((S(1)/4,),
(S(3)/4, 0)), -pi**2*z**4/16)/(2*(-z)**(S(1)/4)*(z**2)**(S(1)/4))
from sympy.utilities.randtest import test_numerically
test_numerically(re(fresnels(z)), fresnels(z).as_real_imag()[0], z)
test_numerically(im(fresnels(z)), fresnels(z).as_real_imag()[1], z)
test_numerically(fresnels(z), fresnels(z).rewrite(hyper), z)
test_numerically(fresnels(z), fresnels(z).rewrite(meijerg), z)
test_numerically(re(fresnelc(z)), fresnelc(z).as_real_imag()[0], z)
test_numerically(im(fresnelc(z)), fresnelc(z).as_real_imag()[1], z)
test_numerically(fresnelc(z), fresnelc(z).rewrite(hyper), z)
test_numerically(fresnelc(z), fresnelc(z).rewrite(meijerg), z)
| bsd-3-clause | -8,657,577,822,490,563,000 | 34.712166 | 102 | 0.523889 | false | 2.313089 | true | false | false |
kdebrab/pandas | pandas/core/resample.py | 1 | 52045 | from datetime import timedelta
import numpy as np
import warnings
import copy
from textwrap import dedent
import pandas as pd
from pandas.core.groupby.base import GroupByMixin
from pandas.core.groupby.ops import BinGrouper
from pandas.core.groupby.groupby import (
_GroupBy, GroupBy, groupby, _pipe_template
)
from pandas.core.groupby.grouper import Grouper
from pandas.core.groupby.generic import SeriesGroupBy, PanelGroupBy
from pandas.tseries.frequencies import to_offset, is_subperiod, is_superperiod
from pandas.core.indexes.datetimes import DatetimeIndex, date_range
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset, Tick, Day, delta_to_nanoseconds
from pandas.core.indexes.period import PeriodIndex
from pandas.errors import AbstractMethodError
import pandas.core.algorithms as algos
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
import pandas.compat as compat
from pandas.compat.numpy import function as nv
from pandas._libs import lib
from pandas._libs.tslibs import Timestamp, NaT
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.util._decorators import Appender, Substitution
from pandas.core.generic import _shared_docs
_shared_docs_kwargs = dict()
class Resampler(_GroupBy):
"""
Class for resampling datetimelike data, a groupby-like operation.
See aggregate, transform, and apply functions on this object.
It's easiest to use obj.resample(...) to use Resampler.
Parameters
----------
obj : pandas object
groupby : a TimeGrouper object
axis : int, default 0
kind : str or None
'period', 'timestamp' to override default index treatment
Notes
-----
After resampling, see aggregate, apply, and transform functions.
Returns
-------
a Resampler of the appropriate type
"""
# to the groupby descriptor
_attributes = ['freq', 'axis', 'closed', 'label', 'convention',
'loffset', 'base', 'kind']
def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs):
self.groupby = groupby
self.keys = None
self.sort = True
self.axis = axis
self.kind = kind
self.squeeze = False
self.group_keys = True
self.as_index = True
self.exclusions = set()
self.binner = None
self.grouper = None
if self.groupby is not None:
self.groupby._set_grouper(self._convert_obj(obj), sort=True)
def __unicode__(self):
""" provide a nice str repr of our resample object """
attrs = ["{k}={v}".format(k=k, v=getattr(self.groupby, k))
for k in self._attributes if
getattr(self.groupby, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
attrs=', '.join(attrs))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self._attributes:
return getattr(self.groupby, attr)
if attr in self.obj:
return self[attr]
return object.__getattribute__(self, attr)
@property
def obj(self):
return self.groupby.obj
@property
def ax(self):
return self.groupby.ax
@property
def _typ(self):
""" masquerade for compat as a Series or a DataFrame """
if isinstance(self._selected_obj, pd.Series):
return 'series'
return 'dataframe'
@property
def _from_selection(self):
""" is the resampling from a DataFrame column or MultiIndex level """
# upsampling and PeriodIndex resampling do not work
# with selection; this state is used to catch and raise an error
return (self.groupby is not None and
(self.groupby.key is not None or
self.groupby.level is not None))
def _convert_obj(self, obj):
"""
provide any conversions for the object in order to correctly handle it before resampling
Parameters
----------
obj : the object to be resampled
Returns
-------
obj : converted object
"""
obj = obj._consolidate()
return obj
def _get_binner_for_time(self):
raise AbstractMethodError(self)
def _set_binner(self):
"""
setup our binners
cache these as we are an immutable object
"""
if self.binner is None:
self.binner, self.grouper = self._get_binner()
def _get_binner(self):
"""
create the BinGrouper, assume that self.set_grouper(obj)
has already been called
"""
binner, bins, binlabels = self._get_binner_for_time()
bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer)
return binner, bin_grouper
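# Annotation added for clarity: `binner` is the new index of bin labels
# (e.g. a DatetimeIndex at the target frequency) while `bin_grouper` maps the
# positions of the original rows into those bins for the groupby machinery.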
def _assure_grouper(self):
""" make sure that we are creating our binner & grouper """
self._set_binner()
@Substitution(klass='Resampler',
versionadded='.. versionadded:: 0.23.0',
examples="""
>>> df = pd.DataFrame({'A': [1, 2, 3, 4]},
... index=pd.date_range('2012-08-02', periods=4))
>>> df
A
2012-08-02 1
2012-08-03 2
2012-08-04 3
2012-08-05 4
To get the difference between each 2-day period's maximum and minimum value in
one pass, you can do
>>> df.resample('2D').pipe(lambda x: x.max() - x.min())
A
2012-08-02 1
2012-08-04 1""")
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return super(Resampler, self).pipe(func, *args, **kwargs)
_agg_doc = dedent("""
Examples
--------
>>> s = pd.Series([1,2,3,4,5],
index=pd.date_range('20130101', periods=5,freq='s'))
2013-01-01 00:00:00 1
2013-01-01 00:00:01 2
2013-01-01 00:00:02 3
2013-01-01 00:00:03 4
2013-01-01 00:00:04 5
Freq: S, dtype: int64
>>> r = s.resample('2s')
DatetimeIndexResampler [freq=<2 * Seconds>, axis=0, closed=left,
label=left, convention=start, base=0]
>>> r.agg(np.sum)
2013-01-01 00:00:00 3
2013-01-01 00:00:02 7
2013-01-01 00:00:04 5
Freq: 2S, dtype: int64
>>> r.agg(['sum','mean','max'])
sum mean max
2013-01-01 00:00:00 3 1.5 2
2013-01-01 00:00:02 7 3.5 4
2013-01-01 00:00:04 5 5.0 5
>>> r.agg({'result' : lambda x: x.mean() / x.std(),
'total' : np.sum})
total result
2013-01-01 00:00:00 3 2.121320
2013-01-01 00:00:02 7 4.949747
2013-01-01 00:00:04 5 NaN
See also
--------
pandas.DataFrame.groupby.aggregate
pandas.DataFrame.resample.transform
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='DataFrame',
versionadded='',
axis=''))
def aggregate(self, arg, *args, **kwargs):
self._set_binner()
result, how = self._aggregate(arg, *args, **kwargs)
if result is None:
result = self._groupby_and_aggregate(arg,
*args,
**kwargs)
result = self._apply_loffset(result)
return result
agg = aggregate
apply = aggregate
def transform(self, arg, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> resampled.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
return self._selected_obj.groupby(self.groupby).transform(
arg, *args, **kwargs)
def _downsample(self, f):
raise AbstractMethodError(self)
def _upsample(self, f, limit=None, fill_value=None):
raise AbstractMethodError(self)
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
self._set_binner()
grouper = self.grouper
if subset is None:
subset = self.obj
grouped = groupby(subset, by=None, grouper=grouper, axis=self.axis)
# try the key selection
try:
return grouped[key]
except KeyError:
return grouped
def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
""" re-evaluate the obj with a groupby aggregation """
if grouper is None:
self._set_binner()
grouper = self.grouper
obj = self._selected_obj
try:
grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)
except TypeError:
# panel grouper
grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis)
try:
if isinstance(obj, ABCDataFrame) and compat.callable(how):
# Check if the function is reducing or not.
result = grouped._aggregate_item_by_item(how, *args, **kwargs)
else:
result = grouped.aggregate(how, *args, **kwargs)
except Exception:
# we have a non-reducing function
# try to evaluate
result = grouped.apply(how, *args, **kwargs)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _apply_loffset(self, result):
"""
if loffset is set, offset the result index
This is NOT an idempotent routine, it will be applied
exactly once to the result.
Parameters
----------
result : Series or DataFrame
the result of resample
"""
needs_offset = (
isinstance(self.loffset, (DateOffset, timedelta)) and
isinstance(result.index, DatetimeIndex) and
len(result.index) > 0
)
if needs_offset:
result.index = result.index + self.loffset
self.loffset = None
return result
def _get_resampler_for_grouping(self, groupby, **kwargs):
""" return the correct class for resampling with groupby """
return self._resampler_for_grouping(self, groupby=groupby, **kwargs)
def _wrap_result(self, result):
""" potentially wrap any results """
if isinstance(result, ABCSeries) and self._selection is not None:
result.name = self._selection
if isinstance(result, ABCSeries) and result.empty:
obj = self.obj
result.index = obj.index._shallow_copy(freq=to_offset(self.freq))
result.name = getattr(obj, 'name', None)
return result
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
Returns
-------
an upsampled Series
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self._upsample('pad', limit=limit)
ffill = pad
def nearest(self, limit=None):
"""
Fill values with nearest neighbor starting from center
Parameters
----------
limit : integer, optional
limit of how many values to fill
.. versionadded:: 0.21.0
Returns
-------
an upsampled Series
See Also
--------
Series.fillna
DataFrame.fillna
"""
return self._upsample('nearest', limit=limit)
def backfill(self, limit=None):
"""
Backward fill the new missing values in the resampled data.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency). The backward fill will replace NaN values that appeared in
the resampled data with the next value in the original sequence.
Missing values that existed in the original data will not be modified.
Parameters
----------
limit : integer, optional
Limit of how many values to fill.
Returns
-------
Series, DataFrame
An upsampled Series or DataFrame with backward filled NaN values.
See Also
--------
bfill : Alias of backfill.
fillna : Fill NaN values using the specified method, which can be
'backfill'.
nearest : Fill NaN values with nearest neighbor starting from center.
pad : Forward fill NaN values.
pandas.Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'backfill'.
pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'backfill'.
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
>>> s.resample('30min').backfill()
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').backfill(limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
Resampling a DataFrame that has missing values:
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').backfill()
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
>>> df.resample('15min').backfill(limit=2)
a b
2018-01-01 00:00:00 2.0 1.0
2018-01-01 00:15:00 NaN NaN
2018-01-01 00:30:00 NaN 3.0
2018-01-01 00:45:00 NaN 3.0
2018-01-01 01:00:00 NaN 3.0
2018-01-01 01:15:00 NaN NaN
2018-01-01 01:30:00 6.0 5.0
2018-01-01 01:45:00 6.0 5.0
2018-01-01 02:00:00 6.0 5.0
"""
return self._upsample('backfill', limit=limit)
bfill = backfill
def fillna(self, method, limit=None):
"""
Fill missing values introduced by upsampling.
In statistics, imputation is the process of replacing missing data with
substituted values [1]_. When resampling data, missing values may
appear (e.g., when the resampling frequency is higher than the original
frequency).
Missing values that existed in the original data will
not be modified.
Parameters
----------
method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'}
Method to use for filling holes in resampled data
* 'pad' or 'ffill': use previous valid observation to fill gap
(forward fill).
* 'backfill' or 'bfill': use next valid observation to fill gap.
* 'nearest': use nearest valid observation to fill gap.
limit : integer, optional
Limit of how many consecutive missing values to fill.
Returns
-------
Series or DataFrame
An upsampled Series or DataFrame with missing values filled.
See Also
--------
backfill : Backward fill NaN values in the resampled data.
pad : Forward fill NaN values in the resampled data.
nearest : Fill NaN values in the resampled data
with nearest neighbor starting from center.
interpolate : Fill NaN values using interpolation.
pandas.Series.fillna : Fill NaN values in the Series using the
specified method, which can be 'bfill' and 'ffill'.
pandas.DataFrame.fillna : Fill NaN values in the DataFrame using the
specified method, which can be 'bfill' and 'ffill'.
Examples
--------
Resampling a Series:
>>> s = pd.Series([1, 2, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> s
2018-01-01 00:00:00 1
2018-01-01 01:00:00 2
2018-01-01 02:00:00 3
Freq: H, dtype: int64
Without filling the missing values you get:
>>> s.resample("30min").asfreq()
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 2.0
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> s.resample('30min').fillna("backfill")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('15min').fillna("backfill", limit=2)
2018-01-01 00:00:00 1.0
2018-01-01 00:15:00 NaN
2018-01-01 00:30:00 2.0
2018-01-01 00:45:00 2.0
2018-01-01 01:00:00 2.0
2018-01-01 01:15:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 01:45:00 3.0
2018-01-01 02:00:00 3.0
Freq: 15T, dtype: float64
>>> s.resample('30min').fillna("pad")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 1
2018-01-01 01:00:00 2
2018-01-01 01:30:00 2
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
>>> s.resample('30min').fillna("nearest")
2018-01-01 00:00:00 1
2018-01-01 00:30:00 2
2018-01-01 01:00:00 2
2018-01-01 01:30:00 3
2018-01-01 02:00:00 3
Freq: 30T, dtype: int64
Missing values present before the upsampling are not affected.
>>> sm = pd.Series([1, None, 3],
... index=pd.date_range('20180101', periods=3, freq='h'))
>>> sm
2018-01-01 00:00:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 02:00:00 3.0
Freq: H, dtype: float64
>>> sm.resample('30min').fillna('backfill')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('pad')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 1.0
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 NaN
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
>>> sm.resample('30min').fillna('nearest')
2018-01-01 00:00:00 1.0
2018-01-01 00:30:00 NaN
2018-01-01 01:00:00 NaN
2018-01-01 01:30:00 3.0
2018-01-01 02:00:00 3.0
Freq: 30T, dtype: float64
DataFrame resampling is done column-wise. All the same options are
available.
>>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]},
... index=pd.date_range('20180101', periods=3,
... freq='h'))
>>> df
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 01:00:00 NaN 3
2018-01-01 02:00:00 6.0 5
>>> df.resample('30min').fillna("bfill")
a b
2018-01-01 00:00:00 2.0 1
2018-01-01 00:30:00 NaN 3
2018-01-01 01:00:00 NaN 3
2018-01-01 01:30:00 6.0 5
2018-01-01 02:00:00 6.0 5
References
----------
.. [1] https://en.wikipedia.org/wiki/Imputation_(statistics)
"""
return self._upsample(method, limit=limit)
@Appender(_shared_docs['interpolate'] % _shared_docs_kwargs)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', limit_area=None,
downcast=None, **kwargs):
"""
Interpolate values according to different methods.
.. versionadded:: 0.18.1
"""
result = self._upsample(None)
return result.interpolate(method=method, axis=axis, limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast, **kwargs)
def asfreq(self, fill_value=None):
"""
return the values at the new freq,
essentially a reindex
Parameters
----------
fill_value: scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
See Also
--------
Series.asfreq
DataFrame.asfreq
"""
return self._upsample('asfreq', fill_value=fill_value)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_resampler_func('std', args, kwargs)
return self._downsample('std', ddof=ddof)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_resampler_func('var', args, kwargs)
return self._downsample('var', ddof=ddof)
@Appender(GroupBy.size.__doc__)
def size(self):
# It's a special case as higher level does return
# a copy of 0-len objects. GH14962
result = self._downsample('size')
if not len(self.ax) and isinstance(self._selected_obj, ABCDataFrame):
result = pd.Series([], index=result.index, dtype='int64')
return result
# downsample methods
for method in ['sum', 'prod']:
def f(self, _method=method, min_count=0, *args, **kwargs):
nv.validate_resampler_func(_method, args, kwargs)
return self._downsample(_method, min_count=min_count)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# downsample methods
for method in ['min', 'max', 'first', 'last', 'mean', 'sem',
'median', 'ohlc']:
def f(self, _method=method, *args, **kwargs):
nv.validate_resampler_func(_method, args, kwargs)
return self._downsample(_method)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# groupby & aggregate methods
for method in ['count']:
def f(self, _method=method):
return self._downsample(_method)
f.__doc__ = getattr(GroupBy, method).__doc__
setattr(Resampler, method, f)
# series only methods
for method in ['nunique']:
def f(self, _method=method):
return self._downsample(_method)
f.__doc__ = getattr(SeriesGroupBy, method).__doc__
setattr(Resampler, method, f)
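# Note: the loops above copy the GroupBy docstrings onto dynamically attached
# Resampler methods; an illustrative call such as df.resample('M').sum(min_count=1)
# (assumed example) simply dispatches to Resampler._downsample('sum', min_count=1).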
def _maybe_process_deprecations(r, how=None, fill_method=None, limit=None):
""" potentially we might have a deprecation warning, show it
but call the appropriate methods anyhow """
if how is not None:
# .resample(..., how='sum')
if isinstance(how, compat.string_types):
method = "{0}()".format(how)
# .resample(..., how=lambda x: ....)
else:
method = ".apply(<func>)"
# if we have both a how and fill_method, then show
# the following warning
if fill_method is None:
warnings.warn("how in .resample() is deprecated\n"
"the new syntax is "
".resample(...).{method}".format(
method=method),
FutureWarning, stacklevel=3)
r = r.aggregate(how)
if fill_method is not None:
# show the prior function call
method = '.' + method if how is not None else ''
args = "limit={0}".format(limit) if limit is not None else ""
warnings.warn("fill_method is deprecated to .resample()\n"
"the new syntax is .resample(...){method}"
".{fill_method}({args})".format(
method=method,
fill_method=fill_method,
args=args),
FutureWarning, stacklevel=3)
if how is not None:
r = getattr(r, fill_method)(limit=limit)
else:
r = r.aggregate(fill_method, limit=limit)
return r
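# Illustrative before/after for the deprecations handled above (assumed examples):
#   df.resample('M', how='sum')                    -> df.resample('M').sum()
#   df.resample('M', fill_method='ffill', limit=2) -> df.resample('M').ffill(limit=2)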
class _GroupByMixin(GroupByMixin):
""" provide the groupby facilities """
def __init__(self, obj, *args, **kwargs):
parent = kwargs.pop('parent', None)
groupby = kwargs.pop('groupby', None)
if parent is None:
parent = obj
# initialize our GroupByMixin object with
# the resampler attributes
for attr in self._attributes:
setattr(self, attr, kwargs.get(attr, getattr(parent, attr)))
super(_GroupByMixin, self).__init__(None)
self._groupby = groupby
self._groupby.mutated = True
self._groupby.grouper.mutated = True
self.groupby = copy.copy(parent.groupby)
def _apply(self, f, **kwargs):
"""
dispatch to _upsample; we are stripping all of the _upsample kwargs and
performing the original function call on the grouped object
"""
def func(x):
x = self._shallow_copy(x, groupby=self.groupby)
if isinstance(f, compat.string_types):
return getattr(x, f)(**kwargs)
return x.apply(f, **kwargs)
result = self._groupby.apply(func)
return self._wrap_result(result)
_upsample = _apply
_downsample = _apply
_groupby_and_aggregate = _apply
class DatetimeIndexResampler(Resampler):
@property
def _resampler_for_grouping(self):
return DatetimeIndexResamplerGroupby
def _get_binner_for_time(self):
# this is how we are actually creating the bins
if self.kind == 'period':
return self.groupby._get_time_period_bins(self.ax)
return self.groupby._get_time_bins(self.ax)
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
self._set_binner()
how = self._is_cython_func(how) or how
ax = self.ax
obj = self._selected_obj
if not len(ax):
# reset to the new freq
obj = obj.copy()
obj.index.freq = self.freq
return obj
# do we have a regular frequency
if ax.freq is not None or ax.inferred_freq is not None:
if len(self.grouper.binlabels) > len(ax) and how is None:
# let's do an asfreq
return self.asfreq()
# we are downsampling
# we want to call the actual grouper method here
result = obj.groupby(
self.grouper, axis=self.axis).aggregate(how, **kwargs)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _adjust_binner_for_upsample(self, binner):
""" adjust our binner when upsampling """
if self.closed == 'right':
binner = binner[1:]
else:
binner = binner[:-1]
return binner
def _upsample(self, method, limit=None, fill_value=None):
"""
method : string {'backfill', 'bfill', 'pad',
'ffill', 'asfreq'} method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
fill_value : scalar, default None
Value to use for missing values
See also
--------
.fillna
"""
self._set_binner()
if self.axis:
raise AssertionError('axis must be 0')
if self._from_selection:
raise ValueError("Upsampling from level= or on= selection"
" is not supported, use .set_index(...)"
" to explicitly set index to"
" datetime-like")
ax = self.ax
obj = self._selected_obj
binner = self.binner
res_index = self._adjust_binner_for_upsample(binner)
# if we have the same frequency as our axis, then we are equal sampling
if limit is None and to_offset(ax.inferred_freq) == self.freq:
result = obj.copy()
result.index = res_index
else:
result = obj.reindex(res_index, method=method,
limit=limit, fill_value=fill_value)
result = self._apply_loffset(result)
return self._wrap_result(result)
def _wrap_result(self, result):
result = super(DatetimeIndexResampler, self)._wrap_result(result)
        # we may have a different kind than we were asked for originally
# convert if needed
if self.kind == 'period' and not isinstance(result.index, PeriodIndex):
result.index = result.index.to_period(self.freq)
return result
class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler):
"""
Provides a resample of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return DatetimeIndexResampler
class PeriodIndexResampler(DatetimeIndexResampler):
@property
def _resampler_for_grouping(self):
return PeriodIndexResamplerGroupby
def _get_binner_for_time(self):
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._get_binner_for_time()
return self.groupby._get_period_bins(self.ax)
def _convert_obj(self, obj):
obj = super(PeriodIndexResampler, self)._convert_obj(obj)
if self._from_selection:
# see GH 14008, GH 12871
msg = ("Resampling from level= or on= selection"
" with a PeriodIndex is not currently supported,"
" use .set_index(...) to explicitly set index")
raise NotImplementedError(msg)
if self.loffset is not None:
# Cannot apply loffset/timedelta to PeriodIndex -> convert to
# timestamps
self.kind = 'timestamp'
# convert to timestamp
if self.kind == 'timestamp':
obj = obj.to_timestamp(how=self.convention)
return obj
def _downsample(self, how, **kwargs):
"""
Downsample the cython defined function
Parameters
----------
how : string / cython mapped function
**kwargs : kw args passed to how function
"""
# we may need to actually resample as if we are timestamps
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._downsample(how, **kwargs)
how = self._is_cython_func(how) or how
ax = self.ax
if is_subperiod(ax.freq, self.freq):
# Downsampling
return self._groupby_and_aggregate(how, grouper=self.grouper)
elif is_superperiod(ax.freq, self.freq):
if how == 'ohlc':
# GH #13083
# upsampling to subperiods is handled as an asfreq, which works
# for pure aggregating/reducing methods
# OHLC reduces along the time dimension, but creates multiple
# values for each period -> handle by _groupby_and_aggregate()
return self._groupby_and_aggregate(how, grouper=self.grouper)
return self.asfreq()
elif ax.freq == self.freq:
return self.asfreq()
raise IncompatibleFrequency(
'Frequency {} cannot be resampled to {}, as they are not '
'sub or super periods'.format(ax.freq, self.freq))
def _upsample(self, method, limit=None, fill_value=None):
"""
method : string {'backfill', 'bfill', 'pad', 'ffill'}
method for upsampling
limit : int, default None
Maximum size gap to fill when reindexing
fill_value : scalar, default None
Value to use for missing values
See also
--------
.fillna
"""
# we may need to actually resample as if we are timestamps
if self.kind == 'timestamp':
return super(PeriodIndexResampler, self)._upsample(
method, limit=limit, fill_value=fill_value)
self._set_binner()
ax = self.ax
obj = self.obj
new_index = self.binner
# Start vs. end of period
memb = ax.asfreq(self.freq, how=self.convention)
# Get the fill indexer
indexer = memb.get_indexer(new_index, method=method, limit=limit)
return self._wrap_result(_take_new_index(
obj, indexer, new_index, axis=self.axis))
class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler):
"""
Provides a resample of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return PeriodIndexResampler
class TimedeltaIndexResampler(DatetimeIndexResampler):
@property
def _resampler_for_grouping(self):
return TimedeltaIndexResamplerGroupby
def _get_binner_for_time(self):
return self.groupby._get_time_delta_bins(self.ax)
def _adjust_binner_for_upsample(self, binner):
""" adjust our binner when upsampling """
ax = self.ax
if is_subperiod(ax.freq, self.freq):
# We are actually downsampling
# but are in the asfreq path
# GH 12926
if self.closed == 'right':
binner = binner[1:]
else:
binner = binner[:-1]
return binner
class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler):
"""
Provides a resample of a groupby implementation
.. versionadded:: 0.18.1
"""
@property
def _constructor(self):
return TimedeltaIndexResampler
def resample(obj, kind=None, **kwds):
""" create a TimeGrouper and return our resampler """
tg = TimeGrouper(**kwds)
return tg._get_resampler(obj, kind=kind)
resample.__doc__ = Resampler.__doc__
def get_resampler_for_grouping(groupby, rule, how=None, fill_method=None,
limit=None, kind=None, **kwargs):
""" return our appropriate resampler when grouping as well """
# .resample uses 'on' similar to how .groupby uses 'key'
kwargs['key'] = kwargs.pop('on', None)
tg = TimeGrouper(freq=rule, **kwargs)
resampler = tg._get_resampler(groupby.obj, kind=kind)
r = resampler._get_resampler_for_grouping(groupby=groupby)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit)
class TimeGrouper(Grouper):
"""
Custom groupby class for time-interval grouping
Parameters
----------
freq : pandas date offset or offset alias for identifying bin edges
closed : closed end of interval; 'left' or 'right'
label : interval boundary to use for labeling; 'left' or 'right'
convention : {'start', 'end', 'e', 's'}
If axis is PeriodIndex
"""
_attributes = Grouper._attributes + ('closed', 'label', 'how',
'loffset', 'kind', 'convention',
'base')
def __init__(self, freq='Min', closed=None, label=None, how='mean',
axis=0, fill_method=None, limit=None, loffset=None,
kind=None, convention=None, base=0, **kwargs):
# Check for correctness of the keyword arguments which would
# otherwise silently use the default if misspelled
if label not in {None, 'left', 'right'}:
raise ValueError('Unsupported value {} for `label`'.format(label))
if closed not in {None, 'left', 'right'}:
raise ValueError('Unsupported value {} for `closed`'.format(
closed))
if convention not in {None, 'start', 'end', 'e', 's'}:
raise ValueError('Unsupported value {} for `convention`'
.format(convention))
freq = to_offset(freq)
end_types = set(['M', 'A', 'Q', 'BM', 'BA', 'BQ', 'W'])
rule = freq.rule_code
if (rule in end_types or
('-' in rule and rule[:rule.find('-')] in end_types)):
if closed is None:
closed = 'right'
if label is None:
label = 'right'
else:
if closed is None:
closed = 'left'
if label is None:
label = 'left'
self.closed = closed
self.label = label
self.kind = kind
self.convention = convention or 'E'
self.convention = self.convention.lower()
if isinstance(loffset, compat.string_types):
loffset = to_offset(loffset)
self.loffset = loffset
self.how = how
self.fill_method = fill_method
self.limit = limit
self.base = base
# always sort time groupers
kwargs['sort'] = True
super(TimeGrouper, self).__init__(freq=freq, axis=axis, **kwargs)
def _get_resampler(self, obj, kind=None):
"""
return my resampler or raise if we have an invalid axis
Parameters
----------
obj : input object
kind : string, optional
'period','timestamp','timedelta' are valid
Returns
-------
a Resampler
Raises
------
TypeError if incompatible axis
"""
self._set_grouper(obj)
ax = self.ax
if isinstance(ax, DatetimeIndex):
return DatetimeIndexResampler(obj,
groupby=self,
kind=kind,
axis=self.axis)
elif isinstance(ax, PeriodIndex) or kind == 'period':
return PeriodIndexResampler(obj,
groupby=self,
kind=kind,
axis=self.axis)
elif isinstance(ax, TimedeltaIndex):
return TimedeltaIndexResampler(obj,
groupby=self,
axis=self.axis)
raise TypeError("Only valid with DatetimeIndex, "
"TimedeltaIndex or PeriodIndex, "
"but got an instance of %r" % type(ax).__name__)
def _get_grouper(self, obj, validate=True):
# create the resampler and return our binner
r = self._get_resampler(obj)
r._set_binner()
return r.binner, r.grouper, r.obj
def _get_time_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if len(ax) == 0:
binner = labels = DatetimeIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
first, last = ax.min(), ax.max()
first, last = _get_range_edges(first, last, self.freq,
closed=self.closed,
base=self.base)
tz = ax.tz
# GH #12037
        # use first/last directly instead of calling replace() on them
        # because replace() will swallow the nanosecond part,
        # thus the last bin may be slightly before the end if the end contains a
        # nanosecond part, leading to a `Values falls after last bin` error
binner = labels = DatetimeIndex(freq=self.freq,
start=first,
end=last,
tz=tz,
name=ax.name)
# GH 15549
        # In edge case of tz-aware resampling the binner's last index can be
# less than the last variable in data object, this happens because of
# DST time change
if len(binner) > 1 and binner[-1] < last:
extra_date_range = pd.date_range(binner[-1], last + self.freq,
freq=self.freq, tz=tz,
name=ax.name)
binner = labels = binner.append(extra_date_range[1:])
# a little hack
trimmed = False
if (len(binner) > 2 and binner[-2] == last and
self.closed == 'right'):
binner = binner[:-1]
trimmed = True
ax_values = ax.asi8
binner, bin_edges = self._adjust_bin_edges(binner, ax_values)
# general version, knowing nothing about relative frequencies
bins = lib.generate_bins_dt64(
ax_values, bin_edges, self.closed, hasnans=ax.hasnans)
if self.closed == 'right':
labels = binner
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
else:
if self.label == 'right':
labels = labels[1:]
elif not trimmed:
labels = labels[:-1]
if ax.hasnans:
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
# if we end up with more labels than bins
# adjust the labels
# GH4076
if len(bins) < len(labels):
labels = labels[:len(bins)]
return binner, bins, labels
def _adjust_bin_edges(self, binner, ax_values):
# Some hacks for > daily data, see #1471, #1458, #1483
bin_edges = binner.asi8
if self.freq != 'D' and is_superperiod(self.freq, 'D'):
day_nanos = delta_to_nanoseconds(timedelta(1))
if self.closed == 'right':
bin_edges = bin_edges + day_nanos - 1
# intraday values on last day
if bin_edges[-2] > ax_values.max():
bin_edges = bin_edges[:-1]
binner = binner[:-1]
return binner, bin_edges
def _get_time_delta_bins(self, ax):
if not isinstance(ax, TimedeltaIndex):
raise TypeError('axis must be a TimedeltaIndex, but got '
'an instance of %r' % type(ax).__name__)
if not len(ax):
binner = labels = TimedeltaIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
start = ax[0]
end = ax[-1]
labels = binner = TimedeltaIndex(start=start,
end=end,
freq=self.freq,
name=ax.name)
end_stamps = labels + 1
bins = ax.searchsorted(end_stamps, side='left')
# Addresses GH #10530
if self.base > 0:
labels += type(self.freq)(self.base)
return binner, bins, labels
def _get_time_period_bins(self, ax):
if not isinstance(ax, DatetimeIndex):
raise TypeError('axis must be a DatetimeIndex, but got '
'an instance of %r' % type(ax).__name__)
if not len(ax):
binner = labels = PeriodIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
labels = binner = PeriodIndex(start=ax[0],
end=ax[-1],
freq=self.freq,
name=ax.name)
end_stamps = (labels + 1).asfreq(self.freq, 's').to_timestamp()
if ax.tzinfo:
end_stamps = end_stamps.tz_localize(ax.tzinfo)
bins = ax.searchsorted(end_stamps, side='left')
return binner, bins, labels
def _get_period_bins(self, ax):
if not isinstance(ax, PeriodIndex):
raise TypeError('axis must be a PeriodIndex, but got '
'an instance of %r' % type(ax).__name__)
memb = ax.asfreq(self.freq, how=self.convention)
# NaT handling as in pandas._lib.lib.generate_bins_dt64()
nat_count = 0
if memb.hasnans:
nat_count = np.sum(memb._isnan)
memb = memb[~memb._isnan]
# if index contains no valid (non-NaT) values, return empty index
if not len(memb):
binner = labels = PeriodIndex(
data=[], freq=self.freq, name=ax.name)
return binner, [], labels
start = ax.min().asfreq(self.freq, how=self.convention)
end = ax.max().asfreq(self.freq, how='end')
labels = binner = PeriodIndex(start=start, end=end,
freq=self.freq, name=ax.name)
i8 = memb.asi8
freq_mult = self.freq.n
# when upsampling to subperiods, we need to generate enough bins
expected_bins_count = len(binner) * freq_mult
i8_extend = expected_bins_count - (i8[-1] - i8[0])
rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult)
rng += freq_mult
bins = memb.searchsorted(rng, side='left')
if nat_count > 0:
# NaT handling as in pandas._lib.lib.generate_bins_dt64()
# shift bins by the number of NaT
bins += nat_count
bins = np.insert(bins, 0, nat_count)
binner = binner.insert(0, NaT)
labels = labels.insert(0, NaT)
return binner, bins, labels
def _take_new_index(obj, indexer, new_index, axis=0):
from pandas.core.api import Series, DataFrame
if isinstance(obj, Series):
new_values = algos.take_1d(obj.values, indexer)
return Series(new_values, index=new_index, name=obj.name)
elif isinstance(obj, DataFrame):
if axis == 1:
raise NotImplementedError("axis 1 is not supported")
return DataFrame(obj._data.reindex_indexer(
new_axis=new_index, indexer=indexer, axis=1))
else:
raise ValueError("'obj' should be either a Series or a DataFrame")
def _get_range_edges(first, last, offset, closed='left', base=0):
if isinstance(offset, compat.string_types):
offset = to_offset(offset)
if isinstance(offset, Tick):
is_day = isinstance(offset, Day)
day_nanos = delta_to_nanoseconds(timedelta(1))
# #1165
if (is_day and day_nanos % offset.nanos == 0) or not is_day:
return _adjust_dates_anchored(first, last, offset,
closed=closed, base=base)
if not isinstance(offset, Tick): # and first.time() != last.time():
# hack!
first = first.normalize()
last = last.normalize()
if closed == 'left':
first = Timestamp(offset.rollback(first))
else:
first = Timestamp(first - offset)
last = Timestamp(last + offset)
return first, last
def _adjust_dates_anchored(first, last, offset, closed='right', base=0):
# First and last offsets should be calculated from the start day to fix an
    # error caused by resampling across multiple days when a one day period is
# not a multiple of the frequency.
#
# See https://github.com/pandas-dev/pandas/issues/8683
# 14682 - Since we need to drop the TZ information to perform
# the adjustment in the presence of a DST change,
# save TZ Info and the DST state of the first and last parameters
# so that we can accurately rebuild them at the end.
first_tzinfo = first.tzinfo
last_tzinfo = last.tzinfo
first_dst = bool(first.dst())
last_dst = bool(last.dst())
first = first.tz_localize(None)
last = last.tz_localize(None)
start_day_nanos = first.normalize().value
base_nanos = (base % offset.n) * offset.nanos // offset.n
start_day_nanos += base_nanos
foffset = (first.value - start_day_nanos) % offset.nanos
loffset = (last.value - start_day_nanos) % offset.nanos
if closed == 'right':
if foffset > 0:
# roll back
fresult = first.value - foffset
else:
fresult = first.value - offset.nanos
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
# already the end of the road
lresult = last.value
else: # closed == 'left'
if foffset > 0:
fresult = first.value - foffset
else:
# start of the road
fresult = first.value
if loffset > 0:
# roll forward
lresult = last.value + (offset.nanos - loffset)
else:
lresult = last.value + offset.nanos
return (Timestamp(fresult).tz_localize(first_tzinfo, ambiguous=first_dst),
Timestamp(lresult).tz_localize(last_tzinfo, ambiguous=last_dst))
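# Worked example of the anchoring above (illustrative, assuming a '5min' Tick
# offset, closed='left', base=0): with first at 09:12, foffset is 2 minutes past
# the last 5-minute boundary since the start of the day, so fresult rolls back to
# 09:10, while last is rolled forward to its next boundary; the saved tz/DST
# state is then re-applied to both results.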
def asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None):
"""
Utility frequency conversion method for Series/DataFrame
"""
if isinstance(obj.index, PeriodIndex):
if method is not None:
raise NotImplementedError("'method' argument is not supported")
if how is None:
how = 'E'
new_obj = obj.copy()
new_obj.index = obj.index.asfreq(freq, how=how)
elif len(obj.index) == 0:
new_obj = obj.copy()
new_obj.index = obj.index._shallow_copy(freq=to_offset(freq))
else:
dti = date_range(obj.index[0], obj.index[-1], freq=freq)
dti.name = obj.index.name
new_obj = obj.reindex(dti, method=method, fill_value=fill_value)
if normalize:
new_obj.index = new_obj.index.normalize()
return new_obj
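# Example (illustrative): asfreq(series, 'D', method='pad') reindexes the object
# onto a daily DatetimeIndex spanning its first and last timestamps, forward
# filling the newly created rows; fill_value only fills holes introduced by the
# reindex, never pre-existing NaNs.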
| bsd-3-clause | -5,954,581,995,773,467,000 | 31.630094 | 79 | 0.550562 | false | 3.969568 | false | false | false |
grze/parentheses | clc/eucadmin/eucadmin/describeinstances.py | 1 | 2047 | # Software License Agreement (BSD License)
#
# Copyright (c) 2013 Eucalyptus Systems, Inc.
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms, with or
# without modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from boto.roboto.param import Param
import eucadmin
class DescribeInstances(eucadmin.EucadminRequest):
ServiceClass = eucadmin.EucAdmin
ServicePath = '/services/Eucalyptus'
Args = [Param(name='InstanceId', long_name='InstanceId', ptype='array',
optional=True)]
def __init__(self, **args):
eucadmin.EucadminRequest.__init__(self, **args)
self.list_markers = ['reservationSet', 'instancesSet', 'tagSet']
self.item_markers = ['item']
self.get_connection().APIVersion = '2012-07-20' # cheap hack
def main(self, **args):
return self.send(**args)
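# Minimal usage sketch (hypothetical invocation; credential and endpoint wiring
# is handled by boto.roboto / EucAdmin and is not shown here):
#   req = DescribeInstances()
#   reservations = req.main()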
| gpl-3.0 | -8,749,896,958,695,378,000 | 40.77551 | 77 | 0.737665 | false | 4.186094 | false | false | false |
ryansb/disq | tests/test_job_commands.py | 1 | 2851 | # Copyright 2015 Ryan Brown <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pytest
import time
import disq
def test_round_trip(dq):
qname = 'rttq'
assert dq.getjob('empty', timeout_ms=1) is None
id = dq.addjob(qname, 'foobar')
assert id
job = dq.getjob(qname, timeout_ms=1)
assert len(job) == 3
assert job[0] == qname
assert job[1] == id
assert job[2] == b'foobar'
id = dq.addjob(qname, 'foobar')
jobs = dq.getjobs(qname, timeout_ms=1)
job = jobs[0]
assert job[0] == qname
assert job[1] == id
assert job[2] == b'foobar'
def test_del_job(dq):
qname = 'delq'
assert dq.getjob(qname, timeout_ms=1) is None
id = dq.addjob(qname, 'foobar')
assert dq.qlen(qname) == 1
assert dq.deljob(id) == 1
assert dq.qlen(qname) == 0
def test_expiring_job(dq):
qname = 'expq'
assert dq.getjob(qname, timeout_ms=1) is None
dq.addjob(qname, 'foobar', ttl_secs=1)
assert dq.qlen(qname) == 1
time.sleep(1.5)
assert dq.qlen(qname) == 0
def test_delay_job(dq):
qname = 'delayq'
assert dq.getjob(qname, timeout_ms=1) is None
dq.addjob(qname, 'foobar', delay_secs=1)
assert dq.qlen(qname) == 0
time.sleep(0.5)
assert dq.qlen(qname) == 0
time.sleep(1)
assert dq.qlen(qname) == 1
def test_async_job(dq):
qname = 'delayq'
assert dq.getjob(qname, timeout_ms=1) is None
dq.addjob(qname, 'foobar', async=True)
assert dq.getjob(qname)
def test_unreplicated_job(dq, dq2):
qname = 'unreplq'
assert dq.getjob(qname, timeout_ms=1) is None
assert dq2.getjob(qname, timeout_ms=1) is None
id = dq.addjob(qname, 'foobar', replicate=1)
print(id,)
assert dq2.getjob(qname, timeout_ms=1) is None
assert dq.getjob(qname, timeout_ms=1)
def test_overcrowded_job(dq, dq2):
qname = 'crowdedq'
assert dq.getjob(qname, timeout_ms=1) is None
for i in range(11):
dq.addjob(qname, 'foobar {0}'.format(i), maxlen=10)
with pytest.raises(disq.ResponseError):
dq.addjob(qname, 'foobar', maxlen=10)
def test_json_job():
qname = 'jsonq'
job = {'hello': 'world'}
q = disq.Disque()
q.set_response_callback('GETJOB', disq.parsers.read_json_job)
q.addjob(qname, json.dumps(job))
j = q.getjob(qname)
assert j[2] == job
| apache-2.0 | 4,155,654,131,376,370,000 | 26.152381 | 74 | 0.649947 | false | 2.885628 | true | false | false |
brkwon/arista_eapi | modules/arista_eapi_urllib2.py | 1 | 3060 | # EAPI call program using urllib2 and json
import urllib2
#import random
import json
# eAPI JSON template
enableCmd = {"input" : "test", "cmd" : "enable"}
jsonCmd = {"params" : {"format" : "json", "version" : 1, "cmds" : "command"}, "jsonrpc" : "2.0", "method" : "runCmds", "id" : 0}
# Create json based on enable_password and eAPI command
def jsonCreate(eapi_command, enable_password):
    if enable_password is None:
jsonCmd["params"]["cmds"] = [eapi_command]
return jsonCmd
else:
enableCmd["input"] = enable_password
jsonCmd["params"]["cmds"] = [enableCmd] + eapi_command
return jsonCmd
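# For example (illustrative), jsonCreate("show vlan", None) produces:
#   {"params": {"format": "json", "version": 1, "cmds": ["show vlan"]},
#    "jsonrpc": "2.0", "method": "runCmds", "id": 0}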
# HTTP REST request function for eAPI call
def switchReq(switch, username, password, jsonCmds):
credential = switch
urlString = "http://" +credential+ "/command-api"
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, urlString, username, password)
    # create an authentication handler and an opener, then install the opener
auth = urllib2.HTTPBasicAuthHandler(password_manager)
opener = urllib2.build_opener(auth)
urllib2.install_opener(opener)
# create request call
request = urllib2.Request(urlString, jsonCmds)
request.add_header("Content-type", "application/json")
    # call the switch via urllib2, read the response and close it
f = urllib2.urlopen(request)
response = f.read()
f.close()
return response
# Show VLANs configured on a switch
def show_vlan(switch, username, password):
# Create JSON eapi-command
json_data = jsonCreate("show vlan", None)
jsonCmds = json.dumps(json_data)
# Send JSON command to the switch
response = switchReq(switch, username, password, jsonCmds)
# Strip VLAN ids for return
json_string = json.loads(response)
result = [str(item) for item in (json_string['result'][0]['vlans'].keys())]
return result
# Check if supplied VLAN is in switch or not
def check_vlan(switch, username, password, vlans):
# Create JSON eapi-command
json_data = jsonCreate("show vlan", None)
jsonCmds = json.dumps(json_data)
# Send JSON command to the switch
response = switchReq(switch, username, password, jsonCmds)
# Strip VLAN ids for checkup
json_string = json.loads(response)
result = [str(item) for item in (json_string['result'][0]['vlans'].keys())]
if (str(vlans) in result) == True:
return True
else:
return False
# Add VLAN to a switch
def add_vlan(switch, username, password, enable_password, vlans):
eapi_command = ["configure", "vlan " +vlans]
json_data = jsonCreate(eapi_command, enable_password)
jsonCmds = json.dumps(json_data)
response = switchReq(switch, username, password, jsonCmds)
# Delete VLAN from a switch
def del_vlan(switch, username, password, enable_password, vlans):
eapi_command = ["configure", "no vlan " +vlans]
json_data = jsonCreate(eapi_command, enable_password)
jsonCmds = json.dumps(json_data)
response = switchReq(switch, username, password, jsonCmds)
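# Example usage (hypothetical switch address and credentials; a minimal sketch):
if __name__ == "__main__":
    switch, username, password = "192.0.2.1", "admin", "admin"
    print(show_vlan(switch, username, password))  # list configured VLAN ids
    if not check_vlan(switch, username, password, "100"):
        add_vlan(switch, username, password, "enablepw", "100")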
| lgpl-3.0 | -2,830,876,098,484,342,300 | 32.26087 | 128 | 0.675817 | false | 3.625592 | false | false | false |
chdecultot/frappe | frappe/model/base_document.py | 1 | 25095 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
from six import iteritems, string_types
import datetime
import frappe, sys
from frappe import _
from frappe.utils import (cint, flt, now, cstr, strip_html, getdate, get_datetime, to_timedelta,
sanitize_html, sanitize_email, cast_fieldtype)
from frappe.model import default_fields
from frappe.model.naming import set_new_name
from frappe.model.utils.link_count import notify_link_count
from frappe.modules import load_doctype_module
from frappe.model import display_fieldtypes
from frappe.model.db_schema import type_map, varchar_len
from frappe.utils.password import get_decrypted_password, set_encrypted_password
_classes = {}
def get_controller(doctype):
"""Returns the **class** object of the given DocType.
For `custom` type, returns `frappe.model.document.Document`.
:param doctype: DocType name as string."""
from frappe.model.document import Document
global _classes
if not doctype in _classes:
module_name, custom = frappe.db.get_value("DocType", doctype, ["module", "custom"]) \
or ["Core", False]
if custom:
_class = Document
else:
module = load_doctype_module(doctype, module_name)
classname = doctype.replace(" ", "").replace("-", "")
if hasattr(module, classname):
_class = getattr(module, classname)
if issubclass(_class, BaseDocument):
_class = getattr(module, classname)
else:
raise ImportError(doctype)
else:
raise ImportError(doctype)
_classes[doctype] = _class
return _classes[doctype]
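# For example (illustrative), get_controller("ToDo") loads the module of the ToDo
# doctype and returns its ToDo class, while a DocType flagged as custom falls back
# to frappe.model.document.Document.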
class BaseDocument(object):
ignore_in_getter = ("doctype", "_meta", "meta", "_table_fields", "_valid_columns")
def __init__(self, d):
self.update(d)
self.dont_update_if_missing = []
if hasattr(self, "__setup__"):
self.__setup__()
@property
def meta(self):
if not hasattr(self, "_meta"):
self._meta = frappe.get_meta(self.doctype)
return self._meta
def update(self, d):
if "doctype" in d:
self.set("doctype", d.get("doctype"))
# first set default field values of base document
for key in default_fields:
if key in d:
self.set(key, d.get(key))
for key, value in iteritems(d):
self.set(key, value)
return self
def update_if_missing(self, d):
if isinstance(d, BaseDocument):
d = d.get_valid_dict()
if "doctype" in d:
self.set("doctype", d.get("doctype"))
for key, value in iteritems(d):
			# dont_update_if_missing is a list of fieldnames for which you don't want to set a default value
if (self.get(key) is None) and (value is not None) and (key not in self.dont_update_if_missing):
self.set(key, value)
def get_db_value(self, key):
return frappe.db.get_value(self.doctype, self.name, key)
def get(self, key=None, filters=None, limit=None, default=None):
if key:
if isinstance(key, dict):
return _filter(self.get_all_children(), key, limit=limit)
if filters:
if isinstance(filters, dict):
value = _filter(self.__dict__.get(key, []), filters, limit=limit)
else:
default = filters
filters = None
value = self.__dict__.get(key, default)
else:
value = self.__dict__.get(key, default)
if value is None and key not in self.ignore_in_getter \
and key in (d.fieldname for d in self.meta.get_table_fields()):
self.set(key, [])
value = self.__dict__.get(key)
return value
else:
return self.__dict__
def getone(self, key, filters=None):
return self.get(key, filters=filters, limit=1)[0]
def set(self, key, value, as_value=False):
if isinstance(value, list) and not as_value:
self.__dict__[key] = []
self.extend(key, value)
else:
self.__dict__[key] = value
def delete_key(self, key):
if key in self.__dict__:
del self.__dict__[key]
def append(self, key, value=None):
if value==None:
value={}
if isinstance(value, (dict, BaseDocument)):
if not self.__dict__.get(key):
self.__dict__[key] = []
value = self._init_child(value, key)
self.__dict__[key].append(value)
# reference parent document
value.parent_doc = self
return value
else:
raise ValueError(
"Document attached to child table must be a dict or BaseDocument, not " + str(type(value))[1:-1]
)
def extend(self, key, value):
if isinstance(value, list):
for v in value:
self.append(key, v)
else:
raise ValueError
def remove(self, doc):
self.get(doc.parentfield).remove(doc)
def _init_child(self, value, key):
if not self.doctype:
return value
if not isinstance(value, BaseDocument):
if "doctype" not in value:
value["doctype"] = self.get_table_field_doctype(key)
if not value["doctype"]:
raise AttributeError(key)
value = get_controller(value["doctype"])(value)
value.init_valid_columns()
value.parent = self.name
value.parenttype = self.doctype
value.parentfield = key
if value.docstatus is None:
value.docstatus = 0
if not getattr(value, "idx", None):
value.idx = len(self.get(key) or []) + 1
if not getattr(value, "name", None):
value.__dict__['__islocal'] = 1
return value
def get_valid_dict(self, sanitize=True, convert_dates_to_str=False):
d = frappe._dict()
for fieldname in self.meta.get_valid_columns():
d[fieldname] = self.get(fieldname)
# if no need for sanitization and value is None, continue
if not sanitize and d[fieldname] is None:
continue
df = self.meta.get_field(fieldname)
if df:
if df.fieldtype=="Check":
if d[fieldname]==None:
d[fieldname] = 0
elif (not isinstance(d[fieldname], int) or d[fieldname] > 1):
d[fieldname] = 1 if cint(d[fieldname]) else 0
elif df.fieldtype=="Int" and not isinstance(d[fieldname], int):
d[fieldname] = cint(d[fieldname])
elif df.fieldtype in ("Currency", "Float", "Percent") and not isinstance(d[fieldname], float):
d[fieldname] = flt(d[fieldname])
elif df.fieldtype in ("Datetime", "Date") and d[fieldname]=="":
d[fieldname] = None
elif df.get("unique") and cstr(d[fieldname]).strip()=="":
# unique empty field should be set to None
d[fieldname] = None
if isinstance(d[fieldname], list) and df.fieldtype != 'Table':
frappe.throw(_('Value for {0} cannot be a list').format(_(df.label)))
if convert_dates_to_str and isinstance(d[fieldname], (datetime.datetime, datetime.time, datetime.timedelta)):
d[fieldname] = str(d[fieldname])
return d
def init_valid_columns(self):
for key in default_fields:
if key not in self.__dict__:
self.__dict__[key] = None
if key in ("idx", "docstatus") and self.__dict__[key] is None:
self.__dict__[key] = 0
for key in self.get_valid_columns():
if key not in self.__dict__:
self.__dict__[key] = None
def get_valid_columns(self):
if self.doctype not in frappe.local.valid_columns:
if self.doctype in ("DocField", "DocPerm") and self.parent in ("DocType", "DocField", "DocPerm"):
from frappe.model.meta import get_table_columns
valid = get_table_columns(self.doctype)
else:
valid = self.meta.get_valid_columns()
frappe.local.valid_columns[self.doctype] = valid
return frappe.local.valid_columns[self.doctype]
def is_new(self):
return self.get("__islocal")
def as_dict(self, no_nulls=False, no_default_fields=False, convert_dates_to_str=False):
doc = self.get_valid_dict(convert_dates_to_str=convert_dates_to_str)
doc["doctype"] = self.doctype
for df in self.meta.get_table_fields():
children = self.get(df.fieldname) or []
doc[df.fieldname] = [d.as_dict(no_nulls=no_nulls) for d in children]
if no_nulls:
for k in list(doc):
if doc[k] is None:
del doc[k]
if no_default_fields:
for k in list(doc):
if k in default_fields:
del doc[k]
for key in ("_user_tags", "__islocal", "__onload", "_liked_by", "__run_link_triggers"):
if self.get(key):
doc[key] = self.get(key)
return doc
def as_json(self):
return frappe.as_json(self.as_dict())
def get_table_field_doctype(self, fieldname):
return self.meta.get_field(fieldname).options
def get_parentfield_of_doctype(self, doctype):
fieldname = [df.fieldname for df in self.meta.get_table_fields() if df.options==doctype]
return fieldname[0] if fieldname else None
def db_insert(self):
"""INSERT the document (with valid columns) in the database."""
if not self.name:
# name will be set by document class in most cases
set_new_name(self)
if not self.creation:
self.creation = self.modified = now()
			self.created_by = self.modified_by = frappe.session.user
d = self.get_valid_dict(convert_dates_to_str=True)
columns = list(d)
try:
frappe.db.sql("""insert into `tab{doctype}`
({columns}) values ({values})""".format(
doctype = self.doctype,
columns = ", ".join(["`"+c+"`" for c in columns]),
values = ", ".join(["%s"] * len(columns))
), list(d.values()))
except Exception as e:
if e.args[0]==1062:
if "PRIMARY" in cstr(e.args[1]):
if self.meta.autoname=="hash":
# hash collision? try again
self.name = None
self.db_insert()
return
frappe.msgprint(_("Duplicate name {0} {1}").format(self.doctype, self.name))
raise frappe.DuplicateEntryError(self.doctype, self.name, e)
elif "Duplicate" in cstr(e.args[1]):
# unique constraint
self.show_unique_validation_message(e)
else:
raise
else:
raise
self.set("__islocal", False)
def db_update(self):
if self.get("__islocal") or not self.name:
self.db_insert()
return
d = self.get_valid_dict(convert_dates_to_str=True)
# don't update name, as case might've been changed
name = d['name']
del d['name']
columns = list(d)
try:
frappe.db.sql("""update `tab{doctype}`
set {values} where name=%s""".format(
doctype = self.doctype,
values = ", ".join(["`"+c+"`=%s" for c in columns])
), list(d.values()) + [name])
except Exception as e:
if e.args[0]==1062 and "Duplicate" in cstr(e.args[1]):
self.show_unique_validation_message(e)
else:
raise
def show_unique_validation_message(self, e):
type, value, traceback = sys.exc_info()
fieldname, label = str(e).split("'")[-2], None
# unique_first_fieldname_second_fieldname is the constraint name
# created using frappe.db.add_unique
if "unique_" in fieldname:
fieldname = fieldname.split("_", 1)[1]
df = self.meta.get_field(fieldname)
if df:
label = df.label
frappe.msgprint(_("{0} must be unique".format(label or fieldname)))
# this is used to preserve traceback
raise frappe.UniqueValidationError(self.doctype, self.name, e)
def update_modified(self):
'''Update modified timestamp'''
self.set("modified", now())
frappe.db.set_value(self.doctype, self.name, 'modified', self.modified, update_modified=False)
def _fix_numeric_types(self):
for df in self.meta.get("fields"):
if df.fieldtype == "Check":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif self.get(df.fieldname) is not None:
if df.fieldtype == "Int":
self.set(df.fieldname, cint(self.get(df.fieldname)))
elif df.fieldtype in ("Float", "Currency", "Percent"):
self.set(df.fieldname, flt(self.get(df.fieldname)))
if self.docstatus is not None:
self.docstatus = cint(self.docstatus)
def _get_missing_mandatory_fields(self):
"""Get mandatory fields that do not have any values"""
def get_msg(df):
if df.fieldtype == "Table":
return "{}: {}: {}".format(_("Error"), _("Data missing in table"), _(df.label))
elif self.parentfield:
return "{}: {} {} #{}: {}: {}".format(_("Error"), frappe.bold(_(self.doctype)),
_("Row"), self.idx, _("Value missing for"), _(df.label))
else:
return _("Error: Value missing for {0}: {1}").format(_(df.parent), _(df.label))
missing = []
for df in self.meta.get("fields", {"reqd": ('=', 1)}):
if self.get(df.fieldname) in (None, []) or not strip_html(cstr(self.get(df.fieldname))).strip():
missing.append((df.fieldname, get_msg(df)))
# check for missing parent and parenttype
if self.meta.istable:
for fieldname in ("parent", "parenttype"):
if not self.get(fieldname):
missing.append((fieldname, get_msg(frappe._dict(label=fieldname))))
return missing
def get_invalid_links(self, is_submittable=False):
'''Returns list of invalid links and also updates fetch values if not set'''
def get_msg(df, docname):
if self.parentfield:
return "{} #{}: {}: {}".format(_("Row"), self.idx, _(df.label), docname)
else:
return "{}: {}".format(_(df.label), docname)
invalid_links = []
cancelled_links = []
for df in (self.meta.get_link_fields()
+ self.meta.get("fields", {"fieldtype": ('=', "Dynamic Link")})):
docname = self.get(df.fieldname)
if docname:
if df.fieldtype=="Link":
doctype = df.options
if not doctype:
frappe.throw(_("Options not set for link field {0}").format(df.fieldname))
else:
doctype = self.get(df.options)
if not doctype:
frappe.throw(_("{0} must be set first").format(self.meta.get_label(df.options)))
# MySQL is case insensitive. Preserve case of the original docname in the Link Field.
				# get a map of values to fetch along with this link query
# that are mapped as link_fieldname.source_fieldname in Options of
# Readonly or Data or Text type fields
fields_to_fetch = [
_df for _df in self.meta.get_fields_to_fetch(df.fieldname)
if not self.get(_df.fieldname)
]
if not fields_to_fetch:
# cache a single value type
values = frappe._dict(name=frappe.db.get_value(doctype, docname,
'name', cache=True))
else:
values_to_fetch = ['name'] + [_df.fetch_from.split('.')[-1]
for _df in fields_to_fetch]
# don't cache if fetching other values too
values = frappe.db.get_value(doctype, docname,
values_to_fetch, as_dict=True)
if frappe.get_meta(doctype).issingle:
values.name = doctype
if values:
setattr(self, df.fieldname, values.name)
for _df in fields_to_fetch:
setattr(self, _df.fieldname, values[_df.fetch_from.split('.')[-1]])
notify_link_count(doctype, docname)
if not values.name:
invalid_links.append((df.fieldname, docname, get_msg(df, docname)))
elif (df.fieldname != "amended_from"
and (is_submittable or self.meta.is_submittable) and frappe.get_meta(doctype).is_submittable
and cint(frappe.db.get_value(doctype, docname, "docstatus"))==2):
cancelled_links.append((df.fieldname, docname, get_msg(df, docname)))
return invalid_links, cancelled_links
def _validate_selects(self):
if frappe.flags.in_import:
return
for df in self.meta.get_select_fields():
if df.fieldname=="naming_series" or not (self.get(df.fieldname) and df.options):
continue
options = (df.options or "").split("\n")
# if only empty options
			if not any(options):
continue
# strip and set
self.set(df.fieldname, cstr(self.get(df.fieldname)).strip())
value = self.get(df.fieldname)
if value not in options and not (frappe.flags.in_test and value.startswith("_T-")):
# show an elaborate message
prefix = _("Row #{0}:").format(self.idx) if self.get("parentfield") else ""
label = _(self.meta.get_label(df.fieldname))
comma_options = '", "'.join(_(each) for each in options)
frappe.throw(_('{0} {1} cannot be "{2}". It should be one of "{3}"').format(prefix, label,
value, comma_options))
def _validate_constants(self):
if frappe.flags.in_import or self.is_new() or self.flags.ignore_validate_constants:
return
constants = [d.fieldname for d in self.meta.get("fields", {"set_only_once": ('=',1)})]
if constants:
values = frappe.db.get_value(self.doctype, self.name, constants, as_dict=True)
for fieldname in constants:
df = self.meta.get_field(fieldname)
# This conversion to string only when fieldtype is Date
if df.fieldtype == 'Date' or df.fieldtype == 'Datetime':
value = str(values.get(fieldname))
else:
value = values.get(fieldname)
if self.get(fieldname) != value:
frappe.throw(_("Value cannot be changed for {0}").format(self.meta.get_label(fieldname)),
frappe.CannotChangeConstantError)
def _validate_length(self):
if frappe.flags.in_install:
return
if self.meta.issingle:
# single doctype value type is mediumtext
return
for fieldname, value in iteritems(self.get_valid_dict()):
df = self.meta.get_field(fieldname)
if df and df.fieldtype in type_map and type_map[df.fieldtype][0]=="varchar":
max_length = cint(df.get("length")) or cint(varchar_len)
if len(cstr(value)) > max_length:
if self.parentfield and self.idx:
reference = _("{0}, Row {1}").format(_(self.doctype), self.idx)
else:
reference = "{0} {1}".format(_(self.doctype), self.name)
frappe.throw(_("{0}: '{1}' ({3}) will get truncated, as max characters allowed is {2}")\
.format(reference, _(df.label), max_length, value), frappe.CharacterLengthExceededError, title=_('Value too big'))
def _validate_update_after_submit(self):
# get the full doc with children
db_values = frappe.get_doc(self.doctype, self.name).as_dict()
for key in self.as_dict():
df = self.meta.get_field(key)
db_value = db_values.get(key)
if df and not df.allow_on_submit and (self.get(key) or db_value):
if df.fieldtype=="Table":
# just check if the table size has changed
# individual fields will be checked in the loop for children
self_value = len(self.get(key))
db_value = len(db_value)
else:
self_value = self.get_value(key)
if self_value != db_value:
frappe.throw(_("Not allowed to change {0} after submission").format(df.label),
frappe.UpdateAfterSubmitError)
def _sanitize_content(self):
"""Sanitize HTML and Email in field values. Used to prevent XSS.
- Ignore if 'Ignore XSS Filter' is checked or fieldtype is 'Code'
"""
if frappe.flags.in_install:
return
for fieldname, value in self.get_valid_dict().items():
if not value or not isinstance(value, string_types):
continue
value = frappe.as_unicode(value)
if (u"<" not in value and u">" not in value):
# doesn't look like html so no need
continue
elif "<!-- markdown -->" in value and not ("<script" in value or "javascript:" in value):
# should be handled separately via the markdown converter function
continue
df = self.meta.get_field(fieldname)
sanitized_value = value
if df and df.get("fieldtype") in ("Data", "Code", "Small Text") and df.get("options")=="Email":
sanitized_value = sanitize_email(value)
elif df and (df.get("ignore_xss_filter")
or (df.get("fieldtype")=="Code" and df.get("options")!="Email")
or df.get("fieldtype") in ("Attach", "Attach Image")
# cancelled and submit but not update after submit should be ignored
or self.docstatus==2
or (self.docstatus==1 and not df.get("allow_on_submit"))):
continue
else:
				sanitized_value = sanitize_html(value, linkify=df.fieldtype=='Text Editor')
			self.set(fieldname, sanitized_value)
	def _save_passwords(self):
		'''Save password field values in __Auth table'''
		if self.flags.ignore_save_passwords is True:
			return
		for df in self.meta.get('fields', {'fieldtype': ('=', 'Password')}):
			if self.flags.ignore_save_passwords and df.fieldname in self.flags.ignore_save_passwords: continue
			new_password = self.get(df.fieldname)
			if new_password and not self.is_dummy_password(new_password):
				# is not a dummy password like '*****'
				set_encrypted_password(self.doctype, self.name, new_password, df.fieldname)
				# set dummy password like '*****'
				self.set(df.fieldname, '*'*len(new_password))
	def get_password(self, fieldname='password', raise_exception=True):
		if self.get(fieldname) and not self.is_dummy_password(self.get(fieldname)):
			return self.get(fieldname)
		return get_decrypted_password(self.doctype, self.name, fieldname, raise_exception=raise_exception)
	def is_dummy_password(self, pwd):
		return ''.join(set(pwd))=='*'
	def precision(self, fieldname, parentfield=None):
		"""Returns float precision for a particular field (or get global default).
		:param fieldname: Fieldname for which precision is required.
		:param parentfield: If fieldname is in child table."""
		from frappe.model.meta import get_field_precision
		if parentfield and not isinstance(parentfield, string_types):
			parentfield = parentfield.parentfield
		cache_key = parentfield or "main"
		if not hasattr(self, "_precision"):
			self._precision = frappe._dict()
		if cache_key not in self._precision:
			self._precision[cache_key] = frappe._dict()
		if fieldname not in self._precision[cache_key]:
			self._precision[cache_key][fieldname] = None
			doctype = self.meta.get_field(parentfield).options if parentfield else self.doctype
			df = frappe.get_meta(doctype).get_field(fieldname)
			if df.fieldtype in ("Currency", "Float", "Percent"):
				self._precision[cache_key][fieldname] = get_field_precision(df, self)
		return self._precision[cache_key][fieldname]
	def get_formatted(self, fieldname, doc=None, currency=None, absolute_value=False, translated=False):
		from frappe.utils.formatters import format_value
		df = self.meta.get_field(fieldname)
		if not df and fieldname in default_fields:
			from frappe.model.meta import get_default_df
			df = get_default_df(fieldname)
		val = self.get(fieldname)
		if translated:
			val = _(val)
		if absolute_value and isinstance(val, (int, float)):
			val = abs(self.get(fieldname))
		if not doc:
			doc = getattr(self, "parent_doc", None) or self
		return format_value(val, df=df, doc=doc, currency=currency)
	def is_print_hide(self, fieldname, df=None, for_print=True):
		"""Returns true if fieldname is to be hidden for print.
		Print Hide can be set via the Print Format Builder or in the controller as a list
		of hidden fields. Example
			class MyDoc(Document):
				def __setup__(self):
					self.print_hide = ["field1", "field2"]
		:param fieldname: Fieldname to be checked if hidden.
		"""
		meta_df = self.meta.get_field(fieldname)
		if meta_df and meta_df.get("__print_hide"):
			return True
		print_hide = 0
		if self.get(fieldname)==0 and not self.meta.istable:
			print_hide = ( df and df.print_hide_if_no_value ) or ( meta_df and meta_df.print_hide_if_no_value )
		if not print_hide:
			if df and df.print_hide is not None:
				print_hide = df.print_hide
			elif meta_df:
				print_hide = meta_df.print_hide
		return print_hide
	def in_format_data(self, fieldname):
		"""Returns True if shown via Print Format::`format_data` property.
		Called from within standard print format."""
		doc = getattr(self, "parent_doc", self)
		if hasattr(doc, "format_data_map"):
			return fieldname in doc.format_data_map
		else:
			return True
	def reset_values_if_no_permlevel_access(self, has_access_to, high_permlevel_fields):
		"""If the user does not have permissions at permlevel > 0, then reset the values to original / default"""
		to_reset = []
		for df in high_permlevel_fields:
			if df.permlevel not in has_access_to and df.fieldtype not in display_fieldtypes:
				to_reset.append(df)
		if to_reset:
			if self.is_new():
				# if new, set default value
				ref_doc = frappe.new_doc(self.doctype)
			else:
				# get values from old doc
				if self.parent:
					self.parent_doc.get_latest()
					ref_doc = [d for d in self.parent_doc.get(self.parentfield) if d.name == self.name][0]
				else:
					ref_doc = self.get_latest()
			for df in to_reset:
				self.set(df.fieldname, ref_doc.get(df.fieldname))
	def get_value(self, fieldname):
		df = self.meta.get_field(fieldname)
		val = self.get(fieldname)
		return self.cast(val, df)
	def cast(self, value, df):
		return cast_fieldtype(df.fieldtype, value)
	def _extract_images_from_text_editor(self):
		from frappe.utils.file_manager import extract_images_from_doc
		if self.doctype != "DocType":
			for df in self.meta.get("fields", {"fieldtype": ('=', "Text Editor")}):
				extract_images_from_doc(self, df.fieldname)
def _filter(data, filters, limit=None):
	"""pass filters as:
		{"key": "val", "key": ["!=", "val"],
		"key": ["in", "val"], "key": ["not in", "val"], "key": "^val",
		"key" : True (exists), "key": False (does not exist) }"""
	out, _filters = [], {}
	if not data:
		return out
	# setup filters as tuples
	if filters:
		for f in filters:
			fval = filters[f]
			if not isinstance(fval, (tuple, list)):
				if fval is True:
					fval = ("not None", fval)
				elif fval is False:
					fval = ("None", fval)
				elif isinstance(fval, string_types) and fval.startswith("^"):
					fval = ("^", fval[1:])
				else:
					fval = ("=", fval)
			_filters[f] = fval
	for d in data:
		add = True
		for f, fval in iteritems(_filters):
			if not frappe.compare(getattr(d, f, None), fval[0], fval[1]):
				add = False
				break
		if add:
			out.append(d)
			if limit and (len(out)-1)==limit:
				break
	return out
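# Illustrative use of _filter (editor's example, not part of the original module);
# assumes `rows` is a list of dicts/objects carrying the named attributes:
#   _filter(rows, {"docstatus": 1})              # docstatus == 1
#   _filter(rows, {"status": ["!=", "Closed"]})  # status != "Closed"
#   _filter(rows, {"name": "^TASK"}, limit=10)   # name starts with "TASK"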
| mit | -9,026,169,210,520,244,000 | 29.905172 | 120 | 0.66308 | false | 3.15859 | false | false | false |
Sult/evetool | evetool/settings.py | 1 | 4146 | """
Django settings for evetool project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'u^ez9)ak2z9*x(ujdaoxtmfysb@hb4!li3-x8d4&@&la4jd2_q'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'evetool.urls'
WSGI_APPLICATION = 'evetool.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "evetool",
'USER': "sult",
'PASSWORD': "admin",
'HOST': "localhost",
'PORT': "",
},
'eveassets': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "evetool_eveassets",
'USER': "eve",
'PASSWORD': "admin",
'HOST': "localhost",
'PORT': "",
},
'metrics': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': "evetool_metrics",
'USER': "eve",
'PASSWORD': "admin",
'HOST': "localhost",
'PORT': "",
},
}
DATABASE_ROUTERS = ['evetool.router.Router']
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_collected')
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
MEDIA_URL = '/media/'
# STATICFILES_FINDERS = (
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
# )
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#installed packages
'django_extensions',
#my packages
'metrics',
'eveassets',
'users',
'apis',
'tasks',
'characters',
)
LOGIN_URL = "/"
#### Base values for all sorts of things
IMAGE_SIZES = (
("Tiny", 32),
("Small", 64),
("Medium", 128),
("Large", 256),
("Huge", 512),
("Special", 200),
)
# Amount of allowed API requests to the EVE site per second
EVE_API_REQUESTS = 10
| mit | 8,719,298,617,498,412,000 | 23.975904 | 73 | 0.646406 | false | 3.437811 | false | false | false |
SEL-Columbia/commcare-hq | corehq/apps/users/bulkupload.py | 1 | 19528 | from StringIO import StringIO
import logging
from couchdbkit.exceptions import (
BulkSaveError,
MultipleResultsFound,
ResourceNotFound,
)
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from corehq.apps.groups.models import Group
from corehq.apps.users.forms import CommCareAccountForm
from corehq.apps.users.util import normalize_username, raw_username
from corehq.apps.users.models import CommCareUser, CouchUser
from corehq.apps.domain.models import Domain
from couchexport.writers import Excel2007ExportWriter
from dimagi.utils.excel import flatten_json, json_to_headers, \
alphanumeric_sort_key
from corehq.apps.commtrack.util import get_supply_point, submit_mapping_case_block
from corehq.apps.commtrack.models import CommTrackUser, SupplyPointCase
from soil import DownloadBase
class UserUploadError(Exception):
pass
required_headers = set(['username'])
allowed_headers = set(['password', 'phone-number', 'email', 'user_id', 'name', 'group', 'data', 'language']) | required_headers
def check_headers(user_specs):
headers = set(user_specs.fieldnames)
illegal_headers = headers - allowed_headers
missing_headers = required_headers - headers
messages = []
for header_set, label in (missing_headers, 'required'), (illegal_headers, 'illegal'):
if header_set:
messages.append(_('The following are {label} column headers: {headers}.').format(
label=label, headers=', '.join(header_set)))
if messages:
raise UserUploadError('\n'.join(messages))
class GroupMemoizer(object):
"""
If you use this to get a group, do not set group.name directly;
use group_memoizer.rename_group(group, name) instead.
"""
def __init__(self, domain):
self.groups_by_name = {}
self.groups_by_id = {}
self.groups = set()
self.domain = domain
def load_all(self):
for group in Group.by_domain(self.domain):
self.add_group(group)
def add_group(self, new_group):
        # TODO: two rows -- one identifying a group by id and another by name --
        # could actually refer to the same group; in that case the second row
        # silently overwrites the first.
assert new_group.name
if new_group.get_id:
self.groups_by_id[new_group.get_id] = new_group
self.groups_by_name[new_group.name] = new_group
self.groups.add(new_group)
def by_name(self, group_name):
if not self.groups_by_name.has_key(group_name):
group = Group.by_name(self.domain, group_name)
if not group:
self.groups_by_name[group_name] = None
return None
self.add_group(group)
return self.groups_by_name[group_name]
def get(self, group_id):
if not self.groups_by_id.has_key(group_id):
group = Group.get(group_id)
if group.domain != self.domain:
raise ResourceNotFound()
self.add_group(group)
return self.groups_by_id[group_id]
def create(self, domain, name):
group = Group(domain=domain, name=name)
self.add_group(group)
return group
def rename_group(self, group, name):
# This isn't always true, you can rename A => B and then B => C,
# and what was A will now be called B when you try to change
# what was B to be called C. That's fine, but you don't want to
# delete someone else's entry
if self.groups_by_name.get(group.name) is group:
del self.groups_by_name[group.name]
group.name = name
self.add_group(group)
def save_all(self):
Group.bulk_save(self.groups)
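# Illustrative usage of GroupMemoizer (editor's sketch, not part of the original
# module) -- note that names are changed via rename_group(), never by assigning
# group.name directly:
#
#     memoizer = GroupMemoizer(domain)
#     memoizer.load_all()
#     group = memoizer.by_name("field team")
#     if group:
#         memoizer.rename_group(group, "field team (archived)")
#     memoizer.save_all()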
def _fmt_phone(phone_number):
if phone_number and not isinstance(phone_number, basestring):
phone_number = str(int(phone_number))
return phone_number.lstrip("+")
class LocationCache(object):
def __init__(self):
self.cache = {}
def get(self, site_code, domain):
if not site_code:
return None
if site_code in self.cache:
return self.cache[site_code]
else:
supply_point = get_supply_point(
domain,
site_code
)['case']
self.cache[site_code] = supply_point
return supply_point
class UserLocMapping(object):
def __init__(self, username, domain, location_cache):
self.username = username
self.domain = domain
self.to_add = set()
self.to_remove = set()
self.location_cache = location_cache
def get_supply_point_from_location(self, sms_code):
return self.location_cache.get(sms_code, self.domain)
def save(self):
"""
Calculate which locations need added or removed, then submit
one caseblock to handle this
"""
user = CommTrackUser.get_by_username(self.username)
if not user:
            raise UserUploadError(_('no user with username {} found!'.format(self.username)))
# have to rewrap since we need to force it to a commtrack user
user = CommTrackUser.wrap(user.to_json())
current_locations = user.locations
current_location_codes = [loc.site_code for loc in current_locations]
commit_list = {}
messages = []
def _add_loc(loc, clear=False):
sp = self.get_supply_point_from_location(loc)
if sp is None:
messages.append(_("No supply point found for location '{}'. "
"Make sure the location type is not set to administrative only "
"and that the location has a valid sms code."
).format(loc or ''))
else:
commit_list.update(user.supply_point_index_mapping(sp, clear))
for loc in self.to_add:
if loc not in current_location_codes:
_add_loc(loc)
for loc in self.to_remove:
if loc in current_location_codes:
_add_loc(loc, clear=True)
if commit_list:
submit_mapping_case_block(user, commit_list)
return messages
def create_or_update_locations(domain, location_specs, log):
location_cache = LocationCache()
users = {}
for row in location_specs:
username = row.get('username')
try:
username = normalize_username(username, domain)
except ValidationError:
log['errors'].append(_("Username must be a valid email address: %s") % username)
else:
location_code = unicode(row.get('location-sms-code'))
if username in users:
user_mapping = users[username]
else:
user_mapping = UserLocMapping(username, domain, location_cache)
users[username] = user_mapping
if row.get('remove') == 'y':
user_mapping.to_remove.add(location_code)
else:
user_mapping.to_add.add(location_code)
for username, mapping in users.iteritems():
try:
messages = mapping.save()
log['errors'].extend(messages)
except UserUploadError as e:
log['errors'].append(_('Unable to update locations for {user} because {message}'.format(
user=username, message=e
)))
def create_or_update_groups(domain, group_specs, log):
group_memoizer = GroupMemoizer(domain)
group_memoizer.load_all()
group_names = set()
for row in group_specs:
group_id = row.get('id')
group_name = row.get('name')
case_sharing = row.get('case-sharing')
reporting = row.get('reporting')
data = row.get('data')
# check that group_names are unique
if group_name in group_names:
log['errors'].append('Your spreadsheet has multiple groups called "%s" and only the first was processed' % group_name)
continue
else:
group_names.add(group_name)
# check that there's a group_id or a group_name
if not group_id and not group_name:
log['errors'].append('Your spreadsheet has a group with no name or id and it has been ignored')
continue
try:
if group_id:
group = group_memoizer.get(group_id)
else:
group = group_memoizer.by_name(group_name)
if not group:
group = group_memoizer.create(domain=domain, name=group_name)
except ResourceNotFound:
log["errors"].append('There are no groups on CommCare HQ with id "%s"' % group_id)
except MultipleResultsFound:
log["errors"].append("There are multiple groups on CommCare HQ named: %s" % group_name)
else:
if group_name:
group_memoizer.rename_group(group, group_name)
group.case_sharing = case_sharing
group.reporting = reporting
group.metadata = data
return group_memoizer
def create_or_update_users_and_groups(domain, user_specs, group_specs, location_specs, task=None):
ret = {"errors": [], "rows": []}
total = len(user_specs) + len(group_specs) + len(location_specs)
def _set_progress(progress):
if task is not None:
DownloadBase.set_progress(task, progress, total)
group_memoizer = create_or_update_groups(domain, group_specs, log=ret)
current = len(group_specs)
usernames = set()
user_ids = set()
allowed_groups = set(group_memoizer.groups)
allowed_group_names = [group.name for group in allowed_groups]
try:
for row in user_specs:
_set_progress(current)
current += 1
data, email, group_names, language, name, password, phone_number, user_id, username = (
row.get(k) for k in sorted(allowed_headers)
)
if password:
password = unicode(password)
group_names = group_names or []
try:
username = normalize_username(str(username), domain)
except TypeError:
username = None
except ValidationError:
ret['rows'].append({
'username': username,
'row': row,
'flag': _('username cannot contain spaces or symbols'),
})
continue
status_row = {
'username': raw_username(username) if username else None,
'row': row,
}
if username in usernames or user_id in user_ids:
status_row['flag'] = 'repeat'
elif not username and not user_id:
status_row['flag'] = 'missing-data'
else:
try:
if username:
usernames.add(username)
if user_id:
user_ids.add(user_id)
if user_id:
user = CommCareUser.get_by_user_id(user_id, domain)
else:
user = CommCareUser.get_by_username(username)
def is_password(password):
if not password:
return False
for c in password:
if c != "*":
return True
return False
if user:
if user.domain != domain:
raise UserUploadError(_(
'User with username %(username)r is '
'somehow in domain %(domain)r'
) % {'username': user.username, 'domain': user.domain})
if username and user.username != username:
user.change_username(username)
if is_password(password):
user.set_password(password)
status_row['flag'] = 'updated'
else:
if len(raw_username(username)) > CommCareAccountForm.max_len_username:
ret['rows'].append({
'username': username,
'row': row,
'flag': _("username cannot contain greater than %d characters" %
CommCareAccountForm.max_len_username)
})
continue
if not is_password(password):
raise UserUploadError(_("Cannot create a new user with a blank password"))
user = CommCareUser.create(domain, username, password, uuid=user_id or '', commit=False)
status_row['flag'] = 'created'
if phone_number:
user.add_phone_number(_fmt_phone(phone_number), default=True)
if name:
user.set_full_name(name)
if data:
user.user_data.update(data)
if language:
user.language = language
if email:
user.email = email
user.save()
if is_password(password):
# Without this line, digest auth doesn't work.
# With this line, digest auth works.
# Other than that, I'm not sure what's going on
user.get_django_user().check_password(password)
for group_id in Group.by_user(user, wrap=False):
group = group_memoizer.get(group_id)
if group.name not in group_names:
group.remove_user(user, save=False)
for group_name in group_names:
if group_name not in allowed_group_names:
raise UserUploadError(_(
"Can't add to group '%s' "
"(try adding it to your spreadsheet)"
) % group_name)
group_memoizer.by_name(group_name).add_user(user, save=False)
except (UserUploadError, CouchUser.Inconsistent) as e:
status_row['flag'] = unicode(e)
ret["rows"].append(status_row)
finally:
try:
group_memoizer.save_all()
except BulkSaveError as e:
_error_message = (
"Oops! We were not able to save some of your group changes. "
"Please make sure no one else is editing your groups "
"and try again."
)
logging.exception((
'BulkSaveError saving groups. '
'User saw error message "%s". Errors: %s'
) % (_error_message, e.errors))
ret['errors'].append(_error_message)
create_or_update_locations(domain, location_specs, log=ret)
_set_progress(total)
return ret
class GroupNameError(Exception):
def __init__(self, blank_groups):
self.blank_groups = blank_groups
@property
def message(self):
return "The following group ids have a blank name: %s." % (
', '.join([group.get_id for group in self.blank_groups])
)
def get_location_rows(domain):
users = CommTrackUser.by_domain(domain)
mappings = []
for user in users:
locations = user.locations
for location in locations:
mappings.append([
user.raw_username,
location.site_code,
location.name
])
return mappings
def dump_users_and_groups(response, domain):
file = StringIO()
writer = Excel2007ExportWriter()
users = CommCareUser.by_domain(domain)
user_data_keys = set()
user_groups_length = 0
user_dicts = []
group_data_keys = set()
group_dicts = []
group_memoizer = GroupMemoizer(domain=domain)
# load groups manually instead of calling group_memoizer.load_all()
# so that we can detect blank groups
blank_groups = set()
for group in Group.by_domain(domain):
if group.name:
group_memoizer.add_group(group)
else:
blank_groups.add(group)
if blank_groups:
raise GroupNameError(blank_groups=blank_groups)
for user in users:
data = user.user_data
group_names = sorted(map(
lambda id: group_memoizer.get(id).name,
Group.by_user(user, wrap=False)
), key=alphanumeric_sort_key)
# exclude password and user_id
user_dicts.append({
'data': data,
'group': group_names,
'name': user.full_name,
# dummy display string for passwords
'password': "********",
'phone-number': user.phone_number,
'email': user.email,
'username': user.raw_username,
'language': user.language,
'user_id': user._id,
})
user_data_keys.update(user.user_data.keys() if user.user_data else {})
user_groups_length = max(user_groups_length, len(group_names))
sorted_groups = sorted(group_memoizer.groups, key=lambda group: alphanumeric_sort_key(group.name))
for group in sorted_groups:
group_dicts.append({
'id': group.get_id,
'name': group.name,
'case-sharing': group.case_sharing,
'reporting': group.reporting,
'data': group.metadata,
})
group_data_keys.update(group.metadata.keys() if group.metadata else {})
# include obscured password column for adding new users
user_headers = ['username', 'password', 'name', 'phone-number', 'email', 'language', 'user_id']
user_headers.extend(json_to_headers(
{'data': dict([(key, None) for key in user_data_keys])}
))
user_headers.extend(json_to_headers(
{'group': range(1, user_groups_length + 1)}
))
group_headers = ['id', 'name', 'case-sharing?', 'reporting?']
group_headers.extend(json_to_headers(
{'data': dict([(key, None) for key in group_data_keys])}
))
headers = [
('users', [user_headers]),
('groups', [group_headers]),
]
commtrack_enabled = Domain.get_by_name(domain).commtrack_enabled
if commtrack_enabled:
headers.append(
('locations', [['username', 'location-sms-code', 'location name (optional)']])
)
writer.open(
header_table=headers,
file=file,
)
def get_user_rows():
for user_dict in user_dicts:
row = dict(flatten_json(user_dict))
yield [row.get(header) or '' for header in user_headers]
def get_group_rows():
for group_dict in group_dicts:
row = dict(flatten_json(group_dict))
yield [row.get(header) or '' for header in group_headers]
rows = [
('users', get_user_rows()),
('groups', get_group_rows()),
]
if commtrack_enabled:
rows.append(
('locations', get_location_rows(domain))
)
writer.write(rows)
writer.close()
response.write(file.getvalue())
| bsd-3-clause | 4,689,480,229,207,205,000 | 35.500935 | 130 | 0.552284 | false | 4.252613 | false | false | false |
Jarn/jarn.viewdoc | jarn/viewdoc/testing.py | 1 | 1874 | import sys
import os
import unittest
import tempfile
import shutil
import functools
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
from os.path import realpath, isdir
class ChdirStack(object):
"""Stack of current working directories."""
def __init__(self):
self.stack = []
def __len__(self):
return len(self.stack)
def push(self, dir):
"""Push cwd on stack and change to 'dir'.
"""
self.stack.append(os.getcwd())
os.chdir(dir or os.getcwd())
def pop(self):
"""Pop dir off stack and change to it.
"""
if len(self.stack):
os.chdir(self.stack.pop())
class JailSetup(unittest.TestCase):
"""Manage a temporary working directory."""
dirstack = None
tempdir = None
def setUp(self):
self.dirstack = ChdirStack()
try:
self.tempdir = realpath(self.mkdtemp())
self.dirstack.push(self.tempdir)
except:
self.cleanUp()
raise
def tearDown(self):
self.cleanUp()
def cleanUp(self):
if self.dirstack is not None:
while self.dirstack:
self.dirstack.pop()
if self.tempdir is not None:
if isdir(self.tempdir):
shutil.rmtree(self.tempdir)
def mkdtemp(self):
return tempfile.mkdtemp()
def mkfile(self, name, body=''):
with open(name, 'wt') as file:
file.write(body)
def quiet(func):
"""Decorator swallowing stdout and stderr output.
"""
def wrapper(*args, **kw):
saved = sys.stdout, sys.stderr
sys.stdout = sys.stderr = StringIO()
try:
return func(*args, **kw)
finally:
sys.stdout, sys.stderr = saved
return functools.wraps(func)(wrapper)
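# Illustrative usage (editor's example, not part of the original module):
#
#     @quiet
#     def run_noisy_step():
#         print("suppressed during tests")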
| bsd-2-clause | -3,124,150,782,722,566,000 | 21.309524 | 53 | 0.570971 | false | 4.056277 | false | false | false |
halfakop/Teacup_Firmware | configtool/gui.py | 1 | 19693 |
import sys
import time
try:
import wx
except:
print("ImportError: No module named wx\n\n"
"wxPython is not installed. This program requires wxPython to run.\n"
"See your package manager and/or http://wxpython.org/download.php.")
time.sleep(10)
sys.exit(-1)
import os.path
from configtool.data import reHelpText
from configtool.decoration import Decoration
from configtool.settings import Settings, SettingsDlg
from configtool.printerpanel import PrinterPanel
from configtool.boardpanel import BoardPanel
from configtool.build import Build, Upload
from configtool.data import reInclude
ID_LOAD_PRINTER = 1000
ID_SAVE_PRINTER = 1001
ID_SAVE_PRINTER_AS = 1002
ID_LOAD_BOARD = 1010
ID_SAVE_BOARD = 1011
ID_SAVE_BOARD_AS = 1012
ID_LOAD_CONFIG = 1020
ID_SAVE_CONFIG = 1021
ID_BUILD = 1030
ID_UPLOAD = 1031
ID_SETTINGS = 1040
ID_HELP = 1050
ID_REPORT = 1051
ID_ABOUT = 1052
class ConfigFrame(wx.Frame):
def __init__(self, settings):
wx.Frame.__init__(self, None, -1, "Teacup Configtool", size = (880, 550))
self.Bind(wx.EVT_CLOSE, self.onClose)
self.Bind(wx.EVT_SIZE, self.onResize)
self.deco = Decoration()
panel = wx.Panel(self, -1)
panel.SetBackgroundColour(self.deco.getBackgroundColour())
panel.Bind(wx.EVT_PAINT, self.deco.onPaintBackground)
self.settings = settings
self.settings.app = self
self.settings.font = wx.Font(8, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD)
self.heaters = []
self.savePrtEna = False
self.saveBrdEna = False
self.protPrtFile = False
self.protBrdFile = False
sz = wx.BoxSizer(wx.HORIZONTAL)
self.nb = wx.Notebook(panel, wx.ID_ANY, size = (880, 550),
style = wx.BK_DEFAULT)
self.nb.SetBackgroundColour(self.deco.getBackgroundColour())
self.nb.SetFont(self.settings.font)
self.printerFileName = None
self.printerTabDecor = ""
self.printerBaseText = "Printer"
self.pgPrinter = PrinterPanel(self, self.nb, self.settings)
self.nb.AddPage(self.pgPrinter, self.printerBaseText)
self.boardFileName = None
self.boardTabDecor = ""
self.boardBaseText = "Board"
self.pgBoard = BoardPanel(self, self.nb, self.settings)
self.nb.AddPage(self.pgBoard, self.boardBaseText)
panel.Fit()
self.panel = panel
sz.Add(self.nb, 1, wx.EXPAND + wx.ALL, 5)
self.SetSizer(sz)
self.makeMenu()
def onClose(self, evt):
if not self.pgPrinter.confirmLoseChanges("exit"):
return
if not self.pgBoard.confirmLoseChanges("exit"):
return
self.Destroy()
def onResize(self, evt):
self.panel.SetSize(self.GetClientSize())
self.Refresh()
    evt.Skip()
def setPrinterTabFile(self, fn):
self.printerFileName = fn
self.updatePrinterTab()
def setPrinterTabDecor(self, prefix):
self.printerTabDecor = prefix
self.updatePrinterTab()
def updatePrinterTab(self):
txt = self.printerTabDecor + self.printerBaseText
if self.printerFileName:
txt += " <%s>" % self.printerFileName
self.nb.SetPageText(0, txt)
def setBoardTabFile(self, fn):
self.boardFileName = fn
self.updateBoardTab()
def setBoardTabDecor(self, prefix):
self.boardTabDecor = prefix
self.updateBoardTab()
def updateBoardTab(self):
txt = self.boardTabDecor + self.boardBaseText
if self.boardFileName:
txt += " <%s>" % self.boardFileName
self.nb.SetPageText(1, txt)
def setHeaters(self, ht):
self.heaters = ht
self.pgPrinter.setHeaters(ht)
def makeMenu(self):
file_menu = wx.Menu()
file_menu.Append(ID_LOAD_CONFIG, "Load config.h",
"Load config.h and its named printer and board files.")
self.Bind(wx.EVT_MENU, self.onLoadConfig, id = ID_LOAD_CONFIG)
file_menu.Enable(ID_LOAD_CONFIG, False)
file_menu.Append(ID_SAVE_CONFIG, "Save config.h", "Save config.h file.")
self.Bind(wx.EVT_MENU, self.onSaveConfig, id = ID_SAVE_CONFIG)
file_menu.Enable(ID_SAVE_CONFIG, False)
file_menu.AppendSeparator()
file_menu.Append(ID_LOAD_PRINTER, "Load printer",
"Load a printer configuration file.")
self.Bind(wx.EVT_MENU, self.pgPrinter.onLoadConfig, id = ID_LOAD_PRINTER)
file_menu.Append(ID_SAVE_PRINTER, "Save printer",
"Save printer configuration.")
self.Bind(wx.EVT_MENU, self.onSavePrinterConfig, id = ID_SAVE_PRINTER)
file_menu.Enable(ID_SAVE_PRINTER, False)
file_menu.Append(ID_SAVE_PRINTER_AS, "Save printer as...",
"Save printer configuration to a new file.")
self.Bind(wx.EVT_MENU, self.onSavePrinterConfigAs, id = ID_SAVE_PRINTER_AS)
file_menu.Enable(ID_SAVE_PRINTER_AS, False)
file_menu.AppendSeparator()
file_menu.Append(ID_LOAD_BOARD, "Load board",
"Load a board configuration file.")
self.Bind(wx.EVT_MENU, self.pgBoard.onLoadConfig, id = ID_LOAD_BOARD)
file_menu.Append(ID_SAVE_BOARD, "Save board", "Save board configuration.")
self.Bind(wx.EVT_MENU, self.onSaveBoardConfig, id = ID_SAVE_BOARD)
file_menu.Enable(ID_SAVE_BOARD, False)
file_menu.Append(ID_SAVE_BOARD_AS, "Save board as...",
"Save board configuration to a new file.")
self.Bind(wx.EVT_MENU, self.onSaveBoardConfigAs, id = ID_SAVE_BOARD_AS)
file_menu.Enable(ID_SAVE_BOARD_AS, False)
file_menu.AppendSeparator()
file_menu.Append(wx.ID_EXIT, "E&xit", "Exit the application.")
self.Bind(wx.EVT_MENU, self.onClose, id = wx.ID_EXIT)
self.fileMenu = file_menu
menu_bar = wx.MenuBar()
menu_bar.Append(file_menu, "&File")
edit_menu = wx.Menu()
edit_menu.Append(ID_SETTINGS, "Settings", "Change settings.")
self.Bind(wx.EVT_MENU, self.onEditSettings, id = ID_SETTINGS)
self.editMenu = edit_menu
menu_bar.Append(edit_menu, "&Edit")
build_menu = wx.Menu()
build_menu.Append(ID_BUILD, "Build", "Build the executable.")
self.Bind(wx.EVT_MENU, self.onBuild, id = ID_BUILD)
build_menu.Append(ID_UPLOAD, "Upload", "Upload the executable.")
self.Bind(wx.EVT_MENU, self.onUpload, id = ID_UPLOAD)
self.buildMenu = build_menu
menu_bar.Append(build_menu, "&Build")
help_menu = wx.Menu()
help_menu.Append(ID_HELP, "Help", "Find help.")
self.Bind(wx.EVT_MENU, self.onHelp, id = ID_HELP)
help_menu.Append(ID_REPORT, "Report problem",
"Report a problem to Teacup maintainers.")
self.Bind(wx.EVT_MENU, self.onReportProblem, id = ID_REPORT)
help_menu.AppendSeparator()
help_menu.Append(ID_ABOUT, "About Teacup")
self.Bind(wx.EVT_MENU, self.onAbout, id = ID_ABOUT)
self.helpMenu = help_menu
menu_bar.Append(help_menu, "&Help")
self.SetMenuBar(menu_bar)
loadFlag = self.checkEnableLoadConfig()
self.checkEnableUpload()
if loadFlag:
self.loadConfigFile("config.h")
def onSaveBoardConfig(self, evt):
rc = self.pgBoard.onSaveConfig(evt)
if rc:
self.checkEnableLoadConfig()
return rc
def onSaveBoardConfigAs(self, evt):
rc = self.pgBoard.onSaveConfigAs(evt)
if rc:
self.checkEnableLoadConfig()
return rc
def onSavePrinterConfig(self, evt):
rc = self.pgPrinter.onSaveConfig(evt)
if rc:
self.checkEnableLoadConfig()
return rc
def onSavePrinterConfigAs(self, evt):
rc = self.pgPrinter.onSaveConfigAs(evt)
if rc:
self.checkEnableLoadConfig()
return rc
def checkEnableLoadConfig(self):
fn = os.path.join(self.settings.folder, "config.h")
if os.path.isfile(fn):
self.fileMenu.Enable(ID_LOAD_CONFIG, True)
self.buildMenu.Enable(ID_BUILD, True)
return True
else:
self.fileMenu.Enable(ID_LOAD_CONFIG, False)
self.buildMenu.Enable(ID_BUILD, False)
return False
def checkEnableUpload(self):
fn = os.path.join(self.settings.folder, "teacup.hex")
if os.path.isfile(fn):
self.buildMenu.Enable(ID_UPLOAD, True)
else:
self.buildMenu.Enable(ID_UPLOAD, False)
def enableSavePrinter(self, saveFlag, saveAsFlag):
self.fileMenu.Enable(ID_SAVE_PRINTER, saveFlag)
self.fileMenu.Enable(ID_SAVE_PRINTER_AS, saveAsFlag)
self.savePrtEna = saveAsFlag
self.protPrtFile = not saveFlag
if self.savePrtEna and self.saveBrdEna:
self.enableSaveConfig(True)
else:
self.enableSaveConfig(False)
def enableSaveBoard(self, saveFlag, saveAsFlag):
self.fileMenu.Enable(ID_SAVE_BOARD, saveFlag)
self.fileMenu.Enable(ID_SAVE_BOARD_AS, saveAsFlag)
self.saveBrdEna = saveAsFlag
self.protBrdFile = not saveFlag
if self.savePrtEna and self.saveBrdEna:
self.enableSaveConfig(True)
else:
self.enableSaveConfig(False)
def enableSaveConfig(self, flag):
self.fileMenu.Enable(ID_SAVE_CONFIG, flag)
def onLoadConfig(self, evt):
self.loadConfigFile("config.h")
def loadConfigFile(self, fn):
if not self.pgPrinter.confirmLoseChanges("load config"):
return False
if not self.pgBoard.confirmLoseChanges("load config"):
return False
pfile, bfile = self.getConfigFileNames(fn)
if not pfile:
self.message("Config file did not contain a printer file "
"include statement.", "Config error")
return False
else:
if not self.pgPrinter.loadConfigFile(pfile):
self.message("There was a problem loading the printer config file:\n%s"
% pfile, "Config error")
return False
if not bfile:
self.message("Config file did not contain a board file "
"include statement.", "Config error")
return False
else:
if not self.pgBoard.loadConfigFile(bfile):
self.message("There was a problem loading the board config file:\n%s"
% bfile, "Config error")
return False
return True
def getConfigFileNames(self, fn):
pfile = None
bfile = None
path = os.path.join(self.settings.folder, fn)
try:
cfgBuffer = list(open(path))
except:
self.message("Unable to process config file %s." % fn, "File error")
return None, None
for ln in cfgBuffer:
if not ln.lstrip().startswith("#include"):
continue
m = reInclude.search(ln)
if m:
t = m.groups()
if len(t) == 1:
if "printer." in t[0]:
if pfile:
self.message("Multiple printer file include statements.\n"
"Ignoring %s." % ln, "Config error",
wx.OK + wx.ICON_WARNING)
else:
pfile = os.path.join(self.settings.folder, t[0])
elif "board." in t[0]:
if bfile:
self.message("Multiple board file include statements.\n"
"Ignoring %s." % ln, "Config error",
wx.OK + wx.ICON_WARNING)
else:
bfile = os.path.join(self.settings.folder, t[0])
else:
self.message("Unable to parse include statement:\n%s" % ln,
"Config error")
return pfile, bfile
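  # Editor's note: an illustrative config.h that this parser accepts (file names
  # are examples only; onSaveConfig() below writes the same two-include layout):
  #
  #   // Configuration for controller board.
  #   #include "config/board.example.h"
  #   // Configuration for printer board.
  #   #include "config/printer.example.h"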
def onSaveConfig(self, evt):
fn = os.path.join(self.settings.folder, "config.h")
try:
fp = open(fn, 'w')
except:
self.message("Unable to open config.h for output.", "File error")
return False
bfn = self.pgBoard.getFileName()
if self.pgBoard.isModified() and self.pgBoard.isValid():
if not self.pgBoard.saveConfigFile(bfn):
return False
else:
self.pgBoard.generateTempTables()
pfn = self.pgPrinter.getFileName()
if self.pgPrinter.isModified() and self.pgPrinter.isValid():
if not self.pgPrinter.saveConfigFile(pfn):
return False
prefix = self.settings.folder + os.path.sep
lpfx = len(prefix)
if bfn.startswith(prefix):
rbfn = bfn[lpfx:]
else:
rbfn = bfn
if pfn.startswith(prefix):
rpfn = pfn[lpfx:]
else:
rpfn = pfn
fp.write("\n")
fp.write("// Configuration for controller board.\n")
fp.write("#include \"%s\"\n" % rbfn)
fp.write("\n")
fp.write("// Configuration for printer board.\n")
fp.write("#include \"%s\"\n" % rpfn)
fp.close()
self.checkEnableLoadConfig()
return True
def onBuild(self, evt):
self.onBuildorUpload(True)
def onUpload(self, evt):
self.onBuildorUpload(False)
def onBuildorUpload(self, buildFlag):
if not (self.pgPrinter.hasData() or self.pgBoard.hasData()):
dlg = wx.MessageDialog(self, "Data needs to be loaded. "
"Click Yes to load config.h.",
"Data missing",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
self.loadConfigFile("config.h")
else:
if self.pgPrinter.isModified():
dlg = wx.MessageDialog(self, "Printer data needs to be saved. Click "
"Yes to save printer configuration.",
"Changes pending",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
if self.protPrtFile:
rc = self.onSavePrinterConfigAs(None)
else:
rc = self.onSavePrinterConfig(None)
if not rc:
return
if self.pgBoard.isModified():
dlg = wx.MessageDialog(self, "Board data needs to be saved. Click "
"Yes to save board configuration.",
"Changes pending",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
if self.protBrdFile:
rc = self.onSaveBoardConfigAs(None)
else:
rc = self.onSaveBoardConfig(None)
if not rc:
return
if not self.verifyConfigLoaded():
dlg = wx.MessageDialog(self, "Loaded configuration does not match the "
"config.h file. Click Yes to save config.h.",
"Configuration changed",
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
rc = dlg.ShowModal()
dlg.Destroy()
if rc != wx.ID_YES:
return
if not self.onSaveConfig(None):
return
f_cpu, cpu = self.pgBoard.getCPUInfo()
if not cpu:
dlg = wx.MessageDialog(self, "Unable to determine CPU type.",
"CPU type error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if not f_cpu:
dlg = wx.MessageDialog(self, "Unable to determine CPU clock rate.",
"CPU clock rate error", wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if buildFlag:
dlg = Build(self, self.settings, f_cpu, cpu)
dlg.ShowModal()
dlg.Destroy()
self.checkEnableUpload()
else:
dlg = Upload(self, self.settings, f_cpu, cpu)
dlg.ShowModal()
dlg.Destroy()
def verifyConfigLoaded(self):
pfile, bfile = self.getConfigFileNames("config.h")
lpfile = self.pgPrinter.getFileName()
lbfile = self.pgBoard.getFileName()
return ((pfile == lpfile) and (bfile == lbfile))
def onEditSettings(self, evt):
dlg = SettingsDlg(self, self.settings)
rc = dlg.ShowModal()
dlg.Destroy()
def onHelp(self, evt):
self.message("Find help by hovering slowly over the buttons and text "
"fields. Tooltip should appear, explaining things.",
"Find help", style = wx.OK)
def onReportProblem(self, evt):
import urllib
import webbrowser
import subprocess
from sys import platform
# Testing allowed URLs up to 32 kB in size. Longer URLs are simply chopped.
mailRecipients ="reply+0004dc756da9f0641af0a3834c580ad5be469f4f6b" \
"5d4cfc92cf00000001118c958a92a169ce051faa8c@" \
"reply.github.com,[email protected]"
mailSubject = "Teacup problem report"
mailBody = "Please answer these questions before hitting \"send\":\n\n" \
"What did you try to do?\n\n\n" \
"What did you expect to happen?\n\n\n" \
"What happened instead?\n\n\n\n" \
"To allow developers to help, configuration files are " \
"attached, with help comments stripped:\n"
for f in self.pgBoard.getFileName(), self.pgPrinter.getFileName():
if not f:
mailBody += "\n(no file loaded)\n"
continue
mailBody += "\n" + os.path.basename(f) + ":\n"
mailBody += "----------------------------------------------\n"
try:
fc = open(f).read()
fc = reHelpText.sub("", fc)
mailBody += fc
except:
mailBody += "(could not read this file)\n"
mailBody += "----------------------------------------------\n"
url = "mailto:" + urllib.quote(mailRecipients) + \
"?subject=" + urllib.quote(mailSubject) + \
"&body=" + urllib.quote(mailBody)
    # This is a workaround for a bug in gvfs-open shipping with (at least) Ubuntu
    # 15.04. gvfs-open would open mailto:///[email protected] instead of
    # the requested mailto:[email protected].
if platform.startswith("linux"):
try:
subprocess.check_output(["gvfs-open", "--help"])
# Broken gvfs-open exists, so it might be used.
# Try to open the URL directly.
for urlOpener in "thunderbird", "evolution", "firefox", "mozilla", \
"epiphany", "konqueror", "chromium-browser", \
"google-chrome":
try:
subprocess.check_output([urlOpener, url], stderr=subprocess.STDOUT)
return
except:
pass
except:
pass
webbrowser.open_new(url)
def onAbout(self, evt):
# Get the contributors' top 10 with something like this:
# export B=experimental
# git log $B | grep "Author:" | sort | uniq | while \
# read A; do N=$(git log $B | grep "$A" | wc -l); echo "$N $A"; done | \
# sort -rn
self.message("Teacup Firmware is a 3D Printer and CNC machine controlling "
"firmware with emphasis on performance, efficiency and "
"outstanding quality. What Teacup does, shall it do very well."
"\n\n\n"
"Lots of people hard at work! Top 10 contributors:\n\n"
" Markus Hitter (542 commits)\n"
" Michael Moon (322 commits)\n"
" Phil Hord (55 commits)\n"
" Jeff Bernardis (51 commits)\n"
" Markus Amsler (47 commits)\n"
" David Forrest (27 commits)\n"
" Jim McGee (15 commits)\n"
" Ben Jackson (12 commits)\n"
" Bas Laarhoven (10 commits)\n"
" Stephan Walter (9 commits)\n"
" Roland Brochard (3 commits)\n"
" Jens Ch. Restemeier (3 commits)\n",
"About Teacup", style = wx.OK)
def message(self, text, title, style = wx.OK + wx.ICON_ERROR):
dlg = wx.MessageDialog(self, text, title, style)
dlg.ShowModal()
dlg.Destroy()
def StartGui(settings):
app = wx.App(False)
frame = ConfigFrame(settings)
frame.Show(True)
app.MainLoop()
| gpl-2.0 | 6,312,573,059,711,797,000 | 30.917342 | 80 | 0.607779 | false | 3.543819 | true | false | false |
ahwkuepper/stdme | app/constants.py | 1 | 8339 | import numpy as np
#labels for summary plots
d_label = np.array(["You", "Your gender", "Your age group", "Your race / ethnicity", "Your location"])
#US statistics
gender_number = {}
gender_number["Male"] = 155651602
gender_number["Female"] = 160477237
race_number = {}
race_number["Native"] = 1942876.0
race_number["Asian"] = 12721721.0
race_number["Black"] = 29489649.0
race_number["Hispanic"] = 46407173.0
race_number["Multiple"] = 5145135.0
race_number["Pacific"] = 473703.0
race_number["White"] = 161443167.0
age_number = {}
age_number["0-14"] = 61089123.0
age_number["15-19"] = 21158964.0
age_number["20-24"] = 22795438.0
age_number["25-29"] = 21580198.0
age_number["30-34"] = 21264389.0
age_number["35-39"] = 19603770.0
age_number["40-44"] = 20848920.0
age_number["45-54"] = 43767532.0
age_number["55-64"] = 39316431.0
age_number["65+"] = 44704074.0
#Chlamydia statistics
gender_rate = {}
gender_factor = {}
gender_rate["Male"] = 278.4e-5
gender_rate["Female"] = 627.2e-5
rate_average = ((gender_rate["Male"]*gender_number["Male"]
+gender_rate["Female"]*gender_number["Male"])
/(gender_number["Male"]+gender_number["Female"]))
gender_factor["Male"] = gender_rate["Male"]/rate_average
gender_factor["Female"] = gender_rate["Female"]/rate_average
gender_factor["Female"], gender_factor["Male"]
race_rate = {}
race_factor = {}
race_rate["Native"] = 689.1e-5
race_rate["Asian"] = 115.8e-5
race_rate["Black"] = 1152.6e-5
race_rate["Hispanic"] = 376.2e-5
race_rate["Multiple"] = 116.1e-5
race_rate["Pacific"] = 641.5e-5
race_rate["White"] = 187.0e-5
race_factor["Native"] = race_rate["Native"]/rate_average
race_factor["Asian"] = race_rate["Asian"]/rate_average
race_factor["Black"] = race_rate["Black"]/rate_average
race_factor["Hispanic"] = race_rate["Hispanic"]/rate_average
race_factor["Multiple"] = race_rate["Multiple"]/rate_average
race_factor["Pacific"] = race_rate["Pacific"]/rate_average
race_factor["White"] = race_rate["White"]/rate_average
age_rate = {}
age_factor = {}
age_rate["0-14"] = 20.0e-5
age_rate["15-19"] = 1804.0e-5
age_rate["20-24"] = 2484.6e-5
age_rate["25-29"] = 1176.2e-5
age_rate["30-34"] = 532.4e-5
age_rate["35-39"] = 268.0e-5
age_rate["40-44"] = 131.5e-5
age_rate["45-54"] = 56.6e-5
age_rate["55-64"] = 16.6e-5
age_rate["65+"] = 3.2e-5
age_factor["0-14"] = age_rate["0-14"]/rate_average
age_factor["15-19"] = age_rate["15-19"]/rate_average
age_factor["20-24"] = age_rate["20-24"]/rate_average
age_factor["25-29"] = age_rate["25-29"]/rate_average
age_factor["30-34"] = age_rate["30-34"]/rate_average
age_factor["35-39"] = age_rate["35-39"]/rate_average
age_factor["40-44"] = age_rate["40-44"]/rate_average
age_factor["45-54"] = age_rate["45-54"]/rate_average
age_factor["55-64"] = age_rate["55-64"]/rate_average
age_factor["65+"] = age_rate["65+"]/rate_average
#Gonorrhea statistics
gender_rate_gonorrhea = {}
gender_factor_gonorrhea = {}
gender_rate_gonorrhea["Male"] = 120.1e-5
gender_rate_gonorrhea["Female"] = 101.3e-5
rate_average_gonorrhea = ((gender_rate_gonorrhea["Male"]*gender_number["Male"]
+gender_rate_gonorrhea["Female"]*gender_number["Male"])
/(gender_number["Male"]+gender_number["Female"]))
gender_factor_gonorrhea["Male"] = gender_rate_gonorrhea["Male"]/rate_average
gender_factor_gonorrhea["Female"] = gender_rate_gonorrhea["Female"]/rate_average
gender_factor_gonorrhea["Female"], gender_factor["Male"]
race_rate_gonorrhea = {}
race_factor_gonorrhea = {}
race_rate_gonorrhea["Native"] = 103.2e-5
race_rate_gonorrhea["Asian"] = 19.9e-5
race_rate_gonorrhea["Black"] = 422.9e-5
race_rate_gonorrhea["Hispanic"] = 72.7e-5
race_rate_gonorrhea["Multiple"] = 39.1e-5
race_rate_gonorrhea["Pacific"] = 103.2e-5
race_rate_gonorrhea["White"] = 39.8e-5
race_factor_gonorrhea["Native"] = race_rate_gonorrhea["Native"]/rate_average_gonorrhea
race_factor_gonorrhea["Asian"] = race_rate_gonorrhea["Asian"]/rate_average_gonorrhea
race_factor_gonorrhea["Black"] = race_rate_gonorrhea["Black"]/rate_average_gonorrhea
race_factor_gonorrhea["Hispanic"] = race_rate_gonorrhea["Hispanic"]/rate_average_gonorrhea
race_factor_gonorrhea["Multiple"] = race_rate_gonorrhea["Multiple"]/rate_average_gonorrhea
race_factor_gonorrhea["Pacific"] = race_rate_gonorrhea["Pacific"]/rate_average_gonorrhea
race_factor_gonorrhea["White"] = race_rate_gonorrhea["White"]/rate_average_gonorrhea
age_rate_gonorrhea = {}
age_factor_gonorrhea = {}
age_rate_gonorrhea["0-14"] = 4.3e-5
age_rate_gonorrhea["15-19"] = 323.6e-5
age_rate_gonorrhea["20-24"] = 509.8e-5
age_rate_gonorrhea["25-29"] = 322.5e-5
age_rate_gonorrhea["30-34"] = 180.6e-5
age_rate_gonorrhea["35-39"] = 106.1e-5
age_rate_gonorrhea["40-44"] = 60.9e-5
age_rate_gonorrhea["45-54"] = 35.0e-5
age_rate_gonorrhea["55-64"] = 11.6e-5
age_rate_gonorrhea["65+"] = 2.0e-5
age_factor_gonorrhea["0-14"] = age_rate_gonorrhea["0-14"]/rate_average_gonorrhea
age_factor_gonorrhea["15-19"] = age_rate_gonorrhea["15-19"]/rate_average_gonorrhea
age_factor_gonorrhea["20-24"] = age_rate_gonorrhea["20-24"]/rate_average_gonorrhea
age_factor_gonorrhea["25-29"] = age_rate_gonorrhea["25-29"]/rate_average_gonorrhea
age_factor_gonorrhea["30-34"] = age_rate_gonorrhea["30-34"]/rate_average_gonorrhea
age_factor_gonorrhea["35-39"] = age_rate_gonorrhea["35-39"]/rate_average_gonorrhea
age_factor_gonorrhea["40-44"] = age_rate_gonorrhea["40-44"]/rate_average_gonorrhea
age_factor_gonorrhea["45-54"] = age_rate_gonorrhea["45-54"]/rate_average_gonorrhea
age_factor_gonorrhea["55-64"] = age_rate_gonorrhea["55-64"]/rate_average_gonorrhea
age_factor_gonorrhea["65+"] = age_rate_gonorrhea["65+"]/rate_average_gonorrhea
#Syphilis statistics
gender_rate_syphilis = {}
gender_factor_syphilis = {}
gender_rate_syphilis["Male"] = 11.7e-5
gender_rate_syphilis["Female"] = 1.1e-5
rate_average_syphilis = ((gender_rate_syphilis["Male"]*gender_number["Male"]
+gender_rate_syphilis["Female"]*gender_number["Male"])
/(gender_number["Male"]+gender_number["Female"]))
gender_factor_syphilis["Male"] = gender_rate_syphilis["Male"]/rate_average
gender_factor_syphilis["Female"] = gender_rate_syphilis["Female"]/rate_average
gender_factor_syphilis["Female"], gender_factor["Male"]
race_rate_syphilis = {}
race_factor_syphilis = {}
race_rate_syphilis["Native"] = 7.9e-5
race_rate_syphilis["Asian"] = 2.8e-5
race_rate_syphilis["Black"] = 18.9e-5
race_rate_syphilis["Hispanic"] = 7.4e-5
race_rate_syphilis["Multiple"] = 2.3e-5
race_rate_syphilis["Pacific"] = 6.7e-5
race_rate_syphilis["White"] = 3.4e-5
race_factor_syphilis["Native"] = race_rate_syphilis["Native"]/rate_average_syphilis
race_factor_syphilis["Asian"] = race_rate_syphilis["Asian"]/rate_average_syphilis
race_factor_syphilis["Black"] = race_rate_syphilis["Black"]/rate_average_syphilis
race_factor_syphilis["Hispanic"] = race_rate_syphilis["Hispanic"]/rate_average_syphilis
race_factor_syphilis["Multiple"] = race_rate_syphilis["Multiple"]/rate_average_syphilis
race_factor_syphilis["Pacific"] = race_rate_syphilis["Pacific"]/rate_average_syphilis
race_factor_syphilis["White"] = race_rate_syphilis["White"]/rate_average_syphilis
age_rate_syphilis = {}
age_factor_syphilis = {}
age_rate_syphilis["0-14"] = 0.0e-5
age_rate_syphilis["15-19"] = 4.8e-5
age_rate_syphilis["20-24"] = 18.1e-5
age_rate_syphilis["25-29"] = 19.0e-5
age_rate_syphilis["30-34"] = 13.6e-5
age_rate_syphilis["35-39"] = 10.4e-5
age_rate_syphilis["40-44"] = 8.4e-5
age_rate_syphilis["45-54"] = 6.8e-5
age_rate_syphilis["55-64"] = 2.3e-5
age_rate_syphilis["65+"] = 0.4e-5
age_factor_syphilis["0-14"] = age_rate_syphilis["0-14"]/rate_average_syphilis
age_factor_syphilis["15-19"] = age_rate_syphilis["15-19"]/rate_average_syphilis
age_factor_syphilis["20-24"] = age_rate_syphilis["20-24"]/rate_average_syphilis
age_factor_syphilis["25-29"] = age_rate_syphilis["25-29"]/rate_average_syphilis
age_factor_syphilis["30-34"] = age_rate_syphilis["30-34"]/rate_average_syphilis
age_factor_syphilis["35-39"] = age_rate_syphilis["35-39"]/rate_average_syphilis
age_factor_syphilis["40-44"] = age_rate_syphilis["40-44"]/rate_average_syphilis
age_factor_syphilis["45-54"] = age_rate_syphilis["45-54"]/rate_average_syphilis
age_factor_syphilis["55-64"] = age_rate_syphilis["55-64"]/rate_average_syphilis
age_factor_syphilis["65+"] = age_rate_syphilis["65+"]/rate_average_syphilis
| mit | 658,293,424,676,495,500 | 37.428571 | 102 | 0.699844 | false | 2.242873 | false | false | false |
riolet/rioauth | provider/pages/register.py | 1 | 1949 | import web
import common
import base
class Register(base.Page):
def __init__(self):
base.Page.__init__(self, "Register")
def GET(self):
# show login page
return common.render.register()
@staticmethod
def send_conf_email(user_id, name, email):
duration = 1800 # 30 minutes
key = common.email_loopback.add(user_id, '/login', duration=duration)
subject = "Riolet Registration"
link = "{uri_prefix}/confirmemail?key={key}".format(
uri_prefix=web.ctx.home,
key=key)
body = """
Hello, {name}
Thank you for registering with Riolet. To complete your registration, please follow the link below:
{link}
This link will be valid for the next {duration} minutes. If it expires, you will need to register again.
Thanks,
Riolet
""".format(name=name, link=link, duration=duration/60)
common.sendmail(email, subject, body)
def POST(self):
email = self.data.get('email')
name = self.data.get('name')
password = self.data.get('password')
confirmpassword = self.data.get('confirmpassword')
if not email or not name or not password or not confirmpassword:
self.errors.append('Error processing form.')
return common.render.register(self.errors)
if password != confirmpassword:
self.errors.append("Passwords don't match")
return common.render.register(self.errors)
try:
self.user_id = common.users.add(email, password, name)
except (common.ValidationError, KeyError) as e:
self.errors.append('Error: {0}'.format(e.message))
return common.render.register(self.errors)
# send the user an email to have the use confirm their email address
self.send_conf_email(self.user_id, name, email)
# send them back to the login page
self.redirect('/login?register=success')
| gpl-3.0 | 6,252,393,982,004,596,000 | 30.95082 | 104 | 0.636224 | false | 3.851779 | false | false | false |
siacs/HttpUploadComponent | httpupload/server.py | 2 | 13933 | #!/usr/bin/env python3
import argparse
import base64
import errno
import hashlib
import logging
import mimetypes
import os
import random
import shutil
import ssl
import string
import sys
import time
import urllib.parse
import yaml
from sleekxmpp.componentxmpp import ComponentXMPP
from threading import Event
from threading import Lock
from threading import Thread
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
LOGLEVEL=logging.DEBUG
global files
global files_lock
global config
global quotas
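# Illustrative config.yml for this component (editor's sketch of the keys read
# below; consult the project README for the authoritative documentation):
#
#   component_jid: upload.example.com
#   component_secret: changeme
#   component_port: 5347
#   storage_path: ./files
#   get_url: https://upload.example.com/
#   put_url: https://upload.example.com/
#   http_address: 0.0.0.0
#   http_port: 5280
#   max_file_size: 20971520      # bytes
#   user_quota_soft: 104857600   # bytes, 0 disables
#   user_quota_hard: 209715200   # bytes, 0 disables
#   expire_interval: 3600        # seconds between expiry runs
#   expire_maxage: 2592000       # seconds a file may live
#   whitelist:
#     - example.com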
def normalize_path(path, sub_url_length):
"""
Normalizes the URL to prevent users from grabbing arbitrary files via `../'
and the like.
"""
return os.path.normcase(os.path.normpath(path))[sub_url_length:]
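# Example (editor's illustration, POSIX semantics): with a sub_url prefix length
# of 1, normalize_path('/abc123/DeF/file.pdf', 1) returns 'abc123/DeF/file.pdf',
# while a traversal attempt like '/../etc/passwd' is collapsed by normpath first.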
def expire(quotaonly=False, kill_event=None):
"""
Expire all files over 'user_quota_soft' and older than 'expire_maxage'
- quotaonly - If true don't delete anything just calculate the
used space per user and return. Otherwise make an exiry run
every config['expire_interval'] seconds.
- kill_event - threading.Event to listen to. When set, quit to
prevent hanging on KeyboardInterrupt. Only applicable when
quotaonly = False
"""
global config
global quotas
while True:
if not quotaonly:
# Wait expire_interval secs or return on kill_event
if kill_event.wait(config['expire_interval']):
return
now = time.time()
# Scan each senders upload directories seperatly
for sender in os.listdir(config['storage_path']):
senderdir = os.path.join(config['storage_path'], sender)
quota = 0
filelist = []
# Traverse sender directory, delete anything older expire_maxage and collect file stats.
for dirname, dirs, files in os.walk(senderdir, topdown=False):
removed = []
for name in files:
fullname = os.path.join(dirname, name)
stats = os.stat(fullname)
if not quotaonly:
if now - stats.st_mtime > config['expire_maxage']:
logging.debug('Expiring %s. Age: %s', fullname, now - stats.st_mtime)
try:
os.unlink(fullname)
removed += [name]
except OSError as e:
logging.warning("Exception '%s' deleting file '%s'.", e, fullname)
quota += stats.st_size
filelist += [(stats.st_mtime, fullname, stats.st_size)]
else:
quota += stats.st_size
filelist += [(stats.st_mtime, fullname, stats.st_size)]
if dirs == [] and removed == files: # Directory is empty, so we can remove it
logging.debug('Removing directory %s.', dirname)
try:
os.rmdir(dirname)
except OSError as e:
logging.warning("Exception '%s' deleting directory '%s'.", e, dirname)
if not quotaonly and config['user_quota_soft']:
# Delete oldest files of sender until occupied space is <= user_quota_soft
filelist.sort()
while quota > config['user_quota_soft']:
entry = filelist[0]
try:
logging.debug('user_quota_soft exceeded. Removing %s. Age: %s', entry[1], now - entry[0])
os.unlink(entry[1])
quota -= entry[2]
except OSError as e:
logging.warning("Exception '%s' deleting file '%s'.", e, entry[1])
filelist.pop(0)
quotas[sender] = quota
logging.debug('Expire run finished in %fs', time.time() - now)
if quotaonly:
return
class MissingComponent(ComponentXMPP):
def __init__(self, jid, secret, port):
ComponentXMPP.__init__(self, jid, secret, "localhost", port)
self.register_plugin('xep_0030')
self.register_plugin('upload',module='plugins.upload')
self.add_event_handler('request_upload_slot',self.request_upload_slot)
def request_upload_slot(self, iq):
global config
global files
global files_lock
request = iq['request']
maxfilesize = int(config['max_file_size'])
if not request['filename'] or not request['size']:
self._sendError(iq,'modify','bad-request','please specify filename and size')
elif maxfilesize < int(request['size']):
self._sendError(iq,'modify','not-acceptable','file too large. max file size is '+str(maxfilesize))
elif 'whitelist' not in config or iq['from'].domain in config['whitelist'] or iq['from'].bare in config['whitelist']:
sender = iq['from'].bare
sender_hash = base64.urlsafe_b64encode(hashlib.sha1(sender.encode()).digest()).decode('ascii').rstrip('=')
if config['user_quota_hard'] and quotas.setdefault(sender_hash, 0) + int(request['size']) > config['user_quota_hard']:
msg = 'quota would be exceeded. max file size is %d' % (config['user_quota_hard'] - quotas[sender_hash])
logging.debug(msg)
self._sendError(iq, 'modify', 'not-acceptable', msg)
return
filename = request['filename']
folder = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(int(len(sender_hash) / 2)))
sane_filename = "".join([c for c in filename if (c == '_' or c == '.' or ord(c) >= 48 and ord(c) <= 122)]).rstrip()
path = os.path.join(sender_hash, folder)
if sane_filename:
path = os.path.join(path, sane_filename)
with files_lock:
files.add(path)
print(path)
reply = iq.reply()
reply['slot']['get'] = urllib.parse.urljoin(config['get_url'], path)
reply['slot']['put'] = urllib.parse.urljoin(config['put_url'], path)
reply.send()
else:
self._sendError(iq,'cancel','not-allowed','not allowed to request upload slots')
def _sendError(self, iq, error_type, condition, text):
reply = iq.reply()
iq.error()
iq['error']['type'] = error_type
iq['error']['condition'] = condition
iq['error']['text'] = text
iq.send()
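# Editor's note (sketch of the flow, assuming the XEP-0363-style 'upload' plugin
# in plugins/upload.py): a client sends this component an IQ slot request with a
# filename and size; after the quota and whitelist checks above, the reply carries
# a PUT URL for the upload and a GET URL to share, both served by HttpHandler below.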
class HttpHandler(BaseHTTPRequestHandler):
def do_PUT(self):
print('do put')
global files
global files_lock
global config
path = normalize_path(self.path, config['put_sub_url_len'])
length = int(self.headers['Content-Length'])
maxfilesize = int(config['max_file_size'])
if config['user_quota_hard']:
sender_hash = path.split('/')[0]
maxfilesize = min(maxfilesize, config['user_quota_hard'] - quotas.setdefault(sender_hash, 0))
if maxfilesize < length:
self.send_response(400,'file too large')
self.end_headers()
else:
print('path: '+path)
files_lock.acquire()
if path in files:
files.remove(path)
files_lock.release()
filename = os.path.join(config['storage_path'], path)
os.makedirs(os.path.dirname(filename))
remaining = length
with open(filename,'wb') as f:
data = self.rfile.read(min(4096,remaining))
while data and remaining >= 0:
databytes = len(data)
remaining -= databytes
if config['user_quota_hard']:
quotas[sender_hash] += databytes
f.write(data)
data = self.rfile.read(min(4096,remaining))
self.send_response(200,'ok')
self.end_headers()
else:
files_lock.release()
self.send_response(403,'invalid slot')
self.end_headers()
def do_GET(self, body=True):
global config
path = normalize_path(self.path, config['get_sub_url_len'])
slashcount = path.count('/')
if path[0] in ('/', '\\') or slashcount < 1 or slashcount > 2:
self.send_response(404,'file not found')
self.end_headers()
else:
filename = os.path.join(config['storage_path'], path)
print('requesting file: '+filename)
try:
with open(filename,'rb') as f:
self.send_response(200)
mime, _ = mimetypes.guess_type(filename)
if mime is None:
mime = 'application/octet-stream'
self.send_header("Content-Type", mime)
if mime[:6] != 'image/':
self.send_header("Content-Disposition", 'attachment; filename="{}"'.format(os.path.basename(filename)))
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs.st_size))
self.end_headers()
if body:
shutil.copyfileobj(f, self.wfile)
except FileNotFoundError:
self.send_response(404,'file not found')
self.end_headers()
def do_HEAD(self):
self.do_GET(body=False)
def do_OPTIONS(self):
if 'allow_web_clients' in config and config['allow_web_clients']:
self.send_response(200, 'OK')
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Access-Control-Allow-Methods", "GET,PUT")
self.end_headers()
else:
self.send_response(501, 'NO OPTIONS')
self.end_headers()
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", default='config.yml', help='Specify alternate config file.')
parser.add_argument("-l", "--logfile", default=None, help='File where the server log will be stored. If not specified log to stdout.')
args = parser.parse_args()
with open(args.config,'r') as ymlfile:
config = yaml.load(ymlfile)
files = set()
files_lock = Lock()
kill_event = Event()
logging.basicConfig(level=LOGLEVEL,
format='%(asctime)-24s %(levelname)-8s %(message)s',
filename=args.logfile)
if not config['get_url'].endswith('/'):
config['get_url'] = config['get_url'] + '/'
if not config['put_url'].endswith('/'):
config['put_url'] = config['put_url'] + '/'
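    # Example config.yml (illustrative values only; the key names are the ones
    # read elsewhere in this script):
    #   component_jid: upload.example.org
    #   component_secret: secret
    #   component_port: 5347
    #   storage_path: ./files
    #   max_file_size: 20971520
    #   http_address: 0.0.0.0
    #   http_port: 8080
    #   get_url: https://files.example.org/
    #   put_url: https://files.example.org/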
try:
config['get_sub_url_len'] = len(urllib.parse.urlparse(config['get_url']).path)
config['put_sub_url_len'] = len(urllib.parse.urlparse(config['put_url']).path)
except ValueError:
        logging.warning("Invalid get_url ('%s') or put_url ('%s'); sub-URL handling disabled.", config['get_url'], config['put_url'])
        config['get_sub_url_len'] = 1
        config['put_sub_url_len'] = 1
# Sanitize config['user_quota_*'] and calculate initial quotas
quotas = {}
try:
config['user_quota_hard'] = int(config.get('user_quota_hard', 0))
config['user_quota_soft'] = int(config.get('user_quota_soft', 0))
if config['user_quota_soft'] or config['user_quota_hard']:
expire(quotaonly=True)
except ValueError:
        logging.warning("Invalid user_quota_hard ('%s') or user_quota_soft ('%s'). Quotas disabled.", config['user_quota_hard'], config['user_quota_soft'])
config['user_quota_soft'] = 0
config['user_quota_hard'] = 0
# Sanitize config['expire_*'] and start expiry thread
try:
config['expire_interval'] = float(config.get('expire_interval', 0))
config['expire_maxage'] = float(config.get('expire_maxage', 0))
if config['expire_interval'] > 0 and (config['user_quota_soft'] or config['expire_maxage']):
t = Thread(target=expire, kwargs={'kill_event': kill_event})
t.start()
else:
logging.info('Expiring disabled.')
except ValueError:
logging.warning("Invalid expire_interval ('%s') or expire_maxage ('%s') set in config file. Expiring disabled.",
config['expire_interval'], config['expire_maxage'])
try:
server = ThreadedHTTPServer((config['http_address'], config['http_port']), HttpHandler)
except Exception as e:
import traceback
logging.debug(traceback.format_exc())
kill_event.set()
sys.exit(1)
if 'http_keyfile' in config and 'http_certfile' in config:
server.socket = ssl.wrap_socket(server.socket, keyfile=config['http_keyfile'], certfile=config['http_certfile'])
jid = config['component_jid']
secret = config['component_secret']
port = int(config.get('component_port',5347))
xmpp = MissingComponent(jid,secret,port)
if xmpp.connect():
xmpp.process()
print("connected")
try:
server.serve_forever()
except (KeyboardInterrupt, Exception) as e:
            if isinstance(e, KeyboardInterrupt):
logging.debug('Ctrl+C pressed')
else:
import traceback
logging.debug(traceback.format_exc())
kill_event.set()
else:
print("unable to connect")
kill_event.set()
| agpl-3.0 | 1,377,332,783,470,547,000 | 41.478659 | 165 | 0.55975 | false | 4.219564 | true | false | false |
joaofanti/TrabRedesIIFinal | Modelos/Mapa/MapFactory.py | 1 | 2000 | import json
from Map import Map
from Door import Door
from Room import Room
from Item import Item
"""
Defines a map generator.
"""
class MapFactory:
"""
    Creates a new map generator instance.
"""
def __init__(self):
        self.RoomsList = []  # List of generated rooms.
        self.DoorsList = []  # List of generated doors.
"""
    Generates a map from a text file containing the map definition in JSON.
"""
def GenerateMap(self, mapJson, mapDesignText):
        # For each room in the JSON file, create a new Room object and append it to the room list.
for roomJson in mapJson:
newRoom = self.GenerateRoom(roomJson)
self.RoomsList.append(newRoom)
return Map(self.RoomsList, mapDesignText)
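    # Illustrative map JSON (an assumption about the expected shape, inferred from
    # the fields read in GenerateRoom/GenerateDoor below):
    # [
    #     {"ID": 1, "StartRoom": "True",
    #      "ConnectedRoomsID": [{"Room": 2, "Direction": "N", "Opened": "True"}],
    #      "Objects": [{"Name": "key", "Description": "a rusty key"}]}
    # ]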
"""
    Generates a room from its JSON description.
"""
def GenerateRoom(self, roomJson):
currentRoomID = roomJson["ID"]
doors = []
for connectedRoom in roomJson["ConnectedRoomsID"]:
door = self.GenerateDoor(currentRoomID, connectedRoom)
doors.append(door)
objects = []
for objectJson in roomJson["Objects"]:
            # If the object settings contain "State", pass it along; otherwise default to None.
if ("State" in objectJson):
newObject = Item(objectJson["Name"], objectJson["Description"], objectJson["State"])
else:
newObject = Item(objectJson["Name"], objectJson["Description"])
objects.append(newObject)
newRoom = Room(currentRoomID, roomJson["StartRoom"], doors, objects)
return newRoom
"""
    Generates a door from its JSON description or, if it already exists, reuses the existing one.
"""
def GenerateDoor(self, room1ID, room2JSON):
room2ID = room2JSON["Room"]
room2Direction = room2JSON["Direction"]
room1Direction = "N"
if (room2Direction == "N"):
room1Direction = "S"
elif (room2Direction == "L"):
room1Direction = "E"
elif (room2Direction == "E"):
room1Direction = "L"
door = Door(room1ID, room1Direction, room2ID, room2Direction, room2JSON["Opened"] == "True")
self.DoorsList.append(door)
return door | mit | -4,662,907,654,454,719,000 | 25.328947 | 95 | 0.697 | false | 2.86123 | false | false | false |
onyg/aserializer | aserializer/django/fields.py | 1 | 2229 | # -*- coding: utf-8 -*-
from collections import Iterable
try:
from django.db.models.query import QuerySet
from django.db.models import Manager
except ImportError:
QuerySet = None
Manager = None
from aserializer.fields import ListSerializerField
from aserializer.django.utils import get_local_fields, get_related_fields
class RelatedManagerListSerializerField(ListSerializerField):
def __init__(self, serializer, sort_by=None, use_prefetch=False, *args, **kwargs):
super(RelatedManagerListSerializerField, self).__init__(serializer=serializer, sort_by=sort_by, *args, **kwargs)
self.use_prefetch = use_prefetch
def pre_value(self, fields=None, exclude=None, **extras):
super(RelatedManagerListSerializerField, self).pre_value(fields=fields, exclude=exclude, **extras)
self.use_prefetch = extras.get('use_prefetch', self.use_prefetch)
def set_value(self, value):
if value is None:
return
elif isinstance(value, Iterable):
values = value
elif isinstance(value, (QuerySet, Manager)):
# if using prefetch_related, we can't use only as it will re-fetch the data
if not self.use_prefetch and (self.only_fields or self.exclude):
local_fields = get_local_fields(value.model)
related_fields = get_related_fields(value.model)
only_fields = [f.name for f in local_fields]
if self.only_fields:
only_fields = [f for f in only_fields if f in self.only_fields]
exclude_fields = [f.name for f in local_fields if f.name in self.exclude]
if exclude_fields:
only_fields = [f for f in only_fields if f not in exclude_fields]
only_fields += [f.name for f in related_fields]
# .only() returns a QuerySet of RelatedDjangoModel_Deferred objects?
values = value.only(*only_fields)
else:
values = value.all()
else:
return
self.items[:] = []
self._native_items[:] = []
self._python_items[:] = []
for item in values:
self.add_item(source=item)
| mit | -4,767,140,626,325,645,000 | 42.705882 | 120 | 0.61956 | false | 4.158582 | false | false | false |
pedrobaeza/l10n-spain | l10n_es_partner/models/l10n_es_partner.py | 1 | 7222 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2008 Spanish Localization Team
# Copyright (c) 2009 Zikzakmedia S.L. (http://zikzakmedia.com)
# Jordi Esteve <[email protected]>
# Copyright (c) 2012-2014 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas <[email protected]>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
class ResPartnerBank(orm.Model):
_inherit = 'res.partner.bank'
_columns = {
'acc_country_id': fields.many2one(
"res.country", 'Bank country',
help="If the country of the bank is Spain, it validates the bank "
"code or IBAN, formatting it accordingly."),
}
    def _crc(self, cTexto):
        """Compute the CRC of a 10-digit number left-padded with zeros."""
factor = (1, 2, 4, 8, 5, 10, 9, 7, 3, 6)
        # CRC computation
nCRC = 0
for n in range(10):
nCRC += int(cTexto[n]) * factor[n]
        # Reduce the CRC to a single digit
nValor = 11 - nCRC % 11
if nValor == 10:
nValor = 1
elif nValor == 11:
nValor = 0
return nValor
    def calcCC(self, cBanco, cSucursal, cCuenta):
        """Compute the Spanish bank account control digits (DC)."""
cTexto = "00%04d%04d" % (int(cBanco), int(cSucursal))
dc1 = self._crc(cTexto)
cTexto = "%010d" % long(cCuenta)
dc2 = self._crc(cTexto)
return "%1d%1d" % (dc1, dc2)
def checkBankAccount(self, account):
number = ""
for i in account:
if i.isdigit():
number += i
if len(number) != 20:
return 'invalid-size'
bank = number[0:4]
office = number[4:8]
dc = number[8:10]
account = number[10:20]
if dc != self.calcCC(bank, office, account):
return 'invalid-dc'
return '%s %s %s %s' % (bank, office, dc, account)
def _pretty_iban(self, iban_str):
"""return iban_str in groups of four characters separated
by a single space"""
res = []
while iban_str:
res.append(iban_str[:4])
iban_str = iban_str[4:]
return ' '.join(res)
def onchange_banco(self, cr, uid, ids, account, country_id,
state, context=None):
if account and country_id:
country = self.pool.get('res.country').browse(cr, uid, country_id,
context=context)
if country.code.upper() == 'ES':
bank_obj = self.pool.get('res.bank')
if state == 'bank':
account = account.replace(' ', '')
number = self.checkBankAccount(account)
if number == 'invalid-size':
return {
'warning': {
'title': _('Warning'),
'message': _('Bank account should have 20 '
'digits.')
}
}
if number == 'invalid-dc':
return {
'warning': {
'title': _('Warning'),
'message': _('Invalid bank account.')
}
}
bank_ids = bank_obj.search(cr, uid,
[('code', '=', number[:4])],
context=context)
if bank_ids:
return {'value': {'acc_number': number,
'bank': bank_ids[0]}}
else:
return {'value': {'acc_number': number}}
elif state == 'iban':
partner_bank_obj = self.pool['res.partner.bank']
if partner_bank_obj.is_iban_valid(cr, uid, account, context):
number = self._pretty_iban(account.replace(" ", ""))
bank_ids = bank_obj.search(cr, uid,
[('code', '=', number[5:9])],
context=context)
if bank_ids:
return {'value': {'acc_number': number,
'bank': bank_ids[0]}}
else:
return {'value': {'acc_number': number}}
else:
return {'warning': {'title': _('Warning'),
'message': _('IBAN account is not valid')}}
return {'value': {}}
class ResBank(orm.Model):
_inherit = 'res.bank'
_columns = {
'code': fields.char('Code', size=64),
'lname': fields.char('Long name', size=128),
'vat': fields.char('VAT code', size=32, help="Value Added Tax number"),
'website': fields.char('Website', size=64),
}
class ResPartner(orm.Model):
_inherit = 'res.partner'
_columns = {
'comercial': fields.char('Trade name', size=128, select=True),
}
def name_search(self, cr, uid, name, args=None, operator='ilike',
context=None, limit=100):
if not args:
args = []
partners = super(ResPartner, self).name_search(cr, uid, name, args,
operator, context,
limit)
ids = [x[0] for x in partners]
if name and len(ids) == 0:
ids = self.search(cr, uid, [('comercial', operator, name)] + args,
limit=limit, context=context)
return self.name_get(cr, uid, ids, context=context)
def vat_change(self, cr, uid, ids, value, context=None):
result = super(ResPartner, self).vat_change(cr, uid, ids, value,
context=context)
if value:
result['value']['vat'] = value.upper()
return result
| agpl-3.0 | 6,887,857,847,909,223,000 | 40.705202 | 81 | 0.461123 | false | 4.229191 | false | false | false |
151706061/SimpleITK | Examples/Segmentation/ConnectedThresholdImageFilter.py | 1 | 2202 | '''=========================================================================
'
' Copyright Insight Software Consortium
'
' Licensed under the Apache License, Version 2.0 (the "License");
' you may not use this file except in compliance with the License.
' You may obtain a copy of the License at
'
' http://www.apache.org/licenses/LICENSE-2.0.txt
'
' Unless required by applicable law or agreed to in writing, software
' distributed under the License is distributed on an "AS IS" BASIS,
' WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
' See the License for the specific language governing permissions and
' limitations under the License.
'
'========================================================================='''
from __future__ import print_function
import SimpleITK
import sys
if __name__ == '__main__':
#
# Check Command Line
#
if len( sys.argv ) < 7:
print("Usage: ConnectedThresholdImageFilter inputImage outputImage lowerThreshold upperThreshold seedX seedY [seed2X seed2Y ... ]");
sys.exit( 1 )
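    # Illustrative invocation (an assumption; the thresholds and seed follow the
    # classic ITK white-matter example on BrainProtonDensitySlice):
    #   python ConnectedThresholdImageFilter.py BrainProtonDensitySlice.png out.png 150 180 60 116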
#
# Read the image
#
reader = SimpleITK.ImageFileReader()
reader.SetFileName( sys.argv[1] )
image = reader.Execute();
#
# Set up the writer
#
writer = SimpleITK.ImageFileWriter()
writer.SetFileName( sys.argv[2] )
#
# Blur using CurvatureFlowImageFilter
#
blurFilter = SimpleITK.CurvatureFlowImageFilter()
blurFilter.SetNumberOfIterations( 5 )
blurFilter.SetTimeStep( 0.125 )
image = blurFilter.Execute( image )
#
# Set up ConnectedThresholdImageFilter for segmentation
#
segmentationFilter = SimpleITK.ConnectedThresholdImageFilter()
segmentationFilter.SetLower( float(sys.argv[3]) )
segmentationFilter.SetUpper( float(sys.argv[4]) )
segmentationFilter.SetReplaceValue( 255 )
for i in range( 5, len(sys.argv)-1, 2 ):
seed = SimpleITK.Index( int(sys.argv[i]), int(sys.argv[i+1]) )
segmentationFilter.AddSeed( seed )
print( "Adding seed at ", seed.ToString() )
# Run the segmentation filter
image = segmentationFilter.Execute( image )
#
# Write out the result
#
writer.Execute( image )
sys.exit(0)
| apache-2.0 | 7,621,793,930,765,058,000 | 26.525 | 136 | 0.645322 | false | 3.829565 | false | false | false |
dbrattli/RxPY | rx/linq/observable/sequenceequal.py | 1 | 3360 | import collections
from rx import AnonymousObservable, Observable
from rx.disposables import CompositeDisposable
from rx.internal import default_comparer
from rx.internal import extensionmethod
@extensionmethod(Observable)
def sequence_equal(self, second, comparer=None):
"""Determines whether two sequences are equal by comparing the
elements pairwise using a specified equality comparer.
1 - res = source.sequence_equal([1,2,3])
2 - res = source.sequence_equal([{ "value": 42 }], lambda x, y: x.value == y.value)
3 - res = source.sequence_equal(Observable.return_value(42))
4 - res = source.sequence_equal(Observable.return_value({ "value": 42 }), lambda x, y: x.value == y.value)
second -- Second observable sequence or array to compare.
comparer -- [Optional] Comparer used to compare elements of both sequences.
Returns an observable sequence that contains a single element which
indicates whether both sequences are of equal length and their
corresponding elements are equal according to the specified equality
comparer.
"""
first = self
comparer = comparer or default_comparer
if isinstance(second, collections.Iterable):
second = Observable.from_iterable(second)
def subscribe(observer):
donel = [False]
doner = [False]
ql = []
qr = []
def on_next1(x):
if len(qr) > 0:
v = qr.pop(0)
try:
equal = comparer(v, x)
except Exception as e:
observer.on_error(e)
return
if not equal:
observer.on_next(False)
observer.on_completed()
elif doner[0]:
observer.on_next(False)
observer.on_completed()
else:
ql.append(x)
def on_completed1():
donel[0] = True
if not len(ql):
if len(qr) > 0:
observer.on_next(False)
observer.on_completed()
elif doner[0]:
observer.on_next(True)
observer.on_completed()
def on_next2(x):
if len(ql) > 0:
v = ql.pop(0)
try:
equal = comparer(v, x)
except Exception as exception:
observer.on_error(exception)
return
if not equal:
observer.on_next(False)
observer.on_completed()
elif donel[0]:
observer.on_next(False)
observer.on_completed()
else:
qr.append(x)
def on_completed2():
doner[0] = True
if not len(qr):
if len(ql) > 0:
observer.on_next(False)
observer.on_completed()
elif donel[0]:
observer.on_next(True)
observer.on_completed()
subscription1 = first.subscribe(on_next1, observer.on_error, on_completed1)
subscription2 = second.subscribe(on_next2, observer.on_error, on_completed2)
return CompositeDisposable(subscription1, subscription2)
return AnonymousObservable(subscribe)
| apache-2.0 | 2,095,176,045,865,010,000 | 32.267327 | 110 | 0.545238 | false | 4.409449 | false | false | false |
laborautonomo/django-newsletter | newsletter/migrations/0005_auto__del_emailtemplate__del_unique_emailtemplate_title_action__del_fi.py | 1 | 11955 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from ..utils import get_user_model
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'EmailTemplate', fields ['title', 'action']
db.delete_unique('newsletter_emailtemplate', ['title', 'action'])
# Deleting model 'EmailTemplate'
db.delete_table('newsletter_emailtemplate')
# Deleting field 'Newsletter.update_template'
db.delete_column('newsletter_newsletter', 'update_template_id')
# Deleting field 'Newsletter.unsubscribe_template'
db.delete_column('newsletter_newsletter', 'unsubscribe_template_id')
# Deleting field 'Newsletter.message_template'
db.delete_column('newsletter_newsletter', 'message_template_id')
# Deleting field 'Newsletter.subscribe_template'
db.delete_column('newsletter_newsletter', 'subscribe_template_id')
def backwards(self, orm):
# Adding model 'EmailTemplate'
db.create_table('newsletter_emailtemplate', (
('title', self.gf('django.db.models.fields.CharField')(default=u'Default', max_length=200)),
('text', self.gf('django.db.models.fields.TextField')()),
('html', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('action', self.gf('django.db.models.fields.CharField')(max_length=16, db_index=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('subject', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('newsletter', ['EmailTemplate'])
# Adding unique constraint on 'EmailTemplate', fields ['title', 'action']
db.create_unique('newsletter_emailtemplate', ['title', 'action'])
# User chose to not deal with backwards NULL issues for 'Newsletter.update_template'
raise RuntimeError("Cannot reverse this migration. 'Newsletter.update_template' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Newsletter.unsubscribe_template'
raise RuntimeError("Cannot reverse this migration. 'Newsletter.unsubscribe_template' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Newsletter.message_template'
raise RuntimeError("Cannot reverse this migration. 'Newsletter.message_template' and its values cannot be restored.")
# User chose to not deal with backwards NULL issues for 'Newsletter.subscribe_template'
raise RuntimeError("Cannot reverse this migration. 'Newsletter.subscribe_template' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'newsletter.article': {
'Meta': {'ordering': "('sortorder',)", 'object_name': 'Article'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['newsletter.Message']"}),
'sortorder': ('django.db.models.fields.PositiveIntegerField', [], {'default': '20', 'db_index': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'newsletter.message': {
'Meta': {'unique_together': "(('slug', 'newsletter'),)", 'object_name': 'Message'},
'date_create': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modify': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'newsletter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['newsletter.Newsletter']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'newsletter.newsletter': {
'Meta': {'object_name': 'Newsletter'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'site': ('django.db.models.fields.related.ManyToManyField', [], {'default': '[1]', 'to': "orm['sites.Site']", 'symmetrical': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'})
},
'newsletter.submission': {
'Meta': {'object_name': 'Submission'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['newsletter.Message']"}),
'newsletter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['newsletter.Newsletter']"}),
'prepared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'publish': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 16, 0, 0)', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'sending': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'subscriptions': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'to': "orm['newsletter.Subscription']", 'symmetrical': 'False', 'blank': 'True'})
},
'newsletter.subscription': {
'Meta': {'unique_together': "(('user', 'email_field', 'newsletter'),)", 'object_name': 'Subscription'},
'activation_code': ('django.db.models.fields.CharField', [], {'default': "'cfac7ee20279d5842214a4e8371475175ed8f00b'", 'max_length': '40'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email_field': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'db_column': "'email'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'name_field': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'db_column': "'name'", 'blank': 'True'}),
'newsletter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['newsletter.Newsletter']"}),
'subscribe_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subscribed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'unsubscribe_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'unsubscribed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['newsletter']
| agpl-3.0 | -1,749,140,199,191,356,700 | 71.018072 | 187 | 0.582016 | false | 3.733604 | false | false | false |
rakhimov/cppdep | setup.py | 1 | 1609 | #!/usr/bin/env python
"""The setup script to generate dist files for PyPI.
To upload the release to PyPI:
$ ./setup.py sdist bdist_wheel --universal
$ twine upload dist/*
"""
from setuptools import setup
from cppdep import cppdep
setup(
name="cppdep",
version=cppdep.VERSION,
maintainer="Olzhas Rakhimov",
maintainer_email="[email protected]",
description="Dependency analyzer for C/C++ projects",
download_url="https://github.com/rakhimov/cppdep",
license="GPLv3+",
install_requires=[
"networkx",
"pydot",
"pydotplus",
"PyYAML",
"PyKwalify>=1.6.0"
],
keywords=["c++", "c", "static analysis", "dependency analysis"],
url="http://github.com/rakhimov/cppdep",
packages=["cppdep"],
package_data={"cppdep": ["config_schema.yml"]},
entry_points={"console_scripts": ["cppdep = cppdep.__main__:main"]},
long_description=open("README.rst").read(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Topic :: Software Development :: Quality Assurance",
"License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
"Programming Language :: C",
"Programming Language :: C++",
"Operating System :: POSIX",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS :: MacOS X",
"Environment :: Console",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5"
],
)
| gpl-3.0 | 8,897,259,033,538,249,000 | 31.836735 | 85 | 0.607209 | false | 3.867788 | false | false | false |
ScientificDataFormat/SDF-Python | sdf/hdf5.py | 1 | 5874 | # Copyright (c) 2019 Dassault Systemes. All rights reserved.
import h5py
import sdf
import numpy as np
import os
import sys
def _to_python_str(s):
""" Convert to Python string """
if isinstance(s, bytes):
return s.decode('utf-8')
else:
return s
def load(filename, objectname):
with h5py.File(filename, 'r') as f:
datasets = {}
dsobj = f[objectname]
class_name = dsobj.__class__.__name__
if class_name == 'Group':
group = _create_group(dsobj, datasets)
_restore_scales(datasets)
return group
elif class_name == 'Dataset':
dataset = _create_dataset(dsobj, datasets)
for ri in range(dsobj.ndim):
if dsobj.dims[ri]:
sobj = dsobj.dims[ri][0]
s = _create_dataset(sobj, dict())
s.is_scale = True
dataset.scales[ri] = s
return dataset
else:
raise Exception('Unexpected object')
def save(filename, group):
with h5py.File(filename, 'w') as f:
datasets = dict()
_write_group(f, group, '/', datasets)
# attach the scales
for ds, h5ds in datasets.items():
for i, s in enumerate(ds.scales):
if s is None:
continue
elif s in datasets:
h5s = datasets[s]
dimname = s._display_name
if dimname is None:
dimname = ''
h5s.make_scale(_str(dimname))
h5ds.dims[i].attach_scale(h5s)
else:
print("Cannot attach scale for '" + h5ds.name +
"' because the referenced scale for dimension " + str(i) + " is not part of the file")
def _create_group(gobj, datasets):
""" Create an sdf.Group from an h5py group """
ds_obj_list = []
g_obj_list = []
group_attrs = {key: gobj.attrs[key] for key in gobj.attrs.keys() if key != 'COMMENT'}
comment = gobj.attrs.get('COMMENT')
for ds_name in gobj.keys():
# TODO: fix this?
if isinstance(gobj[ds_name], h5py._hl.dataset.Dataset):
ds_obj_list.append(gobj[ds_name])
elif isinstance(gobj[ds_name], h5py._hl.group.Group):
g_obj_list.append(gobj[ds_name])
child_groups = []
for cgobj in g_obj_list:
child_groups.append(_create_group(cgobj, datasets))
ds_list = [_create_dataset(dsobj, datasets) for dsobj in ds_obj_list]
name = gobj.name.split('/')[-1]
return sdf.Group(name=name, comment=comment, attributes=group_attrs, groups=child_groups, datasets=ds_list)
def _create_dataset(dsobj, datasets):
""" Create a dataset from an h5py dataset """
_, name = os.path.split(dsobj.name)
ds = sdf.Dataset(name, data=dsobj[()])
for attr in dsobj.attrs:
if attr == 'COMMENT':
ds.comment = _to_python_str(dsobj.attrs[attr])
elif attr == 'NAME':
ds.display_name = _to_python_str(dsobj.attrs[attr])
elif attr == 'RELATIVE_QUANTITY' and _to_python_str(dsobj.attrs[attr]) == 'TRUE':
ds.relative_quantity = True
elif attr == 'UNIT':
ds.unit = _to_python_str(dsobj.attrs[attr])
elif attr == 'DISPLAY_UNIT':
ds.display_unit = _to_python_str(dsobj.attrs[attr])
elif attr == 'CLASS' and _to_python_str(dsobj.attrs[attr]) == 'DIMENSION_SCALE':
ds.is_scale = True
elif attr == 'REFERENCE_LIST':
ds.is_scale = True
        elif attr == 'DIMENSION_LIST':
pass
else:
ds.attributes[attr] = _to_python_str(dsobj.attrs[attr])
ds.scales = [None] * ds.data.ndim
datasets[dsobj] = ds
return ds
def _restore_scales(datasets):
for dsobj, ds in datasets.items():
for i in range(ds.data.ndim):
if dsobj.dims[i]:
sobj = dsobj.dims[i][0]
scale = datasets[sobj]
scale.is_scale = True
ds.scales[i] = scale
pass
def _str(s):
""" Convert to byte string """
if sys.version_info.major >= 3 and isinstance(s, bytes):
return s
else:
# convert the string to an fixed-length utf-8 byte string
return np.string_(s.encode('utf-8'))
def _write_group(f, g, path, datasets):
if path == '/':
gobj = f
else:
gobj = f.create_group(path)
# iterate over the child groups
for subgroup in g.groups:
_write_group(f, subgroup, path + subgroup.name + '/', datasets)
if g.comment is not None:
gobj.attrs['COMMENT'] = _str(g.comment)
for key, value in g.attributes.items():
gobj.attrs[key] = _str(value)
# write the datasets
for ds in g.datasets:
_write_dataset(f, ds, path, datasets)
def _write_dataset(f, ds, path, datasets):
f[path + ds.name] = ds.data
dsobj = f[path + ds.name]
datasets[ds] = dsobj
if ds.comment:
dsobj.attrs['COMMENT'] = _str(ds.comment)
if ds._display_name:
dsobj.attrs['NAME'] = _str(ds.display_name)
if ds.relative_quantity:
dsobj.attrs['RELATIVE_QUANTITY'] = _str('TRUE')
if ds.unit:
dsobj.attrs['UNIT'] = _str(ds.unit)
if ds.display_unit != ds.unit:
dsobj.attrs['DISPLAY_UNIT'] = _str(ds.display_unit)
if ds.is_scale:
dimname = ds.display_name
if dimname is None:
dimname = ''
h5py.h5ds.set_scale(dsobj.id, _str(dimname))
return dsobj
| bsd-3-clause | 7,269,598,341,535,490,000 | 26.240385 | 112 | 0.52843 | false | 3.632653 | false | false | false |
chirpradio/chirpradio-machine | chirp/stream/statistics.py | 1 | 6045 | """
Compute statistics about a consumed sequence of messages.
"""
from chirp.common import http_console_server
from chirp.common import timestamp
from chirp.stream import message
class _ConnectionInfo(object):
MAX_NUM_ERRORS = 25
connection_id = None
start_timestamp_ms = None
last_timestamp_ms = None
num_frames = 0
size_frames = 0
duration_frames_ms = 0
freq_frame_kbps = None # Initialized as {} in constructor
first_frame_timestamp_ms = None
last_frame_timestamp_ms = None
num_blocks = 0
size_blocks = 0
last_block_timestamp_ms = None
errors = None # Initialized as [] in constructor
def __init__(self):
self.freq_frame_kbps = {}
self.errors = []
def process(self, msg):
self.last_timestamp_ms = msg.end_timestamp_ms
if msg.message_type == message.CONNECTED:
self.connection_id = msg.connection_id
self.start_timestamp_ms = msg.start_timestamp_ms
elif msg.message_type == message.FRAME:
self.num_frames += 1
self.size_frames += len(msg.payload)
self.duration_frames_ms += msg.mp3_header.duration_ms
key = msg.mp3_header.bit_rate_kbps
self.freq_frame_kbps[key] = self.freq_frame_kbps.get(key, 0) + 1
if self.first_frame_timestamp_ms is None:
self.first_frame_timestamp_ms = msg.start_timestamp_ms
self.last_frame_timestamp_ms = msg.end_timestamp_ms
elif msg.message_type == message.BLOCK:
self.num_blocks += 1
self.size_blocks += len(msg.payload)
self.last_block_timestamp_ms = msg.end_timestamp_ms
elif msg.message_type == message.ERROR:
self.errors.append(msg)
self.last_error_timestamp_ms = msg.start_timestamp_ms
if len(self.errors) > self.MAX_NUM_ERRORS:
self.errors.pop(0)
def html(self):
now_ms = timestamp.now_ms()
# Note my use of nested tables. I suck at HTML.
contents = ["<table border=1 cellpadding=4><tr><td><table>"]
def add(key, val):
contents.append(
"<tr><td><i>%s</i></td><td>%s</td></tr>" % (key, val))
def add_since_ms(key, ts_ms):
add(key, "%s (%s ago)" % (
timestamp.get_pretty_ms(ts_ms),
timestamp.get_human_readable_duration_ms(now_ms-ts_ms)))
add("start time", timestamp.get_pretty_ms(self.start_timestamp_ms))
duration_ms = self.last_timestamp_ms - self.start_timestamp_ms
add("duration", timestamp.get_human_readable_duration_ms(duration_ms))
if self.num_frames:
add("frames", "%d / %.2fM" % (self.num_frames,
float(self.size_frames) / (1 << 20)))
subtable = ["<table cellpadding=2>"]
vbr = 0
for key, num in sorted(self.freq_frame_kbps.items()):
perc = 100.0 * num / self.num_frames
vbr += float(key * num) / self.num_frames
subtable.append(
"<tr><td>%d kbps</td><td>%.1f%%</td><td>%d</td></tr>" %
(key, perc, num))
subtable.append("</table>")
add("frame distribution", "".join(subtable))
add("average bit rate", "%.2f kbps" % vbr)
since_last_ms = now_ms - self.last_frame_timestamp_ms
add_since_ms("last frame", self.last_frame_timestamp_ms)
frame_span_ms = (self.last_frame_timestamp_ms -
self.first_frame_timestamp_ms)
add("frame deficit",
"%.1fms" % (frame_span_ms - self.duration_frames_ms))
if self.num_blocks:
add("junk blocks", "%d / %db" % (
self.num_blocks, self.size_blocks))
add_since_ms("last junk", self.last_block_timestamp_ms)
if self.errors:
error_list = [
"%s - %s / %s / %s" % (
timestamp.get_pretty_ms(err.start_timestamp_ms),
err.error_type, err.error_code, err.error_text)
for err in reversed(self.errors)]
add("errors", "<br>".join(error_list))
contents.append("</table></td></tr></table>")
return "\n".join(contents)
class Statistics(message.MessageConsumer):
MAX_NUM_RECENT_CONNECTIONS = 20
def __init__(self, src):
message.MessageConsumer.__init__(self, src)
self._current_connection_info = None
self._recent_connections = []
def _process_message(self, msg):
if msg.message_type == message.CONNECTED:
if self._current_connection_info:
self._recent_connections.append(self._current_connection_info)
if (len(self._recent_connections)
> self.MAX_NUM_RECENT_CONNECTIONS):
self._recent_connections.pop(0)
self._current_connection_info = _ConnectionInfo()
if self._current_connection_info is not None:
self._current_connection_info.process(msg)
def _connections_html(self, unused_request):
contents = ["<html><head><title>Connection Log</title></head><body>"]
contents.append("<h1>Connection Log</h1>")
contents.append("The current time is %s" % timestamp.get_pretty())
contents.append("<h2>Current</h2>")
if self._current_connection_info:
contents.append(self._current_connection_info.html())
else:
contents.append("<i>No connections yet</i>")
if self._recent_connections:
contents.append("<h2>Recent</h2>")
contents.extend(con.html()
for con in reversed(self._recent_connections))
contents.append("</body></html>")
return "\n".join(contents)
def export(self, path=None):
http_console_server.register("/connections", self._connections_html)
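# Illustrative wiring (a sketch; `message_src` stands for any upstream message
# source in this pipeline):
#   stats = Statistics(message_src)
#   stats.export()  # serves the HTML log at /connections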
| apache-2.0 | -8,816,550,096,536,408,000 | 37.503185 | 79 | 0.561952 | false | 3.782854 | false | false | false |
DanLindeman/memegen | tests/test_api_aliases.py | 1 | 1225 | # pylint: disable=unused-variable,misplaced-comparison-constant,expression-not-assigned
from expecter import expect
from .utils import load
def describe_get():
def it_requires_a_name_to_return_aliases(client):
status, data = load(client.get("/api/aliases/"))
expect(status) == 200
expect(data) == []
def it_redirects_with_param(client):
status, text = load(client.get("/api/aliases/?name=foo"), as_json=False)
expect(status) == 302
expect(text).contains('<a href="/api/aliases/foo">')
def describe_filter():
def with_single_match(client):
status, data = load(client.get("/api/aliases/sad-biden"))
expect(status) == 200
expect(data) == {
'sad-biden': {
'styles': [
'down',
'scowl',
'window',
],
'template': "http://localhost/api/templates/sad-biden"
}
}
def with_many_matches(client):
status, data = load(client.get("/api/aliases/votestakes"))
expect(status) == 200
expect(len(data)) == 5
| mit | -1,296,583,061,009,426,400 | 27.488372 | 87 | 0.516735 | false | 4.042904 | false | false | false |
PoolC/pythonstudy | entity.py | 1 | 1669 | #coding:utf-8
import types
class Channel(object):
def __init__(self, owner):
self._owner = owner
def _process_message(self, sender, msg, msgargs):
return self._owner._handle(sender, msg, msgargs)
def sendto(self, receiver, msg, msgargs = None):
return receiver._process_message(self, msg, msgargs)
class Entity(object):
def __init__(self):
self.channel = Channel(self)
self._nextcomp = None
self._children = []
def attach(self, comp):
assert comp._nextcomp is None
c = self
while c._nextcomp is not None:
c = c._nextcomp
c._nextcomp = comp
comp.channel = self.channel
return self
def _handle_attach(self, sender, msgargs):
self.attach(msgargs)
def components(self):
c = self
while c._nextcomp is not None:
yield c._nextcomp
c = c._nextcomp
def add(self, entity):
self._children.append(entity)
def _handle(self, sender, msg, msgargs):
c = self
while c is not None:
try:
handler = getattr(c, '_handle_' + msg)
except AttributeError:
handler = None
if handler:
return handler(sender, msgargs)
c = c._nextcomp
return None
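# Illustrative use of the message channel (an assumption, not part of the
# original exercise):
#   player = Entity()
#   chest = Entity()
#   player.channel.sendto(chest.channel, 'attach', Entity())
# The 'attach' message is dispatched to Entity._handle_attach on the receiver.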
def tick(compcls):
for comp in compcls.instances:
comp.tick()
def tickall():
for compcls in component_classes:
tick(compcls)
def send(sender, receiver, msg, msgargs):
receiver.send(sender, msg, msgargs)
| gpl-3.0 | -5,939,575,256,779,529,000 | 24.287879 | 60 | 0.537448 | false | 4.193467 | false | false | false |
keenondrums/sovrin-node | sovrin_client/agent/agent_issuer.py | 1 | 3182 | import json
from plenum.common.types import f
from anoncreds.protocol.issuer import Issuer
from anoncreds.protocol.types import ID
from anoncreds.protocol.types import ClaimRequest
from sovrin_client.agent.constants import EVENT_NOTIFY_MSG, CLAIMS_LIST_FIELD
from sovrin_client.agent.msg_constants import CLAIM, CLAIM_REQ_FIELD, CLAIM_FIELD, \
AVAIL_CLAIM_LIST, REVOC_REG_SEQ_NO, SCHEMA_SEQ_NO, ISSUER_DID
from sovrin_common.identity import Identity
from plenum.common.constants import DATA
from sovrin_client.client.wallet.attribute import Attribute
class AgentIssuer:
def __init__(self, issuer: Issuer):
self.issuer = issuer
async def processReqAvailClaims(self, msg):
body, (frm, ha) = msg
link = self.verifyAndGetLink(msg)
data = {
CLAIMS_LIST_FIELD: self.get_available_claim_list(link)
}
resp = self.getCommonMsg(AVAIL_CLAIM_LIST, data)
self.signAndSend(resp, link.localIdentifier, frm)
async def processReqClaim(self, msg):
body, (frm, _) = msg
link = self.verifyAndGetLink(msg)
if not link:
raise NotImplementedError
claimReqDetails = body[DATA]
schemaId = ID(schemaId=claimReqDetails[SCHEMA_SEQ_NO])
schema = await self.issuer.wallet.getSchema(schemaId)
if not self.is_claim_available(link, schema.name):
self.notifyToRemoteCaller(
EVENT_NOTIFY_MSG, "This claim is not yet available.",
self.wallet.defaultId, frm,
origReqId=body.get(f.REQ_ID.nm))
return
public_key = await self.issuer.wallet.getPublicKey(schemaId)
claimReq = ClaimRequest.from_str_dict(
claimReqDetails[CLAIM_REQ_FIELD], public_key.N)
self._add_attribute(
schemaKey=schema.getKey(),
proverId=claimReq.userId,
link=link)
claim_signature, claim_attributes = await self.issuer.issueClaim(schemaId, claimReq)
claimDetails = {
f.SIG.nm: claim_signature.to_str_dict(),
ISSUER_DID: schema.issuerId,
CLAIM_FIELD: json.dumps({k: v.to_str_dict() for k, v in claim_attributes.items()}),
REVOC_REG_SEQ_NO: None,
SCHEMA_SEQ_NO: claimReqDetails[SCHEMA_SEQ_NO]
}
resp = self.getCommonMsg(CLAIM, claimDetails)
self.signAndSend(resp, link.localIdentifier, frm,
origReqId=body.get(f.REQ_ID.nm))
def _add_attribute(self, schemaKey, proverId, link):
attr = self.issuer_backend.get_record_by_internal_id(link.internalId)
self.issuer._attrRepo.addAttributes(schemaKey=schemaKey,
userId=proverId,
attributes=attr)
def publish_trust_anchor(self, idy: Identity):
self.wallet.addTrustAnchoredIdentity(idy)
reqs = self.wallet.preparePending()
self.client.submitReqs(*reqs)
def publish_trust_anchor_attribute(self, attrib: Attribute):
self.wallet.addAttribute(attrib)
reqs = self.wallet.preparePending()
self.client.submitReqs(*reqs)
| apache-2.0 | 4,596,627,729,626,327,000 | 37.337349 | 95 | 0.641735 | false | 3.644903 | false | false | false |
zhewang/lcvis | main.py | 1 | 3262 | import json
import numpy as np
import pca
import process_object as po
from flask import Flask, request, jsonify, render_template, url_for
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/supernova')
def supernova():
return render_template('supernova.html')
@app.route('/new')
def splinenew():
return render_template('index_new.html')
@app.route('/plotusers', methods=['post'])
def plotusers():
data = request.get_json()
position = po.project(data['lc'], data['p'])
return jsonify(position)
@app.route('/fastpca')
def fastpca():
return render_template('fastpca.html')
@app.route('/calculatepca', methods=['post'])
def calculatepca():
global LAST_PCA
uids = request.get_json()
idlist, matrix, status = get_data_by_id(uids)
pca_result = {}
if status == 'ok':
pca_result = pca.calculate(idlist, matrix, LAST_PCA)
LAST_PCA = pca_result
final_result = [[{} for x in range(len(pca_result[0]))] for x in range(len(pca_result))]
for i in range(len(pca_result)):
for j in range(len(pca_result[0])):
final_result[i][j] = {'count':pca_result[i][j]}
return jsonify({'status':status, 'data':final_result})
@app.route('/calculate_average_lc', methods=['post'])
def calculate_average_lc():
global LCDATA, LCPHASE
uids = request.get_json()
# TODO: band as parameter
band = 'V'
matrix = []
for i in uids:
i = str(i)
if i in LCDATA:
if band in LCDATA[i]['bands']:
vec = LCDATA[i][band]['mag']
if len(vec) > 0: # some id may not have lc data
matrix.append(LCDATA[i][band]['mag'])
mean = np.mean(np.array(matrix), axis=0)
std = np.std(np.array(matrix), axis=0)
return jsonify({'mean':mean.tolist(),
'std':std.tolist(),
'phase':LCPHASE})
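# Illustrative request (an assumption about the payload): POST /calculate_average_lc
# with a JSON body such as [4939, 5210] (a list of object ids); the response holds
# the mean and std light curve sampled on LCPHASE for the 'V' band.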
def load_lc_data():
lcdata = {}
    lcphase = []  # Assume the phase grid is the same across surveys
surveys = json.load(open("./static/data_ogle/list.json"))['surveys']
for s in surveys:
path = "./static/data_ogle/lightcurves/{}/fit.json".format(s)
data = json.load(open(path))
lcphase = data['phase']
for objid in data['data']:
lcdata[objid] = data['data'][objid]
return lcdata, lcphase
def get_data_by_id(ids, band='V'):
global LCDATA
ids_exist = [] # some id may not have lc data
matrix = []
status = 'ok'
for i in ids:
i = str(i)
if i in LCDATA:
if band in LCDATA[i]['bands']:
vec = LCDATA[i][band]['mag']
if len(vec) > 0:
ids_exist.append(i)
matrix.append(vec)
if len(matrix) == 0:
return ids_exist, matrix, 'no light curve data'
c_length = len(matrix[0])
matrix = np.array(matrix)
if len(matrix) < c_length:
status = "numrows < numcols"
else:
try:
matrix.shape = (len(matrix), c_length)
except:
status = "not all rows have same numcols"
return ids_exist, matrix, status
LCDATA, LCPHASE = load_lc_data()
LAST_PCA = []
app.run(port=8080, debug=True)
| gpl-2.0 | 1,719,386,529,822,531,600 | 26.411765 | 92 | 0.57572 | false | 3.355967 | false | false | false |
datascopeanalytics/sensor_fusion | main.py | 1 | 6487 | import copy
import datetime
import math
import random
from collections import defaultdict
import matplotlib as mpl
import matplotlib.animation
from matplotlib.ticker import FormatStrFormatter
import numpy as np
import scipy
import seaborn as sns
import traces
# local imports
from kalman import Estimate, Reading
from matplotlib import pyplot as plt
from sensor import Sensor
from traincar import TrainCar
class SensorAnimation(matplotlib.animation.FuncAnimation):
def __init__(self, time_array, truth, reading_array, estimate_array):
self.fig, (self.ax2, self.ax1) = plt.subplots(
1, 2, sharey=True,
gridspec_kw={"width_ratios":[3, 1]},
figsize=(8, 4)
)
plt.tight_layout(pad=2.0)
self.time_array = time_array
self.estimate_array = estimate_array
self.ax1.set_ylim(0, 120)
self.ax1.set_xlim(0, 20)
self.ax1.set_xlabel("Probability")
self.ax1.xaxis.set_major_formatter(FormatStrFormatter('%d%%'))
self.estimate_line = self.ax1.plot(
[], [], color='purple', label='estimate')
self.lines = []
for sensor in reading_array:
self.lines += self.ax1.plot(
[], [], color=sensor.color, label=sensor.name)
self.truth_line = self.ax1.hlines(truth[0], 0, 20, color='red', label='Occupancy')
self.ax1.legend()
self.ax2.plot(time_array, truth, color='red', label='Occupancy')
# self.ax2.set_ylim(0, 150)
self.ax2.set_title("Train car occupancy over time")
self.ax2.set_xlabel("Time (minutes)")
self.ax2.set_ylabel("Occupants")
self.estimate_ts = self.ax2.plot(
[], [], color='purple', label='estimate')
self.fill_lines = self.ax2.fill_between(
[], [], color='purple', alpha=0.5)
self.truth = truth
self.reading_array = reading_array
super().__init__(
self.fig, self.update,
frames=len(time_array),
blit=True
)
def update(self, i):
"""updates frame i of the animation"""
self.ax1.set_title("{}".format(
datetime.timedelta(minutes=self.time_array[i]))
)
for sensor, line in zip(self.reading_array.keys(), self.lines):
reading = self.reading_array.get(sensor)[i]
x, y = reading.vectorize(self.ax1.get_ylim())
line.set_data(y, x)
estimate = self.estimate_array[i]
self.estimate_line[0].set_data(
estimate.vectorize(self.ax1.get_ylim())[1],
estimate.vectorize(self.ax1.get_ylim())[0],
)
self.truth_line.remove()
        self.truth_line = self.ax1.hlines(self.truth[i], 0, 20, color='red', label='Occupancy')
self.estimate_ts[0].set_data(
self.time_array[:i], self.estimate_array[:i])
self.fill_lines.remove()
self.fill_lines = self.ax2.fill_between(
self.time_array[:i],
[e.mu - 2 * e.sigma for e in self.estimate_array[:i]],
[e.mu + 2 * e.sigma for e in self.estimate_array[:i]],
color='purple',
alpha=0.5
)
return tuple(self.lines + self.estimate_line + self.estimate_ts + [self.fill_lines] + [self.truth_line])
if __name__ == "__main__":
# create some crappy sensors
co2_sensor = Sensor("CO$_2$", intersect=350, slope=15,
sigma=10, round_level=500, proc_sigma=30, units="ppm")
# sigma=500, round_level=500, proc_sigma=0)
temp_sensor = Sensor("Temperature", intersect=0, slope=0.25,
sigma=5, round_level=10, proc_sigma=5, units="$^{\circ}$C")
# put the sensors on a train car
train_car = TrainCar(sensor_array=[co2_sensor, temp_sensor])
# run some experiments to model/calibrate the sensors
train_car.run_experiment(datapoints=250)
train_car.plot_experiment(path="experiment_plots")
# generate some "real" occupancy data
train_car.generate_occupancy() # defaults to 5 stations and 30 minutes
time_array = np.arange(0, 30, 1.0 / 10)
reading_array = defaultdict(list)
truth = []
estimate_array = []
estimate = Estimate()
for t in time_array:
for reading in train_car.read_sensors(t):
reading_array[reading.sensor].append(reading)
estimate.add_reading(reading)
estimate_array.append(copy.deepcopy(estimate))
# if the last point was in a station
if truth and train_car.occupants_trace[t] != truth[-1]:
estimate = Estimate()
truth.append(train_car.occupants_trace[t])
# plt.clf()
# plt.plot(time_array, reading_array[co2_sensor])
# plt.savefig("co2.png")
plt.clf()
animation = SensorAnimation(
time_array, truth, reading_array, estimate_array
)
animation.save("30minutes.mp4", fps=10, bitrate=1024)
plt.clf()
plt.xlabel("Number of people in the train car")
plt.ylabel("Probability")
plt.gca().yaxis.set_major_formatter(FormatStrFormatter('%.1f%%'))
reading_1 = Reading(co2_sensor, 60)
print("reading_1 = ", (reading_1.value, reading_1.mu))
plt.plot(*reading_1.vectorize((0,120)), color=co2_sensor.color, label="CO$_2$ sensor")
plt.vlines(reading_1, 0, max(reading_1.vectorize((0,120))[1]), linestyles='dashed')
plt.legend()
plt.savefig("reading_plots/1_co2.svg")
reading_2 = Reading(co2_sensor, 60)
print("reading_2 = ", (reading_2.value, reading_2.mu))
plt.plot(*reading_2.vectorize((0,120)), color=co2_sensor.color)
plt.vlines(reading_2, 0, max(reading_2.vectorize((0,120))[1]), linestyles='dashed')
plt.savefig("reading_plots/2_co2.svg")
estimate = Estimate()
estimate.add_reading(reading_1)
estimate.add_reading(reading_2)
estimate_line = plt.plot(*estimate.vectorize((0,120)), color='purple', label="Estimate")
plt.legend()
plt.savefig("reading_plots/3_co2.svg")
reading_3 = Reading(temp_sensor, 60)
print("reading_3 = ", (reading_3.value, reading_3.mu))
plt.plot(*reading_3.vectorize((0,120)), color=temp_sensor.color, label="Temperature sensor")
plt.vlines(reading_3, 0, max(reading_3.vectorize((0,120))[1]), linestyles='dashed')
estimate.add_reading(reading_3)
estimate_line[0].remove()
estimate_line = plt.plot(*estimate.vectorize((0,120)), color='purple', label="Estimate")
plt.legend()
plt.savefig("reading_plots/4_co2.svg")
| unlicense | -2,849,462,378,540,954,000 | 34.839779 | 112 | 0.617389 | false | 3.350723 | false | false | false |
minrk/sympy | sympy/tensor/indexed.py | 1 | 13604 | """Module that defines indexed objects
The classes IndexedBase, Indexed and Idx would represent a matrix element
M[i, j] as in the following graph::
1) The Indexed class represents the entire indexed object.
|
___|___
' '
M[i, j]
/ \__\______
| |
| |
| 2) The Idx class represent indices and each Idx can
| optionally contain information about its range.
|
3) IndexedBase represents the `stem' of an indexed object, here `M'.
The stem used by itself is usually taken to represent the entire
array.
There can be any number of indices on an Indexed object. No transformation
properties are implemented in these Base objects, but implicit contraction
of repeated indices is supported.
Note that the support for complicated (i.e. non-atomic) integer expressions
as indices is limited. (This should be improved in future releases.)
Examples
========
To express the above matrix element example you would write:
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> M = IndexedBase('M')
>>> i, j = map(Idx, ['i', 'j'])
>>> M[i, j]
M[i, j]
Repeated indices in a product imply a summation, so to express a
matrix-vector product in terms of Indexed objects:
>>> x = IndexedBase('x')
>>> M[i, j]*x[j]
M[i, j]*x[j]
If the indexed objects will be converted to component based arrays, e.g.
with the code printers or the autowrap framework, you also need to provide
(symbolic or numerical) dimensions. This can be done by passing an
optional shape parameter to IndexedBase upon construction:
>>> dim1, dim2 = symbols('dim1 dim2', integer=True)
>>> A = IndexedBase('A', shape=(dim1, 2*dim1, dim2))
>>> A.shape
Tuple(dim1, 2*dim1, dim2)
>>> A[i, j, 3].shape
Tuple(dim1, 2*dim1, dim2)
If an IndexedBase object has no shape information, it is assumed that the
array is as large as the ranges of its indices:
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> M[i, j].shape
Tuple(m, n)
>>> M[i, j].ranges
[Tuple(0, m - 1), Tuple(0, n - 1)]
The above can be compared with the following:
>>> A[i, 2, j].shape
Tuple(dim1, 2*dim1, dim2)
>>> A[i, 2, j].ranges
[Tuple(0, m - 1), None, Tuple(0, n - 1)]
To analyze the structure of indexed expressions, you can use the methods
get_indices() and get_contraction_structure():
>>> from sympy.tensor import get_indices, get_contraction_structure
>>> get_indices(A[i, j, j])
(set([i]), {})
>>> get_contraction_structure(A[i, j, j])
{(j,): set([A[i, j, j]])}
See the appropriate docstrings for a detailed explanation of the output.
"""
# TODO: (some ideas for improvement)
#
# o test and guarantee numpy compatibility
# - implement full support for broadcasting
# - strided arrays
#
# o more functions to analyze indexed expressions
# - identify standard constructs, e.g matrix-vector product in a subexpression
#
# o functions to generate component based arrays (numpy and sympy.Matrix)
# - generate a single array directly from Indexed
# - convert simple sub-expressions
#
# o sophisticated indexing (possibly in subclasses to preserve simplicity)
# - Idx with range smaller than dimension of Indexed
# - Idx with stepsize != 1
# - Idx with step determined by function call
from sympy.core import Expr, Basic, Tuple, Symbol, Integer, sympify, S
from sympy.core.compatibility import ordered_iter
class IndexException(Exception):
pass
class IndexedBase(Expr):
"""Represent the base or stem of an indexed object
    The IndexedBase class represents an array that contains elements. The main purpose
of this class is to allow the convenient creation of objects of the Indexed
class. The __getitem__ method of IndexedBase returns an instance of
Indexed. Alone, without indices, the IndexedBase class can be used as a
notation for e.g. matrix equations, resembling what you could do with the
Symbol class. But, the IndexedBase class adds functionality that is not
available for Symbol instances:
- An IndexedBase object can optionally store shape information. This can
        be used to check array conformance and conditions for numpy
broadcasting. (TODO)
- An IndexedBase object implements syntactic sugar that allows easy symbolic
representation of array operations, using implicit summation of
repeated indices.
- The IndexedBase object symbolizes a mathematical structure equivalent
to arrays, and is recognized as such for code generation and automatic
compilation and wrapping.
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols
>>> A = IndexedBase('A'); A
A
>>> type(A)
<class 'sympy.tensor.indexed.IndexedBase'>
    When an IndexedBase object receives indices, it returns an array with named
axes, represented by an Indexed object:
>>> i, j = symbols('i j', integer=True)
>>> A[i, j, 2]
A[i, j, 2]
>>> type(A[i, j, 2])
<class 'sympy.tensor.indexed.Indexed'>
The IndexedBase constructor takes an optional shape argument. If given,
it overrides any shape information in the indices. (But not the index
ranges!)
>>> m, n, o, p = symbols('m n o p', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', n)
>>> A[i, j].shape
Tuple(m, n)
>>> B = IndexedBase('B', shape=(o, p))
>>> B[i, j].shape
Tuple(o, p)
"""
is_commutative = False
def __new__(cls, label, shape=None, **kw_args):
if isinstance(label, basestring):
label = Symbol(label)
obj = Expr.__new__(cls, label, **kw_args)
if ordered_iter(shape):
obj._shape = Tuple(*shape)
else:
obj._shape = shape
return obj
@property
def args(self):
if self._shape:
return self._args + (self._shape,)
else:
return self._args
def _hashable_content(self):
return Expr._hashable_content(self) + (self._shape,)
def __getitem__(self, indices, **kw_args):
if ordered_iter(indices):
# Special case needed because M[*my_tuple] is a syntax error.
if self.shape and len(self.shape) != len(indices):
raise IndexException("Rank mismatch")
return Indexed(self, *indices, **kw_args)
else:
if self.shape and len(self.shape) != 1:
raise IndexException("Rank mismatch")
return Indexed(self, indices, **kw_args)
@property
def shape(self):
return self._shape
@property
def label(self):
return self.args[0]
def _sympystr(self, p):
return p.doprint(self.label)
class Indexed(Expr):
"""Represents a mathematical object with indices.
>>> from sympy.tensor import Indexed, IndexedBase, Idx
>>> from sympy import symbols
>>> i, j = map(Idx, ['i', 'j'])
>>> Indexed('A', i, j)
A[i, j]
It is recommended that Indexed objects are created via IndexedBase:
>>> A = IndexedBase('A')
>>> Indexed('A', i, j) == A[i, j]
True
"""
is_commutative = False
def __new__(cls, base, *args, **kw_args):
if not args: raise IndexException("Indexed needs at least one index")
if isinstance(base, (basestring, Symbol)):
base = IndexedBase(base)
elif not isinstance(base, IndexedBase):
raise TypeError("Indexed expects string, Symbol or IndexedBase as base")
return Expr.__new__(cls, base, *args, **kw_args)
@property
def base(self):
return self.args[0]
@property
def indices(self):
return self.args[1:]
@property
def rank(self):
"""returns the number of indices"""
return len(self.args)-1
@property
def shape(self):
"""returns a list with dimensions of each index.
Dimensions is a property of the array, not of the indices. Still, if
the IndexedBase does not define a shape attribute, it is assumed that
the ranges of the indices correspond to the shape of the array.
>>> from sympy.tensor.indexed import IndexedBase, Idx
>>> from sympy import symbols
>>> n, m = symbols('n m', integer=True)
>>> i = Idx('i', m)
>>> j = Idx('j', m)
>>> A = IndexedBase('A', shape=(n, n))
>>> B = IndexedBase('B')
>>> A[i, j].shape
Tuple(n, n)
>>> B[i, j].shape
Tuple(m, m)
"""
if self.base.shape:
return self.base.shape
try:
return Tuple(*[i.upper - i.lower + 1 for i in self.indices])
except AttributeError:
raise IndexException("Range is not defined for all indices in: %s" % self)
except TypeError:
raise IndexException("Shape cannot be inferred from Idx with undefined range: %s"%self)
@property
def ranges(self):
"""returns a list of tuples with lower and upper range of each index
If an index does not define the data members upper and lower, the
corresponding slot in the list contains ``None'' instead of a tuple.
"""
ranges = []
for i in self.indices:
try:
ranges.append(Tuple(i.lower, i.upper))
except AttributeError:
ranges.append(None)
return ranges
def _sympystr(self, p):
indices = map(p.doprint, self.indices)
return "%s[%s]" % (p.doprint(self.base), ", ".join(indices))
class Idx(Expr):
"""Represents an index, either symbolic or integer.
There are a number of ways to create an Idx object. The constructor
takes two arguments:
``label``
An integer or a symbol that labels the index.
``range``
Optionally you can specify a range as either
- Symbol or integer: This is interpreted as dimension. lower and
upper ranges are set to 0 and range-1
- tuple: This is interpreted as the lower and upper bounds in the
range.
Note that the Idx constructor is rather pedantic, and will not accept
non-integer symbols. The only exception is that you can use oo and -oo to
specify an unbounded range. For all other cases, both label and bounds
must be declared as integers, in the sense that for an index label n,
n.is_integer must return True.
For convenience, if the label is given as a string, it is automatically
converted to an integer symbol. (Note that this conversion is not done for
range or dimension arguments.)
:Examples:
>>> from sympy.tensor import IndexedBase, Idx
>>> from sympy import symbols, oo
>>> n, i, L, U = symbols('n i L U', integer=True)
0) Construction from a string. An integer symbol is created from the
string.
>>> Idx('qwerty')
qwerty
1) Both upper and lower bound specified
>>> idx = Idx(i, (L, U)); idx
i
>>> idx.lower, idx.upper
(L, U)
2) Only dimension specified, lower bound defaults to 0
>>> idx = Idx(i, n); idx.lower, idx.upper
(0, n - 1)
>>> idx = Idx(i, 4); idx.lower, idx.upper
(0, 3)
>>> idx = Idx(i, oo); idx.lower, idx.upper
(0, oo)
3) No bounds given, interpretation of this depends on context.
>>> idx = Idx(i); idx.lower, idx.upper
(None, None)
4) for a literal integer instead of a symbolic label the bounds are still
there:
>>> idx = Idx(2, n); idx.lower, idx.upper
(0, n - 1)
"""
is_integer = True
def __new__(cls, label, range=None, **kw_args):
if isinstance(label, basestring):
label = Symbol(label, integer=True)
label, range = map(sympify, (label, range))
if not label.is_integer:
raise TypeError("Idx object requires an integer label")
elif ordered_iter(range):
assert len(range) == 2, "Idx got range tuple with wrong length"
for bound in range:
if not (bound.is_integer or abs(bound) is S.Infinity):
raise TypeError("Idx object requires integer bounds")
args = label, Tuple(*range)
elif isinstance(range, Expr):
if not (range.is_integer or range is S.Infinity):
raise TypeError("Idx object requires an integer dimension")
args = label, Tuple(S.Zero, range-S.One)
elif range:
raise TypeError("range must be ordered iterable or integer sympy expression")
else:
args = label,
obj = Expr.__new__(cls, *args, **kw_args)
return obj
@property
def label(self):
"""Returns the name/label of the index, or its integer value"""
return self.args[0]
@property
def lower(self):
"""Returns the lower bound of the index"""
try:
return self.args[1][0]
except IndexError:
return
@property
def upper(self):
"""Returns the upper bound of the index"""
try:
return self.args[1][1]
except IndexError:
return
def _sympystr(self, p):
return p.doprint(self.label)
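# Illustrative sketch (not part of the original module): how Idx bounds feed the
# shape inference in Indexed.shape when the IndexedBase defines no shape of its own.
# The names L, U and B below are hypothetical.
#
# >>> from sympy import symbols
# >>> L, U = symbols('L U', integer=True)
# >>> i = Idx('i', (L, U))
# >>> B = IndexedBase('B')
# >>> B[i].shape
# -> a 1-tuple whose only entry equals upper - lower + 1, i.e. U - L + 1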
| bsd-3-clause | -3,039,205,285,116,504,000 | 31.623501 | 99 | 0.60394 | false | 4.016534 | false | false | false |
1503051/webhookshipping | app.py | 1 | 1887 | #!/usr/bin/env python
import urllib
import json
import os
from flask import Flask
from flask import request
from flask import make_response
from pymongo import MongoClient
# Flask app should start in global layout
app = Flask(__name__)
client = MongoClient('mongodb://localhost:27017')
@app.route('/star', methods=['POST'])
def get_one_star():
db = client['hrvisual']
collect = db['ORG_DEPT_EMP_2016']
post1 = collect.find_one({'emp_number': '1503051'})
if post1 is None:
return 'no record'
else:
return post1['emp_name']
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "branchcontact":
return {}
result = req.get("result")
parameters = result.get("parameters")
contact = {'Narl':'02-6630-0151', 'Ndl':'03-5726100', 'Nchc':'03-5776085', 'Cic':'03-7777777', '1503051':'0911111111'}
speech = "unknown"
branch = parameters.get("branch")
if branch is not None:
speech = "The contact information for " + branch + " is " + contact[branch]
anytxt = parameters.get("any")
if anytxt is not None:
speech = "The contact information for " + anytxt + " is " + contact[anytxt]
print("Response:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-onlinestore-shipping"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
app.run(debug=True, port=port, host='0.0.0.0')
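# Illustrative sketch (assumption, not part of the original app): a minimal
# API.AI-style payload that makeWebhookResult() would answer with a branch contact.
# Any payload fields beyond "result", "action" and "parameters" are hypothetical.
#
# {
#   "result": {
#     "action": "branchcontact",
#     "parameters": {"branch": "Narl"}
#   }
# }
#
# POSTing this JSON to /webhook should produce the speech
# "The contact information for Narl is 02-6630-0151".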
| apache-2.0 | -2,935,963,759,269,244,400 | 24.849315 | 126 | 0.612083 | false | 3.387792 | false | false | false |
rajanandakumar/DIRAC | DataManagementSystem/Client/ReplicaManager.py | 1 | 113878 | """
:mod: ReplicaManager
.. module: ReplicaManager
:synopsis: ReplicaManager links the functionalities of StorageElement and FileCatalog.
This module consists of ReplicaManager and related classes.
OBSOLETED !!! DO NOT USE THIS ANYMORE!!!! USE THE DataManager CLASS
"""
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
from types import StringTypes, ListType, DictType, StringType, TupleType
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite, isSameSiteSE, getSEsForCountry
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
class CatalogBase( object ):
"""
.. class:: CatalogBase
This class stores the two wrapper functions for interacting with the FileCatalog.
"""
def __init__( self ):
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
self.useCatalogPFN = Operations().getValue( 'DataManagement/UseCatalogPFN', True )
def _callFileCatalogFcnSingleFile( self, lfn, method, argsDict = None, catalogs = None ):
""" A wrapper around :CatalogBase._callFileCatalogFcn: for a single file. It parses
the output of :CatalogBase._callFileCatalogFcn: for the first file provided as input.
If this file is found in::
* res['Value']['Successful'] an S_OK() is returned with the value.
* res['Value']['Failed'] an S_ERROR() is returned with the error message.
:warning: this function is executed only for the first LFN provided, in case of dict of LFNs
the order of keys are NOT preserved, so the output is undefined
:param self: self reference
:param mixed lfn: LFN as string or list with LFNs or dict with LFNs as keys
:param str method: :FileCatalog: method name
:param dict argsDict: kwargs for method
:param list catalogs: list with catalog names
"""
# # default values
argsDict = argsDict if argsDict else dict()
catalogs = catalogs if catalogs else list()
# # check type
if not lfn or type( lfn ) not in StringTypes + ( ListType, DictType ):
return S_ERROR( "wrong type (%s) for argument 'lfn'" % type( lfn ) )
singleLfn = lfn
if type( lfn ) == ListType:
singleLfn = lfn[0]
elif type( lfn ) == DictType:
singleLfn = lfn.keys()[0]
# # call only for single lfn
res = self._callFileCatalogFcn( lfn, method, argsDict, catalogs = catalogs )
if not res["OK"]:
return res
elif singleLfn in res["Value"]["Failed"]:
return S_ERROR( res["Value"]["Failed"][singleLfn] )
if not singleLfn in res["Value"]["Successful"]:
result = S_OK( {} )
for catalog in catalogs:
result['Value'][catalog] = 'OK'
return result
return S_OK( res["Value"]["Successful"][singleLfn] )
def _callFileCatalogFcn( self, lfn, method, argsDict = None, catalogs = None ):
""" A simple wrapper around the file catalog functionality
This is a wrapper around the available :FileCatalog: functions.
The :lfn: and :method: arguments must be provided.
:param self: self reference
:param mixed lfn: a single LFN string or a list of LFNs or dictionary with LFNs stored as keys.
:param str method: name of the FileCatalog function to be invoked
:param dict argsDict: additional keyword arguments that are required for the :method:
:param list catalogs: list of catalogs the operation is to be performed on, by default this
is all available catalogs; examples are 'LcgFileCatalogCombined', 'BookkeepingDB',
'ProductionDB' etc.
"""
# # default values
argsDict = argsDict if argsDict else dict()
catalogs = catalogs if catalogs else list()
lfns = None
if not lfn or type( lfn ) not in StringTypes + ( ListType, DictType ):
errStr = "_callFileCatalogFcn: Wrong 'lfn' argument."
self.log.error( errStr )
return S_ERROR( errStr )
elif type( lfn ) in StringTypes:
lfns = { lfn : False }
elif type( lfn ) == ListType:
lfns = dict.fromkeys( lfn, False )
elif type( lfn ) == DictType:
lfns = lfn.copy()
# # lfns supplied?
if not lfns:
errMsg = "_callFileCatalogFcn: No lfns supplied."
self.log.error( errMsg )
return S_ERROR( errMsg )
self.log.debug( "_callFileCatalogFcn: Will execute '%s' method with %s lfns." % ( method, len( lfns ) ) )
# # create FileCatalog instance
fileCatalog = FileCatalog( catalogs = catalogs )
if not fileCatalog.isOK():
return S_ERROR( "Can't get FileCatalogs %s" % catalogs )
# # get symbol
fcFcn = getattr( fileCatalog, method ) if hasattr( fileCatalog, method ) else None
# # check if it is callable
fcFcn = fcFcn if callable( fcFcn ) else None
if not fcFcn:
errMsg = "_callFileCatalogFcn: '%s' isn't a member function in FileCatalog." % method
self.log.error( errMsg )
return S_ERROR( errMsg )
# # finally, call it
res = fcFcn( lfns, **argsDict )
if not res["OK"]:
self.log.error( "_callFileCatalogFcn: Failed to execute '%s'." % method, res["Message"] )
return res
def _fcFuncWrapper( self, singleFile = False ):
""" choose wrapper to call
:param self: self reference
:param bool singleFile: flag to choose wrapper function, default :False: will
execute :FileCatalog._callFileCatalogFcn:
"""
return { True: self._callFileCatalogFcnSingleFile,
False: self._callFileCatalogFcn }[singleFile]
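# Illustrative usage sketch (assumption, not part of the original class): the wrapper
# selected by _fcFuncWrapper is what the mixin methods below invoke, e.g.
#
#   self._fcFuncWrapper( singleFile )( lfn, "exists", catalogs = catalogs )
#
# With singleFile=True the Successful/Failed bookkeeping is unwrapped for one LFN,
# with singleFile=False the full {'Successful': {...}, 'Failed': {...}} dict is returned.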
class CatalogFile( CatalogBase ):
"""
.. class:: CatalogFile
Wrappers for various :FileCatalog: methods concerning operations on files.
"""
def __init__( self ):
""" c'tor """
CatalogBase.__init__( self )
def getCatalogExists( self, lfn, singleFile = False, catalogs = None ):
""" determine whether the path is registered in the :FileCatalog: by calling
:FileCatalog.exists: method.
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "exists", catalogs = catalogs )
def getCatalogIsFile( self, lfn, singleFile = False, catalogs = None ):
""" determine whether the path is registered as a file in the :FileCatalog:
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "isFile", catalogs = catalogs )
def getCatalogFileMetadata( self, lfn, singleFile = False, catalogs = None ):
""" get the metadata associated to the LFN in the :FileCatalog:
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getFileMetadata", catalogs = catalogs )
def getCatalogFileSize( self, lfn, singleFile = False, catalogs = None ):
""" get the size registered for files in the FileCatalog
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getFileSize", catalogs = catalogs )
def getCatalogReplicas( self, lfn, allStatus = False, singleFile = False, catalogs = None ):
""" Get the replicas registered for files in the FileCatalog
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool allStatus: ???
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getReplicas", argsDict = { "allStatus" : allStatus },
catalogs = catalogs )
def getCatalogLFNForPFN( self, pfn, singleFile = False, catalogs = None ):
""" get the LFNs registered with the supplied PFNs from the FileCatalog
:param self: self reference
:param mixed pfn: the files to obtain (can be a single PFN or list of PFNs)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( pfn, 'getLFNForPFN', catalogs = catalogs )
def addCatalogFile( self, lfn, singleFile = False, catalogs = None ):
""" Add a new file to the FileCatalog
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "addFile", catalogs = catalogs )
def removeCatalogFile( self, lfn, singleFile = False, catalogs = None ):
""" remove a file from the FileCatalog
:param self: self reference
:param mixed lfn: LFN as string or list of LFN strings or dict with LFNs as keys
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
# # make sure lfns are sorted from the longest to the shortest
if type( lfn ) == ListType:
lfn = sorted( lfn, reverse = True )
return self._fcFuncWrapper( singleFile )( lfn, "removeFile", catalogs = catalogs )
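# Illustrative usage sketch (hypothetical LFN, not part of the original class):
#
#   cf = CatalogFile()
#   res = cf.getCatalogReplicas( "/vo/user/some/file.dst", singleFile = True )
#   if res["OK"]:
#       replicas = res["Value"]   # e.g. { "CERN-DST" : "srm://..." }
#
# With singleFile=False the result keeps the usual Successful/Failed structure.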
class CatalogReplica( CatalogBase ):
"""
.. class:: CatalogReplica
Wrappers for various :FileCatalog: methods concerning operations on replicas.
"""
def getCatalogReplicaStatus( self, lfn, singleFile = False, catalogs = None ):
""" get the status of the replica as registered in the :FileCatalog:
:param self: self reference
:param dict lfn: dict containing { LFN : SE }
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getReplicaStatus", catalogs = catalogs )
def addCatalogReplica( self, lfn, singleFile = False, catalogs = None ):
""" add a new replica to the :FileCatalog:
:param self: self reference
:param dict lfn: dictionary containing the replica properties
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "addReplica", catalogs = catalogs )
def removeCatalogReplica( self, lfn, singleFile = False, catalogs = None ):
""" remove a replica from the :FileCatalog:
:param self: self reference
:param mixed lfn: lfn to be removed
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "removeReplica", catalogs = catalogs )
def setCatalogReplicaStatus( self, lfn, singleFile = False, catalogs = None ):
""" Change the status for a replica in the :FileCatalog:
:param self: self reference
:param mixed lfn: dict with replica information to change
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "setReplicaStatus", catalogs = catalogs )
def setCatalogReplicaHost( self, lfn, singleFile = False, catalogs = None ):
""" change the registered SE for a replica in the :FileCatalog:
:param self: self reference
:param mixed lfn: dict with replica information to change
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "setReplicaHost", catalogs = catalogs )
class CatalogDirectory( CatalogBase ):
"""
.. class:: CatalogDirectory
Wrappers for various :FileCatalog: methods concerning operations on folders.
"""
def __init__( self ):
""" c'tor """
CatalogBase.__init__( self )
def getCatalogIsDirectory( self, lfn, singleFile = False, catalogs = None ):
""" determine whether the path is registered as a directory in the :FileCatalog:
:param self: self reference
:param mixed lfn: files to check (can be a single file or list of lfns)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "isDirectory", catalogs = catalogs )
def getCatalogDirectoryMetadata( self, lfn, singleFile = False, catalogs = None ):
""" get the metadata associated to a directory in the :FileCatalog:
:param self: self reference
:param mixed lfn: folders to check (can be a single directory or list of directories)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getDirectoryMetadata", catalogs = catalogs )
def getCatalogDirectoryReplicas( self, lfn, singleFile = False, catalogs = None ):
""" get the replicas for the contents of a directory in the FileCatalog
:param self: self reference
:param mixed lfn: folders to check (can be a single directory or list of directories)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getDirectoryReplicas", catalogs = catalogs )
def getCatalogListDirectory( self, lfn, verbose = False, singleFile = False, catalogs = None ):
""" get the contents of a directory in the :FileCatalog:
:param self: self reference
:param mixed lfn: folders to check (can be a single directory or list of directories)
:param bool verbose: shout
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "listDirectory", argsDict = {"verbose": verbose},
catalogs = catalogs )
def getCatalogDirectorySize( self, lfn, singleFile = False, catalogs = None ):
""" get the size a directory in the :FileCatalog:
:param self: self reference
:param mixed lfn: folders to check (can be a single directory or list of directories)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "getDirectorySize", catalogs = catalogs )
def createCatalogDirectory( self, lfn, singleFile = False, catalogs = None ):
""" mkdir in the :FileCatalog:
:param self: self reference
:param mixed lfn: the directory to create
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "createDirectory", catalogs = catalogs )
def removeCatalogDirectory( self, lfn, recursive = False, singleFile = False, catalogs = None ):
""" rmdir from the :FileCatalog:
:param self: self reference
:param mixed lfn: the directory to remove
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "removeDirectory", argsDict = {"recursive" : recursive},
catalogs = catalogs )
class CatalogLink( CatalogBase ):
"""
.. class:: CatalogReplica
Wrappers for various :FileCatalog: methods concerning operations on links.
"""
def __init__( self ):
""" c'tor """
CatalogBase.__init__( self )
def getCatalogIsLink( self, lfn, singleFile = False, catalogs = None ):
""" determine whether the path is registered as a link in the :FileCatalog:
:param self: self reference
:param mixed lfn: path to be checked (string of list of strings)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "isLink", catalogs = catalogs )
def getCatalogReadLink( self, lfn, singleFile = False, catalogs = None ):
""" get the target of a link as registered in the :FileCatalog:
:param self: self reference
:param mixed lfn: path to be checked (string of list of strings)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "readLink", catalogs = catalogs )
def createCatalogLink( self, lfn, singleFile = False, catalogs = None ):
""" ln in the :FileCatalog: (create the link)
:param self: self reference
:param mixed lfn: link dictionary containing the target lfn and link name to create
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "createLink", catalogs = catalogs )
def removeCatalogLink( self, lfn, singleFile = False, catalogs = None ):
""" rm the link supplied from the :FileCatalog:
:param self: self reference
:param mixed lfn: link to be removed (string of list of strings)
:param bool singleFile: execute for the first LFN only
:param list catalogs: catalogs' names
"""
catalogs = catalogs if catalogs else list()
return self._fcFuncWrapper( singleFile )( lfn, "removeLink", catalogs = catalogs )
class CatalogInterface( CatalogFile, CatalogReplica, CatalogDirectory, CatalogLink ):
"""
.. class:: CatalogInterface
Dummy class to expose all the methods of the CatalogInterface
"""
pass
class StorageBase( object ):
"""
.. class:: StorageBase
This class stores the two wrapper functions for interacting with the StorageElement.
"""
def __init__( self ):
""" c'tor """
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
def _callStorageElementFcnSingleFile( self, storageElementName, pfn, method, argsDict = None ):
""" wrapper around :StorageBase._callStorageElementFcn: for single file execution
It parses the output of :StorageBase._callStorageElementFcn: for the first pfn provided as input.
If this pfn is found in::
* res['Value']['Successful'] an S_OK() is returned with the value.
* res['Value']['Failed'] an S_ERROR() is returned with the error message.
:param self: self reference
:param str storageElementName: DIRAC SE name to be accessed e.g. CERN-DST
:param mixed pfn: contains a single PFN string or a list of PFNs or dictionary containing PFNs
:param str method: name of the :StorageElement: method to be invoked
:param dict argsDict: additional keyword arguments that are required for the :method:
"""
argsDict = argsDict if argsDict else {}
# # call wrapper
res = self._callStorageElementFcn( storageElementName, pfn, method, argsDict )
# # check type
if type( pfn ) == ListType:
pfn = pfn[0]
elif type( pfn ) == DictType:
pfn = pfn.keys()[0]
# # check results
if not res["OK"]:
return res
elif pfn in res["Value"]["Failed"]:
errorMessage = res["Value"]["Failed"][pfn]
return S_ERROR( errorMessage )
else:
return S_OK( res["Value"]["Successful"][pfn] )
def _callStorageElementFcn( self, storageElementName, pfn, method, argsDict = None ):
""" a simple wrapper around the :StorageElement: functionality
:param self: self reference
:param str storageElementName: DIRAC SE name to be accessed e.g. CERN-DST
:param mixed pfn: contains a single PFN string or a list of PFNs or dictionary containing PFNs
:param str method: name of the :StorageElement: method to be invoked
:param dict argsDict: additional keyword arguments that are required for the :method:
"""
argsDict = argsDict if argsDict else {}
# # check pfn type
if type( pfn ) in StringTypes:
pfns = {pfn : False}
elif type( pfn ) == ListType:
pfns = dict.fromkeys( pfn, False )
elif type( pfn ) == DictType:
pfns = pfn.copy()
else:
errStr = "_callStorageElementFcn: Supplied pfns must be a str, list of str or dict."
self.log.error( errStr )
return S_ERROR( errStr )
# # have we got some pfns?
if not pfns:
errMessage = "_callStorageElementFcn: No pfns supplied."
self.log.error( errMessage )
return S_ERROR( errMessage )
self.log.debug( "_callStorageElementFcn: Will execute '%s' with %s pfns." % ( method, len( pfns ) ) )
storageElement = StorageElement( storageElementName )
res = storageElement.isValid( method )
if not res['OK']:
errStr = "_callStorageElementFcn: Failed to instantiate Storage Element"
self.log.error( errStr, "for performing %s at %s." % ( method, storageElementName ) )
return res
# # get symbol
fcFcn = getattr( storageElement, method ) if hasattr( storageElement, method ) else None
# # make sure it is callable
fcFcn = fcFcn if callable( fcFcn ) else None
if not fcFcn:
errMsg = "_callStorageElementFcn: '%s' isn't a member function in StorageElement." % method
self.log.error( errMsg )
return S_ERROR( errMsg )
# # finally, call it
res = fcFcn( pfns, **argsDict )
# # return the output
if not res["OK"]:
errStr = "_callStorageElementFcn: Completely failed to perform %s." % method
self.log.error( errStr, '%s : %s' % ( storageElementName, res["Message"] ) )
return res
def _seFuncWrapper( self, singleFile = False ):
""" choose wrapper to call
:param self: self reference
:param bool singleFile: flag to choose wrapper function, default :False: will
execute :StorageBase._callStorageElementFcn:
"""
return { True: self._callStorageElementFcnSingleFile,
False: self._callStorageElementFcn }[singleFile]
def getPfnForLfn( self, lfns, storageElementName ):
""" get PFNs for supplied LFNs at :storageElementName: SE
:param self: self reference
:param list lfns: list of LFNs
:param str storageElementName: DIRAC SE name
"""
if type( lfns ) == type( '' ):
lfns = [lfns]
storageElement = StorageElement( storageElementName )
res = storageElement.isValid( "getPfnForLfn" )
if not res['OK']:
self.log.error( "getPfnForLfn: Failed to instantiate StorageElement at %s" % storageElementName )
return res
retDict = { "Successful" : {}, "Failed" : {} }
for lfn in lfns:
res = storageElement.getPfnForLfn( lfn )
if res["OK"] and lfn in res['Value']['Successful']:
retDict["Successful"][lfn] = res["Value"]['Successful'][lfn]
else:
retDict["Failed"][lfn] = res.get( "Message", res.get( 'Value', {} ).get( 'Failed', {} ).get( lfn ) )
return S_OK( retDict )
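# Illustrative sketch of the return convention (hypothetical names): a call such as
#
#   self.getPfnForLfn( ["/vo/prod/file.dat"], "CERN-DST" )
#
# yields, on success,
#   { "Successful" : { "/vo/prod/file.dat" : "srm://.../vo/prod/file.dat" },
#     "Failed"     : {} }
# wrapped in S_OK, while per-LFN errors end up under "Failed".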
def getLfnForPfn( self, pfns, storageElementName ):
""" get LFNs for supplied PFNs at :storageElementName: SE
:param self: self reference
:param list lfns: list of LFNs
:param str storageElementName: DIRAC SE name
"""
storageElement = StorageElement( storageElementName )
res = storageElement.isValid( "getPfnPath" )
if not res['OK']:
self.log.error( "getLfnForPfn: Failed to instantiate StorageElement at %s" % storageElementName )
return res
retDict = { "Successful" : {}, "Failed" : {} }
for pfn in pfns:
res = storageElement.getPfnPath( pfn )
if res["OK"]:
retDict["Successful"][pfn] = res["Value"]
else:
retDict["Failed"][pfn] = res["Message"]
return S_OK( retDict )
def getPfnForProtocol( self, pfns, storageElementName, protocol = "SRM2", withPort = True ):
""" create PFNs strings at :storageElementName: SE using protocol :protocol:
:param self: self reference
:param list pfns: list of PFNs
:param str storageElementName: DIRAC SE name
:param str protocol: protocol name (default: 'SRM2')
:param bool withPort: flag to include port in PFN (default: True)
"""
storageElement = StorageElement( storageElementName )
res = storageElement.isValid( "getPfnForProtocol" )
if not res["OK"]:
self.log.error( "getPfnForProtocol: Failed to instantiate StorageElement at %s" % storageElementName )
return res
retDict = { "Successful" : {}, "Failed" : {}}
for pfn in pfns:
res = returnSingleResult( storageElement.getPfnForProtocol( pfn, protocol, withPort = withPort ) )
if res["OK"]:
retDict["Successful"][pfn] = res["Value"]
else:
retDict["Failed"][pfn] = res["Message"]
return S_OK( retDict )
class StorageFile( StorageBase ):
"""
.. class:: StorageFile
Wrappers for various :StorageElement: methods concerning operations on files.
"""
def __init__( self ):
""" c'tor """
StorageBase.__init__( self )
def getStorageFileExists( self, physicalFile, storageElementName, singleFile = False ):
""" determine the existence of the physical files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "exists" )
def getStorageFileIsFile( self, physicalFile, storageElementName, singleFile = False ):
""" determine if supplied physical paths are files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "isFile" )
def getStorageFileSize( self, physicalFile, storageElementName, singleFile = False ):
""" get the size of the physical files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "getFileSize" )
def getStorageFileAccessUrl( self, physicalFile, storageElementName, protocol = None, singleFile = False ):
""" get the access url for a physical file
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
protocol = protocol if protocol else list()
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
"getAccessUrl", argsDict = {"protocol" : protocol} )
def getStorageFileMetadata( self, physicalFile, storageElementName, singleFile = False ):
""" get the metadata for physical files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "getFileMetadata" )
def removeStorageFile( self, physicalFile, storageElementName, singleFile = False ):
""" rm supplied physical files from :storageElementName: DIRAC SE
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "removeFile" )
def prestageStorageFile( self, physicalFile, storageElementName, lifetime = 86400, singleFile = False ):
""" prestage physical files
:param self: self reference
:param mixed physicalFile: PFNs to be prestaged
:param str storageElement: SE name
:param int lifetime: 24h in seconds
:param bool singleFile: flag to prestage only one file
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
"prestageFile", argsDict = {"lifetime" : lifetime} )
def getPrestageStorageFileStatus( self, physicalFile, storageElementName, singleFile = False ):
""" get the status of a pre-stage request
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "prestageFileStatus" )
def pinStorageFile( self, physicalFile, storageElementName, lifetime = 86400, singleFile = False ):
""" pin physical files with a given lifetime
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
"pinFile", argsDict = {"lifetime": lifetime} )
def releaseStorageFile( self, physicalFile, storageElementName, singleFile = False ):
""" release the pin on physical files
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "releaseFile" )
def getStorageFile( self, physicalFile, storageElementName, localPath = False, singleFile = False ):
""" create a local copy of a physical file
:param self: self reference
:param mixed physicalFile: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param mixed localPath: string with local path to use or False (if False, os.getcwd() will be used)
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
"getFile", argsDict = {"localPath": localPath} )
def putStorageFile( self, physicalFile, storageElementName, singleFile = False ):
""" put the local file to the storage element
:param self: self reference
:param mixed physicalFile: dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile, "putFile" )
def replicateStorageFile( self, physicalFile, size, storageElementName, singleFile = False ):
""" replicate a physical file to a storage element
:param self: self reference
:param mixed physicalFile: dictionary with PFN information
:param int size: size of PFN in bytes
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first PFN only
"""
return self._seFuncWrapper( singleFile )( storageElementName, physicalFile,
'replicateFile', argsDict = {'sourceSize': size} )
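# Illustrative usage sketch (hypothetical PFN and SE, not part of the original class):
#
#   sf = StorageFile()
#   res = sf.prestageStorageFile( "srm://.../file.raw", "CERN-RAW", lifetime = 3600,
#                                 singleFile = True )
#   if res["OK"]:
#       res = sf.pinStorageFile( "srm://.../file.raw", "CERN-RAW", lifetime = 3600,
#                                singleFile = True )
#
# Both calls delegate to _callStorageElementFcn with the corresponding
# StorageElement method names ("prestageFile", "pinFile").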
class StorageDirectory( StorageBase ):
"""
.. class:: StorageDirectory
Wrappers for various :StorageElement: methods concerning operations on folders.
"""
def __init__( self ):
""" c'tor """
StorageBase.__init__( self )
def getStorageDirectoryIsDirectory( self, storageDirectory, storageElementName, singleDirectory = False ):
""" determine if the storage paths are directories
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "isDirectory" )
def getStorageDirectoryMetadata( self, storageDirectory, storageElementName, singleDirectory = False ):
""" get the metadata for storage directories
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "getDirectoryMetadata" )
def getStorageDirectorySize( self, storageDirectory, storageElementName, singleDirectory = False ):
""" get the size of the storage directories
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "getDirectorySize" )
def getStorageListDirectory( self, storageDirectory, storageElementName, singleDirectory = False ):
""" ls of a directory in the Storage Element
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "listDirectory" )
def getStorageDirectory( self, storageDirectory, storageElementName, localPath = False, singleDirectory = False ):
""" copy the contents of a directory from the Storage Element to local folder
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param mixed localPath: destination folder, if False, os.getcwd() will be used
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory,
"getDirectory", argsDict = {'localPath': localPath} )
def putStorageDirectory( self, storageDirectory, storageElementName, singleDirectory = False ):
""" put the local directory to the storage element
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory, "putDirectory" )
def removeStorageDirectory( self, storageDirectory, storageElementName, recursive = False, singleDirectory = False ):
""" rmdir a directory from the storage element
:param self: self reference
:param mixed storageDirectory: string with PFN or list with PFNs or dictionary with PFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleDirectory: execute for the first PFN only
"""
return self._seFuncWrapper( singleDirectory )( storageElementName, storageDirectory,
"removeDirectory", argsDict = {"recursive": recursive} )
class StorageInterface( StorageFile, StorageDirectory ):
"""
.. class:: StorageInterface
Dummy class to expose all the methods of the StorageInterface
"""
def __init__( self ):
""" c'tor """
StorageFile.__init__( self )
StorageDirectory.__init__( self )
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
class CatalogToStorage( CatalogInterface, StorageInterface ):
"""
.. class:: CatalogToStorage
Collection of functions doing simple replica<-->Storage element operations.
"""
def __init__( self ):
""" c'tor """
CatalogInterface.__init__( self )
StorageInterface.__init__( self )
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
def _replicaSEFcnWrapper( self, singleFile = False ):
""" choose wrapper to call
:param self: self reference
:param bool singleFile: flag to choose wrapper function, default :False: will
execute :CatalogToStorage._callReplicaSEFcn:
"""
return { True: self._callReplicaSEFcnSingleFile,
False: self._callReplicaSEFcn }[singleFile]
def _callReplicaSEFcnSingleFile( self, storageElementName, lfn, method, argsDict = None ):
""" call :method: of StorageElement :storageElementName: for single :lfn: using :argsDict: kwargs
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: LFN
:param str method: StorageElement function name
:param dict argsDict: kwargs of :method:
"""
# # default value
argsDict = argsDict if argsDict else {}
# # get single LFN
singleLfn = lfn
if type( lfn ) == ListType:
singleLfn = lfn[0]
elif type( lfn ) == DictType:
singleLfn = lfn.keys()[0]
# # call method
res = self._callReplicaSEFcn( storageElementName, singleLfn, method, argsDict )
# # check results
if not res["OK"]:
return res
elif singleLfn in res["Value"]["Failed"]:
return S_ERROR( res["Value"]["Failed"][singleLfn] )
return S_OK( res["Value"]["Successful"][singleLfn] )
def _callReplicaSEFcn( self, storageElementName, lfn, method, argsDict = None ):
""" a simple wrapper that allows replica querying then perform the StorageElement operation
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
"""
# # default value
argsDict = argsDict if argsDict else {}
# # get replicas for lfn
res = self._callFileCatalogFcn( lfn, "getReplicas" )
if not res["OK"]:
errStr = "_callReplicaSEFcn: Completely failed to get replicas for LFNs."
self.log.error( errStr, res["Message"] )
return res
# # returned dict, get failed replicas
retDict = { "Failed": res["Value"]["Failed"],
"Successful" : {} }
# # print errors
for lfn, reason in retDict["Failed"].items():
self.log.error( "_callReplicaSEFcn: Failed to get replicas for file.", "%s %s" % ( lfn, reason ) )
# # good replicas
lfnReplicas = res["Value"]["Successful"]
# # store PFN to LFN mapping
pfnDict = {}
for lfn, replicas in lfnReplicas.items():
if storageElementName in replicas:
useCatalogPFN = Operations().getValue( 'DataManagement/UseCatalogPFN', True )
if useCatalogPFN:
pfn = replicas[storageElementName]
else:
res = self.getPfnForLfn( lfn, storageElementName )
pfn = res.get( 'Value', {} ).get( 'Successful', {} ).get( lfn, replicas[storageElementName] )
pfnDict[pfn] = lfn
else:
errStr = "_callReplicaSEFcn: File hasn't got replica at supplied Storage Element."
self.log.error( errStr, "%s %s" % ( lfn, storageElementName ) )
retDict["Failed"][lfn] = errStr
# # finally, call the StorageElement function
res = self._callStorageElementFcn( storageElementName, pfnDict.keys(), method, argsDict )
# # check result
if not res["OK"]:
errStr = "_callReplicaSEFcn: Failed to execute %s StorageElement method." % method
self.log.error( errStr, res["Message"] )
return res
# # filter out failed and successful
for pfn, pfnRes in res["Value"]["Successful"].items():
retDict["Successful"][pfnDict[pfn]] = pfnRes
for pfn, errorMessage in res["Value"]["Failed"].items():
retDict["Failed"][pfnDict[pfn]] = errorMessage
return S_OK( retDict )
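# Sketch of the flow above (descriptive only): getReplicas() resolves each LFN to its
# replica PFN at storageElementName, and pfnDict maps PFN -> LFN so that the
# StorageElement result (keyed by PFN) can be translated back into LFN keys, e.g.
#
#   { "srm://se/path/file" : "/vo/path/file" }
#
# LFNs without a replica at the requested SE are reported under "Failed".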
def getReplicaIsFile( self, lfn, storageElementName, singleFile = False ):
""" determine whether the supplied lfns are files at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "isFile" )
def getReplicaSize( self, lfn, storageElementName, singleFile = False ):
""" get the size of files for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "getFileSize" )
def getReplicaAccessUrl( self, lfn, storageElementName, singleFile = False ):
""" get the access url for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "getAccessUrl" )
def getReplicaMetadata( self, lfn, storageElementName, singleFile = False ):
""" get the file metadata for lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "getFileMetadata" )
def prestageReplica( self, lfn, storageElementName, lifetime = 86400, singleFile = False ):
""" issue a prestage requests for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn,
"prestageFile", argsDict = {"lifetime": lifetime} )
def getPrestageReplicaStatus( self, lfn, storageElementName, singleFile = False ):
""" This functionality is not supported.
Then what is it doing here? Not supported -> delete it!
"""
return S_ERROR( "Not supported functionality. Please use getReplicaMetadata and check the 'Cached' element." )
def pinReplica( self, lfn, storageElementName, lifetime = 86400, singleFile = False ):
""" pin the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn,
"pinFile", argsDict = {"lifetime": lifetime} )
def releaseReplica( self, lfn, storageElementName, singleFile = False ):
""" release pins for the lfns at the supplied StorageElement
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn, "releaseFile" )
def getReplica( self, lfn, storageElementName, localPath = False, singleFile = False ):
""" copy replicas from DIRAC SE to local directory
:param self: self reference
:param mixed lfn: LFN string, list if LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param mixed localPath: path in the local file system, if False, os.getcwd() will be used
:param bool singleFile: execute for the first LFN only
"""
return self._replicaSEFcnWrapper( singleFile )( storageElementName, lfn,
"getFile", argsDict = {"localPath": localPath} )
class ReplicaManager( CatalogToStorage ):
"""
.. class:: ReplicaManager
A ReplicaManager puts all possible StorageElement and FileCatalog functionalities together.
"""
def __init__( self ):
""" c'tor
:param self: self reference
"""
CatalogToStorage.__init__( self )
self.fileCatalogue = FileCatalog()
self.accountingClient = None
self.registrationProtocol = ['SRM2', 'DIP']
self.thirdPartyProtocols = ['SRM2', 'DIP']
self.resourceStatus = ResourceStatus()
self.ignoreMissingInFC = Operations().getValue( 'DataManagement/IgnoreMissingInFC', False )
def setAccountingClient( self, client ):
""" Set Accounting Client instance
"""
self.accountingClient = client
def __verifyOperationPermission( self, path ):
""" Check if we have write permission to the given directory
"""
if type( path ) in StringTypes:
paths = [ path ]
else:
paths = path
fc = FileCatalog()
res = fc.getPathPermissions( paths )
if not res['OK']:
return res
for path in paths:
if not res['Value']['Successful'].get( path, {} ).get( 'Write', False ):
return S_OK( False )
return S_OK( True )
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory( self, lfnDir ):
""" Clean the logical directory from the catalog and storage
"""
if type( lfnDir ) in StringTypes:
lfnDir = [ lfnDir ]
retDict = { "Successful" : {}, "Failed" : {} }
for folder in lfnDir:
res = self.__cleanDirectory( folder )
if not res['OK']:
self.log.error( "Failed to clean directory.", "%s %s" % ( folder, res['Message'] ) )
retDict["Failed"][folder] = res['Message']
else:
self.log.info( "Successfully removed directory.", folder )
retDict["Successful"][folder] = res['Value']
return S_OK( retDict )
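# Illustrative usage sketch (hypothetical directory, not part of the original class):
#
#   rm = ReplicaManager()
#   res = rm.cleanLogicalDirectory( "/vo/user/x/xyz/old_output" )
#   if res["OK"]:
#       print res["Value"]["Successful"].keys()   # folders fully removed
#       print res["Value"]["Failed"]              # folder -> error message
#
# Each folder is handled independently by __cleanDirectory below.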
def __cleanDirectory( self, folder ):
""" delete all files from directory :folder: in FileCatalog and StorageElement
:param self: self reference
:param str folder: directory name
"""
res = self.__verifyOperationPermission( folder )
if not res['OK']:
return res
if not res['Value']:
errStr = "__cleanDirectory: Write access not permitted for this credential."
self.log.error( errStr, folder )
return S_ERROR( errStr )
res = self.__getCatalogDirectoryContents( [ folder ] )
if not res['OK']:
return res
res = self.removeFile( res['Value'].keys() + [ '%s/dirac_directory' % folder ] )
if not res['OK']:
return res
for lfn, reason in res['Value']['Failed'].items():
self.log.error( "Failed to remove file found in the catalog", "%s %s" % ( lfn, reason ) )
storageElements = gConfig.getValue( 'Resources/StorageElementGroups/SE_Cleaning_List', [] )
failed = False
for storageElement in sorted( storageElements ):
res = self.__removeStorageDirectory( folder, storageElement )
if not res['OK']:
failed = True
if failed:
return S_ERROR( "Failed to clean storage directory at all SEs" )
res = self.removeCatalogDirectory( folder, recursive = True, singleFile = True )
if not res['OK']:
return res
return S_OK()
def __removeStorageDirectory( self, directory, storageElement ):
""" delete SE directory
:param self: self reference
:param str directory: folder to be removed
:param str storageElement: DIRAC SE name
"""
self.log.info( 'Removing the contents of %s at %s' % ( directory, storageElement ) )
res = self.getPfnForLfn( [directory], storageElement )
if not res['OK']:
self.log.error( "Failed to get PFN for directory", res['Message'] )
return res
for directory, error in res['Value']['Failed'].items():
self.log.error( 'Failed to obtain directory PFN from LFN', '%s %s' % ( directory, error ) )
if res['Value']['Failed']:
return S_ERROR( 'Failed to obtain directory PFN from LFNs' )
storageDirectory = res['Value']['Successful'].values()[0]
res = self.getStorageFileExists( storageDirectory, storageElement, singleFile = True )
if not res['OK']:
self.log.error( "Failed to obtain existance of directory", res['Message'] )
return res
exists = res['Value']
if not exists:
self.log.info( "The directory %s does not exist at %s " % ( directory, storageElement ) )
return S_OK()
res = self.removeStorageDirectory( storageDirectory, storageElement, recursive = True, singleDirectory = True )
if not res['OK']:
self.log.error( "Failed to remove storage directory", res['Message'] )
return res
self.log.info( "Successfully removed %d files from %s at %s" % ( res['Value']['FilesRemoved'],
directory,
storageElement ) )
return S_OK()
def __getCatalogDirectoryContents( self, directories ):
""" ls recursively all files in directories
:param self: self reference
:param list directories: folder names
"""
self.log.info( 'Obtaining the catalog contents for %d directories:' % len( directories ) )
for directory in directories:
self.log.info( directory )
activeDirs = directories
allFiles = {}
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = self.getCatalogListDirectory( currentDir, singleFile = True )
activeDirs.remove( currentDir )
if not res['OK'] and res['Message'].endswith( 'The supplied path does not exist' ):
self.log.info( "The supplied directory %s does not exist" % currentDir )
elif not res['OK']:
self.log.error( 'Failed to get directory contents', '%s %s' % ( currentDir, res['Message'] ) )
else:
dirContents = res['Value']
activeDirs.extend( dirContents['SubDirs'] )
allFiles.update( dirContents['Files'] )
self.log.info( "Found %d files" % len( allFiles ) )
return S_OK( allFiles )
def getReplicasFromDirectory( self, directory ):
""" get all replicas from a given directory
:param self: self reference
:param mixed directory: list of directories or one directory
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
res = self.__getCatalogDirectoryContents( directories )
if not res['OK']:
return res
allReplicas = {}
for lfn, metadata in res['Value'].items():
allReplicas[lfn] = metadata['Replicas']
return S_OK( allReplicas )
def getFilesFromDirectory( self, directory, days = 0, wildcard = '*' ):
""" get all files from :directory: older than :days: days matching to :wildcard:
:param self: self reference
:param mixed directory: list of directories or directory name
:param int days: ctime days
:param str wildcard: pattern to match
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
self.log.info( "Obtaining the files older than %d days in %d directories:" % ( days, len( directories ) ) )
for folder in directories:
self.log.info( folder )
activeDirs = directories
allFiles = []
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
# We only need the metadata (verbose) if a limit date is given
res = self.getCatalogListDirectory( currentDir, verbose = ( days != 0 ), singleFile = True )
activeDirs.remove( currentDir )
if not res['OK']:
self.log.error( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) )
else:
dirContents = res['Value']
subdirs = dirContents['SubDirs']
files = dirContents['Files']
self.log.info( "%s: %d files, %d sub-directories" % ( currentDir, len( files ), len( subdirs ) ) )
for subdir in subdirs:
if ( not days ) or self.__isOlderThan( subdirs[subdir]['CreationDate'], days ):
if subdir[0] != '/':
subdir = currentDir + '/' + subdir
activeDirs.append( subdir )
for fileName in files:
fileInfo = files[fileName]
fileInfo = fileInfo.get( 'Metadata', fileInfo )
if ( not days ) or not fileInfo.get( 'CreationDate' ) or self.__isOlderThan( fileInfo['CreationDate'], days ):
if wildcard == '*' or fnmatch.fnmatch( fileName, wildcard ):
fileName = fileInfo.get( 'LFN', fileName )
allFiles.append( fileName )
return S_OK( allFiles )
def __isOlderThan( self, stringTime, days ):
timeDelta = timedelta( days = days )
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
##########################################################################
#
# These are the data transfer methods
#
def getFile( self, lfn, destinationDir = '' ):
""" Get a local copy of a LFN from Storage Elements.
'lfn' is the logical file name for the desired file
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "getFile: Supplied lfn must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
self.log.verbose( "getFile: Attempting to get %s files." % len( lfns ) )
res = self.getActiveReplicas( lfns )
if not res['OK']:
return res
failed = res['Value']['Failed']
lfnReplicas = res['Value']['Successful']
res = self.getCatalogFileMetadata( lfnReplicas.keys() )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
fileMetadata = res['Value']['Successful']
successful = {}
for lfn in fileMetadata:
res = self.__getFile( lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir )
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = res['Value']
return S_OK( { 'Successful': successful, 'Failed' : failed } )
def __getFile( self, lfn, replicas, metadata, destinationDir ):
if not replicas:
self.log.error( "No accessible replicas found" )
return S_ERROR( "No accessible replicas found" )
# Determine the best replicas
res = self._getSEProximity( replicas.keys() )
if not res['OK']:
return res
for storageElementName in res['Value']:
physicalFile = replicas[storageElementName]
# print '__getFile', physicalFile, replicas[storageElementName]
res = self.getStorageFile( physicalFile,
storageElementName,
localPath = os.path.realpath( destinationDir ),
singleFile = True )
if not res['OK']:
self.log.error( "Failed to get %s from %s" % ( lfn, storageElementName ), res['Message'] )
else:
localFile = os.path.realpath( os.path.join( destinationDir, os.path.basename( lfn ) ) )
localAdler = fileAdler( localFile )
if ( metadata['Size'] != res['Value'] ):
self.log.error( "Size of downloaded file (%d) does not match catalog (%d)" % ( res['Value'],
metadata['Size'] ) )
elif ( metadata['Checksum'] ) and ( not compareAdler( metadata['Checksum'], localAdler ) ):
self.log.error( "Checksum of downloaded file (%s) does not match catalog (%s)" % ( localAdler,
metadata['Checksum'] ) )
else:
return S_OK( localFile )
self.log.error( "getFile: Failed to get local copy from any replicas.", lfn )
return S_ERROR( "ReplicaManager.getFile: Failed to get local copy from any replicas." )
def _getSEProximity( self, ses ):
""" get SE proximity """
siteName = DIRAC.siteName()
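    # Preference order built below: SEs local to this site first, then SEs in
    # the same country, then all remaining SEs; each group is shuffled to
    # spread load across replicas.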
localSEs = [se for se in getSEsForSite( siteName )['Value'] if se in ses]
countrySEs = []
countryCode = str( siteName ).split( '.' )[-1]
res = getSEsForCountry( countryCode )
if res['OK']:
countrySEs = [se for se in res['Value'] if se in ses and se not in localSEs]
sortedSEs = randomize( localSEs ) + randomize( countrySEs )
sortedSEs += randomize( [se for se in ses if se not in sortedSEs] )
return S_OK( sortedSEs )
def putAndRegister( self, lfn, fileName, diracSE, guid = None, path = None, checksum = None, catalog = None, ancestors = None ):
""" Put a local file to a Storage Element and register in the File Catalogues
'lfn' is the file LFN
    'fileName' is the full path to the local file
'diracSE' is the Storage Element to which to put the file
'guid' is the guid with which the file is to be registered (if not provided will be generated)
'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
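    Example (illustrative only; the LFN, local path and SE name are made up,
    'rm' stands for an instance of this class):
      res = rm.putAndRegister( '/vo/user/a/alice/test.txt', '/tmp/test.txt', 'SOME-USER-SE' )
      # res['Value']['Successful'] / res['Value']['Failed'] report the outcome per LFN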
"""
ancestors = ancestors if ancestors else list()
res = self.__verifyOperationPermission( os.path.dirname( lfn ) )
if not res['OK']:
return res
if not res['Value']:
errStr = "putAndRegister: Write access not permitted for this credential."
self.log.error( errStr, lfn )
return S_ERROR( errStr )
# Instantiate the desired file catalog
if catalog:
self.fileCatalogue = FileCatalog( catalog )
if not self.fileCatalogue.isOK():
return S_ERROR( "Can't get FileCatalog %s" % catalog )
else:
self.fileCatalogue = FileCatalog()
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "putAndRegister: Supplied file does not exist."
self.log.error( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "putAndRegister: Supplied file is zero size."
self.log.error( errStr, fileName )
return S_ERROR( errStr )
# If the GUID is not given, generate it here
if not guid:
guid = makeGuid( fileName )
if not checksum:
self.log.info( "putAndRegister: Checksum information not provided. Calculating adler32." )
checksum = fileAdler( fileName )
self.log.info( "putAndRegister: Checksum calculated to be %s." % checksum )
res = self.fileCatalogue.exists( {lfn:guid} )
if not res['OK']:
errStr = "putAndRegister: Completely failed to determine existence of destination LFN."
self.log.error( errStr, lfn )
return res
if lfn not in res['Value']['Successful']:
errStr = "putAndRegister: Failed to determine existence of destination LFN."
self.log.error( errStr, lfn )
return S_ERROR( errStr )
if res['Value']['Successful'][lfn]:
if res['Value']['Successful'][lfn] == lfn:
errStr = "putAndRegister: The supplied LFN already exists in the File Catalog."
self.log.error( errStr, lfn )
else:
errStr = "putAndRegister: This file GUID already exists for another file. " \
"Please remove it and try again."
self.log.error( errStr, res['Value']['Successful'][lfn] )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Successful'][lfn] ) )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "putAndRegister: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
destinationSE = storageElement.getStorageElementName()['Value']
res = storageElement.getPfnForLfn( lfn )
if not res['OK'] or lfn not in res['Value']['Successful']:
errStr = "putAndRegister: Failed to generate destination PFN."
self.log.error( errStr, res.get( 'Message', res.get( 'Value', {} ).get( 'Failed', {} ).get( lfn ) ) )
return S_ERROR( errStr )
destPfn = res['Value']['Successful'][lfn]
fileDict = {destPfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
oDataOperation = self.__initialiseAccountingObject( 'putAndRegister', diracSE, 1 )
oDataOperation.setStartTime()
oDataOperation.setValueByKey( 'TransferSize', size )
startTime = time.time()
res = storageElement.putFile( fileDict, singleFile = True )
putTime = time.time() - startTime
oDataOperation.setValueByKey( 'TransferTime', putTime )
if not res['OK']:
errStr = "putAndRegister: Failed to put file to Storage Element."
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.info( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
self.log.error( errStr, "%s: %s" % ( fileName, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
successful[lfn] = {'put': putTime}
###########################################################
# Perform the registration here
oDataOperation.setValueByKey( 'RegistrationTotal', 1 )
fileTuple = ( lfn, destPfn, size, destinationSE, guid, checksum )
registerDict = {'LFN':lfn, 'PFN':destPfn, 'Size':size, 'TargetSE':destinationSE, 'GUID':guid, 'Addler':checksum}
startTime = time.time()
res = self.registerFile( fileTuple, catalog = catalog )
registerTime = time.time() - startTime
oDataOperation.setValueByKey( 'RegistrationTime', registerTime )
if not res['OK']:
errStr = "putAndRegister: Completely failed to register file."
self.log.error( errStr, res['Message'] )
failed[lfn] = { 'register' : registerDict }
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
elif lfn in res['Value']['Failed']:
errStr = "putAndRegister: Failed to register file."
self.log.error( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
failed[lfn] = { 'register' : registerDict }
else:
successful[lfn]['register'] = registerTime
oDataOperation.setValueByKey( 'RegistrationOK', 1 )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.info( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
return S_OK( {'Successful': successful, 'Failed': failed } )
def replicateAndRegister( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' , catalog = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
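    Example (illustrative only; LFN and SE name are made up):
      res = rm.replicateAndRegister( '/vo/user/a/alice/test.txt', 'SOME-DST-SE' )
    On success res['Value']['Successful'][lfn] carries the 'replicate' and
    'register' timings in seconds.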
"""
successful = {}
failed = {}
self.log.verbose( "replicateAndRegister: Attempting to replicate %s to %s." % ( lfn, destSE ) )
startReplication = time.time()
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
replicationTime = time.time() - startReplication
if not res['OK']:
errStr = "ReplicaManager.replicateAndRegister: Completely failed to replicate file."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
if not res['Value']:
# The file was already present at the destination SE
self.log.info( "replicateAndRegister: %s already present at %s." % ( lfn, destSE ) )
successful[lfn] = { 'replicate' : 0, 'register' : 0 }
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
successful[lfn] = { 'replicate' : replicationTime }
destPfn = res['Value']['DestPfn']
destSE = res['Value']['DestSE']
self.log.verbose( "replicateAndRegister: Attempting to register %s at %s." % ( destPfn, destSE ) )
replicaTuple = ( lfn, destPfn, destSE )
startRegistration = time.time()
res = self.registerReplica( replicaTuple, catalog = catalog )
registrationTime = time.time() - startRegistration
if not res['OK']:
# Need to return to the client that the file was replicated but not registered
errStr = "replicateAndRegister: Completely failed to register replica."
self.log.error( errStr, res['Message'] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
else:
if lfn in res['Value']['Successful']:
self.log.info( "replicateAndRegister: Successfully registered replica." )
successful[lfn]['register'] = registrationTime
else:
errStr = "replicateAndRegister: Failed to register replica."
self.log.info( errStr, res['Value']['Failed'][lfn] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
return S_OK( {'Successful': successful, 'Failed': failed} )
def replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE without registering the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
self.log.verbose( "replicate: Attempting to replicate %s to %s." % ( lfn, destSE ) )
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
if not res['OK']:
errStr = "replicate: Replication failed."
self.log.error( errStr, "%s %s" % ( lfn, destSE ) )
return res
if not res['Value']:
# The file was already present at the destination SE
self.log.info( "replicate: %s already present at %s." % ( lfn, destSE ) )
return res
return S_OK( lfn )
def __replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
"""
###########################################################
# Check that we have write permissions to this directory.
res = self.__verifyOperationPermission( lfn )
if not res['OK']:
return res
if not res['Value']:
errStr = "__replicate: Write access not permitted for this credential."
self.log.error( errStr, lfn )
return S_ERROR( errStr )
self.log.verbose( "__replicate: Performing replication initialization." )
res = self.__initializeReplication( lfn, sourceSE, destSE )
if not res['OK']:
self.log.error( "__replicate: Replication initialisation failed.", lfn )
return res
destStorageElement = res['Value']['DestStorage']
lfnReplicas = res['Value']['Replicas']
destSE = res['Value']['DestSE']
catalogueSize = res['Value']['CatalogueSize']
###########################################################
# If the LFN already exists at the destination we have nothing to do
if destSE in lfnReplicas:
self.log.info( "__replicate: LFN is already registered at %s." % destSE )
return S_OK()
###########################################################
# Resolve the best source storage elements for replication
self.log.verbose( "__replicate: Determining the best source replicas." )
res = self.__resolveBestReplicas( lfn, sourceSE, lfnReplicas, catalogueSize )
if not res['OK']:
self.log.error( "__replicate: Best replica resolution failed.", lfn )
return res
replicaPreference = res['Value']
###########################################################
# Now perform the replication for the file
if destPath:
destPath = '%s/%s' % ( destPath, os.path.basename( lfn ) )
else:
destPath = lfn
res = destStorageElement.getPfnForLfn( destPath )
if not res['OK'] or destPath not in res['Value']['Successful']:
errStr = "__replicate: Failed to generate destination PFN."
self.log.error( errStr, res.get( 'Message', res.get( 'Value', {} ).get( 'Failed', {} ).get( destPath ) ) )
return S_ERROR( errStr )
destPfn = res['Value']['Successful'][destPath]
# Find out if there is a replica already at the same site
localReplicas = []
otherReplicas = []
for sourceSE, sourcePfn in replicaPreference:
if sourcePfn == destPfn:
continue
res = isSameSiteSE( sourceSE, destSE )
if res['OK'] and res['Value']:
localReplicas.append( ( sourceSE, sourcePfn ) )
else:
otherReplicas.append( ( sourceSE, sourcePfn ) )
replicaPreference = localReplicas + otherReplicas
for sourceSE, sourcePfn in replicaPreference:
self.log.verbose( "__replicate: Attempting replication from %s to %s." % ( sourceSE, destSE ) )
fileDict = {destPfn:sourcePfn}
if sourcePfn == destPfn:
continue
localFile = ''
#FIXME: this should not be hardcoded!!!
if sourcePfn.find( 'srm' ) == -1 or destPfn.find( 'srm' ) == -1:
# No third party transfer is possible, we have to replicate through the local cache
localDir = '.'
if localCache:
localDir = localCache
self.getFile( lfn, localDir )
localFile = os.path.join( localDir, os.path.basename( lfn ) )
fileDict = {destPfn:localFile}
res = destStorageElement.replicateFile( fileDict, catalogueSize, singleFile = True )
if localFile and os.path.exists( localFile ):
os.remove( localFile )
if res['OK']:
self.log.info( "__replicate: Replication successful." )
resDict = {'DestSE':destSE, 'DestPfn':destPfn}
return S_OK( resDict )
else:
errStr = "__replicate: Replication failed."
self.log.error( errStr, "%s from %s to %s." % ( lfn, sourceSE, destSE ) )
##########################################################
# If the replication failed for all sources give up
errStr = "__replicate: Failed to replicate with all sources."
self.log.error( errStr, lfn )
return S_ERROR( errStr )
def __initializeReplication( self, lfn, sourceSE, destSE ):
# Horrible, but kept to not break current log messages
logStr = "__initializeReplication:"
###########################################################
# Check the sourceSE if specified
self.log.verbose( "%s: Determining whether source Storage Element is sane." % logStr )
if sourceSE:
if not self.__SEActive( sourceSE ).get( 'Value', {} ).get( 'Read' ):
infoStr = "%s Supplied source Storage Element is not currently allowed for Read." % ( logStr )
self.log.info( infoStr, sourceSE )
return S_ERROR( infoStr )
###########################################################
# Check that the destination storage element is sane and resolve its name
self.log.verbose( "%s Verifying dest StorageElement validity (%s)." % ( logStr, destSE ) )
destStorageElement = StorageElement( destSE )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "%s The storage element is not currently valid." % logStr
self.log.error( errStr, "%s %s" % ( destSE, res['Message'] ) )
return S_ERROR( errStr )
destSE = destStorageElement.getStorageElementName()['Value']
self.log.info( "%s Destination Storage Element verified." % logStr )
###########################################################
# Check whether the destination storage element is banned
self.log.verbose( "%s Determining whether %s ( destination ) is Write-banned." % ( logStr, destSE ) )
if not self.__SEActive( destSE ).get( 'Value', {} ).get( 'Write' ):
infoStr = "%s Supplied destination Storage Element is not currently allowed for Write." % ( logStr )
self.log.info( infoStr, destSE )
return S_ERROR( infoStr )
###########################################################
# Get the LFN replicas from the file catalogue
self.log.verbose( "%s Attempting to obtain replicas for %s." % ( logStr, lfn ) )
res = self.getReplicas( lfn )
if not res[ 'OK' ]:
errStr = "%s Completely failed to get replicas for LFN." % logStr
self.log.error( errStr, "%s %s" % ( lfn, res['Message'] ) )
return res
if lfn not in res['Value']['Successful']:
errStr = "%s Failed to get replicas for LFN." % logStr
self.log.error( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
self.log.info( "%s Successfully obtained replicas for LFN." % logStr )
lfnReplicas = res['Value']['Successful'][lfn]
###########################################################
# Check the file is at the sourceSE
self.log.verbose( "%s: Determining whether source Storage Element is sane." % logStr )
if sourceSE and sourceSE not in lfnReplicas:
errStr = "%s LFN does not exist at supplied source SE." % logStr
self.log.error( errStr, "%s %s" % ( lfn, sourceSE ) )
return S_ERROR( errStr )
###########################################################
# If the file catalogue size is zero fail the transfer
self.log.verbose( "%s Attempting to obtain size for %s." % ( logStr, lfn ) )
res = self.getFileSize( lfn )
if not res['OK']:
errStr = "%s Completely failed to get size for LFN." % logStr
self.log.error( errStr, "%s %s" % ( lfn, res['Message'] ) )
return res
if lfn not in res['Value']['Successful']:
errStr = "%s Failed to get size for LFN." % logStr
self.log.error( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Failed'][lfn] ) )
catalogueSize = res['Value']['Successful'][lfn]
if catalogueSize == 0:
errStr = "%s Registered file size is 0." % logStr
self.log.error( errStr, lfn )
return S_ERROR( errStr )
self.log.info( "%s File size determined to be %s." % ( logStr, catalogueSize ) )
###########################################################
# Check whether the destination storage element is banned
self.log.verbose( "%s Determining whether %s ( destination ) is Write-banned." % ( logStr, destSE ) )
usableDestSE = self.resourceStatus.isUsableStorage( destSE, 'WriteAccess' )
if not usableDestSE:
infoStr = "%s Destination Storage Element is currently unusable for Write" % logStr
self.log.info( infoStr, destSE )
return S_ERROR( infoStr )
self.log.info( "%s Destination site not banned for Write." % logStr )
###########################################################
# Check whether the supplied source SE is sane
self.log.verbose( "%s: Determining whether source Storage Element is sane." % logStr )
if sourceSE:
usableSourceSE = self.resourceStatus.isUsableStorage( sourceSE, 'ReadAccess' )
if sourceSE not in lfnReplicas:
errStr = "%s LFN does not exist at supplied source SE." % logStr
self.log.error( errStr, "%s %s" % ( lfn, sourceSE ) )
return S_ERROR( errStr )
elif not usableSourceSE:
infoStr = "%s Supplied source Storage Element is currently unusable for Read." % logStr
self.log.info( infoStr, sourceSE )
return S_ERROR( infoStr )
self.log.info( "%s Replication initialization successful." % logStr )
resDict = {
'DestStorage' : destStorageElement,
'DestSE' : destSE,
'Replicas' : lfnReplicas,
'CatalogueSize' : catalogueSize
}
return S_OK( resDict )
def __resolveBestReplicas( self, lfn, sourceSE, lfnReplicas, catalogueSize ):
""" find best replicas """
###########################################################
# Determine the best replicas (remove banned sources, invalid storage elements and file with the wrong size)
logStr = "__resolveBestReplicas:"
replicaPreference = []
for diracSE, pfn in lfnReplicas.items():
if sourceSE and diracSE != sourceSE:
self.log.info( "%s %s replica not requested." % ( logStr, diracSE ) )
continue
usableDiracSE = self.resourceStatus.isUsableStorage( diracSE, 'ReadAccess' )
if not usableDiracSE:
self.log.info( "%s %s is currently unusable as a source." % ( logStr, diracSE ) )
# elif diracSE in bannedSources:
# self.log.info( "__resolveBestReplicas: %s is currently banned as a source." % diracSE )
else:
self.log.info( "%s %s is available for use." % ( logStr, diracSE ) )
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "%s The storage element is not currently valid." % logStr
self.log.error( errStr, "%s %s" % ( diracSE, res['Message'] ) )
else:
#useCatalogPFN = Operations().getValue( 'DataManagement/UseCatalogPFN', True )
#if not useCatalogPFN:
# pfn = storageElement.getPfnForLfn( lfn ).get( 'Value', pfn )
if storageElement.getRemoteProtocols()['Value']:
self.log.verbose( "%s Attempting to get source pfns for remote protocols." % logStr )
res = returnSingleResult( storageElement.getPfnForProtocol( pfn, self.thirdPartyProtocols ) )
if res['OK']:
sourcePfn = res['Value']
self.log.verbose( "%s Attempting to get source file size." % logStr )
res = storageElement.getFileSize( sourcePfn )
if res['OK']:
if sourcePfn in res['Value']['Successful']:
sourceFileSize = res['Value']['Successful'][sourcePfn]
self.log.info( "%s Source file size determined to be %s." % ( logStr, sourceFileSize ) )
if catalogueSize == sourceFileSize:
fileTuple = ( diracSE, sourcePfn )
replicaPreference.append( fileTuple )
else:
errStr = "%s Catalogue size and physical file size mismatch." % logStr
self.log.error( errStr, "%s %s" % ( diracSE, sourcePfn ) )
else:
errStr = "%s Failed to get physical file size." % logStr
self.log.error( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Value']['Failed'][sourcePfn] ) )
else:
errStr = "%s Completely failed to get physical file size." % logStr
self.log.error( errStr, "%s %s: %s" % ( sourcePfn, diracSE, res['Message'] ) )
else:
errStr = "%s Failed to get PFN for replication for StorageElement." % logStr
self.log.error( errStr, "%s %s" % ( diracSE, res['Message'] ) )
else:
errStr = "%s Source Storage Element has no remote protocols." % logStr
self.log.info( errStr, diracSE )
if not replicaPreference:
errStr = "%s Failed to find any valid source Storage Elements." % logStr
self.log.error( errStr )
return S_ERROR( errStr )
else:
return S_OK( replicaPreference )
###################################################################
#
# These are the file catalog write methods
#
def registerFile( self, fileTuple, catalog = '' ):
""" Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
if type( fileTuple ) == ListType:
fileTuples = fileTuple
elif type( fileTuple ) == TupleType:
fileTuples = [fileTuple]
else:
      errStr = "registerFile: Supplied file info must be tuple or list of tuples."
self.log.error( errStr )
return S_ERROR( errStr )
self.log.verbose( "registerFile: Attempting to register %s files." % len( fileTuples ) )
res = self.__registerFile( fileTuples, catalog )
if not res['OK']:
errStr = "registerFile: Completely failed to register files."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
return res
  def __registerFile( self, fileTuples, catalog ):
    """ register file to catalogue """
seDict = {}
fileDict = {}
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
if storageElementName:
seDict.setdefault( storageElementName, [] ).append( ( lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum ) )
else:
# If no SE name, this could be just registration in a dummy catalog like LHCb bookkeeping
fileDict[lfn] = {'PFN':'', 'Size':fileSize, 'SE':storageElementName, 'GUID':fileGuid, 'Checksum':checksum}
failed = {}
for storageElementName, fileTuple in seDict.items():
destStorageElement = StorageElement( storageElementName )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "__registerFile: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuple:
failed[lfn] = errStr
else:
storageElementName = destStorageElement.getStorageElementName()['Value']
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuple:
res = returnSingleResult( destStorageElement.getPfnForProtocol( physicalFile, self.registrationProtocol, withPort = False ) )
if not res['OK']:
pfn = physicalFile
else:
pfn = res['Value']
# tuple = ( lfn, pfn, fileSize, storageElementName, fileGuid, checksum )
fileDict[lfn] = {'PFN':pfn, 'Size':fileSize, 'SE':storageElementName, 'GUID':fileGuid, 'Checksum':checksum}
self.log.verbose( "__registerFile: Resolved %s files for registration." % len( fileDict ) )
if catalog:
fileCatalog = FileCatalog( catalog )
if not fileCatalog.isOK():
return S_ERROR( "Can't get FileCatalog %s" % catalog )
res = fileCatalog.addFile( fileDict )
else:
res = self.fileCatalogue.addFile( fileDict )
if not res['OK']:
errStr = "__registerFile: Completely failed to register files."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def registerReplica( self, replicaTuple, catalog = '' ):
""" Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [ replicaTuple ]
else:
      errStr = "registerReplica: Supplied file info must be tuple or list of tuples."
self.log.error( errStr )
return S_ERROR( errStr )
self.log.verbose( "registerReplica: Attempting to register %s replicas." % len( replicaTuples ) )
res = self.__registerReplica( replicaTuples, catalog )
if not res['OK']:
errStr = "registerReplica: Completely failed to register replicas."
self.log.error( errStr, res['Message'] )
return res
def __registerReplica( self, replicaTuples, catalog ):
""" register replica to catalogue """
seDict = {}
for lfn, pfn, storageElementName in replicaTuples:
seDict.setdefault( storageElementName, [] ).append( ( lfn, pfn ) )
failed = {}
replicaTuples = []
for storageElementName, replicaTuple in seDict.items():
destStorageElement = StorageElement( storageElementName )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "__registerReplica: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
for lfn, pfn in replicaTuple:
failed[lfn] = errStr
else:
storageElementName = destStorageElement.getStorageElementName()['Value']
for lfn, pfn in replicaTuple:
res = returnSingleResult( destStorageElement.getPfnForProtocol( pfn, self.registrationProtocol, withPort = False ) )
if not res['OK']:
failed[lfn] = res['Message']
else:
replicaTuple = ( lfn, res['Value'], storageElementName, False )
replicaTuples.append( replicaTuple )
self.log.verbose( "__registerReplica: Successfully resolved %s replicas for registration." % len( replicaTuples ) )
# HACK!
replicaDict = {}
for lfn, pfn, se, _master in replicaTuples:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
if catalog:
fileCatalog = FileCatalog( catalog )
res = fileCatalog.addReplica( replicaDict )
else:
res = self.fileCatalogue.addReplica( replicaDict )
if not res['OK']:
errStr = "__registerReplica: Completely failed to register replicas."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
def removeFile( self, lfn, force = None ):
""" Remove the file (all replicas) from Storage Elements and file catalogue
'lfn' is the file to be removed
"""
if force is None:
force = self.ignoreMissingInFC
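    # When 'force' is set (default comes from self.ignoreMissingInFC), LFNs
    # missing from the file catalogue are reported as successfully removed
    # instead of failing.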
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeFile: Supplied lfns must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
# First check if the file exists in the FC
res = self.fileCatalogue.exists( lfns )
if not res['OK']:
return res
success = res['Value']['Successful']
lfns = [lfn for lfn in success if success[lfn] ]
if force:
# Files that don't exist are removed successfully
successful = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], True )
failed = {}
else:
successful = {}
failed = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], 'No such file or directory' )
# Check that we have write permissions to this directory.
if lfns:
res = self.__verifyOperationPermission( lfns )
if not res['OK']:
return res
if not res['Value']:
errStr = "removeFile: Write access not permitted for this credential."
self.log.error( errStr, lfns )
return S_ERROR( errStr )
self.log.verbose( "removeFile: Attempting to remove %s files from Storage and Catalogue. Get replicas first" % len( lfns ) )
res = self.fileCatalogue.getReplicas( lfns, True )
if not res['OK']:
errStr = "ReplicaManager.removeFile: Completely failed to get replicas for lfns."
self.log.error( errStr, res['Message'] )
return res
lfnDict = res['Value']['Successful']
for lfn, reason in res['Value'].get( 'Failed', {} ).items():
# Ignore files missing in FC if force is set
if reason == 'No such file or directory' and force:
successful[lfn] = True
elif reason == 'File has zero replicas':
lfnDict[lfn] = {}
else:
failed[lfn] = reason
res = self.__removeFile( lfnDict )
if not res['OK']:
errStr = "removeFile: Completely failed to remove files."
self.log.error( errStr, res['Message'] )
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
gDataStoreClient.commit()
return S_OK( resDict )
def __removeFile( self, lfnDict ):
""" remove file """
storageElementDict = {}
# # sorted and reversed
for lfn, repDict in sorted( lfnDict.items(), reverse = True ):
for se, pfn in repDict.items():
storageElementDict.setdefault( se, [] ).append( ( lfn, pfn ) )
failed = {}
successful = {}
for storageElementName in sorted( storageElementDict ):
fileTuple = storageElementDict[storageElementName]
res = self.__removeReplica( storageElementName, fileTuple )
if not res['OK']:
errStr = res['Message']
for lfn, pfn in fileTuple:
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
else:
for lfn, errStr in res['Value']['Failed'].items():
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
completelyRemovedFiles = []
for lfn in [lfn for lfn in lfnDict if lfn not in failed]:
completelyRemovedFiles.append( lfn )
if completelyRemovedFiles:
res = self.fileCatalogue.removeFile( completelyRemovedFiles )
if not res['OK']:
for lfn in completelyRemovedFiles:
failed[lfn] = "Failed to remove file from the catalog: %s" % res['Message']
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplica( self, storageElementName, lfn ):
""" Remove replica at the supplied Storage Element from Storage Element then file catalogue
'storageElementName' is the storage where the file is to be removed
'lfn' is the file to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplica: Supplied lfns must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
# Check that we have write permissions to this directory.
res = self.__verifyOperationPermission( lfns )
if not res['OK']:
return res
if not res['Value']:
      errStr = "removeReplica: Write access not permitted for this credential."
self.log.error( errStr, lfns )
return S_ERROR( errStr )
self.log.verbose( "removeReplica: Will remove catalogue entry for %s lfns at %s." % ( len( lfns ),
storageElementName ) )
res = self.fileCatalogue.getReplicas( lfns, True )
if not res['OK']:
errStr = "removeReplica: Completely failed to get replicas for lfns."
self.log.error( errStr, res['Message'] )
return res
failed = res['Value']['Failed']
successful = {}
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
elif len( repDict ) == 1:
# The file has only a single replica so don't remove
self.log.error( "The replica you are trying to remove is the only one.", "%s @ %s" % ( lfn,
storageElementName ) )
failed[lfn] = "Failed to remove sole replica"
else:
replicaTuples.append( ( lfn, repDict[storageElementName] ) )
res = self.__removeReplica( storageElementName, replicaTuples )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
gDataStoreClient.commit()
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def __removeReplica( self, storageElementName, fileTuple ):
""" remove replica """
lfnDict = {}
failed = {}
for lfn, pfn in fileTuple:
res = self.__verifyOperationPermission( lfn )
if not res['OK'] or not res['Value']:
errStr = "__removeReplica: Write access not permitted for this credential."
self.log.error( errStr, lfn )
failed[lfn] = errStr
else:
        # This is the PFN as in the FC
lfnDict[lfn] = pfn
# Now we should use the constructed PFNs if needed, for the physical removal
# Reverse lfnDict into pfnDict with required PFN
if self.useCatalogPFN:
pfnDict = dict( zip( lfnDict.values(), lfnDict.keys() ) )
else:
pfnDict = dict( [ ( self.getPfnForLfn( lfn, storageElementName )['Value'].get( 'Successful', {} ).get( lfn, lfnDict[lfn] ), lfn ) for lfn in lfnDict] )
# removePhysicalReplicas is called with real PFN list
res = self.__removePhysicalReplica( storageElementName, pfnDict.keys() )
if not res['OK']:
errStr = "__removeReplica: Failed to remove catalog replicas."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
failed.update( dict( [( pfnDict[pfn], error ) for pfn, error in res['Value']['Failed'].items()] ) )
# Here we use the FC PFN...
    replicaTuples = [( pfnDict[pfn], lfnDict[pfnDict[pfn]], storageElementName ) for pfn in res['Value']['Successful']]
res = self.__removeCatalogReplica( replicaTuples )
if not res['OK']:
errStr = "__removeReplica: Completely failed to remove physical files."
self.log.error( errStr, res['Message'] )
failed.update( dict.fromkeys( [lfn for lfn, _pfn, _se in replicaTuples if lfn not in failed], res['Message'] ) )
successful = {}
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplicaFromCatalog( self, storageElementName, lfn ):
""" remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
# Remove replica from the file catalog 'lfn' are the file
# to be removed 'storageElementName' is the storage where the file is to be removed
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplicaFromCatalog: Supplied lfns must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
self.log.verbose( "removeReplicaFromCatalog: Will remove catalogue entry for %s lfns at %s." % \
( len( lfns ), storageElementName ) )
res = self.getCatalogReplicas( lfns, allStatus = True )
if not res['OK']:
errStr = "removeReplicaFromCatalog: Completely failed to get replicas for lfns."
self.log.error( errStr, res['Message'] )
return res
failed = {}
successful = {}
for lfn, reason in res['Value']['Failed'].items():
if reason in ( 'No such file or directory', 'File has zero replicas' ):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
replicaTuples.append( ( lfn, repDict[storageElementName], storageElementName ) )
self.log.verbose( "removeReplicaFromCatalog: Resolved %s pfns for catalog removal at %s." % ( len( replicaTuples ),
storageElementName ) )
res = self.__removeCatalogReplica( replicaTuples )
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeCatalogPhysicalFileNames( self, replicaTuple ):
""" Remove replicas from the file catalog specified by replica tuple
'replicaTuple' is a tuple containing the replica to be removed and is of the form ( lfn, pfn, se )
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [replicaTuple]
else:
errStr = "removeCatalogPhysicalFileNames: Supplied info must be tuple or list of tuples."
self.log.error( errStr )
return S_ERROR( errStr )
return self.__removeCatalogReplica( replicaTuples )
  def __removeCatalogReplica( self, replicaTuple ):
    """ remove replica from catalogue """
oDataOperation = self.__initialiseAccountingObject( 'removeCatalogReplica', '', len( replicaTuple ) )
oDataOperation.setStartTime()
start = time.time()
# HACK!
replicaDict = {}
for lfn, pfn, se in replicaTuple:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
res = self.fileCatalogue.removeReplica( replicaDict )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'RegistrationTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'RegistrationOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removeCatalogReplica: Completely failed to remove replica."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
success = res['Value']['Successful']
if success:
self.log.info( "__removeCatalogReplica: Removed %d replicas" % len( success ) )
for lfn in success:
self.log.debug( "__removeCatalogReplica: Successfully removed replica.", lfn )
for lfn, error in res['Value']['Failed'].items():
self.log.error( "__removeCatalogReplica: Failed to remove replica.", "%s %s" % ( lfn, error ) )
oDataOperation.setValueByKey( 'RegistrationOK', len( success ) )
gDataStoreClient.addRegister( oDataOperation )
return res
def removePhysicalReplica( self, storageElementName, lfn ):
""" Remove replica from Storage Element.
'lfn' are the files to be removed
'storageElementName' is the storage where the file is to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removePhysicalReplica: Supplied lfns must be string or list of strings."
self.log.error( errStr )
return S_ERROR( errStr )
# Check that we have write permissions to this directory.
res = self.__verifyOperationPermission( lfns )
if not res['OK']:
return res
if not res['Value']:
errStr = "removePhysicalReplica: Write access not permitted for this credential."
self.log.error( errStr, lfns )
return S_ERROR( errStr )
self.log.verbose( "removePhysicalReplica: Attempting to remove %s lfns at %s." % ( len( lfns ),
storageElementName ) )
self.log.verbose( "removePhysicalReplica: Attempting to resolve replicas." )
res = self.getReplicas( lfns )
if not res['OK']:
errStr = "removePhysicalReplica: Completely failed to get replicas for lfns."
self.log.error( errStr, res['Message'] )
return res
failed = res['Value']['Failed']
successful = {}
pfnDict = {}
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
sePfn = repDict[storageElementName]
pfnDict[sePfn] = lfn
self.log.verbose( "removePhysicalReplica: Resolved %s pfns for removal at %s." % ( len( pfnDict ),
storageElementName ) )
res = self.__removePhysicalReplica( storageElementName, pfnDict.keys() )
for pfn, error in res['Value']['Failed'].items():
failed[pfnDict[pfn]] = error
for pfn in res['Value']['Successful']:
successful[pfnDict[pfn]] = True
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
def __removePhysicalReplica( self, storageElementName, pfnsToRemove ):
""" remove replica from storage element """
self.log.verbose( "__removePhysicalReplica: Attempting to remove %s pfns at %s." % ( len( pfnsToRemove ),
storageElementName ) )
storageElement = StorageElement( storageElementName )
res = storageElement.isValid()
if not res['OK']:
errStr = "__removePhysicalReplica: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
return S_ERROR( errStr )
oDataOperation = self.__initialiseAccountingObject( 'removePhysicalReplica',
storageElementName,
len( pfnsToRemove ) )
oDataOperation.setStartTime()
start = time.time()
res = storageElement.removeFile( pfnsToRemove )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removePhysicalReplica: Failed to remove replicas."
self.log.error( errStr, res['Message'] )
return S_ERROR( errStr )
else:
for surl, value in res['Value']['Failed'].items():
if 'No such file or directory' in value:
res['Value']['Successful'][surl] = surl
res['Value']['Failed'].pop( surl )
for surl in res['Value']['Successful']:
ret = returnSingleResult( storageElement.getPfnForProtocol( surl, self.registrationProtocol, withPort = False ) )
if not ret['OK']:
res['Value']['Successful'][surl] = surl
else:
res['Value']['Successful'][surl] = ret['Value']
oDataOperation.setValueByKey( 'TransferOK', len( res['Value']['Successful'] ) )
gDataStoreClient.addRegister( oDataOperation )
infoStr = "__removePhysicalReplica: Successfully issued accounting removal request."
self.log.verbose( infoStr )
return res
#########################################################################
#
# File transfer methods
#
def put( self, lfn, fileName, diracSE, path = None ):
""" Put a local file to a Storage Element
:param self: self reference
:param str lfn: LFN
:param str fileName: the full path to the local file
:param str diracSE: the Storage Element to which to put the file
:param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
"""
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "put: Supplied file does not exist."
self.log.error( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "put: Supplied file is zero size."
self.log.error( errStr, fileName )
return S_ERROR( errStr )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE )
res = storageElement.isValid()
if not res['OK']:
errStr = "put: The storage element is not currently valid."
self.log.error( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
res = storageElement.getPfnForLfn( lfn )
    if not res['OK'] or lfn not in res['Value']['Successful']:
errStr = "put: Failed to generate destination PFN."
self.log.error( errStr, res.get( 'Message', res.get( 'Value', {} ).get( 'Failed', {} ).get( lfn ) ) )
return S_ERROR( errStr )
destPfn = res['Value']['Successful'][lfn]
fileDict = {destPfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = time.time()
res = storageElement.putFile( fileDict, singleFile = True )
putTime = time.time() - startTime
if not res['OK']:
errStr = "put: Failed to put file to Storage Element."
failed[lfn] = res['Message']
self.log.error( errStr, "%s: %s" % ( fileName, res['Message'] ) )
else:
self.log.info( "put: Put file to storage in %s seconds." % putTime )
successful[lfn] = destPfn
resDict = {'Successful': successful, 'Failed':failed}
return S_OK( resDict )
# def removeReplica(self,lfn,storageElementName,singleFile=False):
# def putReplica(self,lfn,storageElementName,singleFile=False):
# def replicateReplica(self,lfn,size,storageElementName,singleFile=False):
def getActiveReplicas( self, lfns ):
""" Get all the replicas for the SEs which are in Active status for reading.
"""
res = self.getReplicas( lfns, allStatus = False )
if not res['OK']:
return res
replicas = res['Value']
return self.checkActiveReplicas( replicas )
def checkActiveReplicas( self, replicaDict ):
""" Check a replica dictionary for active replicas
"""
if type( replicaDict ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict ) )
for key in [ 'Successful', 'Failed' ]:
if not key in replicaDict:
return S_ERROR( 'Missing key "%s" in replica dictionary' % key )
if type( replicaDict[key] ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict[key] ) )
seReadStatus = {}
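    # Drop replicas hosted on SEs that are not readable; LFNs whose replica
    # info is not a dict are moved to 'Failed'.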
for lfn, replicas in replicaDict['Successful'].items():
if type( replicas ) != DictType:
del replicaDict['Successful'][ lfn ]
replicaDict['Failed'][lfn] = 'Wrong replica info'
continue
for se in replicas.keys():
if se not in seReadStatus:
res = self.getSEStatus( se )
if res['OK']:
seReadStatus[se] = res['Value']['Read']
else:
seReadStatus[se] = False
if not seReadStatus[se]:
replicas.pop( se )
return S_OK( replicaDict )
  def getSEStatus( self, se ):
    """ check if SE is active """
result = StorageFactory().getStorageName( se )
if not result['OK']:
return S_ERROR( 'SE not known' )
resolvedName = result['Value']
res = self.resourceStatus.getStorageElementStatus( resolvedName, default = None )
if not res[ 'OK' ]:
return S_ERROR( 'SE not known' )
seStatus = { 'Read' : True, 'Write' : True }
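    # Read/Write stay True while the corresponding access status is 'Active'
    # or 'Degraded'; any other status marks the SE unusable for that operation.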
if res['Value'][se].get( 'ReadAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Read' ] = False
if res['Value'][se].get( 'WriteAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Write' ] = False
return S_OK( seStatus )
  def __initialiseAccountingObject( self, operation, se, files ):
    """ create accounting record """
accountingDict = {}
accountingDict['OperationType'] = operation
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'ReplicaManager'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['Destination'] = se
accountingDict['TransferTotal'] = files
accountingDict['TransferOK'] = files
accountingDict['TransferSize'] = files
accountingDict['TransferTime'] = 0.0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = DIRAC.siteName()
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( accountingDict )
return oDataOperation
##########################################
#
  # Defunct methods kept only for backward compatibility
#
def onlineRetransfer( self, storageElementName, physicalFile ):
""" Requests the online system to re-transfer files
'storageElementName' is the storage element where the file should be removed from
'physicalFile' is the physical files
"""
return self._callStorageElementFcn( storageElementName, physicalFile, 'retransferOnlineFile' )
def getReplicas( self, lfns, allStatus = True ):
""" get replicas from catalogue """
res = self.getCatalogReplicas( lfns, allStatus = allStatus )
if not self.useCatalogPFN:
if res['OK']:
se_lfn = {}
catalogReplicas = res['Value']['Successful']
# We group the query to getPfnForLfn by storage element to gain in speed
for lfn in catalogReplicas:
for se in catalogReplicas[lfn]:
se_lfn.setdefault( se, [] ).append( lfn )
for se in se_lfn:
succPfn = self.getPfnForLfn( se_lfn[se], se ).get( 'Value', {} ).get( 'Successful', {} )
for lfn in succPfn:
# catalogReplicas still points res["value"]["Successful"] so res will be updated
catalogReplicas[lfn][se] = succPfn[lfn]
return res
def getFileSize( self, lfn ):
""" get file size from catalogue """
return self.getCatalogFileSize( lfn )
| gpl-3.0 | -7,696,063,295,908,667,000 | 43.10457 | 157 | 0.646042 | false | 3.947108 | false | false | false |
matrix-org/synapse | tests/storage/test_devices.py | 1 | 5336 | # Copyright 2016-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import synapse.api.errors
from tests.unittest import HomeserverTestCase
class DeviceStoreTestCase(HomeserverTestCase):
def prepare(self, reactor, clock, hs):
self.store = hs.get_datastore()
def test_store_new_device(self):
self.get_success(
self.store.store_device("user_id", "device_id", "display_name")
)
res = self.get_success(self.store.get_device("user_id", "device_id"))
self.assertDictContainsSubset(
{
"user_id": "user_id",
"device_id": "device_id",
"display_name": "display_name",
},
res,
)
def test_get_devices_by_user(self):
self.get_success(
self.store.store_device("user_id", "device1", "display_name 1")
)
self.get_success(
self.store.store_device("user_id", "device2", "display_name 2")
)
self.get_success(
self.store.store_device("user_id2", "device3", "display_name 3")
)
res = self.get_success(self.store.get_devices_by_user("user_id"))
self.assertEqual(2, len(res.keys()))
self.assertDictContainsSubset(
{
"user_id": "user_id",
"device_id": "device1",
"display_name": "display_name 1",
},
res["device1"],
)
self.assertDictContainsSubset(
{
"user_id": "user_id",
"device_id": "device2",
"display_name": "display_name 2",
},
res["device2"],
)
def test_count_devices_by_users(self):
self.get_success(
self.store.store_device("user_id", "device1", "display_name 1")
)
self.get_success(
self.store.store_device("user_id", "device2", "display_name 2")
)
self.get_success(
self.store.store_device("user_id2", "device3", "display_name 3")
)
res = self.get_success(self.store.count_devices_by_users())
self.assertEqual(0, res)
res = self.get_success(self.store.count_devices_by_users(["unknown"]))
self.assertEqual(0, res)
res = self.get_success(self.store.count_devices_by_users(["user_id"]))
self.assertEqual(2, res)
res = self.get_success(
self.store.count_devices_by_users(["user_id", "user_id2"])
)
self.assertEqual(3, res)
def test_get_device_updates_by_remote(self):
device_ids = ["device_id1", "device_id2"]
# Add two device updates with a single stream_id
self.get_success(
self.store.add_device_change_to_streams("user_id", device_ids, ["somehost"])
)
# Get all device updates ever meant for this remote
now_stream_id, device_updates = self.get_success(
self.store.get_device_updates_by_remote("somehost", -1, limit=100)
)
# Check original device_ids are contained within these updates
self._check_devices_in_updates(device_ids, device_updates)
def _check_devices_in_updates(self, expected_device_ids, device_updates):
"""Check that an specific device ids exist in a list of device update EDUs"""
self.assertEqual(len(device_updates), len(expected_device_ids))
received_device_ids = {
update["device_id"] for edu_type, update in device_updates
}
self.assertEqual(received_device_ids, set(expected_device_ids))
def test_update_device(self):
self.get_success(
self.store.store_device("user_id", "device_id", "display_name 1")
)
res = self.get_success(self.store.get_device("user_id", "device_id"))
self.assertEqual("display_name 1", res["display_name"])
# do a no-op first
self.get_success(self.store.update_device("user_id", "device_id"))
res = self.get_success(self.store.get_device("user_id", "device_id"))
self.assertEqual("display_name 1", res["display_name"])
# do the update
self.get_success(
self.store.update_device(
"user_id", "device_id", new_display_name="display_name 2"
)
)
# check it worked
res = self.get_success(self.store.get_device("user_id", "device_id"))
self.assertEqual("display_name 2", res["display_name"])
def test_update_unknown_device(self):
exc = self.get_failure(
self.store.update_device(
"user_id", "unknown_device_id", new_display_name="display_name 2"
),
synapse.api.errors.StoreError,
)
self.assertEqual(404, exc.value.code)
| apache-2.0 | 3,141,021,267,772,333,600 | 34.573333 | 88 | 0.587706 | false | 3.723657 | true | false | false |
JQIamo/artiq | artiq/coredevice/comm_moninj.py | 1 | 2537 | import asyncio
import logging
import struct
from enum import Enum
__all__ = ["TTLProbe", "TTLOverride", "CommMonInj"]
logger = logging.getLogger(__name__)
class TTLProbe(Enum):
level = 0
oe = 1
class TTLOverride(Enum):
en = 0
level = 1
oe = 2
class CommMonInj:
def __init__(self, monitor_cb, injection_status_cb, disconnect_cb=None):
self.monitor_cb = monitor_cb
self.injection_status_cb = injection_status_cb
self.disconnect_cb = disconnect_cb
async def connect(self, host, port=1383):
self._reader, self._writer = await asyncio.open_connection(host, port)
try:
self._writer.write(b"ARTIQ moninj\n")
self._receive_task = asyncio.ensure_future(self._receive_cr())
except:
self._writer.close()
del self._reader
del self._writer
raise
async def close(self):
self.disconnect_cb = None
try:
self._receive_task.cancel()
try:
await asyncio.wait_for(self._receive_task, None)
except asyncio.CancelledError:
pass
finally:
self._writer.close()
del self._reader
del self._writer
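# The request helpers below follow the moninj wire format used throughout
# this module: a one-byte message type followed by big-endian, struct-packed
# arguments (monitor: enable byte, channel int32, probe byte; inject:
# channel int32, override byte, value byte).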
def monitor(self, enable, channel, probe):
packet = struct.pack(">bblb", 0, enable, channel, probe)
self._writer.write(packet)
def inject(self, channel, override, value):
packet = struct.pack(">blbb", 1, channel, override, value)
self._writer.write(packet)
def get_injection_status(self, channel, override):
packet = struct.pack(">blb", 2, channel, override)
self._writer.write(packet)
async def _receive_cr(self):
try:
while True:
ty = await self._reader.read(1)
if not ty:
return
if ty == b"\x00":
payload = await self._reader.read(9)
channel, probe, value = struct.unpack(">lbl", payload)
self.monitor_cb(channel, probe, value)
elif ty == b"\x01":
payload = await self._reader.read(6)
channel, override, value = struct.unpack(">lbb", payload)
self.injection_status_cb(channel, override, value)
else:
raise ValueError("Unknown packet type", ty)
finally:
if self.disconnect_cb is not None:
self.disconnect_cb()
| lgpl-3.0 | 838,825,745,491,158,500 | 29.202381 | 78 | 0.546709 | false | 4.125203 | false | false | false |
hzlf/openbroadcast.ch | app/media_embed/utils.py | 1 | 1679 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from urlparse import urlparse, parse_qs
except ImportError:
from urllib.parse import urlparse, parse_qs
EMBED_SERVICE_PROVIDERS = [
# Video
'Youtube',
'Vimeo',
# Audio
]
def process_provider_url(url, exclude_providers=[]):
provider = None
object_id = None
# youtube
if not 'youtube' in exclude_providers:
if '//youtube.com' in url or '//www.youtube.com' in url or '//youtu.be' in url:
provider = 'youtube'
object_id = get_youtube_id_by_url(url)
# vimeo
if not 'vimeo' in exclude_providers:
if '//vimeo.com' in url:
provider = 'vimeo'
object_id = get_vimeo_id_by_url(url)
return provider, object_id
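# Illustrative example: process_provider_url("https://youtu.be/SA2iWivDJiE")
# returns ("youtube", "SA2iWivDJiE"); a URL from an unrecognised provider
# yields (None, None).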
def get_youtube_id_by_url(url):
"""
examples:
- http://youtu.be/SA2iWivDJiE
- http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
- http://www.youtube.com/embed/SA2iWivDJiE
- http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
"""
query = urlparse(url)
if query.hostname == 'youtu.be':
return query.path[1:]
if query.hostname in ('www.youtube.com', 'youtube.com', 'm.youtube.com'):
if query.path == '/watch':
p = parse_qs(query.query)
return p['v'][0]
if query.path[:7] == '/embed/':
return query.path.split('/')[2]
if query.path[:3] == '/v/':
return query.path.split('/')[2]
return None
def get_vimeo_id_by_url(url):
"""
examples:
- https://vimeo.com/178240219
"""
query = urlparse(url)
return query.path.split('/')[1]
| gpl-3.0 | 296,941,797,632,779,200 | 23.691176 | 87 | 0.57832 | false | 3.144195 | false | false | false |
prats226/python-amazon-product-api-0.2.8 | tests/utils.py | 1 | 2424 |
from lxml import objectify
import re
try: # make it python2.4/2.5 compatible!
from urlparse import urlparse, parse_qs
except ImportError: # pragma: no cover
from urlparse import urlparse
from cgi import parse_qs
def convert_camel_case(operation):
"""
Converts ``CamelCaseOperationName`` into ``python_style_method_name``.
"""
return re.sub('([a-z])([A-Z])', r'\1_\2', operation).lower()
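# For instance, convert_camel_case("ItemSearch") returns "item_search".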
def extract_operations_from_wsdl(path):
"""
Extracts operations from Amazon's WSDL file.
"""
root = objectify.parse(open(path)).getroot()
wsdlns = 'http://schemas.xmlsoap.org/wsdl/'
return set(root.xpath('//ws:operation/@name', namespaces={'ws' : wsdlns}))
#: list of changeable and/or sensitive (thus ignorable) request arguments
IGNORABLE_ARGUMENTS = ('Signature', 'AWSAccessKeyId', 'Timestamp', 'AssociateTag')
def arguments_from_cached_xml(xml):
"""
Extracts request arguments from cached response file. (Almost) any request
sent to the API will be answered with an XML response containing the
arguments originally used in XML elements ::
<OperationRequest>
<Arguments>
<Argument Name="Service" Value="AWSECommerceService"/>
<Argument Name="Signature" Value="XXXXXXXXXXXXXXX"/>
<Argument Name="Operation" Value="BrowseNodeLookup"/>
<Argument Name="BrowseNodeId" Value="927726"/>
<Argument Name="AWSAccessKeyId" Value="XXXXXXXXXXXXXXX"/>
<Argument Name="Timestamp" Value="2010-10-15T22:09:00Z"/>
<Argument Name="Version" Value="2009-10-01"/>
</Arguments>
</OperationRequest>
"""
root = objectify.fromstring(xml).getroottree().getroot()
return dict((arg.get('Name'), arg.get('Value'))
for arg in root.OperationRequest.Arguments.Argument
if arg.get('Name') not in IGNORABLE_ARGUMENTS)
def arguments_from_url(url):
"""
Extracts request arguments from URL.
"""
params = parse_qs(urlparse(url).query)
for key, val in params.items():
# turn everything into unicode
if type(val) == list:
val = map(lambda x: unicode(x, encoding='utf-8'), val)
# reduce lists to single value
if type(val) == list and len(val) == 1:
params[key] = val[0]
if key in IGNORABLE_ARGUMENTS:
del params[key]
return params
| bsd-3-clause | 8,023,360,274,834,195,000 | 35.727273 | 82 | 0.636964 | false | 3.890851 | false | false | false |
imiyoo2010/mitmproxy | doc-src/index.py | 1 | 2510 | import os, sys, datetime
import countershape
from countershape import Page, Directory, PythonModule, markup, model
import countershape.template
sys.path.insert(0, "..")
from libmproxy import filt, version
MITMPROXY_SRC = os.environ.get("MITMPROXY_SRC", os.path.abspath(".."))
ns.VERSION = version.VERSION
if ns.options.website:
ns.idxpath = "doc/index.html"
this.layout = countershape.Layout("_websitelayout.html")
else:
ns.idxpath = "index.html"
this.layout = countershape.Layout("_layout.html")
ns.title = countershape.template.Template(None, "<h1>@!this.title!@</h1>")
this.titlePrefix = "%s - " % version.NAMEVERSION
this.markup = markup.Markdown(extras=["footnotes"])
ns.docMaintainer = "Aldo Cortesi"
ns.docMaintainerEmail = "[email protected]"
ns.copyright = u"\u00a9 mitmproxy project, %s" % datetime.date.today().year
def mpath(p):
p = os.path.join(MITMPROXY_SRC, p)
return os.path.expanduser(p)
with open(mpath("README.mkd")) as f:
readme = f.read()
ns.index_contents = readme.split("\n", 1)[1] #remove first line (contains build status)
def example(s):
d = file(mpath(s)).read().rstrip()
extemp = """<div class="example">%s<div class="example_legend">(%s)</div></div>"""
return extemp%(countershape.template.Syntax("py")(d), s)
ns.example = example
filt_help = []
for i in filt.filt_unary:
filt_help.append(
("~%s"%i.code, i.help)
)
for i in filt.filt_rex:
filt_help.append(
("~%s regex"%i.code, i.help)
)
for i in filt.filt_int:
filt_help.append(
("~%s int"%i.code, i.help)
)
filt_help.sort()
filt_help.extend(
[
("!", "unary not"),
("&", "and"),
("|", "or"),
("(...)", "grouping"),
]
)
ns.filt_help = filt_help
def nav(page, current, state):
if current.match(page, False):
pre = '<li class="active">'
else:
pre = "<li>"
p = state.application.getPage(page)
return pre + '<a href="%s">%s</a></li>'%(model.UrlTo(page), p.title)
ns.nav = nav
ns.navbar = countershape.template.File(None, "_nav.html")
pages = [
Page("index.html", "Introduction"),
Page("install.html", "Installation"),
Page("mitmproxy.html", "mitmproxy"),
Page("mitmdump.html", "mitmdump"),
Page("howmitmproxy.html", "How mitmproxy works"),
Page("ssl.html", "Overview"),
Directory("certinstall"),
Directory("scripting"),
Directory("tutorials"),
Page("transparent.html", "Overview"),
Directory("transparent"),
]
| mit | -5,763,400,009,474,492,000 | 27.202247 | 95 | 0.627888 | false | 3.042424 | false | false | false |
e7dal/hexy | setup.py | 1 | 1766 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import imp
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('readme.txt').read()
history = open('history.txt').read().replace('.. :changelog:', '')
curr_path = os.path.dirname(os.path.realpath(__file__))
deps = os.path.join(curr_path, 'requirements.in')
dev_deps = os.path.join(curr_path, 'dev_requirements.in')
requirements = open(deps).read()
test_requirements = open(dev_deps).read()
CODE_DIRECTORY = 'hexy'
metadata = imp.load_source(
'metadata', os.path.join(CODE_DIRECTORY, 'metadata.py'))
# OrderedDict needed for structlog
sys_version_str='.'.join((str(s) for s in sys.version_info[0:3]))
setup(
name=metadata.package,
version=metadata.version,
author=metadata.authors[0],
author_email=metadata.emails[0],
maintainer=metadata.authors[0],
maintainer_email=metadata.emails[0],
url=metadata.url,
description=metadata.description,
long_description=readme + '\n\n' + history,
packages=[
'hexy',
'hexy.util',
'hexy.commands'
],
package_dir={'hexy':
'hexy'},
py_modules=['hexy'],
include_package_data=True,
install_requires=requirements,
license="GPL-3.0",
zip_safe=False,
keywords='hexy, ascii,hexagonal,drawing,toolkit,widgets',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: POSIX :: Linux',
'Environment :: Console',
'Programming Language :: Python :: 3.5',
],
test_suite='tests',
tests_require=test_requirements,
entry_points='''
[console_scripts]
hexy = hexy.cli:cli
'''
)
| gpl-3.0 | -8,694,179,322,663,520,000 | 25.757576 | 66 | 0.637599 | false | 3.510934 | false | false | false |
evernote/pootle | pootle/core/views.py | 1 | 13622 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013 Zuza Software Foundation
# Copyright 2013-2014 Evernote Corporation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import json
import operator
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db.models import ObjectDoesNotExist, ProtectedError, Q
from django.forms.models import modelform_factory
from django.http import Http404, HttpResponse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.views.defaults import (permission_denied as django_403,
page_not_found as django_404,
server_error as django_500)
from django.views.generic import View
from pootle_misc.util import PootleJSONEncoder, ajax_required, jsonify
class SuperuserRequiredMixin(object):
"""Require users to have the `is_superuser` bit set."""
def dispatch(self, request, *args, **kwargs):
if not request.user.is_superuser:
msg = _('You do not have rights to administer Pootle.')
raise PermissionDenied(msg)
return super(SuperuserRequiredMixin, self) \
.dispatch(request, *args, **kwargs)
class LoginRequiredMixin(object):
"""Require a logged-in user."""
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
class TestUserFieldMixin(LoginRequiredMixin):
"""Require a field from the URL pattern to match a field of the
current user.
The URL pattern field compared against the current user can be
customized by setting the `test_user_field` attribute.
Note that superusers are always allowed through.
"""
test_user_field = 'username'
def dispatch(self, *args, **kwargs):
user = self.request.user
url_field_value = kwargs[self.test_user_field]
field_value = getattr(user, self.test_user_field, '')
can_access = user.is_superuser or str(field_value) == url_field_value
if not can_access:
raise PermissionDenied(_('You cannot access this page.'))
return super(TestUserFieldMixin, self).dispatch(*args, **kwargs)
class NoDefaultUserMixin(object):
"""Removes the `default` special user from views."""
def dispatch(self, request, *args, **kwargs):
username = kwargs.get('username', None)
if username is not None and username == 'default':
raise Http404
return super(NoDefaultUserMixin, self) \
.dispatch(request, *args, **kwargs)
class AjaxResponseMixin(object):
"""Mixin to add AJAX support to a form.
This needs to be used with a `FormView`.
"""
@method_decorator(ajax_required)
def dispatch(self, *args, **kwargs):
return super(AjaxResponseMixin, self).dispatch(*args, **kwargs)
def render_to_json_response(self, context, **response_kwargs):
data = jsonify(context)
response_kwargs['content_type'] = 'application/json'
return HttpResponse(data, **response_kwargs)
def form_invalid(self, form):
response = super(AjaxResponseMixin, self).form_invalid(form)
return self.render_to_json_response(form.errors, status=400)
def form_valid(self, form):
response = super(AjaxResponseMixin, self).form_valid(form)
return self.render_to_json_response({})
class APIView(View):
"""View to implement internal RESTful APIs.
Based on djangbone https://github.com/af/djangbone
"""
# Model on which this view operates. Setting this is required
model = None
# Base queryset for accessing data. If `None`, model's default manager
# will be used
base_queryset = None
# Set this to restrict the view to a subset of the available methods
restrict_to_methods = None
# Field names to be included
fields = ()
# Individual forms to use for each method. By default it'll
# auto-populate model forms built using `self.model` and `self.fields`
add_form_class = None
edit_form_class = None
# Tuple of sensitive field names that will be excluded from any
# serialized responses
sensitive_field_names = ('password', 'pw')
# Set to an integer to enable GET pagination
page_size = None
# HTTP GET parameter to use for accessing pages
page_param_name = 'p'
# HTTP GET parameter to use for search queries
search_param_name = 'q'
# Field names in which searching will be allowed
search_fields = None
# Override these if you have custom JSON encoding/decoding needs
json_encoder = PootleJSONEncoder()
json_decoder = json.JSONDecoder()
@property
def allowed_methods(self):
methods = [m for m in self.http_method_names if hasattr(self, m)]
if self.restrict_to_methods is not None:
restricted_to = map(lambda x: x.lower(), self.restrict_to_methods)
methods = filter(lambda x: x in restricted_to, methods)
return methods
def __init__(self, *args, **kwargs):
if self.model is None:
raise ValueError('No model class specified.')
self.pk_field_name = self.model._meta.pk.name
if self.base_queryset is None:
self.base_queryset = self.model._default_manager
self._init_fields()
self._init_forms()
return super(APIView, self).__init__(*args, **kwargs)
def _init_fields(self):
if len(self.fields) < 1:
form = self.add_form_class or self.edit_form_class
if form is not None:
self.fields = form._meta.fields
else: # Assume all fields by default
self.fields = (f.name for f in self.model._meta.fields)
self.serialize_fields = (f for f in self.fields if
f not in self.sensitive_field_names)
def _init_forms(self):
if 'post' in self.allowed_methods and self.add_form_class is None:
self.add_form_class = modelform_factory(self.model,
fields=self.fields)
if 'put' in self.allowed_methods and self.edit_form_class is None:
self.edit_form_class = modelform_factory(self.model,
fields=self.fields)
def dispatch(self, request, *args, **kwargs):
if request.method.lower() in self.allowed_methods:
handler = getattr(self, request.method.lower(),
self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
return handler(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
"""GET handler."""
if kwargs.get(self.pk_field_name, None) is not None:
return self.get_single_item(request, *args, **kwargs)
return self.get_collection(request, *args, **kwargs)
def get_single_item(self, request, *args, **kwargs):
"""Returns a single model instance."""
try:
qs = self.base_queryset.filter(pk=kwargs[self.pk_field_name])
assert len(qs) == 1
except AssertionError:
raise Http404
return self.json_response(self.serialize_qs(qs))
def get_collection(self, request, *args, **kwargs):
"""Retrieve a full collection."""
return self.json_response(self.serialize_qs(self.base_queryset))
def post(self, request, *args, **kwargs):
"""Creates a new model instance.
The form to be used can be customized by setting
`self.add_form_class`. By default a model form will be used with
the fields from `self.fields`.
"""
try:
request_dict = self.json_decoder.decode(request.body)
except ValueError:
return self.status_msg('Invalid JSON data', status=400)
form = self.add_form_class(request_dict)
if form.is_valid():
new_object = form.save()
# Serialize the new object to json using our built-in methods.
# The extra DB read here is not ideal, but it keeps the code
# DRY:
wrapper_qs = self.base_queryset.filter(pk=new_object.pk)
return self.json_response(
self.serialize_qs(wrapper_qs, single_object=True)
)
return self.form_invalid(form)
def put(self, request, *args, **kwargs):
"""Update the current model."""
if self.pk_field_name not in kwargs:
return self.status_msg('PUT is not supported for collections',
status=405)
try:
request_dict = self.json_decoder.decode(request.body)
instance = self.base_queryset.get(pk=kwargs[self.pk_field_name])
except ValueError:
return self.status_msg('Invalid JSON data', status=400)
except ObjectDoesNotExist:
raise Http404
form = self.edit_form_class(request_dict, instance=instance)
if form.is_valid():
item = form.save()
wrapper_qs = self.base_queryset.filter(id=item.id)
return self.json_response(
self.serialize_qs(wrapper_qs, single_object=True)
)
return self.form_invalid(form)
def delete(self, request, *args, **kwargs):
"""Delete the model and return its JSON representation."""
if self.pk_field_name not in kwargs:
return self.status_msg('DELETE is not supported for collections',
status=405)
qs = self.base_queryset.filter(id=kwargs[self.pk_field_name])
if qs:
output = self.serialize_qs(qs)
obj = qs[0]
try:
obj.delete()
return self.json_response(output)
except ProtectedError as e:
return self.status_msg(e[0], status=405)
raise Http404
def serialize_qs(self, queryset, single_object=False):
"""Serialize a queryset into a JSON object.
:param single_object: if `True` (or the URL specified an id), it
will return a single JSON object.
If `False`, a JSON object is returned with an array of objects
in `models` and the total object count in `count`.
"""
if single_object or self.kwargs.get(self.pk_field_name):
values = queryset.values(*self.serialize_fields)
# For single-item requests, convert ValuesQueryset to a dict simply
# by slicing the first item
serialize_values = values[0]
else:
search_keyword = self.request.GET.get(self.search_param_name, None)
if search_keyword is not None:
filter_by = self.get_search_filter(search_keyword)
queryset = queryset.filter(filter_by)
values = queryset.values(*self.serialize_fields)
# Process pagination options if they are enabled
if isinstance(self.page_size, int):
try:
page_param = self.request.GET.get(self.page_param_name, 1)
page_number = int(page_param)
offset = (page_number - 1) * self.page_size
except ValueError:
offset = 0
values = values[offset:offset+self.page_size]
serialize_values = {
'models': list(values),
'count': queryset.count(),
}
return self.json_encoder.encode(serialize_values)
def get_search_filter(self, keyword):
search_fields = getattr(self, 'search_fields', None)
if search_fields is None:
search_fields = self.fields # Assume all fields
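# With search_fields ('code', 'fullname') and keyword "fr" (made-up values),
# the filter built below is
# Q(code__icontains='fr') | Q(fullname__icontains='fr').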
field_queries = list(
zip(map(lambda x: '%s__icontains' % x, search_fields),
(keyword,)*len(search_fields))
)
lookups = [Q(x) for x in field_queries]
return reduce(operator.or_, lookups)
def status_msg(self, msg, status=400):
data = self.json_encoder.encode({'msg': msg})
return self.json_response(data, status=status)
def form_invalid(self, form):
data = self.json_encoder.encode({'errors': form.errors})
return self.json_response(data, status=400)
def json_response(self, output, **response_kwargs):
response_kwargs['content_type'] = 'application/json'
return HttpResponse(output, **response_kwargs)
def permission_denied(request):
return django_403(request, template_name='errors/403.html')
def page_not_found(request):
return django_404(request, template_name='errors/404.html')
def server_error(request):
return django_500(request, template_name='errors/500.html')
| gpl-2.0 | -2,622,846,277,347,944,000 | 35.132626 | 79 | 0.624284 | false | 4.161931 | false | false | false |
simonbr73/nyc-subway-finder | interfaces.py | 1 | 4396 | import sys
class TextInterface:
"""Text-based interface that contains methods for getting input from the user and displaying search
results.
"""
def __init__(self, trains_list):
"""Initializes the TextInterface object with a list of all train objects in the simulation.
"""
self.trains = trains_list
def getUserInput(self):
"""This method prompts the user to specify a direction of travel and a station at which
they want to find trains. If a user types a direction other than n or s, the program will
ask them to provide a valid input before continuing.
"""
print "\nWelcome! This program lets you search for New York City subway trains running on the 1, 2, 3, 4, 5, 6, or S lines."
print "Note that for S trains, northbound is eastbound in real life and southbound is westbound in real life."
print "\nFirst, choose a direction - northbound or southbound. Type n for northbound or s for southbound."
# valid_input will remain False until either n or s is typed
valid_input = False
while valid_input == False:
direction = raw_input()
if direction == "n":
valid_input = True
direction = 'northbound'
elif direction == 's':
valid_input = True
direction = 'southbound'
# If you really don't like our program, you can quit by typing q
elif direction == 'q':
sys.exit()
else:
print "We didn't understand that. Please try again."
print "\nNow, search for the station you want trains from."
station = raw_input()
return direction, station
def showApproachingTrains(self, station, list_of_trains):
"""Takes 2 arguments, the station at which the user is looking for trains and a list of
trains currently approaching that station, where each item in the list is formatted
[train_index, stop_number, arrival_time]. If the list is empty, it informs the user that
no trains are near the station. Otherwise, it looks up information about each train in
the list and displays it to the user.
"""
print "..."
if len(list_of_trains) == 0:
print "Sorry, there aren't any trains currently approaching", station
else:
print "Here is a list of trains arriving at or departing from", station, "in the next 30 minutes:\n"
for train_list in list_of_trains:
train_number = train_list[0] # Used to look up train object in the master list of trains
stop_number = train_list[1]
if int(self.trains[train_number].getArrivalTime(stop_number)) <= 30:
self.trains[train_number].showInfo(stop_number)
print ""
def showStationSearchResults(self, results_list):
"""Takes 1 argument, a list of possible station results. If there is only one possible
result, this function will never be called, so it only has to handle list of length 0 or >1.
If the length of the list is 0, the program will ask the user whether they want to do
another search or quit. Otherwise, all possible results will be displayed next to a unique
integer, and the user will be asked to type in an integer to choose the station they want.
"""
print "..."
if len(results_list) == 0:
print "Sorry, we couldn't find a station with that name.\n"
self.againOrQuit()
else:
print "We found several stations with that name. Please choose one from the list below."
for i in range(len(results_list)):
print (i+1), ': ', results_list[i]
choice = int(raw_input("Type the number of the station you want: "))
return results_list[choice-1]
def againOrQuit(self):
"""Asks the user whether they want to perform a new search or quit the program.
"""
print "Type n to do a new search or q to exit the program."
choice = raw_input()
if choice == "n":
return True
if choice == "q":
return False
else:
print "We didn't understand that. Please try again."
return self.againOrQuit()
| mit | -7,588,274,881,581,400,000 | 48.954545 | 132 | 0.613285 | false | 4.422535 | false | false | false |