| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
| stringlengths 5-100 | stringlengths 4-299 | stringlengths 1-5 | stringlengths 4-7 | stringlengths 475-1M | stringclasses 15 values | int64 -9,223,351,895,964,839,000 to 9,223,293,591B | float64 3.17-100 | int64 7-1k | float64 0.25-0.98 | bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|
capitalk/treelearn | treelearn/regression_ensemble.py | 1 | 1162 | import numpy as np
from sklearn.linear_model import LinearRegression
from base_ensemble import BaseEnsemble
class RegressionEnsemble(BaseEnsemble):
def __init__(self,
base_model=LinearRegression(),
num_models = 50,
bagging_percent=0.5,
bagging_replacement=True,
feature_subset_percent = 1.0,
stacking_model=None,
randomize_params = {},
additive = False,
verbose=False):
BaseEnsemble.__init__(self,
base_model,
num_models,
bagging_percent,
bagging_replacement,
feature_subset_percent,
stacking_model,
randomize_params,
additive,
verbose)
def predict(self, X):
pred = self.transform(X)
if self.stacking_model:
return self.stacking_model.predict(pred)
else:
return np.dot(pred, self.weights)
def _init_fit(self, X, Y):
pass
def _created_model(self, X, Y, indices, i, model):
pass
| lgpl-3.0 | 771,531,069,544,880,400 | 27.341463 | 54 | 0.515491 | false |
pangeo-data/rechunker | rechunker/executors/prefect.py | 1 | 1850 | import prefect
from rechunker.types import ParallelPipelines, PipelineExecutor
class PrefectPipelineExecutor(PipelineExecutor[prefect.Flow]):
"""An execution engine based on Prefect.
Supports copying between any arrays that implement ``__getitem__`` and
``__setitem__`` for tuples of ``slice`` objects. Array must also be
serializable by Prefect (i.e., with pickle).
Execution plans for PrefectExecutor are prefect.Flow objects.
"""
def pipelines_to_plan(self, pipelines: ParallelPipelines) -> prefect.Flow:
return _make_flow(pipelines)
def execute_plan(self, plan: prefect.Flow, **kwargs):
state = plan.run(**kwargs)
return state
class MappedTaskWrapper(prefect.Task):
def __init__(self, stage, **kwargs):
self.stage = stage
super().__init__(**kwargs)
def run(self, key):
return self.stage.func(key)
class SingleTaskWrapper(prefect.Task):
def __init__(self, stage, **kwargs):
self.stage = stage
super().__init__(**kwargs)
def run(self):
return self.stage.func()
def _make_flow(pipelines: ParallelPipelines) -> prefect.Flow:
with prefect.Flow("Rechunker") as flow:
# iterate over different arrays in the group
for pipeline in pipelines:
stage_tasks = []
# iterate over the different stages of the array copying
for stage in pipeline:
if stage.map_args is None:
stage_task = SingleTaskWrapper(stage)
else:
stage_task = MappedTaskWrapper(stage).map(stage.map_args)
stage_tasks.append(stage_task)
# create dependence between stages
for n in range(len(stage_tasks) - 1):
stage_tasks[n + 1].set_upstream(stage_tasks[n])
return flow
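
# Illustrative usage sketch (not part of rechunker): `pipelines` is assumed to be
# a ParallelPipelines value built elsewhere by rechunker's planning code.
#
#     executor = PrefectPipelineExecutor()
#     flow = executor.pipelines_to_plan(pipelines)   # a prefect.Flow
#     state = executor.execute_plan(flow)            # runs the flow, returns its State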
| mit | 852,775,540,904,071,000 | 31.45614 | 78 | 0.625405 | false |
Orange-OpenSource/opnfv-cloudify-clearwater | scripts/monitoring/proxy_snmp/add-snmpproxy-collector.py | 1 | 1191 | from cloudify import ctx
from cloudify import exceptions
import diamond_agent.tasks as diamond
import os
workdir = ctx.plugin.workdir
paths = diamond.get_paths(workdir.replace("script","diamond"))
name = 'SNMPProxyCollector'
collector_dir = os.path.join(paths['collectors'], name)
if not os.path.exists(collector_dir):
os.mkdir(collector_dir)
collector_file = os.path.join(collector_dir, '{0}.py'.format(name))
ctx.download_resource('scripts/monitoring/proxy_snmp/snmpproxy.py', collector_file)
config = ctx.target.instance.runtime_properties.get('snmp_collector_config', {})
config.update({'enabled': True,
'hostname': '{0}.{1}.{2}'.format(diamond.get_host_id(ctx.target),
ctx.target.node.name,
ctx.target.instance.id)
})
config_full_path = os.path.join(paths['collectors_config'], '{0}.conf'.format(name))
diamond.write_config(config_full_path, config)
try:
diamond.stop_diamond(paths['config'])
except:
pass
try:
diamond.start_diamond(paths['config'])
except:
exceptions.RecoverableError("Failed to start diamond", 30)
pass
| apache-2.0 | -7,840,169,114,727,365,000 | 33.028571 | 87 | 0.655751 | false |
elbow-jason/flask-meta | flask_meta/appmeta/config/cfg_generator.py | 1 | 2916 | import os
def yaml_cfg():
current_dir = os.getcwd()
yaml_text = """
SECRET_KEY: 'appmeta_dev_key'
META_CREDENTIALS: 'metaadmin,password'
ADMIN_CREDENTIALS: 'admin,password'
SQLALCHEMY_DATABASE_URI: sqlite://{}/test.db
SQLALCHEMY_BINDS:
- appmeta: sqlite://{}/appmeta.db
SERVER_URL: "http://localhost:5000"
SQLALCHEMY_ECHO: True
DEBUG: True
SECURITY_PASSWORD_HASH: bcrypt
# Upon creation I am going to make sure that the user selects a desired
# database before proceeding to ensure no hangups upon attempting
# to go to production. See comments below for further production
# directives.
# Also, using a password protected database as your
# database (i.e. MySQL or my favorite PostgreSQL) will
# make your database much more secure than using SQLite (as
# is the default). SQLite itself has no concept of users or
# permissions, and instead relies on OS permissions for
# security.
# this config should absolutely be changed before
# production. Specifically, the SECRET_KEY, META_CREDENTIALS,
# and ADMIN_CREDENTIALS.
# this line is for testing (do not remove)
""".format(current_dir, current_dir)
return str(yaml_text)
def ini_cfg():
current_dir = os.getcwd()
text = """
[appmeta_config]
SECRET_KEY = 'appmeta_dev_key'
META_CREDENTIALS = {'name':'metaadmin','password':'password'}
ADMIN_CREDENTIALS = {'name':'admin','password':'password'}
SQLALCHEMY_DATABASE_URI = 'sqlite://%s/test.db'
SQLALCHEMY_BINDS = {'appmeta' : 'sqlite://%s/appmeta.db' }
SERVER_URL: "http://localhost:5000"
SQLALCHEMY_ECHO: True
DEBUG: True
SECURITY_PASSWORD_HASH: 'bcrypt'
# Upon creation I am going to make sure that the user selects a desired
# database before proceeding to ensure no hangups upon attempting
# to go to production. See comments below for further production
# directives.
# Also, using a password protected database as your
# database (i.e. MySQL or my favorite PostgreSQL) will
# make your database much more secure than using SQLite (as
# is the default). SQLite itself has no concept of users or
# permissions, and instead relies on OS permissions for
# security.
# this config should absolutely be changed before
# production. Specifically, the SECRET_KEY, META_CREDENTIALS,
# and ADMIN_CREDENTIALS.
# this line is for testing (do not remove)
""" % (current_dir, current_dir)
return str(text)
def write_file_safe(text, file_name, overwrite=False):
if not overwrite:
if not os.path.isfile(file_name):
new_file = open(file_name, 'w+')
new_file.write(text)
new_file.close()
else:
print """The file '{}' already exists.
To overwrite '{}' pass overwrite=True as a kwarg.
No action taken.""".format(file_name, file_name)
def write_yaml_cfg():
    write_file_safe(yaml_cfg(), 'config.yaml')
def write_ini_cfg():
write_file_safe(ini_cfg(), 'config.ini')
if __name__ == '__main__':
write_ini_cfg()
| mit | 4,527,859,070,266,051,000 | 31.043956 | 71 | 0.709877 | false |
simon-r/PyParticles | pyparticles/demo/fountain.py | 1 | 4044 | # PyParticles : Particles simulation in python
# Copyright (C) 2012 Simone Riva
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pyparticles.pset.particles_set as ps
import pyparticles.pset.opencl_context as occ
import pyparticles.ode.euler_solver as els
import pyparticles.ode.leapfrog_solver as lps
import pyparticles.ode.runge_kutta_solver as rks
import pyparticles.ode.stormer_verlet_solver as svs
import pyparticles.ode.midpoint_solver as mds
import pyparticles.forces.const_force as cf
import pyparticles.forces.drag as dr
import pyparticles.forces.multiple_force as mf
import pyparticles.animation.animated_ogl as aogl
import pyparticles.pset.default_boundary as db
from pyparticles.utils.pypart_global import test_pyopencl
def default_pos( pset , indx ):
t = default_pos.sim_time.time
pset.X[indx,:] = 0.01 * np.random.rand( len(indx) , pset.dim ).astype( pset.dtype )
fs = 1.0 / ( 1.0 + np.exp( -( t*4.0 - 2.0 ) ) )
alpha = 2.0 * np.pi * np.random.rand( len(indx) ).astype( pset.dtype )
vel_x = 2.0 * fs * np.cos( alpha )
vel_y = 2.0 * fs * np.sin( alpha )
pset.V[indx,0] = vel_x
pset.V[indx,1] = vel_y
pset.V[indx,2] = 10.0 * fs + 1.0 * fs * ( np.random.rand( len(indx)) )
def fountain():
"""
Fountain demo
"""
steps = 10000000
dt = 0.005
pcnt = 100000
fl = True
if test_pyopencl() :
print( "OpenCL is installed and enabled " )
print( " Try, at least, 200000 particles " )
while fl :
try :
print( " " )
pcnt = int( input('How many particles: ') )
except :
print( "Please insert a number! " )
else :
fl = False
pset = ps.ParticlesSet( pcnt , dtype=np.float32 )
pset.M[:] = 0.1
pset.X[:,2] = 0.7 * np.random.rand( pset.size )
grav = cf.ConstForce( pset.size , dim=pset.dim , u_force=( 0.0 , 0.0 , -10.0 ) )
occx = None
if test_pyopencl() :
occx = occ.OpenCLcontext( pset.size , pset.dim , ( occ.OCLC_X | occ.OCLC_V | occ.OCLC_A | occ.OCLC_M ) )
drag = dr.DragOCL( pset.size , dim=pset.dim , Consts=0.01 , ocl_context=occx )
else :
drag = dr.Drag( pset.size , dim=pset.dim , Consts=0.01 )
multi = mf.MultipleForce( pset.size , dim=pset.dim )
multi.append_force( grav )
multi.append_force( drag )
multi.set_masses( pset.M )
#solver = mds.MidpointSolver( multi , pset , dt )
if test_pyopencl() :
solver = els.EulerSolverOCL( multi , pset , dt , ocl_context=occx )
else :
solver = els.EulerSolver( multi , pset , dt )
solver.update_force()
default_pos.sim_time = solver.get_sim_time()
bd = ( -100.0 , 100.0 , -100.0 , 100.0 , 0.0 , 100.0 )
bound = db.DefaultBoundary( bd , dim=3 , defualt_pos=default_pos )
pset.set_boundary( bound )
a = aogl.AnimatedGl()
a.ode_solver = solver
a.pset = pset
a.steps = steps
a.draw_particles.set_draw_model( a.draw_particles.DRAW_MODEL_VECTOR )
a.init_rotation( -80 , [ 0.7 , 0.05 , 0 ] )
a.build_animation()
a.start()
return
| gpl-3.0 | 209,497,365,053,662,750 | 27.955556 | 112 | 0.589021 | false |
gaoce/ifttt | ifttt/feeds.py | 1 | 5089 | # -*- coding: utf-8 -*-
"""Parse configuration file to get all feed subscription, currently only opml
file is supported
"""
import feedparser
import xml.etree.ElementTree as eT
import multiprocessing
from multiprocessing.dummy import Pool
import time
from datetime import datetime
from functools import partial
import logging
import os
import socket
import xml
import re
# Set parsing timeout in seconds
TIMEOUT_IN_SECONDS = 120
socket.setdefaulttimeout(TIMEOUT_IN_SECONDS)
class TimeStamp(object):
""" Get the current time, store it in a obj, and test whether a date is
too old based on that time.
"""
def __init__(self, period=24 * 3600):
""" Get current time
        :param int period: period of time in seconds (defaults to 1 day)
If period is None, test_expire will always return False
"""
self._now = time.mktime(datetime.now().timetuple())
if period is None:
period = self._now
self._period = period
def test_expire(self, date):
""" Test whether a __date__ is a __period__ of time old
:param time.struct_time date: date tuple
:return: True for expired, False for not
"""
if date is None:
return True
else:
# Convert date to seconds since epoch
date = time.mktime(date)
# Test whether date is old
if self._now - date > self._period:
return True
else:
return False
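
# Illustrative behaviour of TimeStamp (uses only modules already imported above;
# values are examples):
#
#     ts = TimeStamp()                            # period defaults to 24 * 3600 s
#     ts.test_expire(time.gmtime(0))              # epoch 1970 -> True (too old)
#     ts.test_expire(None)                        # missing date -> True
#     ts.test_expire(datetime.now().timetuple())  # just now -> False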
def get_feeds(file_path):
""" Process file containing feed list, return the urls as a list.
Note for now we are not going to validate the urls.
:param str file_path: path to feed list file
The file could be a simple list (one url per line) or a opml file (XML),
we determine the type simply by extension: .txt for the former, .opml for
the later.
    :raise ValueError: file not found
:return: a list of url strings
"""
feeds = []
if not os.path.exists(file_path):
raise ValueError('File ' + file_path + ' not found!')
if file_path.endswith('.txt'):
with open(file_path) as fi:
for line in fi:
line = line.rstrip()
if line == '':
continue
feeds.append(line)
elif file_path.endswith('.opml'):
tree = eT.parse(file_path)
root = tree.getroot()
outlines = root.findall('.//outline')
for outline in outlines:
feeds.append(outline.get('xmlUrl'))
else:
raise ValueError('Unsupported file type!')
return feeds
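
# Hypothetical illustration of the two accepted list formats: a .txt file is one
# URL per line, e.g.
#     http://example.com/atom.xml
#     http://example.org/rss
# while a .opml file is XML whose <outline xmlUrl="http://example.com/atom.xml"/>
# elements carry the feed URLs.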
def parse_feed(feed_url, time_stamp=TimeStamp()):
"""
:param feed_url:
:param TimeStamp time_stamp:
:return dict: a dict mapping feed title to a list of new entries
"""
# Get parsed feed
fd = feedparser.parse(feed_url)
# Exception handling
if fd.bozo == 1:
err = None
# Return None if parsing failed or timeout
if isinstance(fd.bozo_exception, xml.sax.SAXException):
err = 'SAXException'
elif isinstance(fd.bozo_exception, socket.timeout):
err = 'Timeout'
if err is not None:
logging.warning(' '.join(['Failed parsing ', feed_url, err]))
return None, None
feed_title = fd.feed.get('title', feed_url)
entries = []
for entry in fd.entries:
# Get feed published date
date = entry.get('published_parsed', None)
if date is None:
date = entry.get('updated_parsed', None)
# Skipped if feed is expired
if time_stamp.test_expire(date):
continue
title = entry.get('title', feed_title)
link = entry.get('link')
desc = entry.get('description', u'No description')
# Remove image tags, so the email is not too complex
desc = re.sub('<img.*?/img>', '', desc)
desc = re.sub('<img.*?>', '', desc)
# Truncate long description
if len(desc) > 1000:
desc = desc[:1000] + u"..."
entries.append({'title': title, 'link': link, 'desc': desc,
'date': time.strftime("%Y-%m-%d %H:%M", date)})
logging.info('Parsed ' + feed_title)
return feed_title, entries
def update_feeds(file_path, time_stamp=TimeStamp()):
""" Update feed subscription
:param str file_path:
:param TimeStamp time_stamp:
:return:
"""
logging.info('Start updating feeds')
feeds = get_feeds(file_path)
num_cpu = multiprocessing.cpu_count()
# Save one core for other stuff
if num_cpu >= 2:
num_cpu -= 1
# Set up a pool to parse the feed
# Note we are using ThreadPool here, otherwise logging is a bit complicated
pool = Pool(num_cpu)
parser = partial(parse_feed, time_stamp=time_stamp)
feeds_parsed = pool.map(parser, feeds)
logging.info('Finish updating feeds')
return {title: entries for title, entries in feeds_parsed
if title is not None}
__all__ = ['TimeStamp', 'get_feeds', 'parse_feed', 'update_feeds']
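
# Sketch of typical use of this module (the file name below is hypothetical):
#
#     if __name__ == '__main__':
#         feeds_by_title = update_feeds('subscriptions.opml', TimeStamp(12 * 3600))
#         for title, entries in feeds_by_title.items():
#             print(title, len(entries))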
| gpl-3.0 | 6,882,398,547,238,342,000 | 25.784211 | 79 | 0.598546 | false |
Mithrilwoodrat/Tombhub | tombhub/models.py | 1 | 1363 | import datetime
from sqlalchemy import Column, Integer, String, Text, DateTime, ForeignKeyConstraint
from tombhub.database import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(50), unique=True)
passwd = Column(String(50))
def __init__(self, name=None, passwd = None):
self.name = name
self.passwd = passwd
def __repr__(self):
return '<User %r>' % (self.name)
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return self.id
class Thread(Base):
__tablename__ = 'threads'
id = Column(Integer, primary_key=True)
author_id = Column(Integer, nullable=False)
author_name = Column(String(50), nullable=False)
title = Column(String(50))
content = Column(Text)
created_date = Column(DateTime, default=datetime.datetime.utcnow)
ForeignKeyConstraint(['author_id','author_name'],['users.id','users.name'])
def __init__(self, title=None, author_id=None, content = None):
self.title = title
self.author_id = author_id
self.author_name = User.query.get(self.author_id).name
self.content = content
def __repr__(self):
return '<Thread %r>' % (self.title) | gpl-2.0 | 7,702,907,114,834,849,000 | 27.416667 | 84 | 0.630961 | false |
fnoop/maverick | manifests/maverick-modules/maverick_network/facts.d/netinfo.py | 1 | 6823 | #!/usr/bin/env python3
# This fact extracts network info for network interfaces
import os, re, sys, subprocess
sys.dont_write_bytecode = True # This is to prevent .pyc files in facts.d directory
sys.path.insert(0, '/usr/local/examples')
try:
import netifaces
import pyric # pyric errors
import pyric.pyw as pyw # iw functionality
from udevnet import Udevnet
netinfo = {}
except:
print("netinfo_present=no")
print("netinfo_interfaces=")
sys.exit(1)
class Netinfo(object):
def __init__(self, _if):
self.data = {}
self._if = str(_if)
self.udevnet = Udevnet()
self.udevnet.runall()
def getinfo(self):
try:
self.data['macaddress'] = netifaces.ifaddresses(self._if)[netifaces.AF_LINK][0]['addr']
except:
self.data['macaddress'] = None
try:
self.data['ipaddress'] = netifaces.ifaddresses(self._if)[netifaces.AF_INET][0]['addr']
except:
self.data['ipaddress'] = None
try:
self.data['vendorstr'] = self.udevnet.data[self._if+"_id_vendor_from_database"]
except:
self.data['vendorstr'] = None
try:
self.data['vendoroui'] = self.udevnet.data[self._if+"_id_oui_from_database"]
except:
self.data['vendoroui'] = None
try:
self.data['vendor'] = self.udevnet.data[self._if+"_id_vendor"]
except:
self.data['vendor'] = None
# Hack for onboard raspberry devices
if type(self.data['vendoroui']) is str:
if re.search("^Raspberry", self.data['vendoroui']):
self.data['vendor'] = "RaspberryPi"
try:
self.data['driver'] = self.udevnet.data[self._if+"_id_net_driver"]
except:
try:
self.data['driver'] = self.udevnet.data[self._if+"_id_usb_driver"]
except:
self.data['driver'] = None
try:
self.data['model'] = self.udevnet.data[self._if+"_id_model_id"]
except:
self.data['model'] = None
try:
self.data['modelstr'] = self.udevnet.data[self._if+"_id_model_from_database"]
except:
self.data['modelstr'] = None
try:
self.data['netname'] = self.udevnet.data[self._if+"_id_net_name_from_database"]
except:
try:
self.data['netname'] = self.udevnet.data[self._if+"_id_net_name_onboard"]
except:
try:
self.data['netname'] = self.udevnet.data[self._if+"_id_net_name_slot"]
except:
try:
self.data['netname'] = self.udevnet.data[self._if+"_id_net_name_path"]
except:
try:
self.data['netname'] = self.udevnet.data[self._if+"_id_net_name_mac"]
except:
self.data['netname'] = None
try:
self.data['type'] = self.udevnet.data[self._if+"_devtype"]
if self.data['type'] == "wlan": self.data['type'] = "Wireless"
except:
try:
if re.search("^en", self.data['netname']):
self.data['type'] = "Ethernet"
elif re.search("^wl", self.data['netname']):
self.data['type'] = "Wireless"
else:
self.data['type'] = None
except:
self.data['type'] = None
# Stop here if we don't have a wireless card
if self.data['type'] != "Wireless":
return
# Retrieve wireless info
try:
_ifobj = pyw.getcard(self._if)
_ifinfo = pyw.ifinfo(_ifobj)
_devinfo = pyw.devinfo(_ifobj)
_physinfo = pyw.phyinfo(_ifobj)
_linkinfo = pyw.link(_ifobj)
except:
pass
try:
self.data['isup'] = pyw.isup(_ifobj)
except:
self.data['isup'] = None
try:
self.data['blocked'] = pyw.isblocked(_ifobj)
except:
self.data['blocked'] = None
try:
self.data['mode'] = _devinfo['mode']
except:
self.data['mode'] = None
try:
self.data['modes'] = _physinfo['modes']
except:
self.data['modes'] = None
try:
self.data['bands'] = _physinfo['bands']
except:
self.data['bands'] = None
try:
self.data['standards'] = pyw.devstds(_ifobj)
except:
self.data['standards'] = None
try:
self.data['freqs'] = pyw.devfreqs(_ifobj)
except:
self.data['freqs'] = None
try:
self.data['txpower'] = pyw.txget(_ifobj)
except:
self.data['txpower'] = None
try:
self.data['chans'] = pyw.devchs(_ifobj)
except:
self.data['chans'] = None
try:
self.data['reg'] = pyw.regget(_ifobj)
except:
self.data['reg'] = None
try:
self.data['chipset'] = _ifinfo['chipset']
except:
self.data['chipset'] = None
try:
self.data['state'] = _linkinfo['stat']
except:
self.data['state'] = None
try:
self.data['ssid'] = _linkinfo['ssid']
except:
self.data['ssid'] = None
try:
self.data['chw'] = _devinfo['CHW']
except:
self.data['chw'] = None
try:
self.data['frequency'] = _devinfo['RF']
except:
self.data['frequency'] = None
try:
self.data['rss'] = _linkinfo['rss']
except:
self.data['rss'] = None
try:
self.data['wtx'] = _linkinfo['tx']
except:
self.data['wtx'] = None
try:
self.data['wrx'] = _linkinfo['rx']
except:
self.data['wrx'] = None
def runall(self):
pass
#If we're being called as a command, instantiate and report
if __name__ == '__main__':
try:
ifs = pyw.interfaces()
except pyric.error as e:
print("Error running netinfo, pyric not available")
sys.exit(1)
print("netinfo_present=yes")
with open ("/etc/hostname", "r") as etc_hostname:
data=etc_hostname.readlines()
if data:
print("netinfo_etchostname="+str(data[0].rstrip()))
print("netinfo_interfaces="+",".join(ifs))
for _if in ifs:
_netinfo = Netinfo(_if)
_netinfo.getinfo()
for key,val in sorted(_netinfo.data.items()):
print("netinfo_"+_if+"_%s=%s" % (key, val))
| gpl-3.0 | -9,157,286,942,456,641,000 | 32.446078 | 99 | 0.491866 | false |
b29308188/MMAI_final | src/datasets.py | 1 | 4394 | import cv2
import numpy as np
#labels for each kind of tag
label_maps = {"T":0 , "F": 1, "N": 2}
inv_label_maps = {v : k for (k, v) in label_maps.items()}
class Photo:
"""
This class is a photo which contains a list of faces.
image_ID : the ID of this photo
image : the path of the image
"""
class Face:
"""
This class represents a face start in (x , y).
w : width
h : height
tag : T/F/N
label : numbers corresponding to the tag
feature : the vector of features of this face
"""
def __init__(self, x, y, w, h, tag = None):
"""
This is the Constructor of Face
"""
self.x = int(float(x))
self.y = int(float(y))
self.w = int(float(w))
self.h = int(float(h))
self.tag = tag
if tag is not None:
self.label = label_maps[tag]
else:
self.label = None
self.feature = None
def __init__(self, image_ID = None):
"""
This is the constructor of Photo.
"""
self.image_ID = image_ID
self.faces = []
self.image = None
def read_image(self, image_path):
"""
Read image from the image_path and store it in memory
"""
self.image = cv2.resize(cv2.imread(image_path), (960, 720))
assert self.image is not None
def add_face(self, x, y, w, h, tag = None):
"""
Add a face to the list of faces.
"""
self.faces.append( self.Face(x, y, w, h, tag) )
def histogram(self, img):
Orishape = img.shape
hist = []
img = img.reshape((img.shape[0]*img.shape[1]*img.shape[2]),order='F')
a = np.histogram(img[0:Orishape[0]*Orishape[1]], bins=np.arange(0,257,64))[0]
hist += list(a.astype(float)/np.sum(a))
b = np.histogram(img[Orishape[0]*Orishape[1]:2*Orishape[0]*Orishape[1]], bins=np.arange(0,257,64))[0]
hist += list(b.astype(float)/np.sum(b))
c = np.histogram(img[2*Orishape[0]*Orishape[1]:3*Orishape[0]*Orishape[1]], bins=np.arange(0,257,32))[0]
hist += list(c.astype(float)/np.sum(c))
return hist
def colorgram(self, img):
cgram = []
for i in xrange(3): # RGB
cgram += [np.mean(img[0:,0:,i]), np.std(img[0:,0:,i])]
return cgram
def get_global_features(self):
gfs = []
gfs += [len(self.faces), self.image.shape[0]*self.image.shape[1]] # number of faces in this image
gfs += [np.mean([f.x for f in self.faces]), np.var([f.x for f in self.faces])]
gfs += [np.mean([f.y for f in self.faces]), np.var([f.y for f in self.faces])]
gfs += [np.mean([f.w for f in self.faces]), np.var([f.w for f in self.faces])]
gfs += [np.mean([f.h for f in self.faces]), np.var([f.h for f in self.faces])]
average_distance = 0.
self.disMatrix = np.zeros((len(self.faces), len(self.faces)))
for i, f1 in enumerate(self.faces):
for j, f2 in enumerate(self.faces):
dis = np.sqrt(((f1.x+f1.w/2) - (f2.x+f2.w/2))**2 + ((f1.y+f1.h/2) - (f2.y+f2.h/2))**2) #l2 dis
self.disMatrix[i, j] = dis
average_distance += dis
self.global_feature = gfs
def local_features(self, f, no):
lfs = [f.x, f.y, f.w, f.h]
lfs += self.colorgram(self.image[f.y : f.y+f.h, f.x : f.x+f.w])
lfs += [np.var(self.disMatrix[no, :]), np.mean(self.disMatrix[no, :])] # average distance to other faces
lfs += [f.x+f.w/2, f.y+f.h/2] # center
NinR = 0.0
R = 0.4 * self.image.shape[0]# percentage of image's width
for i in xrange(len(self.faces)):
if self.disMatrix[no, i] < R :
NinR += 1
lfs += [NinR/len(self.faces)]
return lfs
def extract_features(self):
"""
For each face in the list of faces, extract its features.
"""
if self.image is not None:
self.get_global_features()
for i, f in enumerate(self.faces):
if self.image is not None:
f.feature = np.array(self.local_features(f, i) + self.global_feature )
else:
f.feature = np.array([float(f.w*f.h)])
| gpl-2.0 | 5,503,500,688,097,726,000 | 33.873016 | 112 | 0.516614 | false |
Ektorus/bohrium | bridge/npbackend/setup.py | 1 | 7563 | #!/usr/bin/env python
"""
/*
This file is part of Bohrium and copyright (c) 2012 the Bohrium
http://bohrium.bitbucket.org
Bohrium is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3
of the License, or (at your option) any later version.
Bohrium is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the
GNU Lesser General Public License along with Bohrium.
If not, see <http://www.gnu.org/licenses/>.
*/
"""
from distutils.core import setup, Extension
from distutils.command.build import build
import os
import sys
import stat
import pprint
import json
import shutil
import numpy as np
from Cython.Distutils import build_ext
#We overload the setup.py with a 'buildpath=' argument that
#points to the root of the current build
build_path = None
for i,arg in enumerate(sys.argv):
if arg.startswith("buildpath="):
build_path = arg[len("buildpath="):]
sys.argv.pop(i)
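
# The custom argument is stripped from sys.argv before distutils parses it, so a
# build would be invoked like this (the path is only an example):
#     python setup.py buildpath=/tmp/bohrium-build build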
def buildpath(*args):
if build_path is None:
return os.path.join(*args)
else:
return os.path.join(build_path, *args)
def srcpath(*args):
prefix = os.path.abspath(os.path.dirname(__file__))
assert len(prefix) > 0
return os.path.join(prefix, *args)
def get_timestamp(f):
st = os.stat(f)
mtime = st[stat.ST_MTIME] #modification time
return mtime
def set_timestamp(f,timestamp):
os.utime(f,(timestamp,timestamp))
#Returns the numpy data type name
def dtype_bh2np(bh_type_str):
return bh_type_str[3:].lower()#Remove BH_ and convert to lower case
#Merge bhc.i.head with the bh_c.h to create our SWIG interface bhc.i
time = 0
with open(buildpath("bhc.i"), 'w') as outfile:
for fname in [srcpath("bhc.i.head"),srcpath("..","c","codegen","output","bh_c.h")]:
t = get_timestamp(fname)
if t > time:
time = t
with open(fname) as infile:
for line in infile:
outfile.write(line)
set_timestamp(buildpath("bhc.i"),time)
#Create the _info.py file
time = get_timestamp(srcpath('setup.py'))
with open(buildpath("_info.py"), 'w') as o:
#Write header
o.write("#This file is auto generated by the setup.py\n")
o.write("import numpy as np\n")
#Find number of operands and type signature for each Bohrium opcode
#that Bohrium-C supports
t = get_timestamp(srcpath('..','..','core','codegen','opcodes.json'))
if t > time:
time = t
nops = {}
type_sig = {}
ufunc = {}
with open(srcpath('..','..','core','codegen','opcodes.json'), 'r') as f:
opcodes = json.loads(f.read())
for op in opcodes:
if op['elementwise'] and not op['system_opcode']:
#Convert the type signature to bhc names
type_sig = []
for sig in op['types']:
type_sig.append([dtype_bh2np(s) for s in sig])
name = op['opcode'].lower()[3:]#Removing BH_ and we have the NumPy and bohrium name
ufunc[name] = {'name': name,
'nop': int(op['nop']),
'type_sig': type_sig}
o.write("op = ")
pp = pprint.PrettyPrinter(indent=2, stream=o)
pp.pprint(ufunc)
#Find and write all supported data types
t = get_timestamp(srcpath('..','..','core','codegen','types.json'))
if t > time:
time = t
s = "numpy_types = ["
with open(srcpath('..','..','core','codegen','types.json'), 'r') as f:
types = json.loads(f.read())
for t in types:
if t['numpy'] == "unknown":
continue
s += "np.dtype('%s'), "%t['numpy']
s = s[:-2] + "]\n"
o.write(s)
set_timestamp(buildpath("_info.py"),time)
#We need to make sure that the extensions are built before the python module because of SWIG
#Furthermore, '_info.py' and 'bhc.py' should be copied to the build dir
class CustomBuild(build):
sub_commands = [
('build_ext', build.has_ext_modules),
('build_py', build.has_pure_modules),
('build_clib', build.has_c_libraries),
('build_scripts', build.has_scripts),
]
def run(self):
if not self.dry_run:
self.copy_file(buildpath('_info.py'),buildpath(self.build_lib,'bohrium','_info.py'))
self.copy_file(buildpath('bhc.py'),buildpath(self.build_lib,'bohrium','bhc.py'))
build.run(self)
if os.path.realpath(buildpath('random123.pyx')) != os.path.realpath(srcpath('random123.pyx')):
shutil.copy2(srcpath('random123.pyx'), buildpath('random123.pyx'))
shutil.copy2(srcpath('ndarray.pyx'), buildpath('ndarray.pyx'))
shutil.copy2(srcpath('_util.pyx'), buildpath('_util.pyx'))
setup(name='Bohrium',
version='0.2',
description='Bohrium NumPy',
long_description='Bohrium NumPy',
author='The Bohrium Team',
author_email='[email protected]',
url='http://www.bh107.org',
license='LGPLv3',
platforms='Linux, OSX',
cmdclass={'build': CustomBuild, 'build_ext':build_ext},
package_dir={'bohrium': srcpath('')},
packages=['bohrium', 'bohrium.target'],
ext_package='bohrium',
ext_modules=[Extension(name='_bhmodule',
sources=[srcpath('src','_bhmodule.c')],
depends=[srcpath('src','types.c'), srcpath('src','types.h'),
srcpath('src','operator_overload.c')],
include_dirs=[srcpath('..','c','codegen','output'),
srcpath('..','..','include')],
libraries=['dl','bhc', 'bh'],
library_dirs=[buildpath('..','c'),
buildpath('..','..','core')],
),
Extension(name='_bhc',
sources=[buildpath('bhc.i')],
include_dirs=[srcpath('..','c','codegen','output'),
srcpath('..','..','include')],
libraries=['dl','bhc', 'bh'],
library_dirs=[buildpath('..','c'),
buildpath('..','..','core')],
),
Extension(name='random123',
sources=[buildpath('random123.pyx')],
include_dirs=[srcpath('.'),
srcpath('..','..','thirdparty','Random123-1.08','include')],
libraries=[],
library_dirs=[],
),
Extension(name='_util',
sources=[buildpath('_util.pyx')],
include_dirs=[srcpath('.')],
libraries=[],
library_dirs=[],
),
Extension(name='ndarray',
sources=[buildpath('ndarray.pyx')],
include_dirs=[srcpath('.')],
libraries=[],
library_dirs=[],
)
]
)
| lgpl-3.0 | -1,784,800,386,496,291,800 | 37.390863 | 103 | 0.531667 | false |
manderelee/csc2521_final | scripts/adjust_vox.py | 1 | 1368 | import os, sys
if __name__ == "__main__":
filename = sys.argv[1]
########### read ############
header = []
f = open(filename, 'r')
header.append(f.readline())
header.append(f.readline())
header.append(f.readline())
header.append(f.readline())
header.append(f.readline())
num_nodes = header[-1]
node_lines = []
for i in range(int(num_nodes)):
line = f.readline().split(" ")
line[0] = str(int(line[0]) + 1)
line = " ".join(line)
node_lines.append(line)
between = []
between.append(f.readline())
between.append(f.readline())
between.append(f.readline())
num_elements = between[-1]
element_lines = []
for i in range(int(num_elements)):
line = f.readline().split(" ")
line[0] = str(int(line[0]) + 1)
for j in range(5,13):
line[j] = str(int(line[j]) + 1)
line = " ".join(line)
element_lines.append(line + '\n')
end = ['$EndElements']
f.close()
########### write ############
f = open(filename, 'w')
for line in header:
f.write(line)
for line in node_lines:
f.write(line)
for line in between:
f.write(line)
for line in element_lines:
f.write(line)
for line in end:
f.write(line)
f.close()
| mpl-2.0 | -5,231,434,740,971,981,000 | 23.872727 | 43 | 0.500731 | false |
kearnsw/Twitt.IR | src/alignment.py | 1 | 2873 | #This software is a free software. Thus, it is licensed under GNU General Public License.
#Python implementation of the Smith-Waterman algorithm for Homework 1 of a Bioinformatics class.
#Forrest Bao, Sept. 26 <http://fsbao.net> <forrest.bao aT gmail.com>
# zeros() was originally from NumPy.
# This version is implemented by alevchuk 2011-04-10
def zeros(shape):
retval = []
for x in range(shape[0]):
retval.append([])
for y in range(shape[1]):
retval[-1].append(0)
return retval
match_award = 10
mismatch_penalty = -5
gap_penalty = -5 # both for opening and extanding
def match_score(alpha, beta):
if alpha == beta:
return match_award
elif alpha == '-' or beta == '-':
return gap_penalty
else:
return mismatch_penalty
def finalize(align1, align2):
align1 = align1[::-1] #reverse sequence 1
align2 = align2[::-1] #reverse sequence 2
i,j = 0,0
    # calculate identity, score and aligned sequences
symbol = ''
found = 0
score = 0
identity = 0
return len(align1)
def water(seq1, seq2):
m, n = len(seq1), len(seq2) # length of two sequences
# Generate DP table and traceback path pointer matrix
score = zeros((m+1, n+1)) # the DP table
pointer = zeros((m+1, n+1)) # to store the traceback path
max_score = 0 # initial maximum score in DP table
# Calculate DP table and mark pointers
for i in range(1, m + 1):
for j in range(1, n + 1):
score_diagonal = score[i-1][j-1] + match_score(seq1[i-1], seq2[j-1])
score_up = score[i][j-1] + gap_penalty
score_left = score[i-1][j] + gap_penalty
score[i][j] = max(0,score_left, score_up, score_diagonal)
if score[i][j] == 0:
pointer[i][j] = 0 # 0 means end of the path
if score[i][j] == score_left:
pointer[i][j] = 1 # 1 means trace up
if score[i][j] == score_up:
pointer[i][j] = 2 # 2 means trace left
if score[i][j] == score_diagonal:
pointer[i][j] = 3 # 3 means trace diagonal
if score[i][j] >= max_score:
max_i = i
max_j = j
max_score = score[i][j];
align1, align2 = '', '' # initial sequences
i,j = max_i,max_j # indices of path starting point
#traceback, follow pointers
while pointer[i][j] != 0:
if pointer[i][j] == 3:
align1 += seq1[i-1]
align2 += seq2[j-1]
i -= 1
j -= 1
elif pointer[i][j] == 2:
align1 += '-'
align2 += seq2[j-1]
j -= 1
elif pointer[i][j] == 1:
align1 += seq1[i-1]
align2 += '-'
i -= 1
return finalize(align1, align2)
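
# Small demo (the sequences are arbitrary examples): water() builds the local
# alignment and, through finalize(), returns the length of the locally aligned
# segment, gaps included.
if __name__ == '__main__':
    print(water('HEAGAWGHEE', 'PAWHEAE'))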
| gpl-3.0 | -6,357,484,585,676,314,000 | 32.022989 | 90 | 0.533241 | false |
iamaris/xpython | login/example_edited.py | 1 | 1531 | # -*- coding: utf-8 -*-
import tornado.web
import tornado.gen
import tornado.escape
from google_oath2 import GoogleOath2Mixin
class GoogleHandler(tornado.web.RequestHandler, GoogleOath2Mixin):
@tornado.web.addslash
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("code", False):
user = yield self.get_authenticated_user(
redirect_uri='http://localhost:8000/oauth2callback',
client_id="796019659754-2ak2t323shp3c336pspmohfj8dhdtg4v.apps.googleusercontent.com",
client_secret="O-Nu4I0f2QcqT81MEQekgst9",
code=self.get_argument("code"),
extra_fields=['email'])
print '--------------------------------GOOGLE--------------------------------'
print user
print '----------------------------------------------------------------------'
self.set_secure_cookie("user", tornado.escape.json_encode(user))
self.redirect("/")
else:
self.authorize_redirect(
redirect_uri='http://localhost:8000/oauth2callback',
client_id="796019659754-2ak2t323shp3c336pspmohfj8dhdtg4v.apps.googleusercontent.com",
extra_params={"scope": "https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile",
"state" : "profile",
"response_type": "code",})
| apache-2.0 | -1,084,315,582,388,091,800 | 50.033333 | 137 | 0.528413 | false |
eske/seq2seq | scripts/speech/extract.py | 1 | 2752 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division
import argparse
import numpy as np
import yaafelib
import tarfile
import tempfile
import os
from collections import Counter
parser = argparse.ArgumentParser()
parser.add_argument('inputs', nargs='+', help='tar archive which contains all the wav files')
parser.add_argument('output', help='output file')
parser.add_argument('--derivatives', action='store_true')
args = parser.parse_args()
parameters = dict(
step_size=160, # corresponds to 10 ms (at 16 kHz)
block_size=640, # corresponds to 40 ms
mfcc_coeffs=40,
mfcc_filters=41 # more filters? (needs to be at least mfcc_coeffs+1, because first coeff is ignored)
)
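
# Arithmetic behind the comments above, at the 16 kHz rate used below:
#   160 samples / 16000 Hz = 0.010 s  -> 10 ms hop
#   640 samples / 16000 Hz = 0.040 s  -> 40 ms analysis window
# and mfcc_filters = 41 satisfies the "at least mfcc_coeffs + 1" note (40 + 1).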
# TODO: ensure that all input files use this rate
fp = yaafelib.FeaturePlan(sample_rate=16000)
mfcc_features = 'MFCC MelNbFilters={mfcc_filters} CepsNbCoeffs={mfcc_coeffs} ' \
'blockSize={block_size} stepSize={step_size}'.format(**parameters)
energy_features = 'Energy blockSize={block_size} stepSize={step_size}'.format(**parameters)
fp.addFeature('mfcc: {}'.format(mfcc_features))
if args.derivatives:
fp.addFeature('mfcc_d1: {} > Derivate DOrder=1'.format(mfcc_features))
fp.addFeature('mfcc_d2: {} > Derivate DOrder=2'.format(mfcc_features))
fp.addFeature('energy: {}'.format(energy_features))
if args.derivatives:
fp.addFeature('energy_d1: {} > Derivate DOrder=1'.format(energy_features))
fp.addFeature('energy_d2: {} > Derivate DOrder=2'.format(energy_features))
if args.derivatives:
keys = ['mfcc', 'mfcc_d1', 'mfcc_d2', 'energy', 'energy_d1', 'energy_d2']
else:
keys = ['mfcc', 'energy']
df = fp.getDataFlow()
engine = yaafelib.Engine()
engine.load(df)
afp = yaafelib.AudioFileProcessor()
frame_counter = Counter()
outfile = open(args.output, 'wb')
total = 0
for filename in args.inputs:
tar = tarfile.open(filename)
total += len([f for f in tar if f.isfile()])
_, tmp_file = tempfile.mkstemp()
for j, filename in enumerate(args.inputs):
tar = tarfile.open(filename)
files = sorted([f for f in tar if f.isfile()], key=lambda f: f.name)
for i, fileinfo in enumerate(files):
file_ = tar.extractfile(fileinfo)
with open(tmp_file, 'wb') as f:
f.write(file_.read())
afp.processFile(engine, tmp_file)
feats = engine.readAllOutputs()
feats = np.concatenate([feats[k] for k in keys], axis=1)
frames, dim = feats.shape
feats = feats.astype(np.float32)
if frames == 0:
print(frames, dim, fileinfo.name)
raise Exception
if i == 0 and j == 0:
np.save(outfile, (total, dim))
np.save(outfile, feats)
outfile.close()
os.remove(tmp_file)
| apache-2.0 | -6,212,129,949,152,551,000 | 29.577778 | 105 | 0.666424 | false |
matthiask/zivinetz | tests/testapp/test_course_list.py | 1 | 1187 | from datetime import date
from django.test import TestCase
from testapp import factories
class CourseListTestCase(TestCase):
def test_course_list(self):
a1 = factories.AssignmentFactory.create(motor_saw_course_date=date.today())
a2 = factories.AssignmentFactory.create(
motor_saw_course_date=date.today(), environment_course_date=date.today()
)
a3 = factories.AssignmentFactory.create(environment_course_date=date.today())
factories.AssignmentFactory.create()
factories.AssignmentFactory.create()
factories.AssignmentFactory.create()
# Test the listing view.
admin = factories.UserFactory.create(is_staff=True, is_superuser=True)
self.client.login(username=admin.username, password="test")
response = self.client.get("/zivinetz/reporting/courses/")
# Four entries for three assignments, one header row and one week row.
self.assertContains(response, "<tr>", 4 + 1 + 1)
self.assertContains(response, a1.get_absolute_url(), 2)
self.assertContains(response, a2.get_absolute_url(), 4)
self.assertContains(response, a3.get_absolute_url(), 2)
| mit | -1,343,191,669,436,892,000 | 41.392857 | 85 | 0.693345 | false |
rlishtaba/py-algorithms | tests/py_algorithms/data_structures/deque_test.py | 1 | 2050 | import copy
from typing import List
import pytest
from py_algorithms.data_structures import new_deque
@pytest.fixture
def collection() -> List[int]:
return [0, 6, 7, 8, 9, 4, 5, 12]
class TestDeque:
def test_properties(self):
items = collection()
deque = new_deque(copy.deepcopy(items))
assert deque.is_empty() is False
assert deque.size == len(items)
assert deque.clear() == 0
assert deque.is_empty() is True
assert deque.size == 0
def test_push_front(self):
deque = new_deque()
deque.push_front(1)
assert deque.front == 1
assert deque.back == 1
deque.push_front(2)
assert deque.front == 2
assert deque.back == 1
def test_push_back(self):
deque = new_deque()
deque.push_back(1)
assert deque.front == 1
assert deque.back == 1
deque.push_back(2)
assert deque.front == 1
assert deque.back == 2
deque.push_back(3)
assert deque.front == 1
assert deque.back == 3
def test_pop_front_with_one_item(self):
deque = new_deque()
deque.push_front(1)
assert deque.front == 1
assert deque.back == 1
val = deque.pop_front()
assert val == 1
assert deque.clear() == 0
assert deque.is_empty() is True
assert deque.size == 0
def test_pop_front(self):
deque = new_deque()
items = [1, 3, 2, 7]
expected = []
for x in items:
# preserve order for a test
deque.push_back(x)
for _ in range(0, len(items)):
expected.append(deque.pop_front())
assert items == expected
def test_pop_back(self):
deque = new_deque()
items = [1, 3, 2, 7]
expected = []
for x in items:
# preserve order for a test
deque.push_front(x)
for _ in range(0, len(items)):
expected.append(deque.pop_back())
assert items == expected
| mit | -1,208,814,801,422,262,800 | 24.625 | 51 | 0.543415 | false |
viswimmer1/PythonGenerator | data/python_files/33842229/strategy.py | 1 | 12570 | import wx
import sys
import calendar
import threading
import datetime
import sqlite3
import traceback
import logging
from dateutil import rrule
class MonthStrategy(object):
def __init__(self, name, fnoType, buyWeek, sellWeek, priceTarget):
self.name = name
self.buyWeek = buyWeek # 0 = no buy, 1 = 1st week of month , 2, 3, 4th week of month
self.sellWeek = sellWeek # 0 = no sell, 1 = 1st week, 2, 3, 4 etc
self.fnoType = fnoType # future, put, call
self.priceTarget = priceTarget # percentage from future price on buy or sell date
self._price = None # will be set once bought or sold based on priceTarget
self._bought = False
self._sold = False
self.check()
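
    # Illustrative construction (values are made up): a strategy that sells a
    # call priced ~5% above the future during week 1 and holds it to expiry:
    #     MonthStrategy("wk1 call sell", "call", buyWeek=0, sellWeek=1, priceTarget=5)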
def __str__(self):
values = []
for displayName, name in [('Name', 'name'), ('Buy Week', 'buyWeek'), ('Sell Week', 'sellWeek'),
('Fno Type', 'fnoType'), ('Price Target', 'priceTarget')]:
values.append("%s: %s"%(displayName, getattr(self, name)))
return "\n".join(values)
def check(self):
self.name = self.name.strip()
if not self.name.strip():
raise Exception("Name can not be empty")
self.buyWeek = int(self.buyWeek)
        if not 0 <= self.buyWeek <= 4:
raise Exception("Buy Week should be between 0-4")
self.sellWeek = int(self.sellWeek)
        if not 0 <= self.sellWeek <= 4:
raise Exception("Sell Week should be between 0-4")
self.fnoType = self.fnoType.strip()
if self.fnoType not in ['future', 'call', 'put']:
raise Exception("FnO Type must be future, call or put")
self.priceTarget = int(self.priceTarget)
        if not -20 <= self.priceTarget <= 20:
raise Exception("Price Target must be between -20% to 20%")
def analyze_month_data(self, monthsDates, expiryDateTag, weekDates, futureData, callData, putData, logFunc):
logFunc(" strategy %s"%self.name)
results = []
for timestamp in monthsDates:
if not self._bought and self.buyWeek > 0 and timestamp >= weekDates[self.buyWeek-1] and timestamp < weekDates[self.buyWeek]:
price = self._buy_sell(True, timestamp, futureData, callData, putData)
if price is not None:
self._bought = True
if self.fnoType == 'future':
results.append((timestamp, price))
else:
results.append((timestamp, -price)) # buy of call is loss
if not self._sold and self.sellWeek > 0 and timestamp >= weekDates[self.sellWeek-1] and timestamp < weekDates[self.sellWeek]:
price = self._buy_sell(False, timestamp, futureData, callData, putData)
if price is not None:
self._sold = True
if self.fnoType == 'future':
results.append((timestamp, -price))
else:
results.append((timestamp, price)) # sell of call is profit
# if put or call check if requested action happend
if self.fnoType != 'future':
if self.buyWeek > 0 and not self._bought:
raise AnalyzeException("%s could not be bought"%self)
if self.sellWeek > 0 and not self._sold:
raise AnalyzeException("%s could not be sold"%self)
futurePrice = futureData[expiryDateTag]
# if squareoff was not set do expiry
if self._price is not None and self.sellWeek == 0:
# only profit matters
if self.fnoType == 'put' and futurePrice < self._price:
results.append((expiryDateTag, self._price - futurePrice))
if self.fnoType == 'call' and futurePrice > self._price:
results.append((expiryDateTag, futurePrice - self._price))
if self._price is not None and self.buyWeek == 0:
# only loss matters
if self.fnoType == 'put' and futurePrice < self._price:
results.append((expiryDateTag, futurePrice-self._price))
if self.fnoType == 'call' and futurePrice > self._price:
results.append((expiryDateTag, self._price - futurePrice))
return results
def _buy_sell(self, buy, timestamp, futureData, callData, putData):
strike_price = (futureData[timestamp]['OPEN'] +futureData[timestamp]['CLOSE'])/2.0
if self.fnoType == 'future':
return strike_price
if self._price is not None: # means we already executed one buy or sell
strike_price = self._price
else:
if self.fnoType == 'put':
# for put set strike price below by priceTarget
strike_price = strike_price - strike_price*self.priceTarget/100.0
else:
# for call set strike price above by priceTarget
strike_price = strike_price + strike_price*self.priceTarget/100.0
if self.fnoType == 'put':
day_data = putData[timestamp]
else:
day_data = callData[timestamp]
# check if any put or call matches on that day
for data in day_data:
price = data['STRIKE_PR']
# both high low open close should be non zero also change_oi
for var in ['OPEN', 'CLOSE', 'HIGH', 'LOW', 'CHANGE_OI']:
if data[var] == 0:
continue
premium = (data['OPEN'] + data['CLOSE'])/2.0
if self.fnoType == 'put' and strike_price <= price:
return premium
if self.fnoType == 'call' and strike_price >= price:
return premium
return None
class AnalyzeException(Exception): pass
def analyze(model, logFunc):
months = list(calendar.month_abbr)
startMonth = months.index(model.startMonth)
startDate = datetime.datetime(int(model.startYear), startMonth, 1)
endMonth = months.index(model.endMonth)
firstWeekday, days = calendar.monthrange(int(model.endYear), endMonth)
endDate = datetime.datetime(int(model.endYear), endMonth, days)
if endDate < startDate:
raise AnalyzeException("End date must > start date")
if endDate.year == startDate.year and endDate.month - startDate.month < 2:
raise AnalyzeException("Start and end date should be atleast 2 months apart")
startDateStr = startDate.strftime("%Y-%m-%d")
endDateStr = endDate.strftime("%Y-%m-%d")
conn = sqlite3.connect(model.dbFile)
cursor = conn.cursor()
columns = [('INSTRUMENT', 'text'),
('SYMBOL', 'text'),
('EXPIRY_DT', 'text'),
('STRIKE_PR', 'real'),
('OPTION_TYP', 'text'),
('OPEN', 'real'),
('HIGH', 'real'),
('LOW', 'real'),
('CLOSE', 'real'),
('SETTLE_PR', 'real'),
('CONTRACTS', 'real'),
('VAL_INLAKH', 'real'),
('OPEN_INT', 'real'),
('CHG_IN_OI', 'real'),
('TIMESTAMP', 'text')]
column_names = [c for c, t in columns]
column_str = ",".join(column_names)
sql = "select %s from %s where TIMESTAMP >= ? and TIMESTAMP <= ? order by TIMESTAMP"%(column_str, model.tableName)
count = 0
logFunc("Analyzing...%s - %s"%(startDateStr, endDateStr))
expiryDates = list(rrule.rrule(rrule.MONTHLY, byweekday=rrule.TH(-1), dtstart=startDate).between(startDate, endDate, inc=True))
expiryDatesTagMap = {}
for expiryDate in expiryDates:
expiryDatesTagMap[expiryDate.strftime("%Y-%m-%d")] = expiryDate
monthsData = [] # keep each month's data, month is from expiry to expiry
lastDateTag = None
strategy_data = {}
for i, strategy in enumerate(model.get_strategy_list()):
strategy_data[i] = []
for row in cursor.execute(sql, [startDateStr, endDateStr]):
count += 1
data = dict(zip(column_names, row))
curDateTag = data['TIMESTAMP']
if curDateTag not in expiryDatesTagMap and lastDateTag in expiryDatesTagMap: # expirty date passed
analyze_month_data(monthsData, model, strategy_data, lastDateTag, logFunc)
monthsData = []
monthsData.append(data)
lastDateTag = curDateTag
logFunc("read %s rows"%count)
# strategy_data is map of index and data, just get data out in sequence
strategy_data = strategy_data.items()
strategy_data.sort()
strategy_data = [item[1] for item in strategy_data]
return strategy_data
def analyze_month_data(monthsData, model, strategy_data, expiryDateTag, logFunc):
startDateTag = monthsData[0]['TIMESTAMP']
endDateTag = monthsData[-1]['TIMESTAMP']
logFunc("month:%s expiry: %s "%(startDateTag, endDateTag))
if endDateTag != expiryDateTag:
raise AnalyzeException("Last day of month %s is not same as expiry %s"%(endDateTag, expiryDateTag))
# calculate week dates by dividing month equally in 4 parts
startDate = datetime.datetime.strptime(startDateTag, "%Y-%m-%d")
endDate = datetime.datetime.strptime(endDateTag, "%Y-%m-%d")
delta = endDate - startDate
weekDays = (delta.days+1)/4
weekDelta = datetime.timedelta(days=weekDays)
    # weekDates splits the month into four parts where each part is weekStart <= day < weekEnd
weekDates = [startDateTag]
weekDate = startDate
for i in range(3):
weekDate = weekDate + weekDelta
weekDates.append(weekDate.strftime("%Y-%m-%d"))
# increment last day so that day >= start and day < end is valid
endWeekDate = endDate + datetime.timedelta(days=1)
weekDates.append(endWeekDate.strftime("%Y-%m-%d"))
logFunc("weekDates: %s"%weekDates, logging.DEBUG)
# make a map of month date and future, put, call data
futureData = {}
callData = {}
putData = {}
for data in monthsData:
# only get current month data
if data['EXPIRY_DT'] != expiryDateTag:
continue
timestamp = data['TIMESTAMP']
if data['OPTION_TYP'] == 'PE':
if timestamp not in putData:
putData[timestamp] = []
putData[timestamp].append(data)
elif data['OPTION_TYP'] == 'CE':
if timestamp not in callData:
callData[timestamp] = []
callData[timestamp].append(data)
else:
futureData[timestamp] = data
monthsDates = list(set(futureData.keys()+callData.keys()+putData.keys()))
monthsDates.sort()
for i, strategy in enumerate(model.get_strategy_list()):
profit_data = strategy.analyze_month_data(monthsDates, expiryDateTag, weekDates, futureData, callData, putData, logFunc)
if profit_data:
strategy_data[i].extend(profit_data)
class AnalyzeThread(threading.Thread):
def __init__(self, model, logFunc, onAnalysisDone):
threading.Thread.__init__(self)
self.setDaemon(True)
self.model = model
self.logFunc = logFunc
self.onAnalysisDone = onAnalysisDone
def run(self):
try:
strategy_data = analyze(self.model, self.logFunc)
wx.CallAfter(self.onAnalysisDone, self.model, strategy_data)
except AnalyzeException,e:
self.logFunc(unicode(e), logging.ERROR)
wx.CallAfter(self.onAnalysisDone, self.model, None, unicode(e))
except Exception,e:
self.logFunc(unicode(e), logging.ERROR)
self.logFunc(traceback.format_exc(), logging.ERROR)
wx.CallAfter(self.onAnalysisDone, self.model, None, unicode(e))
| gpl-2.0 | -3,681,601,941,826,342,000 | 39.629139 | 137 | 0.556484 | false |
RedhawkSDR/framework-codegen | redhawk/codegen/jinja/java/ports/frontend.py | 1 | 1943 | #
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK core.
#
# REDHAWK core is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK core is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
from redhawk.codegen.lang.idl import IDLInterface
from redhawk.codegen.jinja.ports import PortGenerator
from redhawk.codegen.jinja.ports import PortFactory
from generator import JavaPortGenerator
from redhawk.codegen.lang import java
import jinja2
class FrontendPortFactory(PortFactory):
NAMESPACE = 'FRONTEND'
def match(self, port):
return IDLInterface(port.repid()).namespace() == self.NAMESPACE
def generator(self, port):
interface = IDLInterface(port.repid()).interface()
return FrontendPortGenerator(port)
class FrontendPortGenerator(JavaPortGenerator):
def className(self):
return "frontend." + self.templateClass()
def templateClass(self):
if self.direction == 'uses':
porttype = 'Out'
else:
porttype = 'In'
porttype += self.interface + 'Port'
return porttype
def _ctorArgs(self, name):
return [java.stringLiteral(name)]
def constructor(self, name):
return '%s(%s)' % (self.className(), ', '.join(self._ctorArgs(name)))
def loader(self):
return jinja2.PackageLoader(__package__)
| lgpl-3.0 | 2,951,852,014,566,247,400 | 33.696429 | 79 | 0.712301 | false |
jasongwartz/BasecampHelper | basecamphelper/basecamphelper.py | 1 | 3624 | # Create Basecamp users
import requests, json, csv, os, sys
def get_all_groups(query):
params = {}
try:
response = requests.get(url + "/api/v1/projects.json", \
auth=(creds[0], creds[1]), headers=headers)
except:
print "REQUESTS ERROR!"
try:
print response.json()
except:
print "Unable to print error report."
all_group_ids = []
for i in response.json():
if query.lower() in i['name'].lower():
print "Retrieved group: " + i['name']
all_group_ids.append(i['id'])
if len(all_group_ids) == 0:
        print 'No groups matched.\n'
        return get_all_groups(raw_input(">>> "))  # ask again and return that result
return all_group_ids
def add_to_project(projectid, emaillist):
params = {}
for email in emaillist:
accesses = requests.get(url + "/api/v1/projects/%s/accesses.json" \
% projectid, auth = (creds[0], creds[1]), headers = headers)
list_of_accesses = [i['email_address'] for i in accesses.json()]
if email not in list_of_accesses:
params["email_addresses"] = email
response = requests.post(url + "/api/v1/projects/%s/accesses.json" \
% projectid, auth=(creds[0], creds[1]), data=json.dumps(params), headers=headers)
print response
if "204" in str(response):
print "Added " + str(params['email_addresses']) + " to project.\n"
else:
print "An error occured." + str(response)
with open("error_log.txt", "a") as fp:
fp.write("\n\nError processing: %s" % params["email_addresses"])
fp.write("\Group: %s" % projectid)
try:
print response.json()
except:
pass
def archive_project(projectid):
params = {'archived':True}
response = requests.put(url + "/api/v1/projects/%s.json" % projectid, \
auth=(creds[0], creds[1]), data=json.dumps(params), headers=headers)
if "200" in str(response):
print "Archived project: %s" % projectid
else:
print "An error occured." + str(response)
with open("error_log.txt", "a") as fp:
fp.write("\n\nError processing: %s" % projectid)
try:
print response.json()
except:
pass
def post_message(projectid, title, body):
message = {}
message['subject'] = title
message['content'] = body
response = requests.post(url + "/api/v1/projects/%s/messages.json" % projectid, \
auth=(creds[0], creds[1]), data=json.dumps(message, encoding='utf8'), headers=headers)
if "201" in str(response):
print "Posted message to: %s" % projectid
else:
print "An error occured." + str(response)
with open("error_log.txt", "a") as fp:
fp.write("\n\nError processing: %s" % projectid)
try:
print response.json()
except:
pass
def main():
print "Retrieving all projects..."
print "Please enter your search query:"
projectids = get_all_groups(raw_input(">>> "))
print "\n\n"
for index, app in enumerate(applist):
print "%s. %s" % (index, app)
appchoice = int(raw_input("Which function to launch: "))
if appchoice == 0:
list_to_add = raw_input("Comma-seperated list of email addresses: ").split(", ")
for id in projectids:
add_to_project(id, list_to_add)
elif appchoice == 1:
for id in projectids:
archive_project(id)
elif appchoice == 2:
title = raw_input("Title: ")
body = raw_input("Body: ")
for id in projectids:
post_message(id, title, body)
if __name__ == "__main__":
with open('config.json', 'r') as fp:
config = json.loads(fp.read())
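
    # config.json must provide the keys read below; a hypothetical layout
    # (all values are placeholders) is:
    # {
    #     "account_id": "9999999",
    #     "user-agent": "BasecampHelper ([email protected])",
    #     "username": "someone",
    #     "password": "secret"
    # }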
applist = [
'Add users to projects',
'Archive projects',
'Post message to projects',
]
url = "https://basecamp.com/%s" % config['account_id']
headers = {}
headers['content-type'] = 'application/json'
headers['user-agent'] = config['user-agent']
creds = [config['username'], config['password']]
main()
| mit | 8,323,114,452,343,719,000 | 23.821918 | 88 | 0.642108 | false |
xiaopeng163/perfomance | mongodb_vs_postgresql/insert_postgresql.py | 1 | 2133 | # Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import psycopg2
import time
pg_con = psycopg2.connect(host='127.0.0.1', database='test', user='test', password='test', port=5432)
pg_cur = pg_con.cursor()
def postgresql_create_table():
pg_cur.execute("DROP TABLE IF EXISTS test;")
pg_cur.execute("CREATE TABLE test (PREFIX CIDR PRIMARY KEY, data json);")
pg_con.commit()
def postgresql_drop_table():
pg_cur.execute("DROP TABLE IF EXISTS test;")
pg_con.commit()
def postgresql_stats():
print "** postgresql stats"
pg_cur.execute("select pg_relation_size('test'),pg_total_relation_size('test')")
pg_relation_size, pg_total_relation_size = pg_cur.fetchone()
pg_cur.execute("select count(*) from test")
pg_row_count = pg_cur.fetchone()
print "count %d" % pg_row_count
print "table storage size %d" % pg_relation_size
print "index size %d" % (pg_total_relation_size - pg_relation_size)
def postgresql_load_prefix():
t = time.time()
for prefix in open('rib.json').readlines():
prefix = json.loads(prefix)
pg_cur.execute('INSERT INTO test (PREFIX,data) VALUES (%s,%s);', (prefix['PREFIX'], json.dumps(prefix)))
pg_con.commit()
print "postgresql insert time %s" % (time.time() - t)
if __name__ == "__main__":
postgresql_create_table()
postgresql_load_prefix()
postgresql_stats()
postgresql_drop_table()
# postgresql insert time 61.1340229511
# ** postgresql stats
# count 500000
# table storage size 172482560
# index size 14794752
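# Expected layout of rib.json (a sketch): one JSON object per line with a "PREFIX"
# key holding a CIDR string; the other keys are illustrative only and simply end
# up in the json data column.
#
# {"PREFIX": "10.0.0.0/8", "ORIGIN": "IGP", "AS_PATH": "64512 64513"}
# {"PREFIX": "192.168.0.0/16", "ORIGIN": "IGP", "AS_PATH": "64512"}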
| apache-2.0 | -629,156,586,669,198,100 | 31.815385 | 112 | 0.683544 | false |
idaholab/civet | client/tests/test_INLClient_live.py | 1 | 10889 |
# Copyright 2016 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals, absolute_import
import os
from django.test import override_settings
from mock import patch
from client.JobGetter import JobGetter
from client import settings, BaseClient
import subprocess
from client.tests import LiveClientTester, utils
import tempfile
import threading
import time
from ci import views
from ci.tests import utils as test_utils
@override_settings(INSTALLED_GITSERVERS=[test_utils.github_config()])
class Tests(LiveClientTester.LiveClientTester):
def create_client(self, build_root):
os.environ["BUILD_ROOT"] = build_root
c = utils.create_inl_client()
c.client_info["update_step_time"] = 1
c.client_info["ssl_cert"] = False # not needed but will get another line of coverage
c.client_info["server"] = self.live_server_url
c.client_info["servers"] = [self.live_server_url]
return c
def create_job(self, client, recipes_dir, name, sleep=1, n_steps=3, extra_script=''):
job = utils.create_client_job(recipes_dir, name=name, sleep=sleep, n_steps=n_steps, extra_script=extra_script)
settings.SERVERS = [(self.live_server_url, job.event.build_user.build_key, False)]
settings.CONFIG_MODULES[job.config.name] = ["null"]
client.client_info["build_configs"] = [job.config.name]
client.client_info["build_key"] = job.recipe.build_user.build_key
return job
def create_client_and_job(self, recipes_dir, name, sleep=1):
c = self.create_client("/foo/bar")
c.client_info["single_shot"] = True
job = self.create_job(c, recipes_dir, name, sleep=sleep)
return c, job
def test_run_success(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "RunSuccess", sleep=2)
self.set_counts()
c.run(exit_if=lambda client: True)
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job)
def test_run_graceful(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "Graceful", sleep=2)
self.set_counts()
c.client_info["poll"] = 1
# graceful signal, should complete
script = "sleep 3 && kill -USR2 %s" % os.getpid()
proc = subprocess.Popen(script, shell=True, executable="/bin/bash", stdout=subprocess.PIPE)
c.run()
proc.wait()
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=1, active_branches=1)
utils.check_complete_job(self, job)
self.assertEqual(c.graceful_signal.triggered, True)
self.assertEqual(c.cancel_signal.triggered, False)
def test_run_cancel(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "Cancel", sleep=4)
self.set_counts()
c.client_info["poll"] = 1
# cancel signal, should stop
script = "sleep 3 && kill -USR1 %s" % os.getpid()
proc = subprocess.Popen(script, shell=True, executable="/bin/bash", stdout=subprocess.PIPE)
c.run()
proc.wait()
self.compare_counts(num_clients=1, canceled=1, num_events_completed=1, num_jobs_completed=1, active_branches=1, events_canceled=1)
self.assertEqual(c.cancel_signal.triggered, True)
self.assertEqual(c.graceful_signal.triggered, False)
utils.check_canceled_job(self, job)
def test_run_job_cancel(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobCancel", sleep=60)
self.set_counts()
# cancel response, should cancel the job
thread = threading.Thread(target=c.run, args=(lambda client: True,))
thread.start()
time.sleep(10)
job.refresh_from_db()
views.set_job_canceled(job)
thread.join()
self.compare_counts(num_clients=1, canceled=1, num_events_completed=1, num_jobs_completed=1, active_branches=1, events_canceled=1)
self.assertEqual(c.cancel_signal.triggered, False)
self.assertEqual(c.graceful_signal.triggered, False)
utils.check_canceled_job(self, job)
def test_run_job_invalidated_basic(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobInvalidated", sleep=40)
# stop response, should stop the job
self.set_counts()
thread = threading.Thread(target=c.run, args=(lambda client: True,))
thread.start()
start_time = time.time()
time.sleep(4)
job.refresh_from_db()
job.set_invalidated("Test invalidation", check_ready=True)
thread.join()
end_time = time.time()
self.assertGreater(15, end_time-start_time)
self.compare_counts(num_clients=1, invalidated=1, num_changelog=1)
utils.check_stopped_job(self, job)
def test_run_job_invalidated_nested_bash(self):
with test_utils.RecipeDir() as recipe_dir:
c, job = self.create_client_and_job(recipe_dir, "JobInvalidated", sleep=40)
job.delete()
job = utils.create_job_with_nested_bash(recipe_dir, name="JobWithNestedBash", sleep=40)
# stop response, should stop the job
self.set_counts()
thread = threading.Thread(target=c.run, args=(lambda client: True,))
start_time = time.time()
thread.start()
time.sleep(4)
job.refresh_from_db()
job.set_invalidated("Test invalidation", check_ready=True)
thread.join()
end_time = time.time()
self.assertGreater(15, end_time-start_time)
self.compare_counts(num_clients=1, invalidated=1, num_changelog=1)
utils.check_stopped_job(self, job)
@patch.object(JobGetter, 'find_job')
def test_exception(self, mock_getter):
with test_utils.RecipeDir() as recipe_dir:
# check exception handler
mock_getter.side_effect = Exception("oh no!")
c, job = self.create_client_and_job(recipe_dir, "JobStop", sleep=4)
self.set_counts()
c.run(exit_if=lambda client: True)
self.compare_counts()
def test_check_server_no_job(self):
with test_utils.RecipeDir() as recipe_dir:
# check no jobs
c, job = self.create_client_and_job(recipe_dir, "JobStop", sleep=4)
job.complete = True
job.save()
self.set_counts()
c.check_server(settings.SERVERS[0])
self.compare_counts(num_clients=1)
@patch.object(JobGetter, 'find_job')
def test_runner_error(self, mock_getter):
with test_utils.RecipeDir() as recipe_dir:
mock_getter.return_value = None
c, job = self.create_client_and_job(recipe_dir, "JobError")
self.set_counts()
c.runner_error = True
c.run()
self.compare_counts()
def test_exit_if_exception(self):
c = self.create_client("/foo/bar")
with self.assertRaises(BaseClient.ClientException):
c.run(exit_if="foo")
with self.assertRaises(BaseClient.ClientException):
c.run(exit_if=lambda: "foo")
with self.assertRaises(BaseClient.ClientException):
c.run(exit_if=lambda client: "foo")
def test_manage_build_root(self):
with test_utils.RecipeDir() as recipe_dir:
temp_dir = tempfile.TemporaryDirectory()
build_root = temp_dir.name + "/build_root"
self.assertEqual(os.path.isdir(build_root), False)
os.mkdir(build_root)
self.assertEqual(os.path.isdir(build_root), True)
manage_build_root_before = settings.MANAGE_BUILD_ROOT
settings.MANAGE_BUILD_ROOT = True
c = self.create_client(build_root)
settings.MANAGE_BUILD_ROOT = manage_build_root_before
self.assertEqual(c.get_build_root(), build_root)
self.assertEqual(c.get_client_info('manage_build_root'), True)
self.assertEqual(c.build_root_exists(), False)
extra_script = 'if [ -d "$BUILD_ROOT" ]; then\n'
extra_script += ' if [ ! -n "$(ls -A "$BUILD_ROOT")" ]; then\n'
extra_script += ' echo BUILD_ROOT_EXISTS_EMPTY\n'
extra_script += ' echo foo > $BUILD_ROOT/build_root_test || exit 1\n'
extra_script += ' fi\n'
extra_script += 'fi\n'
jobs = []
jobs.append(self.create_job(c, recipe_dir, "ManageBuildRoot1", n_steps=1, sleep=2, extra_script=extra_script))
jobs.append(self.create_job(c, recipe_dir, "ManageBuildRoot2", n_steps=1, sleep=2, extra_script=extra_script))
jobs.append(self.create_job(c, recipe_dir, "ManageBuildRoot3", n_steps=1, sleep=2, extra_script=extra_script))
self.set_counts()
c.client_info["poll"] = 1
def exit_create_build_root(client):
self.assertEqual(client.build_root_exists(), False)
client.create_build_root()
self.assertEqual(client.build_root_exists(), True)
return client.get_client_info('jobs_ran') == 3
c.run(exit_if=exit_create_build_root)
self.assertEqual(c.build_root_exists(), False)
self.compare_counts(num_clients=1, num_events_completed=1, num_jobs_completed=3, active_branches=1)
for job in jobs:
utils.check_complete_job(self, job, n_steps=1, extra_step_msg='BUILD_ROOT_EXISTS_EMPTY\n')
temp_dir.cleanup()
def test_manage_build_root_failure(self):
manage_build_root_before = settings.MANAGE_BUILD_ROOT
settings.MANAGE_BUILD_ROOT = True
with self.assertRaises(FileNotFoundError):
self.create_client("/foo/bar")
settings.MANAGE_BUILD_ROOT = manage_build_root_before
| apache-2.0 | 7,241,736,770,665,917,000 | 44.560669 | 142 | 0.618147 | false |
dsimandl/teamsurmandl | teamsurmandl/forms.py | 1 | 2690 | from django import forms
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import AuthenticationForm
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Field, HTML, Div
from crispy_forms.bootstrap import FormActions
class SurmandlAuthForm(AuthenticationForm):
"""Form for our site login page. We are using crispy-forms here."""
username = forms.EmailField()
    password = forms.CharField(widget=forms.PasswordInput)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_class = 'form-signin'
#This removes all labels from the HTML
self.helper.form_show_labels = False
self.helper.form_show_errors = False
self.helper.form_method = 'post'
self.helper._form_action = ''
self.helper.layout = Layout(
HTML('<h2 class="form_signin-heading">Please sign in</h2>'),
HTML('{% if form.non_field_errors %}<div class="login-alert alert-danger">{{ form.non_field_errors.as_text }}</div>{% endif %}' ),
Field('username', css_class='form-control', placeholder="Email address", name="username", autofocus='True'),
HTML('{% if form.username.errors %} <div class="login-alert alert-danger">{{ form.username.errors.as_text }}</div>{% endif %}'),
Field('password', css_class='form-control', placeholder="Password", name="password"),
HTML('{% if form.password.errors %} <div class="login-alert alert-danger">{{ form.password.errors.as_text }}</div>{% endif %}'),
HTML('<label class="checkbox"> <input type="checkbox" value="remember-me"> Remember me</label>'),
FormActions(
Submit('submit', "Sign in", css_class="btn btn-large btn-primary btn-block")
)
)
super(SurmandlAuthForm, self).__init__(*args, **kwargs)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if not username and not password:
raise forms.ValidationError("Email address and password are required")
elif username and not password:
raise forms.ValidationError("Password is required")
elif not username and password:
raise forms.ValidationError("Email is required")
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError("Your email and password do not match")
elif not self.user_cache.is_active:
raise forms.ValidationError("The account is inactive")
return self.cleaned_data
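# Usage sketch (not part of the original module): one way to wire this form into
# a login view. The template name and redirect target are assumptions.
def example_login_view(request):
    from django.shortcuts import redirect, render
    form = SurmandlAuthForm(data=request.POST or None)
    if request.method == 'POST' and form.is_valid():
        login(request, form.user_cache)  # 'login' is imported at the top of this module
        return redirect('/')
    return render(request, 'login.html', {'form': form})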
| mit | -713,614,621,017,927,400 | 47.035714 | 142 | 0.649442 | false |
sebbcn/django-secure-storage | storage.py | 1 | 3497 |
from uuid import uuid4
from django.utils.timezone import now
from datetime import timedelta
from os.path import join
from django.core.files.storage import FileSystemStorage
from django.core.files.uploadedfile import UploadedFile
from .encryption import get_cipher_and_iv, padding
import app_settings as settings
from .models import EncryptedUploadedFileMetaData
class ExpiredFile(Exception):
pass
class InexistentFile(Exception):
pass
class EncryptedUploadedFile(UploadedFile):
''' Extends the django builtin UploadedFile.
The written file is encrypted using AES-256 cipher. '''
def __init__(self, *args, **kwargs):
self.passphrase = kwargs.pop('passphrase')
self.name = kwargs.get('name')
if self.name:
self._open_existing_file(*args, **kwargs)
else:
self._open_new_file(*args, **kwargs)
def _open_existing_file(self, *args, **kwargs):
self.file = self.open_file(mode='rb')
super(EncryptedUploadedFile, self).__init__(self.file, **kwargs)
EncryptedUploadedFileMetaData.load(self)
self.cipher = get_cipher_and_iv(self.passphrase, self.iv)[0]
def _open_new_file(self, *args, **kwargs):
self.cipher, self.iv = get_cipher_and_iv(self.passphrase)
self.name = EncryptedFileSystemStorage().get_available_name()
self.file = self.open_file(mode='wb')
# By default, we set an arbitrary 10 years expiration date.
expire = int(kwargs.pop('expire_date', 10 * settings.ONE_YEAR))
self.expire_date = now() + timedelta(seconds=expire)
self.clear_filename = kwargs.pop('clear_filename')
self.one_time = kwargs.pop('one_time', False)
kwargs['size'] = int(kwargs.pop('content_length', 0))
super(EncryptedUploadedFile, self).__init__(
self.file, self.name, **kwargs)
EncryptedUploadedFileMetaData.save_(self)
@property
def path(self):
return join(settings.UPLOAD_DIR, self.name)
def open_file(self, mode='rb'):
try:
return open(self.path, mode)
except IOError:
if mode == 'rb':
raise InexistentFile
raise
def encrypt_and_write(self, raw_data):
if raw_data:
block = self.cipher.encrypt(padding(raw_data))
self.write(block)
def chunks(self, chunk_size=None):
''' decrypting iterator '''
if not chunk_size:
chunk_size = self.DEFAULT_CHUNK_SIZE
read = 0
while True:
block = self.read(chunk_size)
if len(block) == 0:
# EOF
break
block = self.cipher.decrypt(block)
read += len(block)
if read > self.size:
# We remove the padding at the end of the file
padding = self.size - read
block = block[:padding]
yield block
class EncryptedFileSystemStorage(FileSystemStorage):
''' handles encrypted files on disk with random names '''
def __init__(self, location=settings.UPLOAD_DIR):
super(EncryptedFileSystemStorage, self).__init__(location)
def open(self, *args, **kwargs):
return EncryptedUploadedFile(*args, **kwargs)
def get_available_name(self):
''' return a random id for the upload file '''
file_id = str(uuid4()).replace("-", "")
return join(self.location, file_id)
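# Usage sketch (not part of the original module): encrypting an upload and reading
# it back. The keyword names mirror the ones consumed in __init__ above; the
# calling code and passphrase handling are assumptions.
def example_roundtrip(passphrase, django_upload):
    storage = EncryptedFileSystemStorage()
    enc = storage.open(passphrase=passphrase,
                       clear_filename=django_upload.name,
                       content_length=django_upload.size)
    for chunk in django_upload.chunks():
        enc.encrypt_and_write(chunk)
    enc.file.close()
    # Re-open later by the generated random name and stream decrypted data.
    dec = storage.open(name=enc.name, passphrase=passphrase)
    return b''.join(dec.chunks())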
| gpl-2.0 | 5,682,643,343,101,651,000 | 31.082569 | 72 | 0.611667 | false |
ryanho/ISParser | main.py | 1 | 6603 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import webapp2
import hinet
import seednet
import StringIO
import PyRSS2Gen
import urllib
import datetime
import hashlib
#from google.appengine.ext import ndb
from google.appengine.api import memcache
HTTP_DATE_FMT = '%a, %d %b %Y %H:%M:%S %Z'
def check_date_fmt(date):
date = date.strip().split(' ')
if len(date) == 5:
HTTP_DATE_FMT = '%a, %d %b %Y %H:%M:%S'
elif len(date) == 6:
HTTP_DATE_FMT = '%a, %d %b %Y %H:%M:%S %Z'
return HTTP_DATE_FMT
#not use yet
class MainPage(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Cache-Control'] = 'max-age=3600, must-revalidate'
self.response.write('')
#generate hinet rss
class Hinet(webapp2.RequestHandler):
def output_content(self, content, serve=True):
if serve:
self.response.out.write(content)
else:
self.response.set_status(304)
def set_headers(self):
self.response.headers['Content-Type'] = 'application/xhtml+xml'
self.response.headers['Cache-Control'] = 'public, max-age=3600, must-revalidate'
def get_cache_data(self, rss):
output = memcache.get(rss)
mtime = memcache.get('h_mtime')
etag = memcache.get('h_etag')
if mtime is None:
mtime = datetime.datetime.utcnow().strftime(HTTP_DATE_FMT) + 'GMT'
self.response.headers['Last-Modified'] = mtime
return output, mtime, etag
def get(self):
serve = True
output, mtime, etag = self.get_cache_data('hinet_rss')
if 'If-Modified-Since' in self.request.headers:
IFMOD_DATE_FMT = check_date_fmt(self.request.headers['If-Modified-Since'])
last_seen = datetime.datetime.strptime(self.request.headers['If-Modified-Since'], IFMOD_DATE_FMT)
last_modified = datetime.datetime.strptime(mtime, HTTP_DATE_FMT)
if last_seen >= last_modified:
serve = False
if 'If-None-Match' in self.request.headers:
etags = [x.strip('" ') for x in self.request.headers['If-None-Match'].split(',')]
if etag in etags:
serve = False
if output is not None:
self.set_headers()
self.response.headers['ETag'] = '"%s"' % etag
self.output_content(output, serve)
return
items = []
parser = hinet.MyHTMLParser()
parser.feed(urllib.urlopen('http://search.hinet.net/getNotify?callback=jsonpCallback&type=0&sort=0&mobile=1').read())
for i in parser.struc_data:
items.append(PyRSS2Gen.RSSItem(title=i[1] + ' ' +i[3], link=i[2], pubDate=i[0]))
rss = PyRSS2Gen.RSS2(
title=u"Hinet系統公告",
link="http://www.hinet.net/pu/notify.htm",
description=u"此RSS內容取自Hinet網頁,依照著作權法之合理使用原則節錄部份內容。\
本RSS僅供參考,Hinet或任何人都不對內容負責",
lastBuildDate=mtime,
items=items)
output = StringIO.StringIO()
rss.write_xml(output,encoding='utf-8')
etag = hashlib.sha1(output.getvalue()).hexdigest()
memcache.set('hinet_rss', output.getvalue(), time=3600)
memcache.set('h_mtime', mtime, time=3600)
memcache.set('h_etag', etag, time=3600)
self.set_headers()
self.response.headers['ETag'] = '"%s"' % (etag,)
self.output_content(output.getvalue(), serve)
#generate seednet rss
class Seednet(webapp2.RequestHandler):
def output_content(self, content, serve=True):
if serve:
self.response.out.write(content)
else:
self.response.set_status(304)
def set_headers(self):
self.response.headers['Content-Type'] = 'application/xhtml+xml'
self.response.headers['Cache-Control'] = 'public, max-age=3600, must-revalidate'
def get_cache_data(self, rss):
output = memcache.get('seednet_rss')
mtime = memcache.get('s_mtime')
etag = memcache.get('s_etag')
if mtime is None:
mtime = datetime.datetime.utcnow().strftime(HTTP_DATE_FMT) + 'GMT'
self.response.headers['Last-Modified'] = mtime
return output, mtime, etag
def get(self):
serve = True
output, mtime, etag = self.get_cache_data('seednet_rss')
if 'If-Modified-Since' in self.request.headers:
IFMOD_DATE_FMT = check_date_fmt(self.request.headers['If-Modified-Since'])
last_seen = datetime.datetime.strptime(self.request.headers['If-Modified-Since'], IFMOD_DATE_FMT)
last_modified = datetime.datetime.strptime(mtime, HTTP_DATE_FMT)
if last_seen >= last_modified:
serve = False
if 'If-None-Match' in self.request.headers:
etags = [x.strip('" ') for x in self.request.headers['If-None-Match'].split(',')]
if etag in etags:
serve = False
if output is not None:
self.set_headers()
self.response.headers['ETag'] = '"%s"' % etag
self.output_content(output, serve)
return
items = []
parser = seednet.MyHTMLParser()
parser.feed(urllib.urlopen(
'https://service.seed.net.tw/register-cgi/service_notice?FUNC=notice_qry_more&Category=02&Start=1').read())
for i in parser.struc_data:
items.append(PyRSS2Gen.RSSItem(title=i[3], link=i[2], pubDate=i[0]))
rss = PyRSS2Gen.RSS2(
title=u"Seednet系統公告",
link="https://service.seed.net.tw/register-cgi/service_notice?FUNC=notice_qry_more&Category=02&Start=1",
description=u"此RSS內容取自Seednet網頁,依照著作權法之合理使用原則節錄部份內容。\
本RSS僅供參考,Seednet或任何人都不對內容負責",
lastBuildDate=mtime,
items=items)
output = StringIO.StringIO()
rss.write_xml(output,encoding='utf-8')
etag = hashlib.sha1(output.getvalue()).hexdigest()
memcache.set('seednet_rss', output.getvalue(), time=3600)
memcache.set('s_mtime', mtime, time=3600)
memcache.set('s_etag', etag, time=3600)
self.set_headers()
self.response.headers['ETag'] = '"%s"' % (etag,)
self.output_content(output.getvalue(), serve)
application = webapp2.WSGIApplication([
('/', MainPage),
('/hinet', Hinet),
('/seednet', Seednet),
], debug=False)
| bsd-3-clause | -7,144,110,593,311,770,000 | 35.611429 | 125 | 0.60309 | false |
maximilianh/maxtools | lib/gumbyparser.py | 1 | 4210 | #!/usr/bin/python
import sys
from sys import *
from re import *
from optparse import OptionParser
import Fasta
import alignment
class gumbyBlock:
# easier to handle as a class
# baseseq is always first in seqs list
def __init__(self,number, score, pval, seqs):
self.number=number
self.score = score
self.pval=pval
self.seqs=seqs
def __repr__(self):
lines = []
lines.append( " * gumbyResult %d" % self.number)
lines.append("score %d, pval %e " % (self.score, self.pval))
for s in self.seqs:
lines.append( str(s))
return "\n".join(lines)
# -------- FUNCTIONS ------------------
def procData(baseSeq, exons, no, seqDict, pos, pval, length, score):
if len(seqDict)==0:
return []
if baseSeq not in seqDict:
stderr.write("error: your baseseq name is not in gumby result. remember that gumby keeps only first word of seq name\n")
sys.exit(1)
print seqDict[baseSeq]
if overlapped(pos[baseSeq], exons, baseSeq):
stderr.write("warning: dropping complete block with sequence %s:%s because baseSeq has overlapping exon annotation.\n" % (baseSeq, pos[baseSeq]))
return []
if seqDict[baseSeq].nucl.count("-")==len(seqDict[baseSeq].nucl):
stderr.write("warning: dropping complete block with sequence %s:%s because baseSeq contains only '-'-characters\n" % (baseSeq, pos[baseSeq]))
return []
if seqDict[baseSeq].nucl.count("N")==len(seqDict[baseSeq].nucl):
stderr.write("warning: dropping complete block with sequence %s:%s because baseSeq contains only N-characters\n" % (baseSeq, pos[baseSeq]))
return []
seqs = []
seqs.append(seqDict[baseSeq])
for n,s in seqDict.iteritems():
if n==baseSeq:
continue
seqs.append(s)
gb = gumbyBlock(no, score, pval, seqs)
return [gb]
def overlapped(pos, exons, baseSeq):
f1name, f1start, f1end = pos
if f1name != baseSeq:
return False
for e in exons:
f2start, f2end = e
# print "checking %d -- %d, %d" % (start, f2start, f2end)
result = (( f2start <= f1start and f2end > f1start) or \
(f2start < f1end and f2end >= f1end) or (f2start >= f1start and f2end <= f1end))
if result == True:
return True
return False
# ----- MAIN -------------------
def parseGumby(gumbyFile, exonFile, baseSeq):
# parses gumbyFile, removes things that overlap exons and gumbies that consist only of gaps on baseSeq
# returns a list of gumbyBlocks
infile = open(gumbyFile, "r")
exons = []
if exonFile!=None:
fh = open(exonFile, "r")
for l in fh:
fs = l.split()
if fs[0].lower()!=baseSeq:
continue
exons.append([ int(fs[3]), int(fs[4]) ] )
# print exons
re1 = compile("[a-z]+[ ]+[0-9]+[ ]+[0-9]+")
seqs = {}
pos = {}
i = -1
resultLst = alignment.Alignment()
for l in infile:
l = l.strip()
l = l.replace("*","-")
l = l.replace("<", "-")
l = l.replace(">", "-")
if l.startswith("start"):
if i!=-1:
resultLst.extend(procData(baseSeq, exons, i, seqs, pos, pval, length, score))
f = l.split()
pval = float(f[-1])
length = int(f[6].strip(","))
score = int(f[8].strip(","))
i+=1
seqs={}
if re1.match(l):
f = l.split()
name = f[0]
start = int(f[1])-1
end = int(f[2])-1
seq = f[3]
if name not in seqs:
faseq = Fasta.FastaSeq(name, seq)
faseq.chrom = name
faseq.start = start
faseq.end = end
seqs[name] = faseq
else:
faseq = seqs[f[0]]
faseq.nucl += f[3]
pos[name] = (name, start,end)
resultLst.extend(procData(baseSeq, exons, i, seqs, pos, pval, length, score))
return resultLst
# ---- DEBUGGING -----
#blocks = parseGumby("test/gumbyparser.gumby", "test/gumbyparser.bed", "oryzias")
#for b in blocks:
#print b
| gpl-2.0 | -6,803,909,765,453,174,000 | 31.137405 | 153 | 0.546318 | false |
google-research/open-covid-19-data | tests/test_exported_data.py | 1 | 1437 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
import sys
import streamlit as st
PIPELINE_DIR = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')), 'src/pipeline')
sys.path.append(PIPELINE_DIR)
import path_utils
AGGREGATED_EXPORT_FILES = ['cc_by/aggregated_cc_by.csv',
'cc_by_sa/aggregated_cc_by_sa.csv',
'cc_by_nc/aggregated_cc_by_nc.csv']
def test_location_and_date_unique():
for f in AGGREGATED_EXPORT_FILES:
export_path = os.path.join(path_utils.path_to('export_dir'), f)
exported_df = pd.read_csv(export_path)
duplicates = exported_df[exported_df[['open_covid_region_code', 'date']].duplicated(keep=False)]
duplicate_info = duplicates[['open_covid_region_code', 'date']]
print(duplicate_info)
assert duplicates.shape[0] == 0
test_location_and_date_unique()
| apache-2.0 | -4,331,713,547,621,887,500 | 35.846154 | 108 | 0.70007 | false |
splice/splice-server | playpen/candlepin/get_manifest_data.py | 1 | 3520 | #!/usr/bin/env python
import json
import os
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "splice.checkin_service.settings")
from optparse import OptionParser
from splice.common import candlepin_client
def get_consumers(host, port, username, password, https):
return candlepin_client.get_consumers(host, port, username, password, https)
def get_entitlements(host, port, username, password, https):
return candlepin_client.get_entitlements(host, port, username, password, https)
def get_owners(host, port, username, password, https):
return candlepin_client.get_owners(host, port, username, password, https)
def get_pools(host, port, username, password, https):
return candlepin_client.get_pools(host, port, username, password, https)
def get_products(host, port, username, password, https):
return candlepin_client.get_products(host, port, username, password, https)
def get_rules(host, port, username, password, https):
return candlepin_client.get_rules(host, port, username, password, https)
def get_subscriptions(host, port, username, password, https):
return candlepin_client.get_subscriptions(host, port, username, password, https)
def get_accounts():
pass
if __name__ == "__main__":
# Parse Args
default_port=8443
default_user="admin"
default_password="admin"
parser = OptionParser(description="Script to fetch data from a candlepin")
parser.add_option('--host', action='store', default=None,
help="Hostname of Candlepin server")
parser.add_option('--port', action='store', default=default_port,
help="Port of Candlepin server defaults to: %s" % (default_port))
parser.add_option('--http', action='store_true', default=False, help="Use HTTP instead of HTTPs, default is False")
parser.add_option('--user', action='store', default=default_user,
help="Username, default is %s" % default_user)
parser.add_option('--password', action='store', default=default_password,
help="Password, default is %s" % default_password)
(opts, args) = parser.parse_args()
host = opts.host
port = opts.port
https = not opts.http
username = opts.user
password = opts.password
if not host:
print "Please re-run with --host"
sys.exit(1)
retval = get_consumers(host, port, username, password, https)
print "\nget_consumers = "
print json.dumps(retval, sort_keys=True, indent=4, separators=(',', ': '))
retval = get_entitlements(host, port, username, password, https)
print "\nget_entitlements = "
print json.dumps(retval, sort_keys=True, indent=4, separators=(',', ': '))
retval = get_owners(host, port, username, password, https)
print "\nget_owners = "
print json.dumps(retval, sort_keys=True, indent=4, separators=(',', ': '))
pools = get_pools(host, port, username, password, https)
print "\nget_pools = "
for p in pools:
print "%s\n" % p
products = get_products(host, port, username, password, https)
print "\nget_products = "
for p in products:
print "%s\n" % p
#TODO - Need to implement support for decoding response
#retval = get_rules(host, port, username, password, https)
#print "\nget_rules = \n%s" % (retval)
retval = get_subscriptions(host, port, username, password, https)
print "\nget_subscriptions() = "
#print json.dumps(retval, sort_keys=True, indent=4, separators=(',', ': '))
| gpl-2.0 | -4,783,654,591,121,323,000 | 35.666667 | 119 | 0.669318 | false |
bryndin/tornado-flickr-api | tornado_flickrapi/tools.py | 1 | 2292 | import sys
import os
from tornado.gen import coroutine, Return
from method_call import call_api
#TODO(DB): update callers to expect future
@coroutine
def load_methods():
"""
Loads the list of all methods
"""
try:
r = yield call_api(method="flickr.reflection.getMethods")
except Exception as e:
raise e
raise Return(r["methods"]["method"])
__perms__ = {0: 'none', '1': 'read', '2': 'write', '3': 'delete'}
#TODO(DB): update callers to expect future
@coroutine
def methods_info():
methods = {}
    for m in (yield load_methods()):
try:
info = yield call_api(method="flickr.reflection.getMethodInfo", method_name=m)
except Exception as e:
raise e
info.pop("stat")
method = info.pop("method")
method["requiredperms"] = __perms__[method["requiredperms"]]
method["needslogin"] = bool(method.pop("needslogin"))
method["needssigning"] = bool(method.pop("needssigning"))
info.update(method)
info["arguments"] = info["arguments"]["argument"]
info["errors"] = info["errors"]["error"]
methods[m] = info
raise Return(methods)
def write_reflection(path, template, methods=None):
if methods is None:
methods = methods_info()
with open(template, "r") as t:
templ = t.read()
prefix = ""
new_templ = ""
tab = " "
templ = templ % str(methods)
for c in templ:
if c == '{':
new_templ += '{\n' + prefix
prefix += tab
elif c == '}':
new_templ += '\n' + prefix + '}\n' + prefix
prefix = prefix[:-len(tab)]
else:
new_templ += c
with open(path, "w") as f:
f.write(new_templ)
def write_doc(output_path, exclude=["flickr_keys", "methods"]):
import flickr_api
exclude.append("__init__")
modules = ['flickr_api']
dir = os.path.dirname(flickr_api.__file__)
modules += [
"flickr_api." + f[:-3]
for f in os.listdir(dir)
if f.endswith(".py") and f[:-3] not in exclude]
sys.path.insert(0, dir + "../")
if not os.path.exists(output_path):
os.makedirs(output_path)
os.chdir(output_path)
for m in modules:
os.system("pydoc -w " + m)
| bsd-3-clause | -206,738,358,129,790,140 | 26.285714 | 90 | 0.557155 | false |
avatartwo/avatar2 | avatar2/message.py | 1 | 2187 |
class AvatarMessage(object):
def __init__(self, origin):
self.origin = origin
def __str__(self):
if self.origin:
return "%s from %s" % (self.__class__.__name__, self.origin.name)
else:
return "%s from unkown origin" % self.__class__.__name__
class UpdateStateMessage(AvatarMessage):
def __init__(self, origin, new_state):
super(UpdateStateMessage, self).__init__(origin)
self.state = new_state
class BreakpointHitMessage(UpdateStateMessage):
def __init__(self, origin, breakpoint_number, address):
super(BreakpointHitMessage, self).__init__(origin, TargetStates.BREAKPOINT)
self.breakpoint_number = breakpoint_number
self.address = address
class SyscallCatchedMessage(BreakpointHitMessage):
def __init__(self, origin, breakpoint_number, address, type='entry'):
super(self.__class__, self).__init__(origin, breakpoint_number, address)
self.type = type
class RemoteMemoryReadMessage(AvatarMessage):
def __init__(self, origin, id, pc, address, size, dst=None):
super(self.__class__, self).__init__(origin)
self.id = id
self.pc = pc
self.address = address
self.size = size
self.dst = dst
self.num_words = 1
self.raw = False
class RemoteMemoryWriteMessage(AvatarMessage):
def __init__(self, origin, id, pc, address, value, size, dst=None):
super(self.__class__, self).__init__(origin)
self.id = id
self.pc = pc
self.address = address
self.value = value
self.size = size
self.dst = dst
class RemoteInterruptEnterMessage(AvatarMessage):
def __init__(self, origin, id, interrupt_num):
super(self.__class__, self).__init__(origin)
self.id = id
self.interrupt_num = interrupt_num
class RemoteInterruptExitMessage(AvatarMessage):
def __init__(self, origin, id, transition_type, interrupt_num):
super(self.__class__, self).__init__(origin)
self.id = id
self.transition_type = transition_type
self.interrupt_num = interrupt_num
from .targets.target import TargetStates
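# Usage sketch (not part of the original module): constructing the messages
# defined above. The origin target and the queue they are posted to are
# placeholders supplied by the caller.
def example_publish(origin_target, message_queue):
    message_queue.put(UpdateStateMessage(origin_target, TargetStates.BREAKPOINT))
    message_queue.put(BreakpointHitMessage(origin_target, breakpoint_number=1,
                                           address=0x08000130))
    message_queue.put(RemoteMemoryReadMessage(origin_target, id=1, pc=0x08000100,
                                              address=0x20000000, size=4))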
| apache-2.0 | 8,676,645,026,651,915,000 | 31.641791 | 83 | 0.622771 | false |
douglaswei/stock | featProcess/stockit/FeatManager.py | 1 | 3746 | __author__ = 'wgz'
import sys
from FeatTranslator import FeatTranslator
labelName = "label"
codeName = "code"
dateName = "date"
class FeatManager:
def set_train_predict_path(self, feat_train_path, feat_predict_path):
self.trainFile = open(feat_train_path, 'w')
self.predictFile = open(feat_predict_path, 'w')
def extract(self, raw_feat_path, feat_train_path, feat_predict_path):
self.set_train_predict_path(feat_train_path, feat_predict_path)
for line in open(raw_feat_path):
line = line[:-1]
fields = [item.split(":") for item in line.split("\t")]
field_map = dict(fields)
label, code, date = field_map.get(labelName), field_map.get(codeName), field_map.get(dateName)
out_file = self.predictFile if len(label) == 0 else self.trainFile
label_res = 0 if len(label) == 0 else 0 if float(label) <= 0 else 1
out_str = "%s:%f\t%s:%s\t%s:%s" % (labelName, label_res, codeName, code, dateName, date)
translator = FeatTranslator()
feature_map = translator.extract(field_map, False)
sorted_featids = sorted(feature_map.keys())
for featId in sorted_featids:
out_str += "\t%d:%s" % (featId, feature_map[featId])
out_file.write(out_str + '\n')
def extract_maxs_mins(self, raw_feat_path, feat_beg_idx):
feature_max_values = []
feature_min_values = []
for line in open(raw_feat_path):
fields = [float(item.split(':')[1]) for item in line[:-1].split('\t')[feat_beg_idx:]]
if len(feature_max_values) == 0:
feature_max_values = fields[:]
if len(feature_min_values) == 0:
feature_min_values = fields[:]
feature_max_values = map(max, zip(feature_max_values, fields))
feature_min_values = map(min, zip(feature_min_values, fields))
return feature_max_values, feature_min_values
def extract_discrete_feat(self, raw_feat_path, feat_train_path, feat_predict_path, feat_beg_idx, span_num):
self.set_train_predict_path(feat_train_path, feat_predict_path)
feature_max_values, feature_min_values = self.extract_maxs_mins(raw_feat_path, feat_beg_idx)
feature_diffs = map(lambda x: x[0] - x[1], zip(feature_max_values, feature_min_values))
feature_spans = map(lambda x: x / span_num or 0.1, feature_diffs)
translator = FeatTranslator()
for line in open(raw_feat_path):
kvs = [item.split(':') for item in line[:-1].split('\t')]
raw_feature_map = dict(kvs)
label, code, date = raw_feature_map.get(labelName), raw_feature_map.get(codeName), raw_feature_map.get(
dateName)
# label_res = 0 if (len(label) == 0 or float(label) <= 0) else float(float(label) / 30)
label_res = 0 if len(label) == 0 else float(label) / 50
label_res = pow(label_res, 0.5) if label_res > 0 else label_res
feature_map = dict(map(lambda (v, s, m): [v[0], (float(v[1]) - m) / s],
zip(kvs[feat_beg_idx:], feature_spans, feature_min_values)))
feature_res_map = translator.extract(feature_map, True)
out_str = "%s:%f\t%s:%s\t%s:%s" % (labelName, label_res, codeName, code, dateName, date)
for featId in sorted(feature_res_map.keys()):
out_str += "\t%d:%s" % (featId, feature_res_map[featId])
fout = self.predictFile if len(label) == 0 else self.trainFile
fout.write(out_str + '\n')
if __name__ == "__main__":
featManager = FeatManager("test")
featManager.extract("../feats")
sys.exit(0)
| gpl-2.0 | 7,675,545,704,607,347,000 | 43.595238 | 115 | 0.584357 | false |
kpn-digital/py-timeexecution | tests/test_threaded_backend.py | 1 | 7743 | import subprocess
import sys
import time
from datetime import datetime
from multiprocessing import Process
from threading import Thread
import mock
import pytest
from freezegun import freeze_time
from tests.conftest import go
from tests.test_base_backend import TestBaseBackend
from time_execution import settings
from time_execution.backends import elasticsearch
from time_execution.backends.threaded import ThreadedBackend
from time_execution.decorator import SHORT_HOSTNAME
from .test_elasticsearch import ELASTICSEARCH_HOST, ElasticTestMixin
class TestTimeExecution(TestBaseBackend):
def setUp(self):
self.qsize = 10
self.qtimeout = 0.1
self.mocked_backend = mock.Mock(spec=elasticsearch.ElasticsearchBackend)
self.MockedBackendClass = mock.Mock(return_value=self.mocked_backend)
self.backend = ThreadedBackend(
self.MockedBackendClass,
backend_args=("arg1", "arg2"),
backend_kwargs=dict(key1="kwarg1", key2="kwarg2"),
queue_maxsize=self.qsize,
queue_timeout=self.qtimeout,
)
self.backend.bulk_size = self.qsize / 2
self.backend.bulk_timeout = self.qtimeout * 2
settings.configure(backends=[self.backend])
def stop_worker(self):
self.backend.worker_limit = 0
time.sleep(self.qtimeout * 2)
self.assertEqual(self.backend.thread, None)
def resume_worker(self, worker_limit=None, **kwargs):
self.backend.worker_limit = worker_limit
for key, val in kwargs.items():
if hasattr(self.backend, key):
setattr(self.backend, key, val)
self.backend.start_worker()
def test_thread_name(self):
self.assertEqual(self.backend.thread.name, "TimeExecutionThread")
def test_backend_args(self):
self.MockedBackendClass.assert_called_with("arg1", "arg2", key1="kwarg1", key2="kwarg2")
ThreadedBackend(self.MockedBackendClass)
self.MockedBackendClass.assert_called_with()
def test_empty_queue(self):
time.sleep(2 * self.qtimeout) # ensures queue.get times out
self.assertEqual(0, self.backend.fetched_items)
def test_decorator(self):
now = datetime.now()
with freeze_time(now):
go()
# ensure worker thread catches up
time.sleep(2 * self.backend.bulk_timeout)
mocked_write = self.mocked_backend.bulk_write
self.assertEqual(1, self.backend.fetched_items)
mocked_write.assert_called_with(
[{"timestamp": now, "hostname": SHORT_HOSTNAME, "name": "tests.conftest.go", "value": 0.0}]
)
def test_double_start(self):
self.assertEqual(0, self.backend.fetched_items)
go()
time.sleep(2 * self.qtimeout)
self.assertEqual(1, self.backend.fetched_items)
# try to double start
self.backend.start_worker()
self.assertEqual(1, self.backend.fetched_items)
def test_write_error(self):
self.mocked_backend.write.side_effect = RuntimeError("mocked")
go()
time.sleep(2 * self.qtimeout)
def test_queue_congestion(self):
# assure worker is stopped
self.stop_worker()
# fill in the queue
for _ in range(self.qsize * 2):
go()
self.assertTrue(self.backend._queue.full())
self.resume_worker(bulk_timeout=self.qtimeout)
# wait until all metrics are picked up
time.sleep(self.qsize * self.qtimeout)
# check that metrics in the queue were sent with bulk_write calls
call_args_list = self.mocked_backend.bulk_write.call_args_list
time.sleep(2 * self.qtimeout)
self.assertEqual(self.qsize, sum(len(args[0]) for args, _ in call_args_list))
def test_worker_sends_remainder(self):
self.stop_worker()
self.mocked_backend.bulk_write.side_effect = RuntimeError("mock")
loops_count = 3
self.assertTrue(loops_count < self.backend.bulk_size)
for _ in range(loops_count):
go()
self.backend.worker_limit = loops_count
self.backend.worker()
self.assertEqual(loops_count, self.backend.fetched_items)
mocked_bulk_write = self.mocked_backend.bulk_write
mocked_bulk_write.assert_called_once()
time.sleep(self.qtimeout * 2)
self.assertEqual(loops_count, len(mocked_bulk_write.call_args[0][0]))
def test_worker_error(self):
self.assertFalse(self.backend.thread is None)
# simulate TypeError in queue.get
with mock.patch.object(self.backend._queue, "get", side_effect=TypeError):
# ensure worker loop repeat
time.sleep(2 * self.qtimeout)
# assert thread stopped
self.assertTrue(self.backend.thread is None)
@pytest.mark.skipif(
sys.platform == "darwin",
reason="multiprocessing.queues.Queue.qsize doesn't work on MacOS due to broken sem_getvalue()",
)
def test_producer_in_another_process(self):
# assure worker is stopped
self.stop_worker()
# fill in the queue
process = Process(target=go)
process.start()
process.join()
# check the queue contains the item
self.assertEqual(self.backend._queue.qsize(), 1)
def test_flush_metrics_when_parent_process_not_alive(self):
self.stop_worker()
loops = 3
with mock.patch.object(self.backend, "parent_thread", spec=Thread) as parent_thread:
parent_thread.is_alive.return_value = False
for _ in range(loops):
go()
#: do not allow flush metrics before checking if parent_thread is alive
self.backend.worker_limit = loops + 1
self.backend.worker()
mocked_bulk_write = self.mocked_backend.bulk_write
mocked_bulk_write.assert_called_once()
self.assertEqual(loops, len(mocked_bulk_write.call_args[0][0]))
class TestThreaded(object):
def test_calling_thread_waits_for_worker(self):
"""
Start a process we are not the parent of and see if it waits for at
least the queue timeout (1) before exiting. If we were using a daemon
thread the process would exit immediately.
"""
start = time.time()
result = subprocess.call("./tests/dummy_process.py")
delta = time.time() - start
assert result == 0
assert delta >= 1
class TestElastic(TestBaseBackend, ElasticTestMixin):
def setUp(self):
self.qtime = 0.1
self.backend = ThreadedBackend(
elasticsearch.ElasticsearchBackend,
backend_args=(ELASTICSEARCH_HOST,),
backend_kwargs=dict(index="threaded-metrics"),
queue_timeout=self.qtime,
)
settings.configure(backends=[self.backend])
self._clear(self.backend.backend)
def test_write_method(self):
go()
time.sleep(2 * self.backend.bulk_timeout)
metrics = self._query_backend(self.backend.backend, go.get_fqn())
self.assertEqual(metrics["hits"]["total"], 1)
class TestSetupBackend:
def test_backend_importpath(self):
backend = ThreadedBackend(backend="time_execution.backends.elasticsearch.ElasticsearchBackend")
assert isinstance(backend.backend, elasticsearch.ElasticsearchBackend)
def test_backend_importpath_wrong_path(self):
with pytest.raises(ImportError):
ThreadedBackend(backend="time_execution.backends.wrong_path.NewBackend")
def test_backend_class(self):
backend = ThreadedBackend(backend=elasticsearch.ElasticsearchBackend)
assert isinstance(backend.backend, elasticsearch.ElasticsearchBackend)
| apache-2.0 | -1,550,487,496,775,174,000 | 35.013953 | 103 | 0.652977 | false |
Peilonrayz/instruction-follower | src/hrm_readable/__main__.py | 1 | 3562 | import re
def setup():
BASE_COMMANDS = [
("inbox", ""),
("outbox", ""),
("copyfrom", "{}"),
("copyto", "{}"),
("add", "{}"),
("sub", "{}"),
("bumpup", "{}"),
("bumpdn", "{}"),
("jump", "{}"),
("jumpz", "{}"),
("jumpn", "{}"),
]
max_len = max(len(c[0]) for c in BASE_COMMANDS) + 1
base_commands = {
command: " " + command.upper() + " " * (max_len - len(command)) + arg
for command, arg in BASE_COMMANDS
}
base_commands.update({"label": "{}:"})
additional_commands = {
"b>": (("inbox", ""),),
"b<": (("outbox", ""),),
"c>": (("copyto", "{0}"),),
"c<": (("copyfrom", "{0}"),),
"+": (("add", "{0}"),),
"-": (("sub", "{0}"),),
"u>": (("add", "{0}"),),
"u<": (("sub", "{0}"),),
"::": (("label", "{0}"),),
"~:": (("jump", "{0}"),),
"-:": (("jumpn", "{0}"),),
"0:": (("jumpz", "{0}"),),
"=>": (("jump", "{0}"),),
"->": (("jumpn", "{0}"),),
"0>": (("jumpz", "{0}"),),
"place": (("inbox", ""), ("copyto", "{0}"),),
"take": (("copyfrom", "{0}"), ("outbox", ""),),
"through": (("inbox", ""), ("outbox", ""),),
"gt": (("copyfrom", "{0}"), ("sub", "{1}"),),
"lt": (("copyfrom", "{1}"), ("sub", "{0}"),),
"move": (("copyfrom", "{0}"), ("copyto", "{1}"),),
"swap": (
("copyfrom", "{0}"),
("copyto", "{2}"),
("copyfrom", "{1}"),
("copyto", "{0}"),
("copyfrom", "{2}"),
("copyto", "{1}"),
),
"i>": (("inbox", ""), ("copyto", "{0}"),),
"i<": (("copyfrom", "{0}"), ("outbox", ""),),
">>": (("inbox", ""), ("outbox", ""),),
">": (("copyfrom", "{0}"), ("sub", "{1}"),),
"<": (("copyfrom", "{1}"), ("sub", "{0}"),),
"~>": (("copyfrom", "{0}"), ("copyto", "{1}"),),
"<>": (
("copyfrom", "{0}"),
("copyto", "{2}"),
("copyfrom", "{1}"),
("copyto", "{0}"),
("copyfrom", "{2}"),
("copyto", "{1}"),
),
}
return base_commands, additional_commands
COMMANDS, ADDITIONAL_COMMANDS = setup()
def read_commands(program):
commands = []
for line in program:
line = line.strip()
if not line or line.startswith(("#", "//", "--")):
continue
match = re.match(r"(.+):$", line)
if match:
commands.append(("label", (match.groups(1))))
continue
name, *args = line.split()
commands.append((name.lower(), args))
return commands
def to_hrm(commands):
hrm_commands = []
for name, args in commands:
additional_commands = ADDITIONAL_COMMANDS.get(name, None)
if additional_commands is None:
hrm_commands.append((name, (args[:1] or [None])[0]))
continue
for command, value in additional_commands:
hrm_commands.append((command, value.format(*args)))
return hrm_commands
def format_hrm(commands):
return "\n".join(COMMANDS[name].format(arg) for name, arg in commands)
while True:
level = input("level: ")
try:
f = open("./levels/{}".format(level))
except FileNotFoundError:
print("File doesn't exist")
continue
with f:
mhrm_commands = read_commands(f)
hrm_commands = to_hrm(mhrm_commands)
print("\n\n{}\n\n".format(format_hrm(hrm_commands)))
| mit | -2,887,195,703,688,148,500 | 29.444444 | 80 | 0.396126 | false |
GenosResearchGroup/ContourMetrics | apps/calculator/views.py | 1 | 14052 | import itertools
import json
import numpy
import scipy.stats
from django.db.models import Q
from django.http import HttpResponse
from django.shortcuts import render
import lib
import lib.similarity
from apps.calculator import models
from apps.calculator.forms import ComparisonFormOne, ComparisonFormMultiple, AlgorithmForm, \
ComparisonFormAlgorithms
from apps.calculator.models import Contour
from lib import Contour as C
from lib.utils import print_cseg, get_first, get_last, apply_fn_to_matrix, round_tolist
# Views
def calculator(request):
args = {'algorithm_form': AlgorithmForm()}
return render(request, 'calculator.html', args)
def get_calculus(request):
if request.is_ajax():
normalized_seq = list(map(int, json.loads(request.POST.get('normalized'))))
operation = request.POST.get('operation')
query_set = Contour.objects.filter(normalized=normalized_seq)
if query_set and operation in ['Class', 'Reduction', 'IAD', 'Oscillation']:
contour_model = query_set[0]
else:
contour = C(list(map(int, json.loads(request.POST.get('request_data')))))
if operation == 'Class':
if query_set:
result = contour_model.equivalent_class
else:
result = contour.equivalent_class_prime().sequence
elif operation == 'Reduction':
if query_set:
result = contour_model.prime
else:
result = contour.reduction()[0].sequence
elif operation == 'IAD':
if query_set:
result = contour_model.direction_index
else:
result = contour.direction_index()
elif operation == 'Oscillation':
if query_set:
result = contour_model.oscillation_index
else:
result = contour.oscillation_index()
elif operation == 'Window 3':
result = contour.window_reduction(3).sequence
elif operation == 'Window 5':
result = contour.window_reduction(5).sequence
else:
return
json_out = json.dumps(result)
return HttpResponse(json_out, content_type="application/json")
def get_memory_comparison(request):
if request.is_ajax():
algorithm_index = json.loads(request.POST.get('algorithm'))
memories = json.loads(request.POST.get('memory'))
algorithm = models.ALGORITHM_CHOICES[algorithm_index][0]
contours = {}
for k, v in memories.items():
contour = C(list(map(int, v['array'])))
if k not in contours:
contours[k] = contour
combinations = itertools.combinations(contours.keys(), 2)
data = []
for a, b in combinations:
ca = contours[a]
cb = contours[b]
labels = ' '.join([ca.__repr__(), cb.__repr__()])
comp_obj = lib.Comparison(ca, cb)
value = comp_obj.compare(algorithm)
data.append([labels, value])
json_out = json.dumps(data)
return HttpResponse(json_out, content_type="application/json")
def comparison_one(request):
form = ComparisonFormOne()
args = {'form': form}
return render(request, 'comparison_one.html', args)
def comparison_multiple(request):
form = ComparisonFormMultiple()
args = {'form': form}
return render(request, 'comparison_multiple.html', args)
def comparison_algorithms(request):
form = ComparisonFormAlgorithms()
args = {'form': form}
return render(request, 'benchmark.html', args)
def get_comparison_data_one(request):
if request.is_ajax():
main_contour_seq = list(map(int, json.loads(request.POST.get('mainContour'))))
collection_id = request.POST.get('selectedCollection')
algorithm_label = request.POST.get('selectedAlgorithm')
mode_label = request.POST.get('selectedMode')
key_label = request.POST.get('selectedKey')
time_signature_label = request.POST.get('selectedTimeSignature')
voice = request.POST.get('selectedVoice')
collection = models.Collection.objects.get(id=collection_id)
contour_query = [Q(phrase__piece__collection=collection)]
if mode_label != '':
contour_query.append(Q(phrase__piece__mode=mode_label))
if key_label != '':
contour_query.append(Q(phrase__piece__key=key_label))
if time_signature_label != '':
contour_query.append(Q(phrase__piece__time_signature=time_signature_label))
if voice != '':
contour_query.append(Q(phrase__voice=voice))
qs_contours = models.Contour.objects.filter(*contour_query).distinct()
main_contour = C(main_contour_seq)
main_contour_adjacency_series = main_contour.adjacent_series()
if algorithm_label == 'OSC':
input_data_a = main_contour.oscillation_spectrum(False)
else:
input_data_a = ''.join(map(str, main_contour_adjacency_series))
data = []
for c in qs_contours.values('normalized', 'oscillation_spectrum', 'adjacency_series'):
if algorithm_label == 'OSC':
input_data_b = list(map(get_first, c['oscillation_spectrum']))
value = lib.similarity.oscillation_spectrum_correlation(input_data_a, input_data_b)
elif algorithm_label == 'AED':
input_data_b = ''.join(map(str, c['adjacency_series']))
value = lib.similarity.adjacent_edit_distance(input_data_a, input_data_b)
elif algorithm_label == 'AGP':
input_data_b = ''.join(map(str, c['adjacency_series']))
value = lib.similarity.sequence_similarity(input_data_a, input_data_b)
data.append([print_cseg(c['normalized']), value])
arr = numpy.array(list(map(get_last, data)))
mean = numpy.nan_to_num(arr.mean())
std = numpy.nan_to_num(arr.std())
frequency = scipy.stats.itemfreq(arr)
statistics = [
['Mean', mean],
['Std', std],
['Skew', numpy.nan_to_num(scipy.stats.skew(arr))],
['Kurtosis', numpy.nan_to_num(scipy.stats.kurtosis(arr))],
['Variation coefficient', std / mean],
['Entropy', scipy.stats.entropy(frequency)[0]]
]
json_out = json.dumps({
'data': data,
'statistics': statistics
})
return HttpResponse(json_out, content_type="application/json")
def get_comparison_data_multiple(request):
if request.is_ajax():
collection_id = request.POST.get('selectedCollection')
algorithm_label = request.POST.get('selectedAlgorithm')
mode_label = request.POST.get('selectedMode')
key_label = request.POST.get('selectedKey')
time_signature_label = request.POST.get('selectedTimeSignature')
voice_label = request.POST.get('selectedVoice')
collection = models.Collection.objects.get(id=collection_id)
contour_query = [
Q(phrase__piece__collection=collection),
]
comparison_query = [
Q(collection=collection),
Q(algorithm=algorithm_label)
]
if mode_label != '':
contour_query.append(Q(phrase__piece__mode=mode_label))
comparison_query.append(Q(mode=mode_label))
else:
comparison_query.append(Q(mode=None))
if key_label != '':
contour_query.append(Q(phrase__piece__key=key_label))
comparison_query.append(Q(key=key_label))
else:
comparison_query.append(Q(key=None))
if time_signature_label != '':
contour_query.append(Q(phrase__piece__time_signature=time_signature_label))
comparison_query.append(Q(time_signature=time_signature_label))
else:
comparison_query.append(Q(time_signature=None))
if voice_label != '':
contour_query.append(Q(phrase__voice=voice_label))
comparison_query.append(Q(voice=voice_label))
else:
comparison_query.append(Q(voice=None))
qs_comparison = models.Comparison.objects.filter(*comparison_query)
# FIXME: get the comparison object
print('> Comparison', qs_comparison)
if not qs_comparison.exists():
return HttpResponse(json.dumps({'collection_exists': False}), content_type="application/json")
comp = models.Comparison.objects.filter(*comparison_query).first()
columns = ['mean', 'std', 'skew', 'kurtosis', 'variation_coefficient']
ind = ['similarity', 'direction', 'oscillation', 'size', 'diversity']
columns_size = len(columns)
ind_size = len(ind)
dic = comp.get_dict()
matrix = numpy.zeros(columns_size * ind_size).reshape(columns_size, ind_size)
for j in range(ind_size):
for i in range(columns_size):
label = '{}_{}'.format(ind[j], columns[i])
matrix[i][j] = dic[label]
seq = matrix.round(2).tolist()
for row, label in zip(seq, columns):
row.insert(0, label.capitalize().replace('_', ' '))
dic = {
'statistics': seq,
'collection_exists': True
}
dic.update(comp.get_dict())
json_out = json.dumps(dic)
return HttpResponse(json_out, content_type="application/json")
def get_comparison_algorithms_data(request):
if request.is_ajax():
collection_id = request.POST.get('selectedCollection')
algorithms_labels = request.POST.getlist('selectedAlgorithms[]')
mode_label = request.POST.get('selectedMode')
key_label = request.POST.get('selectedKey')
time_signature_label = request.POST.get('selectedTimeSignature')
voice_label = request.POST.get('selectedVoice')
collection = models.Collection.objects.get(id=collection_id)
contour_query = [
Q(phrase__piece__collection=collection),
]
if mode_label != '':
contour_query.append(Q(phrase__piece__mode=mode_label))
if key_label != '':
contour_query.append(Q(phrase__piece__key=key_label))
if time_signature_label != '':
contour_query.append(Q(phrase__piece__time_signature=time_signature_label))
if voice_label != '':
contour_query.append(Q(phrase__voice=voice_label))
qs_contour = models.Contour.objects.filter(*contour_query)
confidence_level = 95
confidence_interval = 10
sample_size = lib.utils.sample_size(qs_contour.count(), confidence_level, confidence_interval)
qs_contour_sample = qs_contour.order_by('?')[:sample_size]
contours = map(lambda d: d['normalized'], qs_contour_sample.values('normalized'))
combined_contours = itertools.combinations(contours, 2)
seq = []
aux_dic = {}
for a, b in combined_contours:
k = tuple(sorted([tuple(a), tuple(b)]))
if k not in aux_dic:
ca = C(a)
cb = C(b)
comp = lib.Comparison(ca, cb)
aux_dic[k] = [comp.compare(algorithm) for algorithm in algorithms_labels]
seq.append(aux_dic[k])
arr = numpy.array(seq)
number_of_algorithms = len(algorithms_labels)
combined_index = list(itertools.combinations(range(number_of_algorithms), 2))
scatter_data = []
scatter_labels = []
correlation_data = []
arr_t = arr.T
# get correlation scatter data
for i1, i2 in combined_index:
scatter_data.append(arr_t[[i1, i2]].T.tolist())
scatter_labels.append([algorithms_labels[j] for j in [i1, i2]])
correlation_data.append(numpy.corrcoef(arr_t[i1], arr_t[i2])[0][1])
hist_data = []
correlation = numpy.zeros(number_of_algorithms ** 2).reshape(number_of_algorithms, number_of_algorithms).tolist()
# get correlation table and histogram data
for i in range(number_of_algorithms):
hist_data.append([[str(a), b] for a, b in zip(range(len(arr_t[i])), arr_t[i])])
for j in range(i, number_of_algorithms):
if i == j:
v = 1
else:
v = numpy.corrcoef(arr_t[i], arr_t[j])[0][1].round(2)
correlation[i][j] = v
correlation[j][i] = v
for row, label in zip(correlation, algorithms_labels):
row.insert(0, label.upper())
correlation_header = algorithms_labels
correlation_header.insert(0, '')
sample_data = [
['Population size', qs_contour.count()],
['Sample size', sample_size],
['Confidence level', confidence_level],
['Confidence interval', confidence_interval],
]
# Entropy doesn't work with apply_fn_to_matrix
entropy = numpy.matrix([scipy.stats.entropy(scipy.stats.itemfreq(arr[:, i]))[1] for i in range(len(arr.T))])
std = apply_fn_to_matrix(arr, numpy.std)
mean = apply_fn_to_matrix(arr, numpy.mean)
statistics_data = [
['Mean'] + round_tolist(mean),
['Median'] + round_tolist(apply_fn_to_matrix(arr, numpy.median)), # df.median().round(2).values.tolist(),
['Std'] + round_tolist(std),
['Skew'] + round_tolist(apply_fn_to_matrix(arr, scipy.stats.skew)),
['Kurtosis'] + round_tolist(apply_fn_to_matrix(arr, scipy.stats.kurtosis)),
['Variation coefficient'] + round_tolist(std / mean), # (df.std() / df.mean()).round(2).values.tolist(),
['Entropy'] + round_tolist(entropy) # [scipy.stats.entropy(df[c].value_counts()).round(2) for c in df.columns]
]
dic = {
'sample_data': sample_data,
'correlation': correlation,
'correlation_header': correlation_header,
'scatter_data': scatter_data,
'scatter_labels': scatter_labels,
'histogram_data': hist_data,
'histogram_labels': correlation_header[1:],
'statistics_data': statistics_data
}
json_out = json.dumps(dic)
return HttpResponse(json_out, content_type="application/json")
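# Illustrative sketch -- not part of the original module. The view above derives an
# algorithms x algorithms correlation table from pairwise contour comparisons; the
# helper below reproduces that core step on a plain list of per-pair similarity rows
# so it can be exercised without Django, Contour or Comparison objects.
def correlation_table(rows, labels):
    """Return (header, table) for the columns of ``rows``.
    ``rows`` holds one sequence per contour pair (one column per algorithm) and
    ``labels`` names the algorithms, mirroring get_comparison_algorithms_data.
    """
    arr = numpy.array(rows, dtype=float)
    corr = numpy.corrcoef(arr.T).round(2).tolist()
    table = [[label.upper()] + row for label, row in zip(labels, corr)]
    header = [''] + list(labels)
    return header, table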
| mit | 4,782,281,060,298,566,000 | 34.484848 | 119 | 0.607316 | false |
kawamon/hue | desktop/libs/azure/src/azure/conf.py | 1 | 7628 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import sys
from desktop.lib.conf import Config, UnspecifiedConfigSection, ConfigSection, coerce_password_from_script
from desktop.lib.idbroker import conf as conf_idbroker
from hadoop import core_site
if sys.version_info[0] > 2:
from django.utils.translation import gettext_lazy as _t
else:
from django.utils.translation import ugettext_lazy as _t
LOG = logging.getLogger(__name__)
PERMISSION_ACTION_ABFS = "abfs_access"
PERMISSION_ACTION_ADLS = "adls_access"
REFRESH_URL = 'https://login.microsoftonline.com/<tenant_id>/oauth2/<version>token'
META_DATA_URL = 'http://169.254.169.254/metadata/instance'
AZURE_METADATA = None
def get_default_client_id():
"""
  Attempt to set Azure client id from script, else core-site, else None
"""
client_id_script = AZURE_ACCOUNTS['default'].CLIENT_ID_SCRIPT.get()
return client_id_script or core_site.get_adls_client_id() or core_site.get_azure_client_id()
def get_default_secret_key():
"""
  Attempt to set Azure client secret from script, else core-site, else None
"""
client_secret_script = AZURE_ACCOUNTS['default'].CLIENT_SECRET_SCRIPT.get()
return client_secret_script or core_site.get_adls_authentication_code() or core_site.get_azure_client_secret()
def get_default_tenant_id():
"""
  Attempt to set Azure tenant id from script, else None
"""
return AZURE_ACCOUNTS['default'].TENANT_ID_SCRIPT.get()
def get_refresh_url(conf, version):
refresh_url = core_site.get_adls_refresh_url() or core_site.get_azure_client_endpoint()
if not refresh_url:
refresh_url = REFRESH_URL.replace('<tenant_id>', conf.TENANT_ID.get()).replace('<version>', version + '/' if version else '')
return refresh_url
def get_default_region():
return ""
def get_default_adls_url():
return ADLS_CLUSTERS['default'].WEBHDFS_URL.get()
def get_default_adls_fs():
return ADLS_CLUSTERS['default'].FS_DEFAULTFS.get()
def get_default_abfs_url():
return ABFS_CLUSTERS['default'].WEBHDFS_URL.get()
def get_default_abfs_fs():
return ABFS_CLUSTERS['default'].FS_DEFAULTFS.get()
ADLS_CLUSTERS = UnspecifiedConfigSection(
"adls_clusters",
help="One entry for each ADLS cluster",
each=ConfigSection(
help="Information about a single ADLS cluster",
members=dict(
FS_DEFAULTFS=Config("fs_defaultfs", help="adl://<account_name>.azuredatalakestore.net", type=str, default=None),
WEBHDFS_URL=Config("webhdfs_url",
help="https://<account_name>.azuredatalakestore.net/webhdfs/v1",
type=str, default=None),
)
)
)
AZURE_ACCOUNTS = UnspecifiedConfigSection(
"azure_accounts",
help="One entry for each Azure account",
each=ConfigSection(
help="Information about a single azure account",
members=dict(
CLIENT_ID=Config(
key="client_id",
type=str,
dynamic_default=get_default_client_id,
help="https://docs.microsoft.com/en-us/azure/data-lake-store/data-lake-store-service-to-service-authenticate-rest-api"),
CLIENT_ID_SCRIPT=Config(
key="client_id_script",
type=coerce_password_from_script,
default=None,
private=True,
help="Execute this script to produce the ADLS client id."),
CLIENT_SECRET=Config(
key="client_secret",
type=str,
dynamic_default=get_default_secret_key,
private=True,
help="https://docs.microsoft.com/en-us/azure/data-lake-store/data-lake-store-service-to-service-authenticate-rest-api"),
CLIENT_SECRET_SCRIPT=Config(
key='client_secret_script',
type=coerce_password_from_script,
default=None,
private=True,
help=_t("Execute this script to produce the ADLS client secret.")),
TENANT_ID=Config(
key="tenant_id",
type=str,
dynamic_default=get_default_tenant_id,
help="https://docs.microsoft.com/en-us/azure/data-lake-store/data-lake-store-service-to-service-authenticate-rest-api"),
TENANT_ID_SCRIPT=Config(
key='tenant_id_script',
type=coerce_password_from_script,
default=None,
private=True,
help=_t("Execute this script to produce the ADLS tenant id.")),
)
)
)
ABFS_CLUSTERS = UnspecifiedConfigSection(
"abfs_clusters",
help="One entry for each ABFS cluster",
each=ConfigSection(
help="Information about a single ABFS cluster",
members=dict(
FS_DEFAULTFS=Config("fs_defaultfs", help="abfss://<container_name>@<account_name>.dfs.core.windows.net", type=str, default=None),
WEBHDFS_URL=Config("webhdfs_url",
help="https://<container_name>@<account_name>.dfs.core.windows.net",
type=str, default=None),
)
)
)
def is_adls_enabled():
return ('default' in list(AZURE_ACCOUNTS.keys()) and AZURE_ACCOUNTS['default'].get_raw() and AZURE_ACCOUNTS['default'].CLIENT_ID.get() \
or (conf_idbroker.is_idbroker_enabled('azure') and has_azure_metadata())) and 'default' in list(ADLS_CLUSTERS.keys())
def is_abfs_enabled():
return ('default' in list(AZURE_ACCOUNTS.keys()) and AZURE_ACCOUNTS['default'].get_raw() and AZURE_ACCOUNTS['default'].CLIENT_ID.get() \
or (conf_idbroker.is_idbroker_enabled('azure') and has_azure_metadata())) and 'default' in list(ABFS_CLUSTERS.keys())
def has_adls_access(user):
from desktop.auth.backend import is_admin
return user.is_authenticated and user.is_active and (is_admin(user) or user.has_hue_permission(action="adls_access", app="filebrowser"))
def has_abfs_access(user):
from desktop.auth.backend import is_admin
return user.is_authenticated and user.is_active and (is_admin(user) or user.has_hue_permission(action="abfs_access", app="filebrowser"))
def azure_metadata():
global AZURE_METADATA
if AZURE_METADATA is None:
from desktop.lib.rest import http_client, resource
client = http_client.HttpClient(META_DATA_URL, logger=LOG)
root = resource.Resource(client)
try:
AZURE_METADATA = root.get('/compute', params={'api-version': '2019-06-04', 'format': 'json'}, headers={'Metadata': 'true'})
except Exception as e:
AZURE_METADATA = False
return AZURE_METADATA
def has_azure_metadata():
return azure_metadata() is not None
def config_validator(user):
res = []
import desktop.lib.fsmanager # Avoid cyclic loop
if is_adls_enabled() or is_abfs_enabled():
try:
headers = desktop.lib.fsmanager.get_client(name='default', fs='abfs')._getheaders()
if not headers.get('Authorization'):
raise ValueError('Failed to obtain Azure authorization token')
except Exception as e:
LOG.exception('Failed to obtain Azure authorization token.')
res.append(('azure', _t('Failed to obtain Azure authorization token, check your azure configuration.')))
return res
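# Illustrative sketch -- not part of the original file, and the key names below are
# chosen for illustration only: how the helpers above can be combined into a single
# credentials mapping for the default Azure account.
def get_default_azure_credentials(version='2.0'):
  return {
    'client_id': get_default_client_id(),
    'client_secret': get_default_secret_key(),
    'tenant_id': get_default_tenant_id(),
    'refresh_url': get_refresh_url(AZURE_ACCOUNTS['default'], version),
  }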
| apache-2.0 | 7,484,886,632,806,225,000 | 37.720812 | 138 | 0.697299 | false |
edgarcosta/lmfdb-gce | transition_scripts/reimport_knowls_and_userdb.py | 1 | 3895 | import os, sys
from sage.all import load
os.chdir("/home/edgarcosta/lmfdb/")
sys.path.append("/home/edgarcosta/lmfdb/")
import lmfdb
db = lmfdb.db_backend.db
DelayCommit = lmfdb.db_backend.DelayCommit
load("/home/edgarcosta/lmfdb-gce/transition_scripts/export_special.py")
def backup():
import subprocess, datetime
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M")
userdbdump="/scratch/postgres-backup/userdb-backup-%s.tar" % timestamp
knowlsdump="/scratch/postgres-backup/knowls-backup-%s.tar" % timestamp
a = subprocess.check_call(["sudo", "-u", "postgres", "pg_dump", "--clean", "--if-exists", "--schema=userdb", "--file", userdbdump, "--format", "tar", "lmfdb"])
b = subprocess.check_call(["sudo", "-u", "postgres", "pg_dump", "--clean", "--if-exists", "--schema=public", "-t", 'kwl_knowls', "-t", "kwl_deleted", "-t", "kwl_history", "--file", knowlsdump, "--format", "tar", "lmfdb"], stderr=subprocess.STDOUT)
if a + b != 0:
print "Failed to backup users and kwl_*"
raise ValueError
print "Succeeded in backing up knowls and userdb"
return a + b
def import_knowls():
cur = db.conn.cursor()
tablenames = ['kwl_history', 'kwl_deleted', 'kwl_knowls'];
with DelayCommit(db, silence=True):
try:
# rename old tables
for name in tablenames:
cur.execute("ALTER TABLE IF EXISTS %s DROP CONSTRAINT IF EXISTS %s_pkey" % (name, name));
cur.execute("DROP TABLE IF EXISTS %s" % name);
# create tables
cur.execute("CREATE TABLE kwl_knowls (id text, cat text, title text, content text, authors jsonb, last_author text, quality text, timestamp timestamp, _keywords jsonb, history jsonb)")
cur.execute("CREATE TABLE kwl_deleted (id text, cat text, title text, content text, authors jsonb, last_author text, quality text, timestamp timestamp, _keywords jsonb, history jsonb)")
cur.execute("CREATE TABLE kwl_history (id text, title text, time timestamp, who text, state text)")
for tbl in ["kwl_knowls", "kwl_deleted", "kwl_history"]:
for action in ["INSERT", "UPDATE", "DELETE"]:
db._grant(action, tbl, ['webserver'])
db.grant_select(tbl)
with open('/scratch/importing/kwl_knowls.txt') as F:
cur.copy_from(F, 'kwl_knowls', columns=["id", "cat", "title", "content", "authors", "last_author", "quality", "timestamp", "_keywords", "history"])
with open('/scratch/importing/kwl_history.txt') as F:
cur.copy_from(F, 'kwl_history', columns=["id", "title", "time", "who", "state"])
cur.execute("ALTER TABLE kwl_knowls ADD CONSTRAINT kwl_knowls_pkey PRIMARY KEY (id)")
# no primary key on deleted
#cur.execute("ALTER TABLE kwl_deleted ADD CONSTRAINT kwl_deleted_pkey PRIMARY KEY (id)")
cur.execute("ALTER TABLE kwl_history ADD CONSTRAINT kwl_history_pkey PRIMARY KEY (id)")
except Exception:
print "Failure in importing knowls"
db.conn.rollback()
raise
print "Succeeded in importing knowls"
def import_users():
with DelayCommit(db, silence=True):
try:
conn = db.conn
cur = conn.cursor()
# delete rows of usersdb.users
cur.execute("DELETE FROM userdb.users")
with open('/scratch/importing/users.txt') as F:
cur.copy_from(F, 'userdb.users', columns=["username", "password", "bcpassword", "admin", "color_scheme", "full_name", "email", "url", "about", "created"])
except Exception:
conn.rollback()
print "Failure in importing users"
raise
print "Successfully imported users"
export_knowls()
export_users()
backup()
import_knowls()
import_users()
| mit | 8,707,786,978,296,396,000 | 47.08642 | 251 | 0.61258 | false |
bstrebel/OxAPI | oxapi/attachment.py | 1 | 3058 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys,time,json,requests
from oxapi import *
class OxAttachment(OxBean):
module_name = 'attachment'
module_type = None
map = {'folder': 800,
'attached': 801,
'module': 802,
'filename': 803,
'filesize': 804,
'file_mimetype': 805,
'rtf_flag': 806}
map.update(OxBean.map)
columns = OxBean.columns(map)
def __init__(self, data, ox=None, timestamp=None, columns=None):
self._document = None
self._module = 'attachment'
OxBean.__init__(self, data, ox, timestamp, columns)
@property
def document(self):
if not self._document:
if self._data:
params = {'module': self.module,
'attached': self.attached,
'id': self.id,
'folder': self.folder}
document = self._ox.get(self.module_name, 'document', params)
if document:
#self._timestamp = content.get('timestamp', None)
self._document = document
return self._document
def detach(self):
if self._data:
params = {'module': self.module,
'attached': self.attached,
'folder': self.folder}
body = []
body.append(self.id)
result = self._ox.put(self.module_name,'detach', params, body)
# {u'timestamp': 1449912233915L, u'data': u''}
pass
class OxAttachments(OxBeans):
module_name = 'attachment'
def __init__(self, ox):
OxBeans.__init__(self, ox)
def action(self, action, params):
if action == 'all':
params.update({'columns': ",".join(map(lambda id: str(id), OxAttachment.columns))})
self._data = []
OxBeans.action(self, OxAttachment, action, params)
if self._raw:
folder = params['folder']
id = OxAttachment.map['folder']
pos = OxAttachment.columns.index(id)
for raw in self._raw:
# workaround because of Open-Xchange bug
if raw[pos] == 0: raw[pos] = folder
self._data.append(OxAttachment(raw, self._ox))
return self
elif action == 'get':
self._data = None
OxBeans.action(self, OxAttachment, action, params)
self._data = OxAttachment(self._raw, self._ox, self._timestamp)
return self._data
elif action == 'document':
self._data = None
OxBeans.action(self, OxAttachment, action, params)
self._data = OxAttachment(self._raw, self._content, self._ox, self._timestamp)
return self._data
# region __main__
if __name__ == '__main__':
with OxHttpAPI.get_session() as ox:
task = ox.get_task('246','43806')
attachments = ox.get_attachments(task)
pass
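        # Illustrative sketch -- not part of the original demo. It assumes
        # OxAttachments keeps its parsed beans in ``_data`` and that ``document``
        # returns the raw file payload; save each attachment to the current directory.
        for attachment in (attachments._data or []):
            payload = attachment.document
            if payload:
                with open(attachment.filename, 'wb') as handle:
                    handle.write(payload)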
# endregion
| gpl-2.0 | -6,618,038,992,664,991,000 | 28.980392 | 95 | 0.515697 | false |
mrnamingo/enigma2-test | lib/python/Plugins/SystemPlugins/SoftwareManager/ImageFlasher.py | 1 | 30780 | from Plugins.SystemPlugins.Hotplug.plugin import hotplugNotifier
from Components.Label import Label
from Components.Button import Button
from Components.ActionMap import ActionMap
from Components.MenuList import MenuList
from Components.FileList import FileList
from Components.Task import Task, Job, job_manager, Condition
from Components.Sources.StaticText import StaticText
from Components.SystemInfo import SystemInfo
from Screens.Console import Console
from Screens.MessageBox import MessageBox
from Screens.ChoiceBox import ChoiceBox
from Screens.Screen import Screen
from Screens.Console import Console
from Screens.HelpMenu import HelpableScreen
from Screens.TaskView import JobView
from Tools.Downloader import downloadWithProgress
from enigma import fbClass
import urllib2
import os
import shutil
import math
from boxbranding import getBoxType, getImageDistro, getMachineName, getMachineBrand, getImageVersion, getMachineKernelFile, getMachineRootFile
distro = getImageDistro()
ImageVersion = getImageVersion()
ROOTFSBIN = getMachineRootFile()
KERNELBIN = getMachineKernelFile()
#############################################################################################################
image = 0 # 0=openATV / 1=openMips
if distro.lower() == "openmips":
image = 1
elif distro.lower() == "openatv":
image = 0
feedurl_atv = 'http://images.mynonpublic.com/openatv/%s' %ImageVersion
if ImageVersion == '5.3':
ImageVersion2= '5.4'
else:
ImageVersion2= '5.3'
feedurl_atv2= 'http://images.mynonpublic.com/openatv/%s' %ImageVersion2
feedurl_om = 'http://image.openmips.com/5.3'
imagePath = '/media/hdd/imagebackups'
flashPath = '/media/hdd/flash1'
flashTmp = '/media/hdd/flash2'
ofgwritePath = '/usr/bin/ofgwrite'
#############################################################################################################
def Freespace(dev):
statdev = os.statvfs(dev)
space = (statdev.f_bavail * statdev.f_frsize) / 1024
print "[Flash Online] Free space on %s = %i kilobytes" %(dev, space)
return space
def ReadNewfeed():
f = open('/etc/enigma2/newfeed', 'r')
newfeed = f.readlines()
f.close()
return newfeed
class FlashOnline(Screen):
skin = """
<screen position="center,center" size="560,400" title="Image Flasher">
<ePixmap position="10,368" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap position="140,368" zPosition="1" size="140,40" transparent="1" alphatest="on" />
<ePixmap position="280,368" zPosition="1" size="140,40" transparent="1" alphatest="on" />
<ePixmap position="425,368" zPosition="1" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget source="key_red" render="Label" position="0,360" zPosition="2" size="140,40" valign="center" halign="center" font="skyreg;20" foregroundColor="skygold" backgroundColor="skydarkblue" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget source="key_green" render="Label" position="140,360" zPosition="2" size="140,40" valign="center" halign="center" font="skyreg;20" foregroundColor="skygold" backgroundColor="skydarkblue" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget source="key_yellow" render="Label" position="280,360" zPosition="2" size="140,40" valign="center" halign="center" font="skyreg;20" foregroundColor="skygold" backgroundColor="skydarkblue" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget source="key_blue" render="Label" position="420,360" zPosition="2" size="140,40" valign="center" halign="center" font="skyreg;20" foregroundColor="skygold" backgroundColor="skydarkblue" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="info-online" position="10,30" zPosition="1" size="450,100" font="skyreg;20" foregroundColor="skygold" backgroundColor="skydarkblue" halign="left" valign="top" transparent="1" />
<widget name="info-local" position="10,150" zPosition="1" size="450,200" font="skyreg;20" foregroundColor="skygold" backgroundColor="skydarkblue" halign="left" valign="top" transparent="1" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
self.session = session
self.selection = 0
self.devrootfs = "/dev/mmcblk0p3"
self.multi = 1
self.list = self.list_files("/boot")
Screen.setTitle(self, _("ViX4E2PROJECT Image Flasher"))
self["key_yellow"] = Button("")
self["key_green"] = Button("NEXT")
self["key_red"] = Button(_("EXIT"))
self["key_blue"] = Button(_(""))
self["info-local"] = Label(_("You will automatically be presented with existing available image .zip files (if any) which are stored in your default local folder: /media/hdd/imagebackups\n\nYou can choose to directly flash any of these images from this menu by highlighting and pressing green to flash or you can press yellow to browse to another image .zip file which is stored elsewhere on your system."))
self["info-online"] = Label(_("When on the next step of this process you will have the options of pressing green to flash highlighted images in your default folder (if any), blue to delete image, yellow to browse to another image .zip file or red to exit."))
self["press-green"] = Label(_("PRESS GREEN BUTTON TO CONTINUE TO NEXT STEP"))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"blue": self.quit,
"yellow": self.quit,
"green": self.green,
"red": self.quit,
"cancel": self.quit,
}, -2)
if SystemInfo["HaveMultiBoot"]:
self.multi = self.read_startup("/boot/" + self.list[self.selection]).split(".",1)[1].split(" ",1)[0]
self.multi = self.multi[-1:]
print "[Flash Online] MULTI:",self.multi
def check_hdd(self):
if not os.path.exists("/media/hdd"):
self.session.open(MessageBox, _("No /hdd found !!\nPlease make sure you have a HDD mounted.\n\nExit plugin."), type = MessageBox.TYPE_ERROR)
return False
if Freespace('/media/hdd') < 300000:
self.session.open(MessageBox, _("Not enough free space on /hdd !!\nYou need at least 300Mb free space.\n\nExit plugin."), type = MessageBox.TYPE_ERROR)
return False
if not os.path.exists(ofgwritePath):
self.session.open(MessageBox, _('ofgwrite not found !!\nPlease make sure you have ofgwrite installed in /usr/bin/ofgwrite.\n\nExit plugin.'), type = MessageBox.TYPE_ERROR)
return False
if not os.path.exists(imagePath):
try:
os.mkdir(imagePath)
except:
pass
if os.path.exists(flashPath):
try:
os.system('rm -rf ' + flashPath)
except:
pass
try:
os.mkdir(flashPath)
except:
pass
return True
def quit(self):
self.close()
def green(self):
if self.check_hdd():
self.session.open(doFlashImage, online = False, list=self.list[self.selection], multi=self.multi, devrootfs=self.devrootfs)
else:
self.close()
def blue(self):
if self.check_hdd():
self.session.open(doFlashImage, online = True, list=self.list[self.selection], multi=self.multi, devrootfs=self.devrootfs)
else:
self.close()
def yellow(self):
if SystemInfo["HaveMultiBoot"]:
self.selection = self.selection + 1
if self.selection == len(self.list):
self.selection = 0
self["key_yellow"].setText(_(self.list[self.selection]))
self.multi = self.read_startup("/boot/" + self.list[self.selection]).split(".",1)[1].split(" ",1)[0]
self.multi = self.multi[-1:]
print "[Flash Online] MULTI:",self.multi
cmdline = self.read_startup("/boot/" + self.list[self.selection]).split("=",3)[3].split(" ",1)[0]
self.devrootfs = cmdline
print "[Flash Online] MULTI rootfs ", self.devrootfs
def read_startup(self, FILE):
file = FILE
with open(file, 'r') as myfile:
data=myfile.read().replace('\n', '')
myfile.close()
return data
def list_files(self, PATH):
files = []
if SystemInfo["HaveMultiBoot"]:
path = PATH
for name in os.listdir(path):
if name != 'bootname' and os.path.isfile(os.path.join(path, name)):
try:
cmdline = self.read_startup("/boot/" + name).split("=",3)[3].split(" ",1)[0]
except IndexError:
continue
cmdline_startup = self.read_startup("/boot/STARTUP").split("=",3)[3].split(" ",1)[0]
if (cmdline != cmdline_startup) and (name != "STARTUP"):
files.append(name)
files.insert(0,"STARTUP")
else:
files = "None"
return files
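# Illustrative sketch -- not part of the original plugin, and the sample line below is
# only an assumed example (real /boot/STARTUP* contents differ per box): the multiboot
# parsing above expects something like
#   boot emmcflash0.kernel1 'brcm_cma=520M@248M brcm_cma=192M@768M root=/dev/mmcblk0p3 rw rootwait'
# from which it takes the boot slot ("1", last character of "kernel1") and the rootfs
# device ("/dev/mmcblk0p3"). A standalone equivalent of those two split chains:
def parse_startup_line(line):
	slot = line.split(".", 1)[1].split(" ", 1)[0][-1:]
	rootfs = line.split("=", 3)[3].split(" ", 1)[0]
	return slot, rootfs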
class doFlashImage(Screen):
skin = """
<screen position="center,center" size="560,500" title="ViX4E2PROJECT Image Flasher (select a image)">
<ePixmap position="10,467" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap position="140,467" zPosition="1" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap position="280,467" zPosition="1" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
<ePixmap position="420,467" zPosition="1" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget source="key_red" render="Label" position="0,460" zPosition="2" size="140,40" valign="center" halign="center" font="skyreg;21" foregroundColor="skygold" backgroundColor="skydarkblue" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget source="key_green" render="Label" position="140,460" zPosition="2" size="140,40" valign="center" halign="center" font="skyreg;21" foregroundColor="skygold" backgroundColor="skydarkblue" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget source="key_yellow" render="Label" position="285,460" zPosition="2" size="140,40" valign="center" halign="center" font="skyreg;21" foregroundColor="skygold" backgroundColor="skydarkblue" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget source="key_blue" render="Label" position="420,460" zPosition="2" size="140,40" valign="center" halign="center" font="skyreg;21" foregroundColor="skygold" backgroundColor="skydarkblue" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="info-online" position="10,30" zPosition="1" size="450,100" font="skyreg;20" foregroundColor="skygold" backgroundColor="skydarkblue" halign="left" valign="top" transparent="1" /> <widget name="imageList" position="10,10" zPosition="1" size="450,450" font="skyreg;21" foregroundColor="skygold" backgroundColor="skydarkblue" scrollbarMode="showOnDemand" transparent="1" />
</screen>"""
def __init__(self, session, online, list=None, multi=None, devrootfs=None ):
Screen.__init__(self, session)
self.session = session
Screen.setTitle(self, _("ViX4E2PROJECT Image Flasher"))
self["key_green"] = Button(_("FLASH"))
self["key_red"] = Button(_("EXIT"))
self["key_blue"] = Button("")
self["key_yellow"] = Button("")
self["info-local"] = Label(_("Press green to flash highlighted image below, blue to delete image, yellow to browse to another image .zip file or red to exit.\n\nDefault local folder: /media/hdd/imagebackups"))
self.filename = None
self.imagelist = []
self.simulate = False
self.Online = online
self.List = list
self.multi=multi
self.devrootfs=devrootfs
self.imagePath = imagePath
self.feedurl = feedurl_atv
if image == 0:
self.feed = "atv"
else:
self.feed = "om"
self["imageList"] = MenuList(self.imagelist)
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"green": self.green,
"yellow": self.yellow,
"red": self.quit,
"blue": self.blue,
"ok": self.green,
"cancel": self.quit,
}, -2)
self.onLayoutFinish.append(self.layoutFinished)
self.newfeed = None
if os.path.exists('/etc/enigma2/newfeed'):
self.newfeed = ReadNewfeed()
def quit(self):
if self.simulate or not self.List == "STARTUP":
fbClass.getInstance().unlock()
self.close()
def blue(self):
if self.Online:
if image == 1:
if self.feed == "atv":
self.feed = "om"
else:
self.feed = "atv"
else:
if self.feed == "atv":
self.feed = "atv2"
else:
self.feed = "atv"
self.layoutFinished()
return
sel = self["imageList"].l.getCurrentSelection()
if sel == None:
print"Nothing to select !!"
return
self.filename = sel
self.session.openWithCallback(self.RemoveCB, MessageBox, _("Do you really want to delete\n%s ?") % (sel), MessageBox.TYPE_YESNO)
def RemoveCB(self, ret):
if ret:
if os.path.exists(self.imagePath + "/" + self.filename):
os.remove(self.imagePath + "/" + self.filename)
self.imagelist.remove(self.filename)
self["imageList"].l.setList(self.imagelist)
def box(self):
box = getBoxType()
machinename = getMachineName()
if box in ('uniboxhd1', 'uniboxhd2', 'uniboxhd3'):
box = "ventonhdx"
elif box == 'odinm6':
box = getMachineName().lower()
elif box == "inihde" and machinename.lower() == "xpeedlx":
box = "xpeedlx"
elif box in ('xpeedlx1', 'xpeedlx2'):
box = "xpeedlx"
elif box == "inihde" and machinename.lower() == "hd-1000":
box = "sezam-1000hd"
elif box == "ventonhdx" and machinename.lower() == "hd-5000":
box = "sezam-5000hd"
elif box == "ventonhdx" and machinename.lower() == "premium twin":
box = "miraclebox-twin"
elif box == "xp1000" and machinename.lower() == "sf8 hd":
box = "sf8"
elif box.startswith('et') and not box in ('et8000', 'et8500', 'et8500s', 'et10000'):
box = box[0:3] + 'x00'
elif box == 'odinm9' and self.feed == "atv":
box = 'maram9'
return box
def green(self, ret = None):
sel = self["imageList"].l.getCurrentSelection()
if sel == None:
print"Nothing to select !!"
return
file_name = self.imagePath + "/" + sel
self.filename = file_name
self.sel = sel
box = self.box()
self.hide()
if self.Online:
url = self.feedurl + "/" + box + "/" + sel
print "[Flash Online] Download image: >%s<" % url
if self.newfeed:
self.feedurl = self.newfeed[0][:-1]
url = self.feedurl + "/" + box + "/" + sel
authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm()
authinfo.add_password(None, self.feedurl, self.newfeed[1][:-1], self.newfeed[2][:-1])
handler = urllib2.HTTPBasicAuthHandler(authinfo)
myopener = urllib2.build_opener(handler)
opened = urllib2.install_opener(myopener)
u = urllib2.urlopen(url)
total_size = int(u.info().getheaders("Content-Length")[0])
downloaded = 0
CHUNK = 256 * 1024
with open(file_name, 'wb') as fp:
while True:
chunk = u.read(CHUNK)
downloaded += len(chunk)
print "Downloading: %s Bytes of %s" % (downloaded, total_size)
if not chunk: break
fp.write(chunk)
self.ImageDownloadCB(False)
else:
try:
u = urllib2.urlopen(url)
f = open(file_name, 'wb')
f.close()
job = ImageDownloadJob(url, file_name, sel)
job.afterEvent = "close"
job_manager.AddJob(job)
job_manager.failed_jobs = []
self.session.openWithCallback(self.ImageDownloadCB, JobView, job, backgroundable = False, afterEventChangeable = False)
except urllib2.URLError as e:
print "[Flash Online] Download failed !!\n%s" % e
self.session.openWithCallback(self.ImageDownloadCB, MessageBox, _("Download Failed !!" + "\n%s" % e), type = MessageBox.TYPE_ERROR)
self.close()
else:
self.startInstallLocal(True)
def ImageDownloadCB(self, ret):
if ret:
return
if job_manager.active_job:
job_manager.active_job = None
self.close()
return
if len(job_manager.failed_jobs) == 0:
self.flashWithPostFlashActionMode = 'online'
self.flashWithPostFlashAction()
else:
self.session.open(MessageBox, _("Download Failed !!"), type = MessageBox.TYPE_ERROR)
def flashWithPostFlashAction(self, ret = True):
if ret:
print "flashWithPostFlashAction"
title =_("Please confirm if wish to continue with new flash or return to previous menu")
list = ((_("Confirmed! Flash new firmware and reboot"), "wizard"),
(_("Do not flash! Just return to previous menu"), "abort"))
self.session.openWithCallback(self.postFlashActionCallback, ChoiceBox,title=title,list=list,selection=self.SelectPrevPostFashAction())
else:
self.show()
def SelectPrevPostFashAction(self):
index = 0
Settings = False
AllPlugins = False
noPlugins = False
if os.path.exists('/media/hdd/images/config/settings'):
Settings = True
if os.path.exists('/media/hdd/images/config/plugins'):
AllPlugins = True
if os.path.exists('/media/hdd/images/config/noplugins'):
noPlugins = True
if Settings and noPlugins:
index = 1
elif Settings and not AllPlugins and not noPlugins:
index = 2
elif Settings and AllPlugins:
index = 3
return index
def postFlashActionCallback(self, answer):
print "postFlashActionCallback"
restoreSettings = False
restoreAllPlugins = False
restoreSettingsnoPlugin = False
if answer is not None:
if answer[1] == "restoresettings":
restoreSettings = True
if answer[1] == "restoresettingsnoplugin":
restoreSettings = True
restoreSettingsnoPlugin = True
if answer[1] == "restoresettingsandallplugins":
restoreSettings = True
restoreAllPlugins = True
if restoreSettings:
self.SaveEPG()
if answer[1] != "abort":
if restoreSettings:
try:
os.system('mkdir -p /media/hdd/images/config')
os.system('touch /media/hdd/images/config/settings')
except:
print "postFlashActionCallback: failed to create /media/hdd/images/config/settings"
else:
if os.path.exists('/media/hdd/images/config/settings'):
os.system('rm -f /media/hdd/images/config/settings')
if restoreAllPlugins:
try:
os.system('mkdir -p /media/hdd/images/config')
os.system('touch /media/hdd/images/config/plugins')
except:
print "postFlashActionCallback: failed to create /media/hdd/images/config/plugins"
else:
if os.path.exists('/media/hdd/images/config/plugins'):
os.system('rm -f /media/hdd/images/config/plugins')
if restoreSettingsnoPlugin:
try:
os.system('mkdir -p /media/hdd/images/config')
os.system('touch /media/hdd/images/config/noplugins')
except:
print "postFlashActionCallback: failed to create /media/hdd/images/config/noplugins"
else:
if os.path.exists('/media/hdd/images/config/noplugins'):
os.system('rm -f /media/hdd/images/config/noplugins')
if self.flashWithPostFlashActionMode == 'online':
self.unzip_image(self.filename, flashPath)
else:
self.startInstallLocalCB()
else:
self.show()
else:
self.show()
def unzip_image(self, filename, path):
print "Unzip %s to %s" %(filename,path)
self.session.openWithCallback(self.cmdFinished, Console, title = _("Unzipping files, Please wait ..."), cmdlist = ['unzip ' + filename + ' -o -d ' + path, "sleep 3"], closeOnSuccess = True)
def cmdFinished(self):
self.prepair_flashtmp(flashPath)
self.Start_Flashing()
def Start_Flashing(self):
print "Start Flashing"
cmdlist = []
if os.path.exists(ofgwritePath):
text = _("Flashing: ")
if self.simulate:
text += _("Simulate (no write)")
if SystemInfo["HaveMultiBoot"]:
cmdlist.append("%s -n -r -k -m%s %s > /dev/null 2>&1" % (ofgwritePath, self.multi, flashTmp))
else:
cmdlist.append("%s -n -r -k %s > /dev/null 2>&1" % (ofgwritePath, flashTmp))
self.close()
message = "echo -e '\n"
message += _('Show only found image and mtd partitions.\n')
message += "'"
else:
text += _("root and kernel")
if SystemInfo["HaveMultiBoot"]:
if not self.List == "STARTUP":
os.system('mkfs.ext4 -F ' + self.devrootfs)
cmdlist.append("%s -r -k -m%s %s > /dev/null 2>&1" % (ofgwritePath, self.multi, flashTmp))
if not self.List == "STARTUP":
cmdlist.append("umount -fl /oldroot_bind")
cmdlist.append("umount -fl /newroot")
else:
cmdlist.append("%s -r -k %s > /dev/null 2>&1" % (ofgwritePath, flashTmp))
message = "echo -e '\n"
if not self.List == "STARTUP" and SystemInfo["HaveMultiBoot"]:
message += _('ofgwrite flashing ready.\n')
message += _('please press exit to go back to the menu.\n')
else:
message += _('ofgwrite will stop enigma2 now to run the flash.\n')
message += _('Your STB will freeze during the flashing process.\n')
				message += _('Please DO NOT reboot your STB or turn off the power.\n')
				message += _('The image or kernel will be flashed and auto-booted in a few minutes.\n')
if self.box() == 'gb800solo':
message += _('GB800SOLO takes about 20 mins !!\n')
message += "'"
cmdlist.append(message)
self.session.open(Console, title = text, cmdlist = cmdlist, finishedCallback = self.quit, closeOnSuccess = False)
if not self.simulate:
fbClass.getInstance().lock()
if not self.List == "STARTUP":
self.close()
def prepair_flashtmp(self, tmpPath):
if os.path.exists(flashTmp):
flashTmpold = flashTmp + 'old'
os.system('mv %s %s' %(flashTmp, flashTmpold))
os.system('rm -rf %s' %flashTmpold)
if not os.path.exists(flashTmp):
os.mkdir(flashTmp)
kernel = True
rootfs = True
for path, subdirs, files in os.walk(tmpPath):
for name in files:
if name.find('kernel') > -1 and name.endswith('.bin') and kernel:
binfile = os.path.join(path, name)
dest = flashTmp + '/%s' %KERNELBIN
shutil.copyfile(binfile, dest)
kernel = False
elif name.find('root') > -1 and (name.endswith('.bin') or name.endswith('.jffs2') or name.endswith('.bz2')) and rootfs:
binfile = os.path.join(path, name)
dest = flashTmp + '/%s' %ROOTFSBIN
shutil.copyfile(binfile, dest)
rootfs = False
elif name.find('uImage') > -1 and kernel:
binfile = os.path.join(path, name)
dest = flashTmp + '/uImage'
shutil.copyfile(binfile, dest)
kernel = False
elif name.find('e2jffs2') > -1 and name.endswith('.img') and rootfs:
binfile = os.path.join(path, name)
dest = flashTmp + '/e2jffs2.img'
shutil.copyfile(binfile, dest)
rootfs = False
def yellow(self):
if not self.Online:
self.session.openWithCallback(self.DeviceBrowserClosed, DeviceBrowser, None, matchingPattern="^.*\.(zip|bin|jffs2|img)", showDirectories=True, showMountpoints=True, inhibitMounts=["/autofs/sr0/"])
else:
from Plugins.SystemPlugins.SoftwareManager.BackupRestore import BackupScreen
self.session.openWithCallback(self.green,BackupScreen, runBackup = True)
def startInstallLocal(self, ret = None):
self.flashWithPostFlashActionMode = 'local'
self.flashWithPostFlashAction()
def startInstallLocalCB(self, ret = None):
if self.sel == str(flashTmp):
self.Start_Flashing()
else:
self.unzip_image(self.filename, flashPath)
def DeviceBrowserClosed(self, path, filename, binorzip):
if path:
print path, filename, binorzip
strPath = str(path)
if strPath[-1] == '/':
strPath = strPath[:-1]
self.imagePath = strPath
if os.path.exists(flashTmp):
os.system('rm -rf ' + flashTmp)
os.mkdir(flashTmp)
if binorzip == 0:
for files in os.listdir(self.imagePath):
if files.endswith(".bin") or files.endswith('.jffs2') or files.endswith('.img'):
self.prepair_flashtmp(strPath)
break
self.Start_Flashing()
elif binorzip == 1:
self.unzip_image(strPath + '/' + filename, flashPath)
else:
self.layoutFinished()
else:
self.imagePath = imagePath
def layoutFinished(self):
box = self.box()
self.imagelist = []
if self.Online:
self["key_yellow"].setText("Backup&Flash")
if image == 1:
if self.feed == "atv":
self.feedurl = feedurl_atv
self["key_blue"].setText("openMIPS")
else:
self.feedurl = feedurl_om
self["key_blue"].setText("openATV")
else:
if self.feed == "atv":
self.feedurl = feedurl_atv
self["key_blue"].setText("ATV %s" %ImageVersion2)
else:
self.feedurl = feedurl_atv2
self["key_blue"].setText("ATV %s" %ImageVersion)
url = '%s/index.php?open=%s' % (self.feedurl,box)
try:
req = urllib2.Request(url)
if self.newfeed:
self.feedurl = self.newfeed[0][:-1]
url = '%s/index.php?open=%s' % (self.feedurl,box)
authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm()
authinfo.add_password(None, self.feedurl, self.newfeed[1][:-1], self.newfeed[2][:-1])
handler = urllib2.HTTPBasicAuthHandler(authinfo)
myopener = urllib2.build_opener(handler)
opened = urllib2.install_opener(myopener)
response = urllib2.urlopen(url)
else:
response = urllib2.urlopen(req)
except urllib2.URLError as e:
print "URL ERROR: %s\n%s" % (e,url)
self["imageList"].l.setList(self.imagelist)
return
try:
the_page = response.read()
except urllib2.HTTPError as e:
print "HTTP download ERROR: %s" % e.code
return
lines = the_page.split('\n')
tt = len(box)
for line in lines:
if line.find("<a href='%s/" % box) > -1:
t = line.find("<a href='%s/" % box)
if self.feed == "atv" or self.feed == "atv2":
self.imagelist.append(line[t+tt+10:t+tt+tt+39])
else:
self.imagelist.append(line[t+tt+10:t+tt+tt+40])
else:
self["key_blue"].setText(_("DELETE"))
self["key_yellow"].setText(_("BROWSE"))
for name in os.listdir(self.imagePath):
if name.endswith(".zip"): # and name.find(box) > 1:
self.imagelist.append(name)
self.imagelist.sort()
if os.path.exists(flashTmp):
os.system('cd /media/hdd/flash1 && rm -rf *')
if os.path.exists(flashPath):
os.system('cd /media/hdd/flash2 && rm -rf *')
self["imageList"].l.setList(self.imagelist)
def SaveEPG(self):
from enigma import eEPGCache
epgcache = eEPGCache.getInstance()
epgcache.save()
class ImageDownloadJob(Job):
def __init__(self, url, filename, file):
Job.__init__(self, _("Downloading %s") %file)
ImageDownloadTask(self, url, filename)
class DownloaderPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return self.error_message
class ImageDownloadTask(Task):
def __init__(self, job, url, path):
Task.__init__(self, job, _("Downloading"))
self.postconditions.append(DownloaderPostcondition())
self.job = job
self.url = url
self.path = path
self.error_message = ""
self.last_recvbytes = 0
self.error_message = None
self.download = None
self.aborted = False
def run(self, callback):
self.callback = callback
self.download = downloadWithProgress(self.url,self.path)
self.download.addProgress(self.download_progress)
self.download.start().addCallback(self.download_finished).addErrback(self.download_failed)
print "[ImageDownloadTask] downloading", self.url, "to", self.path
def abort(self):
print "[ImageDownloadTask] aborting", self.url
if self.download:
self.download.stop()
self.aborted = True
def download_progress(self, recvbytes, totalbytes):
if ( recvbytes - self.last_recvbytes ) > 100000: # anti-flicker
self.progress = int(100*(float(recvbytes)/float(totalbytes)))
self.name = _("Downloading") + ' ' + _("%d of %d kBytes") % (recvbytes/1024, totalbytes/1024)
self.last_recvbytes = recvbytes
def download_failed(self, failure_instance=None, error_message=""):
self.error_message = error_message
if error_message == "" and failure_instance is not None:
self.error_message = failure_instance.getErrorMessage()
Task.processFinished(self, 1)
def download_finished(self, string=""):
if self.aborted:
self.finish(aborted = True)
else:
Task.processFinished(self, 0)
class DeviceBrowser(Screen, HelpableScreen):
skin = """
<screen name="DeviceBrowser" position="center,center" size="520,430" >
<ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="message" render="Label" position="5,50" size="510,150" font="Regular;16" />
<widget name="filelist" position="5,210" size="510,220" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, startdir, message="", showDirectories = True, showFiles = True, showMountpoints = True, matchingPattern = "", useServiceRef = False, inhibitDirs = False, inhibitMounts = False, isTop = False, enableWrapAround = False, additionalExtensions = None):
Screen.__init__(self, session)
HelpableScreen.__init__(self)
Screen.setTitle(self, _("Browse to image .zip file"))
self["key_red"] = Button(_("EXIT"))
self["key_green"] = Button(_("FLASH"))
self["message"] = Button(message)
self.filelist = FileList(startdir, showDirectories = showDirectories, showFiles = showFiles, showMountpoints = showMountpoints, matchingPattern = matchingPattern, useServiceRef = useServiceRef, inhibitDirs = inhibitDirs, inhibitMounts = inhibitMounts, isTop = isTop, enableWrapAround = enableWrapAround, additionalExtensions = additionalExtensions)
self["filelist"] = self.filelist
self["FilelistActions"] = ActionMap(["SetupActions", "ColorActions"],
{
"green": self.use,
"red": self.exit,
"ok": self.ok,
"cancel": self.exit
})
hotplugNotifier.append(self.hotplugCB)
self.onShown.append(self.updateButton)
self.onClose.append(self.removeHotplug)
def hotplugCB(self, dev, action):
print "[hotplugCB]", dev, action
self.updateButton()
def updateButton(self):
if self["filelist"].getFilename() or self["filelist"].getCurrentDirectory():
self["key_green"].text = _("FLASH")
else:
self["key_green"].text = ""
def removeHotplug(self):
print "[removeHotplug]"
hotplugNotifier.remove(self.hotplugCB)
def ok(self):
if self.filelist.canDescent():
if self["filelist"].showMountpoints == True and self["filelist"].showDirectories == False:
self.use()
else:
self.filelist.descent()
def use(self):
print "[use]", self["filelist"].getCurrentDirectory(), self["filelist"].getFilename()
if self["filelist"].getFilename() is not None and self["filelist"].getCurrentDirectory() is not None:
if self["filelist"].getFilename().endswith(".bin") or self["filelist"].getFilename().endswith(".jffs2"):
self.close(self["filelist"].getCurrentDirectory(), self["filelist"].getFilename(), 0)
elif self["filelist"].getFilename().endswith(".zip"):
self.close(self["filelist"].getCurrentDirectory(), self["filelist"].getFilename(), 1)
else:
return
def exit(self):
self.close(False, False, -1)
| gpl-2.0 | -8,050,462,198,954,369,000 | 38.818887 | 409 | 0.6782 | false |
Alignak-monitoring-contrib/alignak-app | alignak_app/qobjects/service/services.py | 1 | 10854 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2018:
# Matthieu Estrada, [email protected]
#
# This file is part of (AlignakApp).
#
# (AlignakApp) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (AlignakApp) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (AlignakApp). If not, see <http://www.gnu.org/licenses/>.
"""
Services
++++++++
Services manage creation of QWidget to display the services
"""
from logging import getLogger
from operator import itemgetter
from PyQt5.Qt import QTreeWidget, QTreeWidgetItem, QWidget, QIcon, QGridLayout, QSize, QListWidget
from PyQt5.Qt import Qt, QListWidgetItem
from alignak_app.backend.datamanager import data_manager
from alignak_app.items.item import get_icon_name
from alignak_app.utils.config import settings
from alignak_app.qobjects.common.frames import get_frame_separator
from alignak_app.qobjects.service.tree_item import ServiceTreeItem
from alignak_app.qobjects.service.services_dashboard import ServicesDashboardQWidget
from alignak_app.qobjects.service.service import ServiceDataQWidget
logger = getLogger(__name__)
class ServicesQWidget(QWidget):
"""
    Class to create the services QWidget
"""
def __init__(self, parent=None):
super(ServicesQWidget, self).__init__(parent)
# Fields
self.services = None
self.services_tree_widget = QTreeWidget()
self.services_list_widget = QListWidget()
self.service_data_widget = ServiceDataQWidget()
self.services_dashboard = ServicesDashboardQWidget()
def initialize(self):
"""
Initialize QWidget
"""
layout = QGridLayout()
self.setLayout(layout)
layout.setContentsMargins(0, 0, 0, 0)
# Services dashboard
self.services_dashboard.initialize()
for state in self.services_dashboard.states_btns:
self.services_dashboard.states_btns[state].clicked.connect(
lambda _, s=state: self.filter_services(state=s)
)
layout.addWidget(self.services_dashboard, 0, 0, 1, 2)
layout.addWidget(get_frame_separator(), 1, 0, 1, 2)
# Services QTreeWidget
self.services_tree_widget.setIconSize(QSize(32, 32))
self.services_tree_widget.setAlternatingRowColors(True)
self.services_tree_widget.header().close()
layout.addWidget(self.services_tree_widget, 2, 0, 1, 1)
# Services QListWidget
self.services_list_widget.clicked.connect(self.update_service_data)
self.services_list_widget.hide()
layout.addWidget(self.services_list_widget, 2, 0, 1, 1)
# Service DataWidget
self.service_data_widget.initialize()
layout.addWidget(self.service_data_widget, 2, 1, 1, 1)
def filter_services(self, state):
"""
Filter services with the wanted state
:param state: state of service: OK, WARNING, NOT_MONITORED, DOWNTIME
:return:
"""
# Clear QListWidget and update filter buttons of services dashboard
self.services_list_widget.clear()
for btn_state in self.services_dashboard.states_btns:
if btn_state != state:
self.services_dashboard.states_btns[btn_state].setChecked(False)
# Update QWidgets
if self.sender().isChecked():
self.set_filter_items(state)
self.services_tree_widget.hide()
self.services_list_widget.show()
else:
self.services_tree_widget.show()
self.services_list_widget.hide()
def set_filter_items(self, state):
"""
Add filter items to QListWidget corresponding to "state"
:param state: state of service to filter
:type state: str
"""
services_added = False
if state in 'NOT_MONITORED':
for service in self.services:
if not service.data['active_checks_enabled'] and \
                        not service.data['passive_checks_enabled'] and \
not service.data['ls_downtimed'] and \
not service.data['ls_acknowledged']:
self.add_filter_item(service)
services_added = True
elif state in 'DOWNTIME':
for service in self.services:
if service.data['ls_downtimed']:
self.add_filter_item(service)
services_added = True
elif state in 'ACKNOWLEDGE':
for service in self.services:
if service.data['ls_acknowledged']:
self.add_filter_item(service)
services_added = True
else:
for service in self.services:
if service.data['ls_state'] in state:
self.add_filter_item(service)
services_added = True
if not services_added:
not_added_item = QListWidgetItem()
not_added_item.setData(Qt.DecorationRole, QIcon(settings.get_image('services_ok')))
not_added_item.setData(Qt.DisplayRole, _('No such services to display...'))
self.services_list_widget.addItem(not_added_item)
def add_filter_item(self, filter_item):
"""
Add filter item to QListWidget
:param filter_item: filter item (service)
:type filter_item: alignak_app.items.service.Service
"""
item = QListWidgetItem()
monitored = \
filter_item.data['passive_checks_enabled'] + filter_item.data['active_checks_enabled']
icon_name = get_icon_name(
filter_item.item_type,
filter_item.data['ls_state'],
filter_item.data['ls_acknowledged'],
filter_item.data['ls_downtimed'],
monitored
)
item.setData(Qt.DecorationRole, QIcon(settings.get_image(icon_name)))
item.setData(Qt.DisplayRole, filter_item.get_display_name())
item.setData(Qt.UserRole, filter_item.item_id)
item.setToolTip(filter_item.get_tooltip())
self.services_list_widget.addItem(item)
def update_widget(self, services):
"""
Update the QTreeWidget and its items
:param services: list of :class:`Services <alignak_app.items.service.Service>` items
:type services: list
"""
self.services = services
# Update services dashboard
self.services_dashboard.update_widget(self.services)
# Clear QTreeWidget
self.services_tree_widget.clear()
self.services_tree_widget.setIconSize(QSize(16, 16))
if self.services:
# Set as "Global" aggregation who are empty
for service in self.services:
if not service.data['aggregation']:
service.data['aggregation'] = 'Global'
# First sort list by state then by aggregation
newlist = sorted(
self.services,
key=lambda s: itemgetter('ls_state', 'ls_acknowledged', 'aggregation')(s.data)
)
self.services = newlist
# Get list of aggregations
aggregations = []
for service in self.services:
if service.data['aggregation'] not in aggregations:
aggregations.append(service.data['aggregation'])
# Add QTreeWidgetItems
for aggregation in aggregations:
main_tree = QTreeWidgetItem()
main_tree.setText(0, aggregation)
main_tree.setIcon(0, QIcon(settings.get_image('tree')))
main_tree.setToolTip(0, aggregation)
for service in self.services:
if service.data['aggregation'] == aggregation:
service_tree = ServiceTreeItem()
service_tree.initialize(service)
service_tree.setToolTip(0, service.get_tooltip())
self.services_tree_widget.clicked.connect(self.update_service_data)
main_tree.addChild(service_tree)
self.services_tree_widget.addTopLevelItem(main_tree)
self.service_data_widget.hide()
else:
# If no services, reset service item to None and hide data widget
self.service_data_widget.service_item = None
self.service_data_widget.hide()
def update_service_data(self): # pragma: no cover
"""
Update ServiceDataqWidget
"""
service_item = self.sender().currentItem()
if isinstance(service_item, (ServiceTreeItem, QListWidgetItem)):
service = None
# Get service
if isinstance(service_item, ServiceTreeItem):
service = data_manager.get_item('service', '_id', service_item.service_id)
elif isinstance(service_item, QListWidgetItem):
service = data_manager.get_item('service', '_id', service_item.data(Qt.UserRole))
if not service:
service = self.service_data_widget.service_item
# Update QWidgets
self.services_tree_widget.setMaximumWidth(self.width() * 0.5)
self.services_list_widget.setMaximumWidth(self.width() * 0.5)
self.service_data_widget.setMaximumWidth(self.width() * 0.5)
self.service_data_widget.update_widget(service)
self.services_dashboard.update_widget(self.services)
self.service_data_widget.show()
# Update Service Items (ServiceTreeItem, QListWidgetItem)
if isinstance(service_item, ServiceTreeItem):
service_item.update_item()
else:
monitored = \
service.data['passive_checks_enabled'] + service.data['active_checks_enabled']
icon_name = get_icon_name(
'service',
service.data['ls_state'],
service.data['ls_acknowledged'],
service.data['ls_downtimed'],
monitored
)
service_item.setData(Qt.DecorationRole, QIcon(settings.get_image(icon_name)))
service_item.setData(Qt.DisplayRole, service.get_display_name())
service_item.setToolTip(service.get_tooltip())
| agpl-3.0 | 7,076,940,687,448,298,000 | 37.626335 | 98 | 0.608163 | false |
CaliOpen/CaliOpen | src/backend/tools/py.migrate/caliopen_migrate/shards.py | 1 | 3022 | import logging
from caliopen_storage.config import Configuration
from caliopen_storage.helpers.connection import get_index_connection
from caliopen_main.user.core.setups import setup_shard_index
from caliopen_main.user.core import User
log = logging.getLogger(__name__)
def delete_all_shards(dry_run=True):
"""Delete all index shards."""
client = get_index_connection()
shards = Configuration('global').get('elasticsearch.shards')
for shard in shards:
log.info('Processing shard {}'.format(shard))
if not shard.startswith('caliopen-'):
log.warn('Invalid shard name, pass')
continue
if not client.indices.exists(shard):
log.warn('Shard does not exist')
continue
if dry_run:
log.info('Delete shard but dry run do not touch')
else:
client.indices.delete(shard)
log.info('Index {} deleted'.format(shard))
def create_all_shards(dry_run=True):
"""Create all needed index shards."""
client = get_index_connection()
shards = Configuration('global').get('elasticsearch.shards')
for shard_id in shards:
if not client.indices.exists(shard_id):
log.info('Creating shard {}'.format(shard_id))
if not dry_run:
setup_shard_index(shard_id)
def recreate_user_alias(client, user, dry_run=True):
"""Create an index alias mapping user_id -> shard_id."""
if not user.shard_id:
log.error('No shard for user {}'.format(user.user_id))
return False
shards = Configuration('global').get('elasticsearch.shards')
alias_exists = False
if client.indices.exists_alias(name=user.user_id):
alias = client.indices.get_alias(name=user.user_id)
for index, alias_infos in alias.items():
if index not in shards:
if not dry_run:
client.indices.delete_alias(index=index, name=user.user_id)
else:
log.info('Alias exist {} with index {}, should delete'.
format(user.user_id, index))
else:
log.info('Alias on shard exist, skipping')
alias_exists = True
if alias_exists:
return True
if not dry_run:
body = {'filter': {'term': {'user_id': user.user_id}}}
try:
client.indices.put_alias(index=user.shard_id,
name=user.user_id,
body=body)
except Exception as exc:
log.exception('Error during alias creation for user {} : {}'.
format(user.user_id, exc))
return False
else:
log.info('Should create alias {}'.format(user.user_id))
return True
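# Illustrative sketch -- not part of the original module: the alias created above
# routes queries for a user to its shard while filtering on user_id; this helper just
# fetches back what was written so it can be eyeballed.
def show_user_alias(user_id):
    client = get_index_connection()
    if client.indices.exists_alias(name=user_id):
        return client.indices.get_alias(name=user_id)
    return {}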
def recreate_all_user_aliases(dry_run=True):
"""Recreate alias for all users."""
client = get_index_connection()
for user in User._model_class.all():
recreate_user_alias(client, user, dry_run)
| gpl-3.0 | 7,146,484,076,049,893,000 | 35.409639 | 79 | 0.589676 | false |
smurfix/pybble | pybble/blueprint/_root/part/usertracker.py | 1 | 1356 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This is part of Pybble, a WMS (Whatever Management System) based on
## Jinja2/Haml, Werkzeug, Flask, and Optimism.
##
## Pybble is Copyright © 2009-2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.md` for details,
## including an optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
from flask import request, session
from pybble.core.models.tracking import UserTracker
from pybble.core.db import db
from pybble.render import render_template
from .._base import expose
expose = expose.sub("part.usertracker")
from datetime import datetime,timedelta
from time import time
###
### Tracking
###
@expose("/changes")
def view_all():
user = request.user
f = (UserTracker.user == user)
last = session.get("chg_",None)
if last and time()-last[0] < 2*60:
pass
else:
session["chg_"] = (int(time()), user.feed_read)
user.feed_read = datetime.utcnow()
return render_template("changelist.html", changes=UserTracker.q.filter(f).order_by(UserTracker.id.desc())[0:30])
| gpl-3.0 | 796,936,370,807,972,100 | 29.704545 | 113 | 0.721688 | false |
ScriptGadget/Creare | server/ncm/appengine_config.py | 1 | 1173 | # Copyright 2011 Bill Glover
#
# This file is part of Creare.
#
# Creare is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Creare is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Creare. If not, see <http://www.gnu.org/licenses/>.
#
# This is a setup file needed by gaesessions (gaesessions itself
# is not part of Creare). This should probably be removed from
# the repository and replaced with instructions on where to get
# gaesessions and instructions for how to install it.
from gaesessions import SessionMiddleware
def webapp_add_wsgi_middleware(app):
app = SessionMiddleware(app, cookie_key='KMOPgO79WHQ4vtrUil9TPPPK33idCJaHi+FL/O+v34cri8CQ5N9aPOgO1xjWYwVp7HS8js1Rx0YW2i9C4CbT3Q==')
return app
| gpl-3.0 | -7,984,899,056,931,966,000 | 44.115385 | 135 | 0.767263 | false |
jonathanmorgan/django_reference_data | examples/ref_domain-from_listofnewspapers-Texas.py | 1 | 6532 | # imports
# datetime, urllib2
import datetime
import urllib2
# beautifulsoup 4
from bs4 import BeautifulSoup
# python_utilities
#import python_utilities.beautiful_soup.beautiful_soup_helper
# django_reference_data
import django_reference_data.models
#===============================================================================#
# declare variables
#===============================================================================#
# declare variables - tracking performance
start_dt = None
end_dt = None
domain_counter = -1
no_match_counter = -1
error_counter = -1
my_exception_helper = None
# declare variables
do_update_existing = True
# processing state list.
state_name = ""
state_url = ""
state_file_path = ""
state_file = None
# processing a state's page.
state_html = None
state_bs = None
state_paper_list = None
state_paper_li = None
paper_name = ""
paper_url = ""
current_domain_instance = None
paper_counter = -1
# fields we collect per domain.
bs_helper = None
current_domain_name = ""
slash_index = ""
current_domain_path = ""
current_description = ""
current_source = ""
current_source_details = ""
current_domain_type = ""
current_is_news = True
current_rank = -1
#===============================================================================#
# Code
#===============================================================================#
# capture start datetime, initialize counters
start_dt = datetime.datetime.now()
domain_counter = 0
no_match_counter = 0
error_counter = 0
# init beautiful soup helper
#bs_helper = python_utilities.beautiful_soup.beautiful_soup_helper.BeautifulSoupHelper()
# clean out broken texas domain rows.
'''
DELETE from `django_reference_data_reference_domain`
WHERE source_details LIKE '%in-texas%';
'''
state_name = "Texas"
state_url = "http://www.listofnewspapers.com/en/north-america/texan-newspapers-in-texas.html"
state_file_path = "texan-newspapers-in-texas-TIDY.html"
# print next state:
print( "==> processing " + state_name + ": " + state_file_path )
# load the state's HTML
state_file = open( state_file_path, "r" )
state_html = state_file.read()
# let BeautifulSoup parse it.
state_bs = BeautifulSoup( state_html, "html.parser" )
# get list of papers.
state_paper_list = state_bs.find_all( "li", "linewspapers" )
print( "- paper count: " + str( len( state_paper_list ) ) )
# loop over papers.
paper_counter = 0
for state_paper_li in state_paper_list:
paper_counter += 1
domain_counter += 1
print( "- paper " + str( paper_counter ) + ": " + str( state_paper_li ) )
# get values
paper_name = state_paper_li.get_text()
paper_url = state_paper_li.a[ 'href' ]
print( " - " + paper_name + ": " + paper_url )
# collect information - init
current_domain_name = ""
slash_index = ""
current_domain_path = ""
current_description = ""
current_source = ""
current_source_details = ""
current_domain_type = ""
current_is_news = True
current_rank = -1
# description
current_description = paper_name
# parse out domain and path
current_domain_name = django_reference_data.models.Reference_Domain.parse_URL( paper_url, django_reference_data.models.Reference_Domain.URL_PARSE_RETURN_DOMAIN )
current_domain_path = django_reference_data.models.Reference_Domain.parse_URL( paper_url, django_reference_data.models.Reference_Domain.URL_PARSE_RETURN_PATH )
# no rank
# always the same for these.
current_source = "listofnewspapers.com"
current_source_details = state_url
current_domain_type = django_reference_data.models.Reference_Domain.DOMAIN_TYPE_NEWS
current_is_news = True
# get Reference_Domain instance
# update existing?
if ( do_update_existing == True ):
try:
# first, try looking up existing domain.
#domain_rs = django_reference_data.models.Reference_Domain.objects.filter( source = current_source )
#domain_rs = domain_rs.filter( domain_name = current_domain_name )
#current_domain_instance = domain_rs.get( domain_path = current_domain_path )
# use lookup_record() method. Returns None if
# not found.
current_domain_instance = django_reference_data.models.Reference_Domain.lookup_record( source_IN = current_source, domain_name_IN = current_domain_name, domain_path_IN = current_domain_path )
# got anything?
if ( current_domain_instance == None ):
# nothing returned. Create new instance.
current_domain_instance = django_reference_data.models.Reference_Domain()
no_match_counter += 1
#-- END check to see if domain found --#
except:
# No matching row. Create new instance.
current_domain_instance = django_reference_data.models.Reference_Domain()
no_match_counter += 1
#-- END attempt to get existing row. --#
else:
# not updating. Just create new instance.
current_domain_instance = django_reference_data.models.Reference_Domain()
#-- END check to see if we update existing. --#
# set values
#current_domain_instance.domain_name = current_domain_name
#current_domain_instance.domain_path = current_domain_path
#current_domain_instance.long_name = None
# parse and store the URL information.
current_domain_instance.parse_and_store_URL( paper_url )
current_domain_instance.description = current_description
current_domain_instance.source = current_source
current_domain_instance.source_details = current_source_details
current_domain_instance.domain_type = current_domain_type
current_domain_instance.is_news = current_is_news
#current_domain_instance.is_multimedia = False
#current_domain_instance.rank = current_rank
current_domain_instance.state = state_name
#current_domain_instance.county = ""
#current_domain_instance.city = ""
#current_domain_instance.zip_code = ""
# save
current_domain_instance.save()
#-- END loop over papers. --#
# a little overview
end_dt = datetime.datetime.now()
print( "==> Started at " + str( start_dt ) )
print( "==> Finished at " + str( end_dt ) )
print( "==> Duration: " + str( end_dt - start_dt ) )
print( "==> Domains: " + str( domain_counter ) )
print( "==> No Match: " + str( no_match_counter ) )
print( "==> Errors: " + str( error_counter ) ) | gpl-3.0 | 5,729,771,921,642,360,000 | 30.109524 | 203 | 0.629822 | false |
violarium/file-pyncoder | pyncoder.py | 1 | 1229 | import lib.files
import lib.filters
import os
import sys
import argparse
parser = argparse.ArgumentParser(description='Convert file encodings.')
# the target - file or directory
parser.add_argument('--target', '-t', action='store', type=str, required=True)
# converter options
parser.add_argument('--in-encoding', '-i', action='store', required=True)
parser.add_argument('--out-encoding', '-o', action='store', required=True)
parser.add_argument('--keep-backup', '-k', action='store_true', default=True)
# the regular expressions: to include and exclude files
parser.add_argument('--regexp', '-r', action='store')
parser.add_argument('--ng-regexp', '-nr', action='store')
# the extensions: can include or exclude extensions
group = parser.add_mutually_exclusive_group()
group.add_argument('--extensions', '-e', action='store')
group.add_argument('--ng-extensions', '-ne', action='store')
args = parser.parse_args()
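# Example invocation (hypothetical file name and encodings):
#   python pyncoder.py --target notes.txt -i cp1251 -o utf-8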
# check whether file or directory
if os.path.isdir(args.target):
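    # TODO: directory handling is not implemented yet; only single files are
    # converted by the branch below.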
pass
elif os.path.isfile(args.target):
lib.files.change_file_encoding(args.target, args.in_encoding, args.out_encoding, args.keep_backup)
pass
else:
print "There are no file or directory '%s'" % args.target
sys.exit(1)
| mit | 852,558,385,694,625,700 | 32.216216 | 102 | 0.716843 | false |
ypid/series60-remote | pc/window/mainwindow.py | 1 | 51261 | # -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2010 Lukas Hetzenecker <[email protected]>
import sys
import re
import base64
import copy
import distutils.version
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import window.contacts_edit
import window.contacts_import
import window.settings
import window.history
import window.statistics
import window.export
import window.message_queue
import window.import_messages
import window.about
import window.favorites
import widget.SortedTreeWidgetItem
import widget.SortedListWidgetItem
import ui.ui_main
import ui.ui_mobileNumberSelect
import ui.ui_mobileNumberNotFound
import ui.ui_connection_failed
import ui.ui_connection_version_mismatch
import ui.ui_connection_update_version
import ui.resource_rc
import lib.update_checker
import lib.favorites
import lib.obex_handler
import lib.obex_scheduler
import lib.obex_wrapper
from lib.classes import *
LINUX = "qt_x11_wait_for_window_manager" in dir()
class MainWindow(QMainWindow, ui.ui_main.Ui_MainWindow):
def __init__(self, parent, main):
super(MainWindow, self).__init__(parent)
self.parent = parent
self.main = main
self.log = main.log
self.connection = main.connection
self.database = main.database
self.settings = main.settings
self.helper = main.helper
# Refresh all 10 minutes the device information
self.refreshTimer = QTimer(self)
self.refreshTimer.setInterval(600000)
self.setupUi(self)
# Favorites menu
self.contactMenu = self.menu_Contacts
self.favMenu = lib.favorites.FavoriteMenu(self.contactMenu, main)
# Change color of the ListWidget to a normal background color and make the highlight color lighter
pal = QPalette()
pal.setColor(QPalette.Base, self.palette().color(QPalette.Window))
pal.setColor(QPalette.Highlight, QColor(38, 136, 240))
self.listWidget.setPalette(pal)
# Add menu to "Import contacts" button
self.importMenu = QMenu(self)
self.importVcardAction = self.importMenu.addAction(QIcon(":/text-x-vcard"), self.tr("Import &Vcard file..."))
self.importLdifAction = self.importMenu.addAction(QIcon(":/text-x-ldif"), self.tr("Import &LDIF file..."))
self.contactsImportButton.setMenu(self.importMenu)
# Restore size, position and splitter states from previous saved value
windowSize = self.settings.setting("windows/main/size")
windowPosition = self.settings.setting("windows/main/position")
messagesSplitter = self.settings.setting("windows/main/messagesSplitter")
contactsSplitter = self.settings.setting("windows/main/contactsSplitter")
if windowSize.isValid():
self.resize(windowSize)
if not windowPosition.isNull():
self.move(windowPosition)
if not messagesSplitter.isNull():
self.messagesSplitter.restoreState(messagesSplitter)
if not contactsSplitter.isNull():
self.contactsSplitter.restoreState(contactsSplitter)
self.newMessagesComplete = False
self.queueMessages = 0
self.fillTypeBox = True
self.connectionAttemptByUser = True
self.connectionClosedByUser = False
self.deviceScanner = self.connection.scanner()
self.automaticConnectionTimer = QTimer()
self.emptyPixmap = QPixmap(16, 16)
self.emptyPixmap.fill(Qt.transparent)
self.emptyIcon = QIcon(self.emptyPixmap)
self.contactIsRecipientIcon = QIcon(":/dialog-apply")
self.fileWidget.updateActions()
# Load the contacts and devices when the event loop is started and all other events are handled
# This results in a faster startup (saved ~274ms)
self.loadSettings()
self.showFavorites()
QTimer.singleShot(0, self.loadUpdateChecker)
QTimer.singleShot(0, lambda : self.loadAutomaticConnection(True))
self.adjustSize()
self.connect(self.main, SIGNAL("favoriteListChanged"), self.showFavorites)
self.connect(self.main, SIGNAL("updateContact"), self.updateContact)
self.connect(self.contactMenu, SIGNAL("triggered(QAction *)"), self.favoriteClicked)
self.connect(self.settingsAction, SIGNAL("triggered()"), self.showSettings)
self.connect(self.exportAction, SIGNAL("triggered()"), self.showExportDialog)
self.connect(self.quitAction, SIGNAL("triggered()"), self.quit)
self.connect(self.aboutApplicationAction, SIGNAL("triggered()"), self.showAboutDialog)
self.connect(self.aboutQtAction, SIGNAL("triggered()"), self.main.app.aboutQt)
self.connect(self.historyAction, SIGNAL("triggered()"), self.showHistory)
self.connect(self.statisticsAction, SIGNAL("triggered()"), self.showStatistics)
self.connect(self.messageQueueAction, SIGNAL("triggered()"), self.showMessageQueue)
self.connect(self.importMessagesAction, SIGNAL("triggered()"), self.showImportMessages)
self.connect(self.logAction, SIGNAL("triggered()"), self.showLog)
self.connect(self.donateAction, SIGNAL("triggered()"), self.openDonateWebsite)
self.connect(self.connectButton, SIGNAL("clicked()"), self.connectToDevice)
self.connect(self.messageText, SIGNAL("sendMessage"), self.sendButton, SLOT("animateClick()"))
self.connect(self.sendButton, SIGNAL("clicked()"), self.sendMessage)
self.connect(self.refreshButton, SIGNAL("clicked()"), self.refreshSysinfo)
self.connect(self.refreshTimer, SIGNAL("timeout()"), self.refreshSysinfo)
self.connect(self.listWidget, SIGNAL("itemSelectionChanged()"), self.checkPosition)
self.connect(self.stackedWidget, SIGNAL("currentChanged(int)"),
lambda: self.searchLine.setSearchText() or self.searchLine_2.setSearchText())
self.connect(self.stackedWidget, SIGNAL("currentChanged(int)"), self.checkFiles)
self.connect(self.disconnectButton, SIGNAL("clicked()"), self.closeConnection)
self.connect(self.cancelButton, SIGNAL("clicked()"), self.closeConnection)
self.connect(self.messageText, SIGNAL("textChanged()"), self.textChanged)
self.connect(self.toLine, SIGNAL("textEdited(const QString &)"), self.recipientChanged)
self.connect(self.contactsTree,SIGNAL("customContextMenuRequested(QPoint)"),self.showCustomContextMenu)
self.connect(self.contactsTree,SIGNAL("itemActivated(QTreeWidgetItem *, int)"),self.contactClicked)
self.connect(self.contactsList,SIGNAL("currentItemChanged(QListWidgetItem *, QListWidgetItem *)"),self.showContact)
self.connect(self.contactsList,SIGNAL("currentItemChanged(QListWidgetItem *, QListWidgetItem *)"),
self, SLOT("checkEditContactButton()"))
self.connect(self.contactEditButton,SIGNAL("clicked()"), self.editContact)
self.connect(self.contactAddButton,SIGNAL("clicked()"), self.addContact)
self.connect(self.importVcardAction,SIGNAL("triggered()"), lambda : self.importContacts("vcard"))
self.connect(self.importLdifAction,SIGNAL("triggered()"), lambda : self.importContacts("ldif"))
self.connect(self.contactsList,SIGNAL("customContextMenuRequested(QPoint)"),self.showContactListContextMenu)
self.connect(self.searchLine,SIGNAL("textChanged(const QString &)"),self.showContacts)
self.connect(self.searchLine_2,SIGNAL("textChanged(const QString &)"),self.showContacts)
self.connect(self.typeBox,SIGNAL("currentIndexChanged(int)"),self.showContacts)
self.connect(self.connection, SIGNAL("connectionStateChanged"), lambda x: self.connectBar.setValue(x))
self.connect(self.connection, SIGNAL("sysinfoCompleted"), self.showSysinfo)
self.connect(self.connection, SIGNAL("contactsCompleted"), self.showContacts)
self.connect(self.connection, SIGNAL("contactsUpdated"), self.showContacts)
self.connect(self.connection, SIGNAL("connectionCompleted"), self.connected)
self.connect(self.connection, SIGNAL("connectionClosed"), self.connectionClosed)
self.connect(self.connection, SIGNAL("connectionAborted"), self.connectionClosed)
self.connect(self.connection, SIGNAL("connectionFailed"), self.connectionFailed)
self.connect(self.connection, SIGNAL("connectionVersionMismatchError"), self.connectionVersionMismatch)
self.connect(self.connection, SIGNAL("messagesRequest"), self.newMessages)
self.connect(self.connection, SIGNAL("messagesRequestComplete"), self.newMessagesFinished)
self.connect(self.connection, SIGNAL("messageSent"), self.messageStateChanged)
self.connect(self.connection, SIGNAL("messageQueued"), self.messageStateChanged)
self.connect(self.automaticConnectionTimer, SIGNAL("timeout()"), self.automaticConnectionTimerFired)
self.connect(self.deviceScanner, SIGNAL("scanStarted"), self.automaticConnectionScanStarted)
self.connect(self.deviceScanner, SIGNAL("foundDevice"), self.automaticConnectionFoundDevice)
self.connect(self.deviceScanner, SIGNAL("scanCompleted"), self.automaticConnectionScanFinished)
self.connect(self.deviceScanner, SIGNAL("scanFailed"), self.automaticConnectionScanFinished)
self.connect(self.settings, SIGNAL("reloadSettings"), self.loadSettings)
self.connect(self.settings, SIGNAL("reloadSettings"), self.loadAutomaticConnection)
# Also update the icons in the summary tab when the connection state has changed
self.okPixmap = QIcon(":/dialog-apply").pixmap(16, 16)
self.loadingMovie = QMovie(":/loading-2", parent=self)
self.loadingMovie.setScaledSize(QSize(20, 20))
self.loadingMovie.start()
self.connect(self.connection, SIGNAL("connectionEstablished"), lambda : self.connectionStateLabel.setPixmap(self.okPixmap))
self.connect(self.connection, SIGNAL("connectionEstablished"), lambda : self.sysinfoStateLabel.setMovie(self.loadingMovie))
self.connect(self.connection, SIGNAL("sysinfoCompleted"), lambda : self.sysinfoStateLabel.setPixmap(self.okPixmap))
self.connect(self.connection, SIGNAL("sysinfoCompleted"), lambda : self.contactStateLabel.setMovie(self.loadingMovie))
self.connect(self.connection, SIGNAL("contactsCompleted"), lambda : self.contactStateLabel.setPixmap(self.okPixmap))
self.connect(self.connection, SIGNAL("contactsCompleted"), lambda : self.calendarStateLabel.setMovie(self.loadingMovie))
self.connect(self.connection, SIGNAL("calendarCompleted"), lambda : self.calendarStateLabel.setPixmap(self.okPixmap))
if not main.minimized:
self.show()
def __str__(self):
return "\"Main-Window\""
def loadSettings(self):
self.updateDevices()
if self.connection.connected():
# Show the extended StackedWidget when there is an active connection
# after reloading the settings
self.showSysinfo()
self.showContacts()
self.connection = self.main.connection
self.messageText.setSendMessageOnReturn(self.settings.setting("general/sendMessageOnReturn"))
self.checkSendButton()
self.checkEditContactButton()
def loadUpdateChecker(self):
if self.settings.setting("updateCheck/enabled"):
lastCheck = self.settings.setting("updateCheck/lastCheck")
interval = self.settings.setting("updateCheck/interval")
if interval == 0:
return
if not lastCheck.isValid() or lastCheck.daysTo(QDate.currentDate()) >= interval:
self.updateChecker = lib.update_checker.UpdateChecker(self, self.main)
self.connect(self.updateChecker, SIGNAL("updateCheckFailed"), self.updateCheckError)
self.connect(self.updateChecker, SIGNAL("updateCheckNewVersion"), self.updateCheckNewVersion)
self.updateChecker.updateCheck()
else:
lastVersion = self.settings.setting("updateCheck/lastVersion")
if not lastVersion:
return
lastVersion = distutils.version.LooseVersion(lastVersion)
currentVersion = ".".join([str(i) for i in self.main.appVersion])
currentVersion = distutils.version.LooseVersion(currentVersion)
if lastVersion > currentVersion:
self.updateCheckNewVersion(self.settings.setting("updateCheck/lastVersion"), self.settings.setting("updateCheck/lastMessage"))
def loadAutomaticConnection(self, firstStart=False):
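        # (Re)start the periodic device scan that reconnects automatically as
        # long as no connection is active.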
enabled = self.settings.setting("general/automaticConnectionEnabled")
if enabled and not self.connection.connected():
interval = self.settings.setting("general/automaticConnectionInterval")
if firstStart:
self.automaticConnectionTimerFired()
self.automaticConnectionTimer.setInterval(interval * 1000)
self.automaticConnectionTimer.start()
def showFavorites(self):
self.contactMenu.clear()
self.favMenu.menu(self.contactMenu)
def adjustSize(self):
maxSize = QSize()
for i in range(self.listWidget.count()):
itemSize = self.listWidget.sizeHintForIndex( self.listWidget.indexFromItem(self.listWidget.item(i)) )
if itemSize.width() > maxSize.width():
maxSize.setWidth(itemSize.width())
if itemSize.height() > maxSize.height():
maxSize.setHeight(itemSize.height())
# Add spacing
maxSize.setWidth(maxSize.width() + 13)
maxSize.setHeight(maxSize.height() + 10)
for i in range(self.listWidget.count()):
self.listWidget.item(i).setSizeHint(maxSize)
self.listWidget.setGridSize(maxSize)
self.listWidget.setMaximumWidth(maxSize.width() + self.listWidget.rect().width() - self.listWidget.contentsRect().width() )
self.listWidget.setMinimumWidth(maxSize.width() + self.listWidget.rect().width() - self.listWidget.contentsRect().width() )
def checkPosition(self):
# If you select the last item, hold the left mouse button move your mouse to a free space select the last item
if len(self.listWidget.selectedItems()) == 0:
self.listWidget.setCurrentRow(self.listWidget.currentRow())
def checkFiles(self, index):
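        # Lazily set up the OBEX file-transfer scheduler the first time the
        # files page is shown while a Bluetooth connection is active.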
if self.stackedWidget.indexOf(self.files) == index:
if lib.obex_wrapper.FOUND_OBEX and not self.fileWidget.connected() and self.connection.connected():
handler = lib.obex_handler.ObexHandler(self.connection.device().bluetoothAddress())
scheduler = lib.obex_scheduler.ObexScheduler(handler)
self.fileWidget.setScheduler(scheduler)
def updateDevices(self):
device = self.devicesBox.currentDevice()
if not isinstance(device, type(None)):
try:
try:
totalRam = self.helper.pretty_filesize(device.value("total_ram"))
except:
totalRam = self.tr("unknown")
try:
totalRom = self.helper.pretty_filesize(device.value("total_rom"))
except:
totalRom = self.tr("unknown")
self.modelLabel_3.setText(str(device.value("model")))
self.imeiLabel_3.setText(str(device.value("imei")))
self.totalRamLabel_3.setText(totalRam)
self.romLabel_3.setText(totalRom)
self.displayLabel_3.setText(self.tr("%1 pixels").arg(device.value("display") ))
self.osLabel_3.setText(device.value("s60_version")) # TODO: append to modelLabel
self.detailStack.setCurrentWidget(self.simpleWidget)
except ValueError:
# This happens when you were never connected to the device
# (e.g. when you start the application for the first time)
self.detailStack.setCurrentWidget(self.noDataWidget)
else:
self.detailStack.setCurrentWidget(self.noDataWidget)
def __connectToDevice(self, device):
if self.connection.connected():
return
if isinstance(device, type(None)):
return
self.settings.setSetting("bluetooth/lastName", device.name())
port = self.settings.setting("bluetooth/port")
if self.scanningMovie.movie():
self.scanningMovie.movie().stop()
self.scanningMovie.setMovie(QMovie())
self.scanningMovie.setToolTip("")
self.automaticConnectionTimer.stop()
# FIXME: Ugly hack
device.setPort(port)
# Reset connection state icons
self.connectionStateLabel.setMovie(self.loadingMovie)
self.sysinfoStateLabel.clear()
self.contactStateLabel.clear()
self.calendarStateLabel.clear()
self.log.info(QString("Connect to device %1 ( %2 on port %3 )").arg(device.name()).arg(device.bluetoothAddress()).arg(port))
self.statusLabel.setText(self.tr("Establish connection!"))
self.connectLabel.setText(self.tr("Connection establishment to: <b>%1</b>").arg(device.name()))
self.devicesBox.selectDevice(device)
self.devicesBox.setEnabled(False)
self.connectButton.setEnabled(False)
self.establishConnectionStack.setCurrentWidget(self.establishConnectionWidget)
self.connection.connectToDevice(device)
def connectToDevice(self):
device = self.devicesBox.currentDevice()
if isinstance(device, type(None)):
return
self.connectionAttemptByUser = True
self.__connectToDevice(device)
def showSysinfo(self):
refreshDate = QDate.currentDate().toString("dd.MM.yyyy")
refreshTime = QTime().currentTime().toString("hh:mm:ss")
try:
freeRam = self.helper.pretty_filesize(self.connection.device().value("free_ram"))
except:
freeRam = self.tr("unknown")
try:
totalRam = self.helper.pretty_filesize(self.connection.device().value("total_ram"))
except:
totalRam = self.tr("unknown")
try:
totalRom = self.helper.pretty_filesize(self.connection.device().value("total_rom"))
except:
totalRom = self.tr("unknown")
try:
signalBars = int(self.connection.device().value("signal_bars"))
except:
signalBars = 0
try:
battery = int(self.connection.device().value("battery"))
except:
battery = 0
if self.connection.device().value("signal_dbm") == u"-1":
# Mobile phone is in offline mode
signalDbm = self.tr("offline mode")
else:
signalDbm = self.tr("%1 dbM").arg(self.connection.device().value("signal_dbm"))
if signalBars == -1:
# Mobile phone is in offline mode
self.signalBar_2.setHidden(True)
self.signalBar.setHidden(True)
else:
self.signalBar_2.setHidden(False)
self.signalBar.setHidden(False)
self.refreshTimeLabel_2.setText(refreshDate + " " + refreshTime)
self.modelLabel_2.setText(str(self.connection.device().value("model")))
self.batteryLabel_2.setText(self.tr("%1% of 100%").arg(self.connection.device().value("battery")))
self.batteryBar_2.setValue(battery)
self.signalLabel_2.setText(signalDbm)
self.signalBar_2.setValue(signalBars)
self.refreshTimeLabel.setText(refreshDate + " " + refreshTime)
self.modelLabel.setText(str(self.connection.device().value("model")))
self.batteryLabel.setText(self.tr("%1% of 100%").arg(self.connection.device().value("battery")))
self.batteryBar.setValue(battery)
self.signalLabel.setText(signalDbm)
self.signalBar.setValue(signalBars)
self.profileLabel.setText(self.connection.device().value("active_profile"))
self.btAddressLabel.setText(self.connection.device().bluetoothAddress())
self.displayLabel.setText(self.tr("%1 pixels").arg(self.connection.device().value("display") ))
self.drivespaceBox.clear()
for type, value in self.connection.device().values():
            if type != "free_drivespace":
continue
drive, free = value.split(":")
free = self.helper.pretty_filesize(free)
self.drivespaceBox.addItem(QString("%1: %2").arg(drive, free))
self.imeiLabel.setText(str(self.connection.device().value("imei")))
self.freeRamLabel.setText(freeRam)
self.totalRamLabel.setText(totalRam)
self.romLabel.setText(totalRom)
self.swLabel.setText(self.connection.device().value("program_version"))
self.programLabel.setText(self.connection.device().value("pys60_version"))
self.osLabel.setText(self.connection.device().value("s60_version")) # TODO: append to modelLabel
self.detailStack.setCurrentWidget(self.extendedWidget)
def showContacts(self, search=""):
if self.fillTypeBox:
self.typeBox.addItem(self.tr("Name"), QVariant("s60remote-name"))
self.typeBox.addItem(self.tr("All fields"), QVariant("s60remote-all"))
self.typeBox.insertSeparator(2)
search = self.searchLine.searchText()
if not search:
search = self.searchLine_2.searchText()
search = unicode(search).lower()
self.contactsTree.clear()
self.contactsList.clear()
searchField = self.typeBox.itemData(self.typeBox.currentIndex()).toString()
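        # "s60remote-name" and "s60remote-all" are the two built-in filters
        # added above; any other value is a concrete contact field type.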
for contact in self.database.contacts(True):
if self.fillTypeBox:
for field, value in contact.values():
if field.isPicture():
continue
if self.typeBox.findData(QVariant(field.type())) == -1:
self.typeBox.addItem(field.toString()[:-1], QVariant(field.type()))
if search:
# Search for name
if searchField == "s60remote-name":
if search not in contact.name().lower():
continue
# Search in all field
elif searchField == "s60remote-all":
found = False
for type in contact.types():
if type == "thumbnail_image":
continue
for value in contact.value(type):
if search in value.lower():
found = True
if not found:
continue
# Search in one specific field
else:
found = False
for value in contact.value(searchField):
if search in value.lower():
found = True
if not found:
continue
item = widget.SortedListWidgetItem.SortedListWidgetItem(self.contactsList)
item.setData(Roles.ContactRole, QVariant(contact))
item.setText(contact.name())
if "thumbnail_image" in contact and self.settings.setting("contacts/displayIcon"):
                try:
                    data = base64.decodestring(contact.value("thumbnail_image")[0])
                    image = QImage().fromData(data)
                    icon = QIcon(QPixmap().fromImage(image))
                    item.setIcon(icon)
                    self.contactsList.setIconSize( QSize(image.size().width()/2, image.size().height()/2) )
                except:
                    # Skip the icon if the thumbnail cannot be decoded
                    pass
if "mobile_number" in contact:
if self.settings.setting("contacts/hideCellnumber"):
item = widget.SortedTreeWidgetItem.SortedTreeWidgetItem(self.contactsTree)
item.setData(0, Roles.ContactRole, QVariant(contact))
item.setText(0, contact.name())
item.setIcon(0, self.contactIsRecipientIcon) if self.contactIsRecipient(contact) else item.setIcon(0, self.emptyIcon)
else:
for number in contact.value("mobile_number"):
item = widget.SortedTreeWidgetItem.SortedTreeWidgetItem(self.contactsTree)
item.setData(0, Roles.ContactRole, QVariant(contact))
item.setText(0, contact.name())
item.setText(1, number)
item.setIcon(0, self.contactIsRecipientIcon) if self.contactIsRecipient(contact) else item.setIcon(0, self.emptyIcon)
if self.contactsList.currentRow() == -1 and self.contactsList.count() > 0:
self.contactsList.setCurrentRow(0)
self.contactsTree.setColumnHidden(1, self.settings.setting("contacts/hideCellnumber"))
self.contactsTree.sortByColumn(0, Qt.AscendingOrder)
self.contactsTree.resizeColumnToContents(0)
self.contactsTree.resizeColumnToContents(1)
if self.fillTypeBox:
self.fillTypeBox = False
def updateContact(self, contact):
#TODO: Only update the changed contact...
self.showContacts()
item = self.contactsList.item(0)
for row in range(self.contactsList.count()):
data = self.contactsList.item(row).data(Roles.ContactRole).toPyObject()
if data == contact:
item = self.contactsList.item(row)
break
self.contactsList.setCurrentItem(item, QItemSelectionModel.ClearAndSelect)
def showContact(self, contact, previousContact):
try:
contact = contact.data(Roles.ContactRole).toPyObject()
except:
return
self.contactBrowser.clear()
self.nameLabel.setText("""<span style=" font-size:16pt; font-weight:600;">""" + unicode(contact.name()) + """</span>""")
if "thumbnail_image" in contact:
data = base64.decodestring(contact.value("thumbnail_image")[0])
image = QImage().fromData(data)
pixmap = QPixmap().fromImage(image)
self.pictureLabel.setPixmap(pixmap)
else:
self.pictureLabel.setPixmap(QPixmap())
for field, value in contact.values():
if field.isPicture():
continue
if field.isDate():
value = QDate.fromString(value, "yyyyMMdd").toString(Qt.DefaultLocaleLongDate)
self.contactBrowser.insertHtml("<b>" + field.toString(printLocation=True) + " </b> " + value + "<br />")
def connected(self):
self.refreshTimer.start()
self.connectionClosedByUser = False
self.connectionDate = QDate.currentDate().toString("dd.MM.yyyy")
self.connectionTime = QTime().currentTime().toString("hh:mm:ss")
self.connectionTimeLabel.setText(self.connectionDate + " " + self.connectionTime)
self.connectionTimeLabel_2.setText(self.connectionDate + " " + self.connectionTime)
self.disconnectButton.setEnabled(True)
self.refreshButton.setEnabled(True)
self.statusLabel.setText(self.tr("Connected to <b>%1</b>").arg(self.connection.device().name()))
self.connectionStack.setCurrentWidget(self.informationWidget)
self.checkEditContactButton()
self.checkSendButton()
def contactClicked(self, item, column):
contact = item.data(0, Roles.ContactRole).toPyObject()
phone = item.text(1) if not self.settings.setting("contacts/hideCellnumber") else None
if phone:
contact.addInternalValue("phone", phone)
if self.contactIsRecipient(contact):
self.takeContact(contact, phone)
item.setIcon(0, self.emptyIcon)
else:
self.insertContact(contact, phone)
item.setIcon(0, self.contactIsRecipientIcon)
self.checkSendButton()
def contactIsRecipient(self, contact):
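        # The To line holds recipients separated by ";". Depending on the
        # "hideCellnumber" setting an entry is either just the contact name or
        # "Name (number)".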
to = self.toLine.text()
if not to:
return False
hide = self.settings.setting("contacts/hideCellnumber")
for recipient in unicode(to).split(";"):
recipient = recipient.strip()
if hide:
if recipient == contact.name():
return True
else:
if recipient == contact.name() + " (" + contact.internalValue("phone") + ")":
return True
return False
def insertContact(self, contact, phone):
name = contact.name()
if phone:
name += " (" + phone + ")"
curName = unicode(self.toLine.text())
if (len(curName) == 0):
name = unicode(name)
self.toLine.setText(name)
else:
name = curName + u"; " + unicode(name)
self.toLine.setText(name)
def takeContact(self, contact, phone):
to = unicode(self.toLine.text())
name = contact.name()
if phone:
name += " (" + phone + ")"
toList = to.split(";")
toList = [entry.strip() for entry in toList]
toList.remove(name)
to = "; ".join(toList)
self.toLine.setText(to)
def textChanged(self):
        length = int(self.messageText.toPlainText().length())
        chars, messages = self.helper.countMessages(length)
        if length >= 512:
            bigsms = '***'
        else:
            bigsms = ''
        self.charLabel.setText(self.tr("%1 chars left; %n message(s); total chars: %2%3", "", messages).arg(chars).arg(length).arg(bigsms))
self.checkSendButton()
def recipientChanged(self, recipients):
# This is only called if the to line is changed by the user (and NOT programmatically)
toList = recipients.split(";")
toList = [unicode(entry).strip() for entry in toList]
hideCell = self.settings.setting("contacts/hideCellnumber")
for itemPos in xrange(self.contactsTree.topLevelItemCount()):
item = self.contactsTree.topLevelItem(itemPos)
contact = item.data(0, Roles.ContactRole).toPyObject()
if (hideCell and contact.name() in toList) \
or (not hideCell and contact.name() + " (" + item.text(1) + ")" in toList):
item.setIcon(0, self.contactIsRecipientIcon)
else:
item.setIcon(0, self.emptyIcon)
self.checkSendButton()
def sendMessage(self):
to = unicode(self.toLine.text())
msg = unicode(self.messageText.toPlainText())
to = to.split(";")
for name in to:
contact = None
name = name.strip()
# Only a phone number, sth. like 06641234567 or +436641234567
if re.match(r"^[+]{0,1}\d*$", name) != None:
contact = Contact(name=name)
contact.addInternalValue("phone", name)
            # Name and phone number, sth. like foo (06641234567)
elif re.match(r".*\([+]{0,1}\d{3,15}\)$", name) != None:
search = re.search(r"(.*)\s\((.*)\)$", name)
name = search.groups()[0]
phone = search.groups()[1]
contact = Contact(name=name)
contact.addInternalValue("phone", phone)
# Only a name, sth. like foo
else:
for recipient in self.database.contacts(True):
if unicode(recipient.name()) == name:
contact = copy.deepcopy(recipient)
if len(recipient.value("mobile_number")) > 1:
self.log.info(QString("Contact %1 has more then one mobile number.").arg(name))
number = self.askMobileNumber(contact)
if number != None:
contact.addInternalValue("phone", number)
else:
continue
else:
contact.addInternalValue("phone", recipient.value("mobile_number")[0])
if not contact:
# foo must be in the contact list
dlg = QDialog(self)
dialog = ui.ui_mobileNumberNotFound.Ui_MobileNumberNotFoundDialog()
dialog.setupUi(dlg)
self.main.setupButtonBox(dialog.buttonBox)
dlg.exec_()
continue
if not "phone" in contact.internalValues():
continue
self.log.info(QString("Sending message to contact %1").arg(unicode(contact)))
message = Message()
message.setType(MessageType.Outgoing)
message.setDevice(self.connection.device())
message.setContact(contact)
message.setDateTime(QDateTime.currentDateTime())
message.setMessage(msg)
self.connection.sendMessage(message)
self.toLine.clear()
self.messageText.clear()
self.messageText.setFocus()
for itemPos in xrange(self.contactsTree.topLevelItemCount()):
item = self.contactsTree.topLevelItem(itemPos)
item.setIcon(0, self.emptyIcon)
def askMobileNumber(self, contact):
dlg = QDialog(self)
dialog = ui.ui_mobileNumberSelect.Ui_MobileNumberSelectDialog()
dialog.setupUi(dlg)
self.main.setupButtonBox(dialog.buttonBox)
dialog.contactLabel.setText(self.tr("Please choose the telephone number for contact <i>%1</i>:").arg(contact.name()))
for number in contact.value("mobile_number"):
dialog.mobileBox.addItem(number)
if not dlg.exec_():
return None
return str(dialog.mobileBox.currentText())
def showCustomContextMenu(self, pos):
index = self.contactsTree.indexAt(pos)
if not index.isValid():
return
item = self.contactsTree.itemAt(pos)
menu = QMenu(self)
# Contact as QVariant: There is no need to convert it to a PyObject,
# because it is only used to pass it to the actions
contact = item.data(0, Roles.ContactRole)
startChat = QAction(self)
startChat.setText(self.tr("Start &chat"))
startChat.setIcon(QIcon(":/message-chat"))
startChat.setProperty("type", QVariant("chat"))
startChat.setProperty("contact", contact)
menu.addAction(startChat)
if self.settings.setting("messages/saveAllMessages"):
showHistory = QAction(self)
showHistory.setText(self.tr("View &history"))
showHistory.setIcon(QIcon(":/message-history"))
showHistory.setProperty("type", QVariant("history"))
showHistory.setProperty("contact", contact)
menu.addAction(showHistory)
showStatistics = QAction(self)
showStatistics.setText(self.tr("View &statistics"))
showStatistics.setIcon(QIcon(":/view-statistics"))
showStatistics.setProperty("type", QVariant("statistics"))
showStatistics.setProperty("contact", contact)
menu.addAction(showStatistics)
menu.popup(QCursor.pos())
self.connect(menu, SIGNAL("triggered(QAction *)"), self.customContextMenuTriggered)
def customContextMenuTriggered(self, action):
type = str(action.property("type").toString())
contact = action.property("contact").toPyObject()
if type == "chat":
self.openChat(contact)
elif type == "history":
historyBrowser = window.history.History(self, self.main, contact)
elif type == "statistics":
statisticsDialog = window.statistics.Statistics(self, self.main, contact)
def showContactListContextMenu(self, pos):
menu = QMenu(self)
if self.connection.connected():
index = self.contactsList.indexAt(pos)
if not index.isValid():
return
item = self.contactsList.itemAt(pos)
# Contact as QVariant: There is no need to convert it to a PyObject,
# because it is only used to pass it to the actions
contact = item.data(Roles.ContactRole)
editAction = QAction(self)
editAction.setText(self.tr("&Edit contact"))
editAction.setIcon(QIcon(":/user-properties"))
editAction.setProperty("type", QVariant("edit"))
editAction.setProperty("contact", contact)
menu.addAction(editAction)
removeAction = QAction(self)
removeAction.setText(self.tr("&Remove contact"))
removeAction.setIcon(QIcon(":/list-remove-user"))
removeAction.setProperty("type", QVariant("remove"))
removeAction.setProperty("contact", contact)
menu.addAction(removeAction)
self.connect(menu, SIGNAL("triggered(QAction *)"), self.contactListContextMenuTriggered)
else:
notConnectedAction = QAction(self)
notConnectedAction.setText(self.tr("You aren't connected to the mobile phone."))
notConnectedAction.setIcon(QIcon(":/dialog-close"))
notConnectedAction.setEnabled(False)
menu.addAction(notConnectedAction)
menu.popup(QCursor.pos())
def contactListContextMenuTriggered(self, action):
type = str(action.property("type").toString())
contact = action.property("contact").toPyObject()
if type == "edit":
dlg = window.contacts_edit.ContactsEdit(self, self.main, contact)
elif type == "remove":
ret = QMessageBox.question(None,
self.tr("Delete contact"),
self.tr("Do you really want to remove contact \"%1\"?").arg(contact.name()),
QMessageBox.StandardButtons(\
QMessageBox.No | \
QMessageBox.Yes))
if ret == QMessageBox.Yes:
self.connection.contactRemove(contact)
self.database.contactRemove(contact.idOnPhone())
self.showContacts()
def refreshSysinfo(self):
if not self.connection.connected():
return
self.connection.refreshSysinfo()
def newMessages(self):
if not self.newMessagesComplete:
time = QTime().currentTime().toString()
self.statusBar().showMessage(self.tr("[%1] Fetching new messages...").arg(time))
def newMessagesFinished(self, num):
if not self.newMessagesComplete:
time = QTime().currentTime().toString()
if num > 0:
self.statusBar().showMessage(self.tr("[%1] %n new message(s) got saved.", "", num).arg(time), 5000)
else:
self.statusBar().showMessage(self.tr("[%1] There are no new messages.").arg(time), 5000)
self.newMessagesComplete = True
def messageStateChanged(self, message):
queue = self.connection.pendingMessages()
anz = len(queue)
time = QTime().currentTime().toString()
if anz >= 1:
self.statusBar().showMessage(self.tr("[%1] %n message(s) in queue", "", anz).arg(time))
elif anz == 0 and self.queueMessages > 0:
self.statusBar().showMessage(self.tr("[%1] All messages were sent").arg(time), 5000)
self.queueMessages = anz
def closeConnection(self):
self.connectionClosedByUser = True
self.connection.closeConnection()
self.fileWidget.closeConnection()
def connectionClosed(self):
self.refreshTimer.stop()
self.statusLabel.setText(self.tr("No active connection!"))
self.devicesBox.setEnabled(True)
self.connectButton.setEnabled(True)
self.establishConnectionStack.setCurrentWidget(self.emptyWidget)
self.connectionStack.setCurrentWidget(self.connectionWidget)
self.disconnectButton.setEnabled(False)
self.refreshButton.setEnabled(False)
self.connectBar.setValue(0)
self.updateDevices()
self.checkEditContactButton()
self.checkSendButton()
if not self.connectionClosedByUser:
self.log.debug(QString("Automatic connection establishment: connection closed by error, restarting timer..."))
self.loadAutomaticConnection()
else:
self.log.debug(QString("Automatic connection establishment: connection closed by user, timer is not started"))
def automaticConnectionTimerFired(self):
self.log.debug(QString("Timer for automatic connection establishment fired.. scanning for devices"))
self.deviceScanner.start()
def automaticConnectionScanStarted(self):
self.log.debug(QString("Automatic connection establishment: Device scan started"))
movie = QMovie(":/loading-2", "", self)
movie.setScaledSize(QSize(16, 16))
self.scanningMovie.setMovie(movie)
self.scanningMovie.setToolTip(self.tr("There is an active device scan for the automatic connection establishment"))
self.scanningMovie.movie().start()
def automaticConnectionFoundDevice(self, address, name, deviceClass):
for device in self.database.devices():
if device.bluetoothAddress() == address:
self.log.info(QString("Automatic connection establishment: Matching device found, connecting..."))
self.deviceScanner.stop()
self.connectionAttemptByUser = False
self.__connectToDevice(device)
def automaticConnectionScanFinished(self):
self.log.debug(QString("Automatic connection establishment: Device scan finished"))
if self.scanningMovie.movie():
self.scanningMovie.movie().stop()
self.scanningMovie.setMovie(QMovie())
self.scanningMovie.setToolTip("")
@pyqtSignature("")
def checkEditContactButton(self):
if self.connection.connected() and self.contactsList.selectedItems():
self.contactAddButton.setEnabled(True)
self.contactsImportButton.setEnabled(True)
self.contactEditButton.setEnabled(True)
else:
self.contactAddButton.setEnabled(False)
self.contactsImportButton.setEnabled(False)
self.contactEditButton.setEnabled(False)
def checkSendButton(self):
if self.toLine.text() and self.messageText.toPlainText() and self.connection.connected():
self.sendButton.setEnabled(True)
else:
self.sendButton.setEnabled(False)
def openChat(self, contact):
if contact:
# Close all popup windows of the contact
for popup in self.main.popups:
try:
button = popup.buttons.buttons()[0] # Chat button
popupContact = button.property("contact").toPyObject()
if contact == popupContact:
popup.close()
except:
pass
#myChat = window.chat.Chat(None, self.main, contact)
self.main.chatManager.openChat(contact)
def favoriteClicked(self, action):
type = action.property("type").toString()
if type == "contact":
contact = action.data().toPyObject()
self.openChat(contact)
elif type == "configureFavorites":
self.showFavoriteDialog()
def editContact(self):
try:
contact = self.contactsList.currentItem()
contact = contact.data(Roles.ContactRole).toPyObject()
except:
contact = None
dlg = window.contacts_edit.ContactsEdit(self, self.main, contact)
def reinstallService(self):
self.closeConnection()
dlg = QDialog(self)
dialog = ui.ui_connection_update_version.Ui_ConnectionUpdateVersionDialog()
dialog.setupUi(dlg)
if lib.obex_wrapper.FOUND_OBEX:
dialog.obexStack.setCurrentWidget(dialog.obexFoundWidget)
self.log.info(QString("OBEX support was found, trying to send installation file to device"))
else:
dialog.obexStack.setCurrentWidget(dialog.obexNotFoundWidget)
self.log.info(QString("OBEX support was not found"))
if LINUX:
dialog.operatingSystemStack.setCurrentWidget(dialog.linuxWidget)
else:
dialog.operatingSystemStack.setCurrentWidget(dialog.windowsWidget)
self.connect(dialog.sendApplicationButton, SIGNAL("clicked()"), lambda : self.sendApplicationFile(dialog.py20Box.isChecked()))
self.connect(dialog.sendPythonButton, SIGNAL("clicked()"), lambda : self.sendPythonFile(dialog.py20Box.isChecked()))
self.connect(dialog.openFolderButton, SIGNAL("clicked()"), self.helper.openFolder)
dlg.exec_()
def sendApplicationFile(self, usePy20):
if usePy20:
self.helper.sendFile(self, self.devicesBox.currentDevice(), self.main.applicationSis_Py20)
else:
self.helper.sendFile(self, self.devicesBox.currentDevice(), self.main.applicationSis_Py14)
def sendPythonFile(self, usePy20):
if usePy20:
self.helper.sendFile(self, self.devicesBox.currentDevice(), self.main.pythonSis_Py20)
else:
self.helper.sendFile(self, self.devicesBox.currentDevice(), self.main.pythonSis_Py14)
def connectionFailed(self, errno, errmsg):
self.connectionClosedByUser = False
if not self.connectionAttemptByUser:
self.statusBar().showMessage(self.tr("Connection to device failed: %1 - %2").arg(errno).arg(errmsg), 6000)
return
dlg = QDialog(self)
dialog = ui.ui_connection_failed.Ui_ConnectionFailedDialog()
dialog.setupUi(dlg)
dialog.errnoLabel.setText("<b>" + str(errno) + "</b>")
dialog.errmsgLabel.setText("<b>" + errmsg + "</b>")
self.main.setupButtonBox(dialog.buttonBox)
self.connect(dialog.reinstallButton, SIGNAL("clicked()"), self.reinstallService)
self.connect(dialog.buttonBox.button(QDialogButtonBox.Retry), SIGNAL("clicked()"), self.connectToDevice)
self.connect(dialog.buttonBox.button(QDialogButtonBox.Cancel), SIGNAL("clicked()"), self.closeConnection)
dlg.exec_()
def connectionVersionMismatch(self, deviceVersion, pcVersion):
dlg = QDialog(self)
dialog = ui.ui_connection_version_mismatch.Ui_ConnectionVersionMismatchDialog()
dialog.setupUi(dlg)
dialog.mobileVersionLabel.setText("<b>" + str(deviceVersion) + "</b>")
dialog.pcVersionLabel.setText("<b>" + str(pcVersion) + "</b>")
self.main.setupButtonBox(dialog.buttonBox)
self.connect(dialog.updateButton, SIGNAL("clicked()"), self.reinstallService)
self.connect(dialog.buttonBox.button(QDialogButtonBox.Cancel), SIGNAL("clicked()"), self.closeConnection)
dlg.exec_()
def updateCheckError(self, errorMessage):
self.statusBar().showMessage(self.tr("Update check failed: %1").arg(errorMessage), 5000)
def updateCheckNewVersion(self, version, message):
text = self.tr("The update to <b>%1</b> of Series60-Remote is available at <b>%2</b>. Would you like to get it?").arg(version, self.settings.setting("updateCheck/website").toString())
if message:
text += '<br /><br />' + self.tr("Update notes: %1").arg(message)
button = QMessageBox.information(self, self.tr("New version detected"), text, QMessageBox.Yes | QMessageBox.No | QMessageBox.Ignore, QMessageBox.Yes)
if button == QMessageBox.Yes:
QDesktopServices.openUrl(self.settings.setting("updateCheck/website"))
elif button == QMessageBox.Ignore:
self.settings.setSetting("updateCheck/interval", 0)
def openDonateWebsite(self):
QDesktopServices.openUrl(QUrl("http://sourceforge.net/donate/index.php?group_id=204429"))
def addContact(self):
dlg = window.contacts_edit.ContactsEdit(self, self.main)
def importContacts(self, format):
dlg = window.contacts_import.ContactsImport(self, self.main, format)
def showFavoriteDialog(self):
dlg = window.favorites.Favorites(self, self.main)
def showAboutDialog(self):
dlg = window.about.About(self, self.main)
def showSettings(self):
dlg = window.settings.Settings(self, self.main)
def showExportDialog(self):
dlg = window.export.Export(self, self.main)
def showHistory(self):
dlg = window.history.History(self, self.main)
def showStatistics(self):
dlg = window.statistics.Statistics(self, self.main)
def showMessageQueue(self):
dlg = window.message_queue.MessageQueue(self, self.main)
def showImportMessages(self):
dlg = window.import_messages.ImportMessages(self, self.main)
def showLog(self):
self.main.logWindow.show()
def quit(self):
self.main.app.closeAllWindows()
self.main.app.quit()
def closeEvent(self, event):
self.settings.beginGroup("windows")
self.settings.beginGroup("main")
self.settings.setSetting("size", self.size())
self.settings.setSetting("position", self.pos())
self.settings.setSetting("messagesSplitter", self.messagesSplitter.saveState())
self.settings.setSetting("contactsSplitter", self.contactsSplitter.saveState())
close = self.settings.setting("windows/main/minimizeOnClose")
if close == 0:
message = QMessageBox.question(None, self.tr("Quit"),
self.tr("Should the application stay in the system tray?"),
QMessageBox.StandardButtons( QMessageBox.No | QMessageBox.Yes), QMessageBox.Yes)
if message == QMessageBox.Yes:
self.settings.setSetting("minimizeOnClose", 1)
else:
self.settings.setSetting("minimizeOnClose", 2)
self.settings.endGroup()
self.settings.endGroup()
close = self.settings.setting("windows/main/minimizeOnClose")
if close == 1:
self.hide()
else:
for popup in self.main.popups:
popup.close()
if hasattr(self.main, "trayicon"):
self.main.trayicon.hide()
if self.connection.connected():
self.connection.closeConnection()
if self.fileWidget.connected():
self.fileWidget.closeConnection()
self.settings.sync()
self.database.close()
self.hide()
self.main.app.quit()
| gpl-2.0 | -1,246,744,118,675,140,000 | 43.114458 | 191 | 0.629816 | false |
WillWeatherford/deliver-cute | subscribers/migrations/0001_initial.py | 1 | 1594 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-06 22:02
from __future__ import unicode_literals
from django.db import migrations, models
import subscribers.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SubReddit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=21, unique=True)),
],
),
migrations.CreateModel(
name='Subscriber',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('send_hour', models.IntegerField(choices=[(0, '12:00 AM'), (1, '1:00 AM'), (2, '2:00 AM'), (3, '3:00 AM'), (4, '4:00 AM'), (5, '5:00 AM'), (6, '6:00 AM'), (7, '7:00 AM'), (8, '8:00 AM'), (9, '9:00 AM'), (10, '10:00 AM'), (11, '11:00 AM'), (12, '12:00 PM'), (13, '1:00 PM'), (14, '2:00 PM'), (15, '3:00 PM'), (16, '4:00 PM'), (17, '5:00 PM'), (18, '6:00 PM'), (19, '7:00 PM'), (20, '8:00 PM'), (21, '9:00 PM'), (22, '10:00 PM'), (23, '11:00 PM')], default=8)),
('unsubscribe_hash', models.CharField(default=subscribers.models._hash, max_length=255, unique=True)),
('subreddits', models.ManyToManyField(related_name='subscribers', to='subscribers.SubReddit')),
],
),
]
| mit | 4,599,604,326,839,132,700 | 45.882353 | 476 | 0.54266 | false |
Pal3love/otRebuilder | Package/otRebuilder/Dep/fontTools/varLib/__init__.py | 1 | 23435 | """
Module for dealing with 'gvar'-style font variations, also known as run-time
interpolation.
The ideas here are very similar to MutatorMath. There is even code to read
MutatorMath .designspace files in the varLib.designspace module.
For now, if you run this file on a designspace file, it tries to find
ttf-interpolatable files for the masters and build a variable-font from
them. Such ttf-interpolatable and designspace files can be generated from
a Glyphs source, eg., using noto-source as an example:
$ fontmake -o ttf-interpolatable -g NotoSansArabic-MM.glyphs
Then you can make a variable-font this way:
$ fonttools varLib master_ufo/NotoSansArabic.designspace
API *will* change in near future.
"""
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.misc.py23 import *
from fontTools.misc.arrayTools import Vector
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables._n_a_m_e import NameRecord
from fontTools.ttLib.tables._f_v_a_r import Axis, NamedInstance
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates
from fontTools.ttLib.tables.ttProgram import Program
from fontTools.ttLib.tables.TupleVariation import TupleVariation
from fontTools.ttLib.tables import otTables as ot
from fontTools.varLib import builder, designspace, models, varStore
from fontTools.varLib.merger import VariationMerger, _all_equal
from fontTools.varLib.mvar import MVAR_ENTRIES
from fontTools.varLib.iup import iup_delta_optimize
from collections import OrderedDict
import os.path
import logging
from pprint import pformat
log = logging.getLogger("fontTools.varLib")
class VarLibError(Exception):
pass
#
# Creation routines
#
def _add_fvar(font, axes, instances):
"""
Add 'fvar' table to font.
axes is an ordered dictionary of DesignspaceAxis objects.
instances is list of dictionary objects with 'location', 'stylename',
and possibly 'postscriptfontname' entries.
"""
assert axes
assert isinstance(axes, OrderedDict)
log.info("Generating fvar")
fvar = newTable('fvar')
nameTable = font['name']
for a in axes.values():
axis = Axis()
axis.axisTag = Tag(a.tag)
# TODO Skip axes that have no variation.
axis.minValue, axis.defaultValue, axis.maxValue = a.minimum, a.default, a.maximum
axis.axisNameID = nameTable.addName(tounicode(a.labelname['en']))
# TODO:
# Replace previous line with the following when the following issues are resolved:
# https://github.com/fonttools/fonttools/issues/930
# https://github.com/fonttools/fonttools/issues/931
# axis.axisNameID = nameTable.addMultilingualName(a.labelname, font)
fvar.axes.append(axis)
for instance in instances:
coordinates = instance['location']
name = tounicode(instance['stylename'])
psname = instance.get('postscriptfontname')
inst = NamedInstance()
inst.subfamilyNameID = nameTable.addName(name)
if psname is not None:
psname = tounicode(psname)
inst.postscriptNameID = nameTable.addName(psname)
inst.coordinates = {axes[k].tag:axes[k].map_backward(v) for k,v in coordinates.items()}
#inst.coordinates = {axes[k].tag:v for k,v in coordinates.items()}
fvar.instances.append(inst)
assert "fvar" not in font
font['fvar'] = fvar
return fvar
def _add_avar(font, axes):
"""
Add 'avar' table to font.
axes is an ordered dictionary of DesignspaceAxis objects.
"""
assert axes
assert isinstance(axes, OrderedDict)
log.info("Generating avar")
avar = newTable('avar')
interesting = False
for axis in axes.values():
# Currently, some rasterizers require that the default value maps
# (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment
# maps, even when the default normalization mapping for the axis
# was not modified.
# https://github.com/googlei18n/fontmake/issues/295
# https://github.com/fonttools/fonttools/issues/1011
# TODO(anthrotype) revert this (and 19c4b37) when issue is fixed
curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}
if not axis.map:
continue
items = sorted(axis.map.items())
keys = [item[0] for item in items]
vals = [item[1] for item in items]
# Current avar requirements. We don't have to enforce
# these on the designer and can deduce some ourselves,
# but for now just enforce them.
assert axis.minimum == min(keys)
assert axis.maximum == max(keys)
assert axis.default in keys
# No duplicates
assert len(set(keys)) == len(keys)
assert len(set(vals)) == len(vals)
# Ascending values
assert sorted(vals) == vals
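		# Normalize both the designer-specified stops and their mapped values
		# into the [-1, 0, +1] space used by avar segment maps.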
keys_triple = (axis.minimum, axis.default, axis.maximum)
vals_triple = tuple(axis.map_forward(v) for v in keys_triple)
keys = [models.normalizeValue(v, keys_triple) for v in keys]
vals = [models.normalizeValue(v, vals_triple) for v in vals]
if all(k == v for k, v in zip(keys, vals)):
continue
interesting = True
curve.update(zip(keys, vals))
assert 0.0 in curve and curve[0.0] == 0.0
assert -1.0 not in curve or curve[-1.0] == -1.0
assert +1.0 not in curve or curve[+1.0] == +1.0
# curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0})
assert "avar" not in font
if not interesting:
log.info("No need for avar")
avar = None
else:
font['avar'] = avar
return avar
def _add_stat(font, axes):
nameTable = font['name']
assert "STAT" not in font
STAT = font["STAT"] = newTable('STAT')
stat = STAT.table = ot.STAT()
stat.Version = 0x00010000
axisRecords = []
for i,a in enumerate(axes.values()):
axis = ot.AxisRecord()
axis.AxisTag = Tag(a.tag)
# Meh. Reuse fvar nameID!
axis.AxisNameID = nameTable.addName(tounicode(a.labelname['en']))
axis.AxisOrdering = i
axisRecords.append(axis)
axisRecordArray = ot.AxisRecordArray()
axisRecordArray.Axis = axisRecords
# XXX these should not be hard-coded but computed automatically
stat.DesignAxisRecordSize = 8
stat.DesignAxisCount = len(axisRecords)
stat.DesignAxisRecord = axisRecordArray
# TODO Move to glyf or gvar table proper
def _GetCoordinates(font, glyphName):
"""font, glyphName --> glyph coordinates as expected by "gvar" table
The result includes four "phantom points" for the glyph metrics,
as mandated by the "gvar" spec.
"""
glyf = font["glyf"]
if glyphName not in glyf.glyphs: return None
glyph = glyf[glyphName]
if glyph.isComposite():
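		# For composites, each component's x/y offset acts as a "coordinate";
		# the control tuple records the component glyph names so that master
		# compatibility can be checked later.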
coord = GlyphCoordinates([(getattr(c, 'x', 0),getattr(c, 'y', 0)) for c in glyph.components])
control = (glyph.numberOfContours,[c.glyphName for c in glyph.components])
else:
allData = glyph.getCoordinates(glyf)
coord = allData[0]
control = (glyph.numberOfContours,)+allData[1:]
# Add phantom points for (left, right, top, bottom) positions.
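	# The four phantom points mirror the glyph's horizontal/vertical metrics so that
	# 'gvar' deltas can vary advance widths and side bearings as well as the outline.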
horizontalAdvanceWidth, leftSideBearing = font["hmtx"].metrics[glyphName]
if not hasattr(glyph, 'xMin'):
glyph.recalcBounds(glyf)
leftSideX = glyph.xMin - leftSideBearing
rightSideX = leftSideX + horizontalAdvanceWidth
# XXX these are incorrect. Load vmtx and fix.
topSideY = glyph.yMax
bottomSideY = -glyph.yMin
coord = coord.copy()
coord.extend([(leftSideX, 0),
(rightSideX, 0),
(0, topSideY),
(0, bottomSideY)])
return coord, control
# TODO Move to glyf or gvar table proper
def _SetCoordinates(font, glyphName, coord):
glyf = font["glyf"]
assert glyphName in glyf.glyphs
glyph = glyf[glyphName]
# Handle phantom points for (left, right, top, bottom) positions.
assert len(coord) >= 4
if not hasattr(glyph, 'xMin'):
glyph.recalcBounds(glyf)
leftSideX = coord[-4][0]
rightSideX = coord[-3][0]
topSideY = coord[-2][1]
bottomSideY = coord[-1][1]
for _ in range(4):
del coord[-1]
if glyph.isComposite():
assert len(coord) == len(glyph.components)
for p,comp in zip(coord, glyph.components):
if hasattr(comp, 'x'):
comp.x,comp.y = p
	elif glyph.numberOfContours == 0:
assert len(coord) == 0
else:
assert len(coord) == len(glyph.coordinates)
glyph.coordinates = coord
glyph.recalcBounds(glyf)
horizontalAdvanceWidth = round(rightSideX - leftSideX)
leftSideBearing = round(glyph.xMin - leftSideX)
# XXX Handle vertical
font["hmtx"].metrics[glyphName] = horizontalAdvanceWidth, leftSideBearing
def _add_gvar(font, model, master_ttfs, tolerance=0.5, optimize=True):
assert tolerance >= 0
log.info("Generating gvar")
assert "gvar" not in font
gvar = font["gvar"] = newTable('gvar')
gvar.version = 1
gvar.reserved = 0
gvar.variations = {}
for glyph in font.getGlyphOrder():
allData = [_GetCoordinates(m, glyph) for m in master_ttfs]
allCoords = [d[0] for d in allData]
allControls = [d[1] for d in allData]
control = allControls[0]
if (any(c != control for c in allControls)):
log.warning("glyph %s has incompatible masters; skipping" % glyph)
continue
del allControls
# Update gvar
gvar.variations[glyph] = []
deltas = model.getDeltas(allCoords)
supports = model.supports
assert len(deltas) == len(supports)
# Prepare for IUP optimization
origCoords = deltas[0]
endPts = control[1] if control[0] >= 1 else list(range(len(control[1])))
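		# IUP (Interpolate Untouched Points) optimization: deltas the rasterizer can
		# infer from neighbouring points are replaced with None, which usually makes
		# the compiled tuple smaller; the unoptimized tuple is kept when it wins.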
for i,(delta,support) in enumerate(zip(deltas[1:], supports[1:])):
if all(abs(v) <= tolerance for v in delta.array):
continue
var = TupleVariation(support, delta)
if optimize:
delta_opt = iup_delta_optimize(delta, origCoords, endPts, tolerance=tolerance)
if None in delta_opt:
# Use "optimized" version only if smaller...
var_opt = TupleVariation(support, delta_opt)
axis_tags = sorted(support.keys()) # Shouldn't matter that this is different from fvar...?
tupleData, auxData, _ = var.compile(axis_tags, [], None)
unoptimized_len = len(tupleData) + len(auxData)
tupleData, auxData, _ = var_opt.compile(axis_tags, [], None)
optimized_len = len(tupleData) + len(auxData)
if optimized_len < unoptimized_len:
var = var_opt
gvar.variations[glyph].append(var)
def _remove_TTHinting(font):
for tag in ("cvar", "cvt ", "fpgm", "prep"):
if tag in font:
del font[tag]
for attr in ("maxTwilightPoints", "maxStorage", "maxFunctionDefs", "maxInstructionDefs", "maxStackElements", "maxSizeOfInstructions"):
setattr(font["maxp"], attr, 0)
font["maxp"].maxZones = 1
font["glyf"].removeHinting()
# TODO: Modify gasp table to deactivate gridfitting for all ranges?
def _merge_TTHinting(font, model, master_ttfs, tolerance=0.5):
log.info("Merging TT hinting")
assert "cvar" not in font
# Check that the existing hinting is compatible
# fpgm and prep table
for tag in ("fpgm", "prep"):
all_pgms = [m[tag].program for m in master_ttfs if tag in m]
if len(all_pgms) == 0:
continue
if tag in font:
font_pgm = font[tag].program
else:
font_pgm = Program()
if any(pgm != font_pgm for pgm in all_pgms):
log.warning("Masters have incompatible %s tables, hinting is discarded." % tag)
_remove_TTHinting(font)
return
# glyf table
for name, glyph in font["glyf"].glyphs.items():
all_pgms = [
m["glyf"][name].program
for m in master_ttfs
if hasattr(m["glyf"][name], "program")
]
if not any(all_pgms):
continue
glyph.expand(font["glyf"])
if hasattr(glyph, "program"):
font_pgm = glyph.program
else:
font_pgm = Program()
if any(pgm != font_pgm for pgm in all_pgms if pgm):
log.warning("Masters have incompatible glyph programs in glyph '%s', hinting is discarded." % name)
_remove_TTHinting(font)
return
# cvt table
all_cvs = [Vector(m["cvt "].values) for m in master_ttfs if "cvt " in m]
if len(all_cvs) == 0:
# There is no cvt table to make a cvar table from, we're done here.
return
if len(all_cvs) != len(master_ttfs):
log.warning("Some masters have no cvt table, hinting is discarded.")
_remove_TTHinting(font)
return
num_cvt0 = len(all_cvs[0])
if (any(len(c) != num_cvt0 for c in all_cvs)):
log.warning("Masters have incompatible cvt tables, hinting is discarded.")
_remove_TTHinting(font)
return
# We can build the cvar table now.
cvar = font["cvar"] = newTable('cvar')
cvar.version = 1
cvar.variations = []
deltas = model.getDeltas(all_cvs)
supports = model.supports
for i,(delta,support) in enumerate(zip(deltas[1:], supports[1:])):
delta = [round(d) for d in delta]
if all(abs(v) <= tolerance for v in delta):
continue
var = TupleVariation(support, delta)
cvar.variations.append(var)
def _add_HVAR(font, model, master_ttfs, axisTags):
log.info("Generating HVAR")
hAdvanceDeltas = {}
metricses = [m["hmtx"].metrics for m in master_ttfs]
for glyph in font.getGlyphOrder():
hAdvances = [metrics[glyph][0] for metrics in metricses]
# TODO move round somewhere else?
hAdvanceDeltas[glyph] = tuple(round(d) for d in model.getDeltas(hAdvances)[1:])
# We only support the direct mapping right now.
supports = model.supports[1:]
varTupleList = builder.buildVarRegionList(supports, axisTags)
varTupleIndexes = list(range(len(supports)))
n = len(supports)
items = []
zeroes = [0]*n
for glyphName in font.getGlyphOrder():
items.append(hAdvanceDeltas.get(glyphName, zeroes))
while items and items[-1] is zeroes:
del items[-1]
advanceMapping = None
# Add indirect mapping to save on duplicates
uniq = set(items)
# TODO Improve heuristic
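	# Rough trade-off: the direct mapping stores one delta row per glyph, while the
	# indirect route stores only the unique rows plus a VarIdx map, so it only pays
	# off when enough glyphs share identical delta sets.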
if (len(items) - len(uniq)) * len(varTupleIndexes) > len(items):
newItems = sorted(uniq)
mapper = {v:i for i,v in enumerate(newItems)}
mapping = [mapper[item] for item in items]
while len(mapping) > 1 and mapping[-1] == mapping[-2]:
del mapping[-1]
advanceMapping = builder.buildVarIdxMap(mapping)
items = newItems
del mapper, mapping, newItems
del uniq
varData = builder.buildVarData(varTupleIndexes, items)
varstore = builder.buildVarStore(varTupleList, [varData])
assert "HVAR" not in font
HVAR = font["HVAR"] = newTable('HVAR')
hvar = HVAR.table = ot.HVAR()
hvar.Version = 0x00010000
hvar.VarStore = varstore
hvar.AdvWidthMap = advanceMapping
hvar.LsbMap = hvar.RsbMap = None
def _add_MVAR(font, model, master_ttfs, axisTags):
log.info("Generating MVAR")
store_builder = varStore.OnlineVarStoreBuilder(axisTags)
store_builder.setModel(model)
records = []
lastTableTag = None
fontTable = None
tables = None
for tag, (tableTag, itemName) in sorted(MVAR_ENTRIES.items(), key=lambda kv: kv[1]):
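		# entries are sorted by owning table so the font/master table lookups below
		# are only refreshed when the table tag changes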
if tableTag != lastTableTag:
tables = fontTable = None
if tableTag in font:
# TODO Check all masters have same table set?
fontTable = font[tableTag]
tables = [master[tableTag] for master in master_ttfs]
lastTableTag = tableTag
if tables is None:
continue
# TODO support gasp entries
master_values = [getattr(table, itemName) for table in tables]
if _all_equal(master_values):
base, varIdx = master_values[0], None
else:
base, varIdx = store_builder.storeMasters(master_values)
setattr(fontTable, itemName, base)
if varIdx is None:
continue
log.info(' %s: %s.%s %s', tag, tableTag, itemName, master_values)
rec = ot.MetricsValueRecord()
rec.ValueTag = tag
rec.VarIdx = varIdx
records.append(rec)
assert "MVAR" not in font
if records:
MVAR = font["MVAR"] = newTable('MVAR')
mvar = MVAR.table = ot.MVAR()
mvar.Version = 0x00010000
mvar.Reserved = 0
mvar.VarStore = store_builder.finish()
# XXX these should not be hard-coded but computed automatically
mvar.ValueRecordSize = 8
mvar.ValueRecordCount = len(records)
mvar.ValueRecord = sorted(records, key=lambda r: r.ValueTag)
def _merge_OTL(font, model, master_fonts, axisTags):
log.info("Merging OpenType Layout tables")
merger = VariationMerger(model, axisTags, font)
merger.mergeTables(font, master_fonts, ['GPOS'])
store = merger.store_builder.finish()
try:
GDEF = font['GDEF'].table
assert GDEF.Version <= 0x00010002
except KeyError:
		GDEFTable = font["GDEF"] = newTable('GDEF')
GDEF = GDEFTable.table = ot.GDEF()
GDEF.Version = 0x00010003
GDEF.VarStore = store
# Pretty much all of this file should be redesigned and moved into submodules...
# Such a mess right now, but kludging along...
class _DesignspaceAxis(object):
def __repr__(self):
return repr(self.__dict__)
@staticmethod
def _map(v, map):
keys = map.keys()
if not keys:
return v
if v in keys:
return map[v]
k = min(keys)
if v < k:
return v + map[k] - k
k = max(keys)
if v > k:
return v + map[k] - k
# Interpolate
a = max(k for k in keys if k < v)
b = min(k for k in keys if k > v)
va = map[a]
vb = map[b]
return va + (vb - va) * (v - a) / (b - a)
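		# Example: with map {0: 0, 500: 400, 1000: 1000}, _map(750, map) interpolates
		# between the 500 and 1000 entries: 400 + (1000 - 400) * (750 - 500) / (1000 - 500) = 700.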
def map_forward(self, v):
if self.map is None: return v
return self._map(v, self.map)
def map_backward(self, v):
if self.map is None: return v
map = {v:k for k,v in self.map.items()}
return self._map(v, map)
def load_designspace(designspace_filename):
ds = designspace.load(designspace_filename)
axes = ds.get('axes')
masters = ds.get('sources')
if not masters:
raise VarLibError("no sources found in .designspace")
instances = ds.get('instances', [])
standard_axis_map = OrderedDict([
('weight', ('wght', {'en':'Weight'})),
('width', ('wdth', {'en':'Width'})),
('slant', ('slnt', {'en':'Slant'})),
('optical', ('opsz', {'en':'Optical Size'})),
])
# Setup axes
axis_objects = OrderedDict()
if axes is not None:
for axis_dict in axes:
axis_name = axis_dict.get('name')
if not axis_name:
axis_name = axis_dict['name'] = axis_dict['tag']
if 'map' not in axis_dict:
axis_dict['map'] = None
else:
axis_dict['map'] = {m['input']:m['output'] for m in axis_dict['map']}
if axis_name in standard_axis_map:
if 'tag' not in axis_dict:
axis_dict['tag'] = standard_axis_map[axis_name][0]
if 'labelname' not in axis_dict:
axis_dict['labelname'] = standard_axis_map[axis_name][1].copy()
axis = _DesignspaceAxis()
for item in ['name', 'tag', 'minimum', 'default', 'maximum', 'map']:
assert item in axis_dict, 'Axis does not have "%s"' % item
if 'labelname' not in axis_dict:
axis_dict['labelname'] = {'en': axis_name}
axis.__dict__ = axis_dict
axis_objects[axis_name] = axis
else:
# No <axes> element. Guess things...
base_idx = None
for i,m in enumerate(masters):
if 'info' in m and m['info']['copy']:
assert base_idx is None
base_idx = i
assert base_idx is not None, "Cannot find 'base' master; Either add <axes> element to .designspace document, or add <info> element to one of the sources in the .designspace document."
master_locs = [o['location'] for o in masters]
base_loc = master_locs[base_idx]
axis_names = set(base_loc.keys())
assert all(name in standard_axis_map for name in axis_names), "Non-standard axis found and there exist no <axes> element."
for name,(tag,labelname) in standard_axis_map.items():
if name not in axis_names:
continue
axis = _DesignspaceAxis()
axis.name = name
axis.tag = tag
axis.labelname = labelname.copy()
axis.default = base_loc[name]
axis.minimum = min(m[name] for m in master_locs if name in m)
axis.maximum = max(m[name] for m in master_locs if name in m)
axis.map = None
# TODO Fill in weight / width mapping from OS/2 table? Need loading fonts...
axis_objects[name] = axis
del base_idx, base_loc, axis_names, master_locs
axes = axis_objects
del axis_objects
log.info("Axes:\n%s", pformat(axes))
# Check all master and instance locations are valid and fill in defaults
for obj in masters+instances:
obj_name = obj.get('name', obj.get('stylename', ''))
loc = obj['location']
for axis_name in loc.keys():
assert axis_name in axes, "Location axis '%s' unknown for '%s'." % (axis_name, obj_name)
for axis_name,axis in axes.items():
if axis_name not in loc:
loc[axis_name] = axis.default
else:
v = axis.map_backward(loc[axis_name])
assert axis.minimum <= v <= axis.maximum, "Location for axis '%s' (mapped to %s) out of range for '%s' [%s..%s]" % (axis_name, v, obj_name, axis.minimum, axis.maximum)
# Normalize master locations
normalized_master_locs = [o['location'] for o in masters]
log.info("Internal master locations:\n%s", pformat(normalized_master_locs))
# TODO This mapping should ideally be moved closer to logic in _add_fvar/avar
internal_axis_supports = {}
for axis in axes.values():
triple = (axis.minimum, axis.default, axis.maximum)
internal_axis_supports[axis.name] = [axis.map_forward(v) for v in triple]
log.info("Internal axis supports:\n%s", pformat(internal_axis_supports))
normalized_master_locs = [models.normalizeLocation(m, internal_axis_supports) for m in normalized_master_locs]
log.info("Normalized master locations:\n%s", pformat(normalized_master_locs))
# Find base master
base_idx = None
for i,m in enumerate(normalized_master_locs):
if all(v == 0 for v in m.values()):
assert base_idx is None
base_idx = i
assert base_idx is not None, "Base master not found; no master at default location?"
log.info("Index of base master: %s", base_idx)
return axes, internal_axis_supports, base_idx, normalized_master_locs, masters, instances
def build(designspace_filename, master_finder=lambda s:s):
"""
Build variation font from a designspace file.
If master_finder is set, it should be a callable that takes master
filename as found in designspace file and map it to master font
binary as to be opened (eg. .ttf or .otf).
"""
axes, internal_axis_supports, base_idx, normalized_master_locs, masters, instances = load_designspace(designspace_filename)
log.info("Building variable font")
log.info("Loading master fonts")
basedir = os.path.dirname(designspace_filename)
master_ttfs = [master_finder(os.path.join(basedir, m['filename'])) for m in masters]
master_fonts = [TTFont(ttf_path) for ttf_path in master_ttfs]
# Reload base font as target font
vf = TTFont(master_ttfs[base_idx])
# TODO append masters as named-instances as well; needs .designspace change.
fvar = _add_fvar(vf, axes, instances)
_add_stat(vf, axes)
_add_avar(vf, axes)
del instances
# Map from axis names to axis tags...
normalized_master_locs = [{axes[k].tag:v for k,v in loc.items()} for loc in normalized_master_locs]
#del axes
# From here on, we use fvar axes only
axisTags = [axis.axisTag for axis in fvar.axes]
# Assume single-model for now.
model = models.VariationModel(normalized_master_locs, axisOrder=axisTags)
assert 0 == model.mapping[base_idx]
log.info("Building variations tables")
_add_MVAR(vf, model, master_fonts, axisTags)
_add_HVAR(vf, model, master_fonts, axisTags)
_merge_OTL(vf, model, master_fonts, axisTags)
if 'glyf' in vf:
_add_gvar(vf, model, master_fonts)
_merge_TTHinting(vf, model, master_fonts)
return vf, model, master_ttfs
def main(args=None):
from argparse import ArgumentParser
from fontTools import configLogger
parser = ArgumentParser(prog='varLib')
parser.add_argument('designspace')
options = parser.parse_args(args)
# TODO: allow user to configure logging via command-line options
configLogger(level="INFO")
designspace_filename = options.designspace
finder = lambda s: s.replace('master_ufo', 'master_ttf_interpolatable').replace('.ufo', '.ttf')
outfile = os.path.splitext(designspace_filename)[0] + '-VF.ttf'
vf, model, master_ttfs = build(designspace_filename, finder)
log.info("Saving variation font %s", outfile)
vf.save(outfile)
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
sys.exit(main())
import doctest
sys.exit(doctest.testmod().failed)
| mit | 5,187,638,124,208,858,000 | 30.080902 | 185 | 0.699125 | false |
Axam/nsx-web | nailgun/nailgun/db/sqlalchemy/models/cluster.py | 2 | 4812 | # -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import Enum
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Text
from sqlalchemy import Unicode
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from nailgun import consts
from nailgun.db import db
from nailgun.db.sqlalchemy.models.base import Base
from nailgun.db.sqlalchemy.models.fields import JSON
from nailgun.db.sqlalchemy.models.node import Node
class ClusterChanges(Base):
__tablename__ = 'cluster_changes'
id = Column(Integer, primary_key=True)
cluster_id = Column(Integer, ForeignKey('clusters.id'))
node_id = Column(Integer, ForeignKey('nodes.id', ondelete='CASCADE'))
name = Column(
Enum(*consts.CLUSTER_CHANGES, name='possible_changes'),
nullable=False
)
class Cluster(Base):
__tablename__ = 'clusters'
id = Column(Integer, primary_key=True)
mode = Column(
Enum(*consts.CLUSTER_MODES, name='cluster_mode'),
nullable=False,
default=consts.CLUSTER_MODES.ha_compact
)
status = Column(
Enum(*consts.CLUSTER_STATUSES, name='cluster_status'),
nullable=False,
default=consts.CLUSTER_STATUSES.new
)
net_provider = Column(
Enum(*consts.CLUSTER_NET_PROVIDERS, name='net_provider'),
nullable=False,
default=consts.CLUSTER_NET_PROVIDERS.nova_network
)
network_config = relationship("NetworkingConfig",
backref=backref("cluster"),
cascade="all,delete",
uselist=False)
grouping = Column(
Enum(*consts.CLUSTER_GROUPING, name='cluster_grouping'),
nullable=False,
default=consts.CLUSTER_GROUPING.roles
)
name = Column(Unicode(50), unique=True, nullable=False)
release_id = Column(Integer, ForeignKey('releases.id'), nullable=False)
pending_release_id = Column(Integer, ForeignKey('releases.id'))
nodes = relationship(
"Node", backref="cluster", cascade="delete", order_by='Node.id')
tasks = relationship("Task", backref="cluster", cascade="delete")
attributes = relationship("Attributes", uselist=False,
backref="cluster", cascade="delete")
changes_list = relationship("ClusterChanges", backref="cluster",
cascade="delete")
# We must keep all notifications even if cluster is removed.
# It is because we want user to be able to see
# the notification history so that is why we don't use
# cascade="delete" in this relationship
# During cluster deletion sqlalchemy engine will set null
# into cluster foreign key column of notification entity
notifications = relationship("Notification", backref="cluster")
network_groups = relationship(
"NetworkGroup",
backref="cluster",
cascade="delete",
order_by="NetworkGroup.id"
)
replaced_deployment_info = Column(JSON, default={})
replaced_provisioning_info = Column(JSON, default={})
is_customized = Column(Boolean, default=False)
fuel_version = Column(Text, nullable=False)
@property
def changes(self):
return [
{"name": i.name, "node_id": i.node_id}
for i in self.changes_list
]
@changes.setter
def changes(self, value):
self.changes_list = value
@property
def is_ha_mode(self):
return self.mode in ('ha_full', 'ha_compact')
@property
def full_name(self):
return '%s (id=%s, mode=%s)' % (self.name, self.id, self.mode)
@property
def is_locked(self):
if self.status in ("new", "stopped") and not \
db().query(Node).filter_by(
cluster_id=self.id,
status="ready"
).count():
return False
return True
class Attributes(Base):
__tablename__ = 'attributes'
id = Column(Integer, primary_key=True)
cluster_id = Column(Integer, ForeignKey('clusters.id'))
editable = Column(JSON)
generated = Column(JSON)
| apache-2.0 | -4,429,185,441,734,378,000 | 34.124088 | 78 | 0.645885 | false |
waterwoodwind/QA_web | main/save_load_func.py | 1 | 3092 | #coding=utf-8
import pandas as pd
import pickle
import json
from main.models import qa_info
from django.core import serializers
from django.http import HttpResponse
import os
#function
def list_all_data():
file_1 = file('data_all.pkl', 'rb')
updata = pickle.load(file_1)
return updata
#views
def refresh_middle_data(request):
exclude_list = []
query_data = qa_info.objects.all().order_by('-data')
json_data = serializers.serialize("json", query_data, use_natural_foreign_keys=True)
list_data = json.loads(json_data)
dict_name_verbose_name = {}
columns_set = []
colheaders = []
dataSchema = {}
for field in qa_info._meta.fields:
dict_name_verbose_name[field.name] = field.verbose_name
if not field.verbose_name in exclude_list:
print field.verbose_name
colheaders.append(field.verbose_name.encode("utf8"))
dataSchema[field.verbose_name] = ''
columns_item = {
u"title": field.verbose_name,
u"field": field.verbose_name,
# u"sortable": u"true",
}
if field.verbose_name == u"问题描述":
columns_item[u"width"] = u"20%"
columns_item[u"title"] = u"问题描述"
elif field.verbose_name == u"整改措施":
columns_item[u"width"] = u"20%"
columns_item[u"title"] = u"整改措施"
elif field.verbose_name == u"处理意见":
columns_item[u"width"] = u"6%"
columns_item[u"title"] = u"处理意见"
else:
split_list = list(field.verbose_name)
# every two word add
title_str = ""
for i in range(len(split_list)):
title_str = title_str + split_list[i]
if (i + 1) % 2 == 0:
title_str = title_str + u"<br>"
if field.verbose_name == u"相关附件":
columns_item[u'formatter'] = "attachment"
columns_item[u"title"] = title_str
columns_item[u"width"] = u"2%"
columns_set.append(columns_item)
json_columns = json.dumps(columns_set)
upload_data = []
for item in list_data:
single_data = item['fields']
single_data[u'id'] = item['pk']
upload_data.append(single_data)
# print upload_data
chinese_updata = []
for item in upload_data:
dict_updata = {}
for key, value in item.items():
dict_updata[dict_name_verbose_name[key]] = value
# print chinese_updata
chinese_updata.append(dict_updata)
#save list
if os.path.exists('data_all.pkl'):
os.remove('data_all.pkl')
file_1 = file('data_all.pkl', 'wb')
pickle.dump(chinese_updata, file_1, True)
#save pd file
if os.path.exists('data.h5'):
os.remove('data.h5')
df_data = pd.DataFrame(chinese_updata)
df_data.to_hdf('data.h5', 'df')
return HttpResponse(u"前端数据已刷新") | mit | -4,685,831,142,808,562,000 | 31.159574 | 88 | 0.549967 | false |
crgwbr/asymmetric_jwt_auth | src/asymmetric_jwt_auth/nonce/django.py | 1 | 1352 | from django.core.cache import cache
from django.conf import settings
from .. import default_settings
from . import BaseNonceBackend
class DjangoCacheNonceBackend(BaseNonceBackend):
"""
Nonce backend which uses DJango's cache system.
Simple, but not great. Prone to race conditions.
"""
def validate_nonce(self, username: str, timestamp: int, nonce: str) -> bool:
"""
Confirm that the given nonce hasn't already been used.
"""
key = self._create_nonce_key(username, timestamp)
used = cache.get(key, set([]))
return nonce not in used
def log_used_nonce(self, username: str, timestamp: int, nonce: str) -> None:
"""
Log a nonce as being used, and therefore henceforth invalid.
"""
key = self._create_nonce_key(username, timestamp)
used = cache.get(key, set([]))
used.add(nonce)
timestamp_tolerance = getattr(settings, 'ASYMMETRIC_JWT_AUTH', default_settings)['TIMESTAMP_TOLERANCE']
cache.set(key, used, timestamp_tolerance * 2)
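        # Note: the get/update/set sequence above is not atomic, so two concurrent
        # requests may both record (and both pass validation for) the same nonce;
        # this is the race condition mentioned in the class docstring.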
def _create_nonce_key(self, username: str, timestamp: int) -> str:
"""
Create and return the cache key for storing nonces
"""
return '%s-nonces-%s-%s' % (
self.__class__.__name__,
username,
timestamp,
)
| isc | 5,193,959,262,588,831,000 | 31.190476 | 111 | 0.611686 | false |
danielhrisca/asammdf | asammdf/mdf.py | 1 | 172031 | # -*- coding: utf-8 -*-
""" common MDF file format module """
from collections import defaultdict, OrderedDict
from copy import deepcopy
import csv
from datetime import datetime, timezone
from functools import reduce
from io import BytesIO
import logging
from pathlib import Path
import re
from shutil import copy
from struct import unpack
import zipfile
import gzip
import bz2
from traceback import format_exc
import xml.etree.ElementTree as ET
from canmatrix import CanMatrix
import numpy as np
import pandas as pd
from .blocks import v2_v3_constants as v23c
from .blocks import v4_constants as v4c
from .blocks.bus_logging_utils import extract_mux
from .blocks.conversion_utils import from_dict
from .blocks.mdf_v2 import MDF2
from .blocks.mdf_v3 import MDF3
from .blocks.mdf_v4 import MDF4
from .blocks.utils import (
components,
csv_bytearray2hex,
csv_int2hex,
downcast,
is_file_like,
load_can_database,
master_using_raster,
matlab_compatible,
MDF2_VERSIONS,
MDF3_VERSIONS,
MDF4_VERSIONS,
MdfException,
plausible_timestamps,
randomized_string,
SUPPORTED_VERSIONS,
UINT16_u,
UINT64_u,
UniqueDB,
validate_version_argument,
)
from .blocks.v2_v3_blocks import ChannelConversion as ChannelConversionV3
from .blocks.v2_v3_blocks import ChannelExtension
from .blocks.v2_v3_blocks import HeaderBlock as HeaderV3
from .blocks.v4_blocks import ChannelConversion as ChannelConversionV4
from .blocks.v4_blocks import EventBlock, FileHistory
from .blocks.v4_blocks import HeaderBlock as HeaderV4
from .blocks.v4_blocks import SourceInformation
from .signal import Signal
from .version import __version__
logger = logging.getLogger("asammdf")
LOCAL_TIMEZONE = datetime.now(timezone.utc).astimezone().tzinfo
__all__ = ["MDF", "SUPPORTED_VERSIONS"]
def get_measurement_timestamp_and_version(mdf, file):
mdf.seek(64)
blk_id = mdf.read(2)
if blk_id == b"HD":
header = HeaderV3
version = "3.00"
else:
version = "4.00"
blk_id += mdf.read(2)
if blk_id == b"##HD":
header = HeaderV4
else:
raise MdfException(f'"{file}" is not a valid MDF file')
header = header(address=64, stream=mdf)
return header.start_time, version
class MDF:
"""Unified access to MDF v3 and v4 files. Underlying _mdf's attributes and
methods are linked to the `MDF` object via *setattr*. This is done to expose
them to the user code and for performance considerations.
Parameters
----------
name : string | BytesIO | zipfile.ZipFile | bz2.BZ2File | gzip.GzipFile
mdf file name (if provided it must be a real file name), file-like object or
compressed file opened as Python object
.. versionchanged:: 6.2.0
added support for zipfile.ZipFile, bz2.BZ2File and gzip.GzipFile
version : string
mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10', '3.20',
'3.30', '4.00', '4.10', '4.11', '4.20'); default '4.10'. This argument is
only used for MDF objects created from scratch; for MDF objects created
from a file the version is set to file version
channels : iterable
channel names that will used for selective loading. This can dramatically
improve the file loading time.
.. versionadded:: 6.1.0
callback (\*\*kwargs) : function
keyword only argument: function to call to update the progress; the
function must accept two arguments (the current progress and maximum
progress value)
use_display_names (\*\*kwargs) : bool
keyword only argument: for MDF4 files parse the XML channel comment to
search for the display name; XML parsing is quite expensive so setting
this to *False* can decrease the loading times very much; default
*False*
remove_source_from_channel_names (\*\*kwargs) : bool
remove source from channel names ("Speed\XCP3" -> "Speed")
copy_on_get (\*\*kwargs) : bool
copy arrays in the get method; default *True*
expand_zippedfile (\*\*kwargs) : bool
only for bz2.BZ2File and gzip.GzipFile, load the file content into a
BytesIO before parsing (avoids the huge performance penalty of doing
random reads from the zipped file); default *True*
Examples
--------
>>> mdf = MDF(version='3.30') # new MDF object with version 3.30
>>> mdf = MDF('path/to/file.mf4') # MDF loaded from file
>>> mdf = MDF(BytesIO(data)) # MDF from file contents
>>> mdf = MDF(zipfile.ZipFile('data.zip')) # MDF creating using the first valid MDF from archive
>>> mdf = MDF(bz2.BZ2File('path/to/data.bz2', 'rb')) # MDF from bz2 object
>>> mdf = MDF(gzip.GzipFile('path/to/data.gzip', 'rb')) # MDF from gzip object
"""
_terminate = False
def __init__(self, name=None, version="4.10", channels=(), **kwargs):
self._mdf = None
expand_zippedfile = kwargs.pop("expand_zippedfile", True)
if name:
if is_file_like(name):
file_stream = name
do_close = False
if expand_zippedfile and isinstance(file_stream, (bz2.BZ2File, gzip.GzipFile)):
if isinstance(file_stream, (bz2.BZ2File, gzip.GzipFile)):
file_stream.seek(0)
file_stream = BytesIO(file_stream.read())
name = file_stream
elif isinstance(name, zipfile.ZipFile):
do_close = False
file_stream = name
for fn in file_stream.namelist():
if fn.lower().endswith(('mdf', 'dat', 'mf4')):
break
else:
                    raise MdfException("the zip archive does not contain any .dat, .mdf or .mf4 file")
file_stream = name = BytesIO(file_stream.read(fn))
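                # only the first archive member with a known MDF extension is
                # extracted; any other files in the zip are ignored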
else:
name = Path(name)
if name.is_file():
do_close = True
file_stream = open(name, "rb")
else:
raise MdfException(f'File "{name}" does not exist')
file_stream.seek(0)
magic_header = file_stream.read(8)
if magic_header.strip() not in (b"MDF", b"UnFinMF"):
raise MdfException(
f'"{name}" is not a valid ASAM MDF file: magic header is {magic_header}'
)
file_stream.seek(8)
version = file_stream.read(4).decode("ascii").strip(" \0")
if not version:
file_stream.read(16)
version = unpack("<H", file_stream.read(2))[0]
version = str(version)
version = f"{version[0]}.{version[1:]}"
if do_close:
file_stream.close()
if version in MDF3_VERSIONS:
self._mdf = MDF3(name, channels=channels, **kwargs)
elif version in MDF4_VERSIONS:
self._mdf = MDF4(name, channels=channels, **kwargs)
elif version in MDF2_VERSIONS:
self._mdf = MDF2(name, channels=channels, **kwargs)
else:
message = f'"{name}" is not a supported MDF file; "{version}" file version was found'
raise MdfException(message)
else:
version = validate_version_argument(version)
if version in MDF2_VERSIONS:
self._mdf = MDF3(version=version, **kwargs)
elif version in MDF3_VERSIONS:
self._mdf = MDF3(version=version, **kwargs)
elif version in MDF4_VERSIONS:
self._mdf = MDF4(version=version, **kwargs)
else:
message = (
f'"{version}" is not a supported MDF file version; '
f"Supported versions are {SUPPORTED_VERSIONS}"
)
raise MdfException(message)
# we need a backreference to the MDF object to avoid it being garbage
# collected in code like this:
# MDF(filename).convert('4.10')
self._mdf._parent = self
def __setattr__(self, item, value):
if item == "_mdf":
super().__setattr__(item, value)
else:
setattr(self._mdf, item, value)
def __getattr__(self, item):
return getattr(self._mdf, item)
def __dir__(self):
return sorted(set(super().__dir__()) | set(dir(self._mdf)))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if self._mdf is not None:
try:
self.close()
except:
pass
self._mdf = None
def __del__(self):
if self._mdf is not None:
try:
self.close()
except:
pass
self._mdf = None
def __lt__(self, other):
if self.header.start_time < other.header.start_time:
return True
elif self.header.start_time > other.header.start_time:
return False
else:
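            # same wall-clock start time: fall back to comparing the earliest
            # master timestamp found in each measurement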
t_min = []
for i, group in enumerate(self.groups):
cycles_nr = group.channel_group.cycles_nr
if cycles_nr and i in self.masters_db:
master_min = self.get_master(i, record_offset=0, record_count=1)
if len(master_min):
t_min.append(master_min[0])
other_t_min = []
for i, group in enumerate(other.groups):
cycles_nr = group.channel_group.cycles_nr
if cycles_nr and i in other.masters_db:
master_min = other.get_master(i, record_offset=0, record_count=1)
if len(master_min):
other_t_min.append(master_min[0])
if not t_min or not other_t_min:
return True
else:
return min(t_min) < min(other_t_min)
def _transfer_events(self, other):
def get_scopes(event, events):
if event.scopes:
return event.scopes
else:
if event.parent is not None:
return get_scopes(events[event.parent], events)
elif event.range_start is not None:
return get_scopes(events[event.range_start], events)
else:
return event.scopes
if other.version >= "4.00":
for event in other.events:
if self.version >= "4.00":
new_event = deepcopy(event)
event_valid = True
for i, ref in enumerate(new_event.scopes):
try:
dg_cntr, ch_cntr = ref
try:
(self.groups[dg_cntr].channels[ch_cntr])
except:
event_valid = False
except TypeError:
dg_cntr = ref
try:
(self.groups[dg_cntr].channel_group)
except:
event_valid = False
# ignore attachments for now
for i in range(new_event.attachment_nr):
key = f"attachment_{i}_addr"
event[key] = 0
if event_valid:
self.events.append(new_event)
else:
ev_type = event.event_type
ev_range = event.range_type
ev_base = event.sync_base
ev_factor = event.sync_factor
timestamp = ev_base * ev_factor
try:
comment = ET.fromstring(
event.comment.replace(
' xmlns="http://www.asam.net/mdf/v4"', ""
)
)
pre = comment.find(".//pre_trigger_interval")
if pre is not None:
pre = float(pre.text)
else:
pre = 0.0
post = comment.find(".//post_trigger_interval")
if post is not None:
post = float(post.text)
else:
post = 0.0
comment = comment.find(".//TX")
if comment is not None:
comment = comment.text
else:
comment = ""
except:
pre = 0.0
post = 0.0
comment = event.comment
if comment:
comment += ": "
if ev_range == v4c.EVENT_RANGE_TYPE_BEGINNING:
comment += "Begin of "
elif ev_range == v4c.EVENT_RANGE_TYPE_END:
comment += "End of "
else:
comment += "Single point "
if ev_type == v4c.EVENT_TYPE_RECORDING:
comment += "recording"
elif ev_type == v4c.EVENT_TYPE_RECORDING_INTERRUPT:
comment += "recording interrupt"
elif ev_type == v4c.EVENT_TYPE_ACQUISITION_INTERRUPT:
comment += "acquisition interrupt"
elif ev_type == v4c.EVENT_TYPE_START_RECORDING_TRIGGER:
comment += "measurement start trigger"
elif ev_type == v4c.EVENT_TYPE_STOP_RECORDING_TRIGGER:
comment += "measurement stop trigger"
elif ev_type == v4c.EVENT_TYPE_TRIGGER:
comment += "trigger"
else:
comment += "marker"
scopes = get_scopes(event, other.events)
if scopes:
for i, ref in enumerate(scopes):
event_valid = True
try:
dg_cntr, ch_cntr = ref
try:
(self.groups[dg_cntr])
except:
event_valid = False
except TypeError:
dg_cntr = ref
try:
(self.groups[dg_cntr])
except:
event_valid = False
if event_valid:
self.add_trigger(
dg_cntr,
timestamp,
pre_time=pre,
post_time=post,
comment=comment,
)
else:
for i, _ in enumerate(self.groups):
self.add_trigger(
i,
timestamp,
pre_time=pre,
post_time=post,
comment=comment,
)
else:
for trigger_info in other.iter_get_triggers():
comment = trigger_info["comment"]
timestamp = trigger_info["time"]
group = trigger_info["group"]
if self.version < "4.00":
self.add_trigger(
group,
timestamp,
pre_time=trigger_info["pre_time"],
post_time=trigger_info["post_time"],
comment=comment,
)
else:
if timestamp:
ev_type = v4c.EVENT_TYPE_TRIGGER
else:
ev_type = v4c.EVENT_TYPE_START_RECORDING_TRIGGER
event = EventBlock(
event_type=ev_type,
sync_base=int(timestamp * 10 ** 9),
sync_factor=10 ** -9,
scope_0_addr=0,
)
event.comment = comment
event.scopes.append(group)
self.events.append(event)
def _transfer_header_data(self, other, message=""):
self.header.author = other.header.author
self.header.department = other.header.department
self.header.project = other.header.project
self.header.subject = other.header.subject
self.header.comment = other.header.comment
if self.version >= "4.00" and message:
fh = FileHistory()
fh.comment = f"""<FHcomment>
<TX>{message}</TX>
<tool_id>asammdf</tool_id>
<tool_vendor>asammdf</tool_vendor>
<tool_version>{__version__}</tool_version>
</FHcomment>"""
self.file_history = [fh]
@staticmethod
def _transfer_channel_group_data(sgroup, ogroup):
if not hasattr(sgroup, "acq_name") or not hasattr(ogroup, "acq_name"):
sgroup.comment = ogroup.comment
else:
sgroup.flags = ogroup.flags
sgroup.path_separator = ogroup.path_separator
sgroup.comment = ogroup.comment
sgroup.acq_name = ogroup.acq_name
acq_source = ogroup.acq_source
if acq_source:
sgroup.acq_source = acq_source.copy()
def _transfer_metadata(self, other, message=""):
self._transfer_events(other)
self._transfer_header_data(other, message)
def __contains__(self, channel):
""" if *'channel name'* in *'mdf file'* """
return channel in self.channels_db
def __iter__(self):
"""iterate over all the channels found in the file; master channels
are skipped from iteration
"""
yield from self.iter_channels()
def convert(self, version):
"""convert *MDF* to other version
Parameters
----------
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
'3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default '4.10'
Returns
-------
out : MDF
new *MDF* object
"""
version = validate_version_argument(version)
out = MDF(
version=version,
**self._kwargs
)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
out.configure(from_other=self)
out.header.start_time = self.header.start_time
groups_nr = len(self.virtual_groups)
if self._callback:
self._callback(0, groups_nr)
cg_nr = None
self.configure(copy_on_get=False)
# walk through all groups and get all channels
for i, virtual_group in enumerate(self.virtual_groups):
for idx, sigs in enumerate(
self._yield_selected_signals(virtual_group, version=version)
):
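                # the first yield provides Signal objects used to create the channel
                # group; later yields provide (samples, invalidation_bits) tuples that
                # are appended to the same group via extend()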
if idx == 0:
if sigs:
cg = self.groups[virtual_group].channel_group
cg_nr = out.append(
sigs,
common_timebase=True,
)
MDF._transfer_channel_group_data(
out.groups[cg_nr].channel_group, cg
)
else:
break
else:
out.extend(cg_nr, sigs)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
out._transfer_metadata(self, message=f"Converted from <{self.name}>")
self.configure(copy_on_get=True)
if self._callback:
out._callback = out._mdf._callback = self._callback
return out
def cut(
self,
start=None,
stop=None,
whence=0,
version=None,
include_ends=True,
time_from_zero=False,
):
"""cut *MDF* file. *start* and *stop* limits are absolute values
or values relative to the first timestamp depending on the *whence*
argument.
Parameters
----------
start : float
start time, default *None*. If *None* then the start of measurement
is used
stop : float
stop time, default *None*. If *None* then the end of measurement is
used
whence : int
how to search for the start and stop values
* 0 : absolute
* 1 : relative to first timestamp
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
            '3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default *None* and in this
case the original file version is used
include_ends : bool
include the *start* and *stop* timestamps after cutting the signal.
If *start* and *stop* are found in the original timestamps, then
the new samples will be computed using interpolation. Default *True*
time_from_zero : bool
start time stamps from 0s in the cut measurement
Returns
-------
out : MDF
new MDF object
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
out = MDF(
version=version,
**self._kwargs,
)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
out.configure(from_other=self)
self.configure(copy_on_get=False)
if whence == 1:
timestamps = []
for group in self.virtual_groups:
master = self.get_master(group, record_offset=0, record_count=1)
if master.size:
timestamps.append(master[0])
if timestamps:
first_timestamp = np.amin(timestamps)
else:
first_timestamp = 0
if start is not None:
start += first_timestamp
if stop is not None:
stop += first_timestamp
if time_from_zero:
delta = start
t_epoch = self.header.start_time.timestamp() + delta
out.header.start_time = datetime.fromtimestamp(t_epoch)
else:
delta = 0
out.header.start_time = self.header.start_time
groups_nr = len(self.virtual_groups)
if self._callback:
self._callback(0, groups_nr)
# walk through all groups and get all channels
for i, (group_index, virtual_group) in enumerate(self.virtual_groups.items()):
included_channels = self.included_channels(group_index)[group_index]
if not included_channels:
continue
idx = 0
signals = []
for j, sigs in enumerate(
self._yield_selected_signals(group_index, groups=included_channels)
):
if not sigs:
break
if j == 0:
master = sigs[0].timestamps
signals = sigs
else:
master = sigs[0][0]
if not len(master):
continue
needs_cutting = True
                    # check if this fragment is within the cut interval or
# if the cut interval has ended
if start is None and stop is None:
fragment_start = None
fragment_stop = None
start_index = 0
stop_index = len(master)
needs_cutting = False
elif start is None:
fragment_start = None
start_index = 0
if master[0] > stop:
break
else:
fragment_stop = min(stop, master[-1])
stop_index = np.searchsorted(
master, fragment_stop, side="right"
)
if stop_index == len(master):
needs_cutting = False
elif stop is None:
fragment_stop = None
if master[-1] < start:
continue
else:
fragment_start = max(start, master[0])
start_index = np.searchsorted(
master, fragment_start, side="left"
)
stop_index = len(master)
if start_index == 0:
needs_cutting = False
else:
if master[0] > stop:
break
elif master[-1] < start:
continue
else:
fragment_start = max(start, master[0])
start_index = np.searchsorted(
master, fragment_start, side="left"
)
fragment_stop = min(stop, master[-1])
stop_index = np.searchsorted(
master, fragment_stop, side="right"
)
if start_index == 0 and stop_index == len(master):
needs_cutting = False
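                    # at this point needs_cutting is False only when the whole fragment
                    # already lies inside [start, stop] and can be used without interpolation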
                    # update the signal if this is not the first yield
if j:
for signal, (samples, invalidation) in zip(signals, sigs[1:]):
signal.samples = samples
signal.timestamps = master
signal.invalidation_bits = invalidation
if needs_cutting:
master = (
Signal(master, master, name="_")
.cut(
fragment_start,
fragment_stop,
include_ends,
integer_interpolation_mode=integer_interpolation_mode,
float_interpolation_mode=float_interpolation_mode,
)
.timestamps
)
if not len(master):
continue
signals = [
sig.cut(
master[0],
master[-1],
include_ends=include_ends,
integer_interpolation_mode=integer_interpolation_mode,
float_interpolation_mode=float_interpolation_mode,
)
for sig in signals
]
else:
for sig in signals:
native = sig.samples.dtype.newbyteorder("=")
if sig.samples.dtype != native:
sig.samples = sig.samples.astype(native)
if time_from_zero:
master = master - delta
for sig in signals:
sig.timestamps = master
if idx == 0:
if start:
start_ = f"{start}s"
else:
start_ = "start of measurement"
if stop:
stop_ = f"{stop}s"
else:
stop_ = "end of measurement"
cg = self.groups[group_index].channel_group
cg_nr = out.append(
signals,
common_timebase=True,
)
MDF._transfer_channel_group_data(
out.groups[cg_nr].channel_group, cg
)
else:
sigs = [(sig.samples, sig.invalidation_bits) for sig in signals]
sigs.insert(0, (master, None))
out.extend(cg_nr, sigs)
idx += 1
# if the cut interval is not found in the measurement
# then append a data group with 0 cycles
if idx == 0 and signals:
for sig in signals:
sig.samples = sig.samples[:0]
sig.timestamps = sig.timestamps[:0]
if sig.invalidation_bits is not None:
                        sig.invalidation_bits = sig.invalidation_bits[:0]
if start:
start_ = f"{start}s"
else:
start_ = "start of measurement"
if stop:
stop_ = f"{stop}s"
else:
stop_ = "end of measurement"
cg = self.groups[group_index].channel_group
cg_nr = out.append(
signals,
common_timebase=True,
)
MDF._transfer_channel_group_data(out.groups[cg_nr].channel_group, cg)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
self.configure(copy_on_get=True)
out._transfer_metadata(self, message=f"Cut from {start_} to {stop_}")
if self._callback:
out._callback = out._mdf._callback = self._callback
return out
def export(self, fmt, filename=None, **kwargs):
r"""export *MDF* to other formats. The *MDF* file name is used is
available, else the *filename* argument must be provided.
        The *pandas* export option was removed. You should use the method
*to_dataframe* instead.
Parameters
----------
fmt : string
can be one of the following:
* `csv` : CSV export that uses the "," delimiter. This option
will generate a new csv file for each data group
(<MDFNAME>_DataGroup_<cntr>.csv)
* `hdf5` : HDF5 file output; each *MDF* data group is mapped to
a *HDF5* group with the name 'DataGroup_<cntr>'
(where <cntr> is the index)
* `mat` : Matlab .mat version 4, 5 or 7.3 export. If
*single_time_base==False* the channels will be renamed in the mat
file to 'D<cntr>_<channel name>'. The channel group
master will be renamed to 'DM<cntr>_<channel name>'
( *<cntr>* is the data group index starting from 0)
* `parquet` : export to Apache parquet format
filename : string | pathlib.Path
export file name
\*\*kwargs
* `single_time_base`: resample all channels to common time base,
default *False*
* `raster`: float time raster for resampling. Valid if
*single_time_base* is *True*
* `time_from_zero`: adjust time channel to start from 0
* `use_display_names`: use display name instead of standard channel
name, if available.
* `empty_channels`: behaviour for channels without samples; the
options are *skip* or *zeros*; default is *skip*
* `format`: only valid for *mat* export; can be '4', '5' or '7.3',
default is '5'
* `oned_as`: only valid for *mat* export; can be 'row' or 'column'
* `keep_arrays` : keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
* `reduce_memory_usage` : bool
reduce memory usage by converting all float columns to float32 and
              searching for minimum dtype that can represent the values found
in integer columns; default *False*
* `compression` : str
compression to be used
              * for ``parquet`` : "GZIP" or "SNAPPY"
              * for ``hdf5`` : "gzip", "lzf" or "szip"
* for ``mat`` : bool
* `time_as_date` (False) : bool
              export time as local timezone datetime; only valid for CSV export
.. versionadded:: 5.8.0
* `ignore_value2text_conversions` (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
* raw (False) : bool
export all channels using the raw values
.. versionadded:: 6.0.0
* delimiter (',') : str
only valid for CSV: see cpython documentation for csv.Dialect.delimiter
.. versionadded:: 6.2.0
* doublequote (True) : bool
only valid for CSV: see cpython documentation for csv.Dialect.doublequote
.. versionadded:: 6.2.0
* escapechar (None) : str
only valid for CSV: see cpython documentation for csv.Dialect.escapechar
.. versionadded:: 6.2.0
* lineterminator ("\\r\\n") : str
only valid for CSV: see cpython documentation for csv.Dialect.lineterminator
.. versionadded:: 6.2.0
* quotechar ('"') : str
only valid for CSV: see cpython documentation for csv.Dialect.quotechar
.. versionadded:: 6.2.0
* quoting ("MINIMAL") : str
only valid for CSV: see cpython documentation for csv.Dialect.quoting. Use the
last part of the quoting constant name
.. versionadded:: 6.2.0
"""
header_items = (
"date",
"time",
"author_field",
"department_field",
"project_field",
"subject_field",
)
if fmt != "pandas" and filename is None and self.name is None:
message = (
"Must specify filename for export"
"if MDF was created without a file name"
)
logger.warning(message)
return
single_time_base = kwargs.get("single_time_base", False)
raster = kwargs.get("raster", None)
time_from_zero = kwargs.get("time_from_zero", True)
use_display_names = kwargs.get("use_display_names", True)
empty_channels = kwargs.get("empty_channels", "skip")
format = kwargs.get("format", "5")
oned_as = kwargs.get("oned_as", "row")
reduce_memory_usage = kwargs.get("reduce_memory_usage", False)
compression = kwargs.get("compression", "")
time_as_date = kwargs.get("time_as_date", False)
ignore_value2text_conversions = kwargs.get(
"ignore_value2text_conversions", False
)
raw = bool(kwargs.get("raw", False))
if compression == "SNAPPY":
try:
import snappy
except ImportError:
logger.warning(
"snappy compressor is not installed; compression will be set to GZIP"
)
compression = "GZIP"
filename = Path(filename) if filename else self.name
if fmt == "parquet":
try:
from fastparquet import write as write_parquet
except ImportError:
logger.warning(
"fastparquet not found; export to parquet is unavailable"
)
return
elif fmt == "hdf5":
try:
from h5py import File as HDF5
except ImportError:
logger.warning("h5py not found; export to HDF5 is unavailable")
return
elif fmt == "mat":
if format == "7.3":
try:
from hdf5storage import savemat
except ImportError:
logger.warning(
"hdf5storage not found; export to mat v7.3 is unavailable"
)
return
else:
try:
from scipy.io import savemat
except ImportError:
logger.warning("scipy not found; export to mat is unavailable")
return
elif fmt not in ("csv",):
raise MdfException(f"Export to {fmt} is not implemented")
name = ""
if self._callback:
self._callback(0, 100)
if single_time_base or fmt == "parquet":
df = self.to_dataframe(
raster=raster,
time_from_zero=time_from_zero,
use_display_names=use_display_names,
empty_channels=empty_channels,
reduce_memory_usage=reduce_memory_usage,
ignore_value2text_conversions=ignore_value2text_conversions,
raw=raw,
)
units = OrderedDict()
comments = OrderedDict()
used_names = UniqueDB()
dropped = {}
groups_nr = len(self.groups)
for i, grp in enumerate(self.groups):
if self._terminate:
return
for ch in grp.channels:
if use_display_names:
channel_name = ch.display_name or ch.name
else:
channel_name = ch.name
channel_name = used_names.get_unique_name(channel_name)
if hasattr(ch, "unit"):
unit = ch.unit
if ch.conversion:
unit = unit or ch.conversion.unit
else:
unit = ""
comment = ch.comment
units[channel_name] = unit
comments[channel_name] = comment
if self._callback:
self._callback(i + 1, groups_nr * 2)
if fmt == "hdf5":
filename = filename.with_suffix(".hdf")
if single_time_base:
with HDF5(str(filename), "w") as hdf:
# header information
group = hdf.create_group(str(filename))
if self.version in MDF2_VERSIONS + MDF3_VERSIONS:
for item in header_items:
group.attrs[item] = self.header[item].replace(b"\0", b"")
# save each data group in a HDF5 group called
# "DataGroup_<cntr>" with the index starting from 1
# each HDF5 group will have a string attribute "master"
# that will hold the name of the master channel
count = len(df.columns)
for i, channel in enumerate(df):
samples = df[channel]
unit = units.get(channel, "")
comment = comments.get(channel, "")
if samples.dtype.kind == "O":
if isinstance(samples[0], np.ndarray):
samples = np.vstack(samples)
else:
continue
if compression:
dataset = group.create_dataset(
channel, data=samples, compression=compression
)
else:
dataset = group.create_dataset(channel, data=samples)
unit = unit.replace("\0", "")
if unit:
dataset.attrs["unit"] = unit
comment = comment.replace("\0", "")
if comment:
dataset.attrs["comment"] = comment
if self._callback:
self._callback(i + 1 + count, count * 2)
else:
with HDF5(str(filename), "w") as hdf:
# header information
group = hdf.create_group(str(filename))
if self.version in MDF2_VERSIONS + MDF3_VERSIONS:
for item in header_items:
group.attrs[item] = self.header[item].replace(b"\0", b"")
# save each data group in a HDF5 group called
# "DataGroup_<cntr>" with the index starting from 1
# each HDF5 group will have a string attribute "master"
# that will hold the name of the master channel
groups_nr = len(self.virtual_groups)
for i, (group_index, virtual_group) in enumerate(
self.virtual_groups.items()
):
channels = self.included_channels(group_index)[group_index]
if not channels:
continue
names = UniqueDB()
if self._terminate:
return
if len(virtual_group.groups) == 1:
comment = self.groups[
virtual_group.groups[0]
].channel_group.comment
else:
comment = "Virtual group i"
group_name = r"/" + f"ChannelGroup_{i}"
group = hdf.create_group(group_name)
group.attrs["comment"] = comment
master_index = self.masters_db.get(group_index, -1)
if master_index >= 0:
group.attrs["master"] = (
self.groups[group_index].channels[master_index].name
)
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in channels.items()
for ch_index in channel_indexes
]
if not channels:
continue
channels = self.select(channels, raw=raw)
for j, sig in enumerate(channels):
if use_display_names:
name = sig.display_name or sig.name
else:
name = sig.name
name = name.replace("\\", "_").replace("/", "_")
name = names.get_unique_name(name)
if reduce_memory_usage:
sig.samples = downcast(sig.samples)
if compression:
dataset = group.create_dataset(
name, data=sig.samples, compression=compression
)
else:
dataset = group.create_dataset(
name, data=sig.samples, dtype=sig.samples.dtype
)
unit = sig.unit.replace("\0", "")
if unit:
dataset.attrs["unit"] = unit
comment = sig.comment.replace("\0", "")
if comment:
dataset.attrs["comment"] = comment
if self._callback:
self._callback(i + 1, groups_nr)
elif fmt == "csv":
fmtparams = {
"delimiter": kwargs.get("delimiter", ",")[0],
"doublequote": kwargs.get("doublequote", True),
"lineterminator": kwargs.get("lineterminator", '\r\n'),
"quotechar": kwargs.get("quotechar", '"')[0],
}
quoting = kwargs.get("quoting", "MINIMAL").upper()
quoting = getattr(csv, f"QUOTE_{quoting}")
fmtparams["quoting"] = quoting
escapechar = kwargs.get("escapechar", None)
if escapechar is not None:
escapechar = escapechar[0]
fmtparams["escapechar"] = escapechar
if single_time_base:
filename = filename.with_suffix(".csv")
message = f'Writing csv export to file "{filename}"'
logger.info(message)
if time_as_date:
index = (
pd.to_datetime(
df.index + self.header.start_time.timestamp(), unit="s"
)
.tz_localize("UTC")
.tz_convert(LOCAL_TIMEZONE)
.astype(str)
)
df.index = index
df.index.name = "timestamps"
if hasattr(self, "can_logging_db") and self.can_logging_db:
dropped = {}
for name_ in df.columns:
if name_.endswith("CAN_DataFrame.ID"):
dropped[name_] = pd.Series(
csv_int2hex(df[name_].astype("<u4") & 0x1FFFFFFF),
index=df.index,
)
elif name_.endswith("CAN_DataFrame.DataBytes"):
dropped[name_] = pd.Series(
csv_bytearray2hex(df[name_]), index=df.index
)
df = df.drop(columns=list(dropped))
for name, s in dropped.items():
df[name] = s
with open(filename, "w", newline="") as csvfile:
writer = csv.writer(csvfile, **fmtparams)
names_row = [df.index.name, *df.columns]
writer.writerow(names_row)
if reduce_memory_usage:
vals = [df.index, *(df[name] for name in df)]
else:
vals = [
df.index.to_list(),
*(df[name].to_list() for name in df),
]
count = len(df.index)
if self._terminate:
return
for i, row in enumerate(zip(*vals)):
writer.writerow(row)
if self._callback:
self._callback(i + 1 + count, count * 2)
else:
filename = filename.with_suffix(".csv")
gp_count = len(self.virtual_groups)
for i, (group_index, virtual_group) in enumerate(
self.virtual_groups.items()
):
if self._terminate:
return
message = f"Exporting group {i+1} of {gp_count}"
logger.info(message)
if len(virtual_group.groups) == 1:
comment = self.groups[
virtual_group.groups[0]
].channel_group.comment
else:
comment = ""
if comment:
for char in r' \/:"':
comment = comment.replace(char, "_")
group_csv_name = (
filename.parent
/ f"{filename.stem}.ChannelGroup_{i}_{comment}.csv"
)
else:
group_csv_name = (
filename.parent / f"{filename.stem}.ChannelGroup_{i}.csv"
)
df = self.get_group(
group_index,
raster=raster,
time_from_zero=time_from_zero,
use_display_names=use_display_names,
reduce_memory_usage=reduce_memory_usage,
ignore_value2text_conversions=ignore_value2text_conversions,
raw=raw,
)
if time_as_date:
index = (
pd.to_datetime(
df.index + self.header.start_time.timestamp(), unit="s"
)
.tz_localize("UTC")
.tz_convert(LOCAL_TIMEZONE)
.astype(str)
)
df.index = index
df.index.name = "timestamps"
with open(group_csv_name, "w", newline="") as csvfile:
writer = csv.writer(csvfile, **fmtparams)
if hasattr(self, "can_logging_db") and self.can_logging_db:
dropped = {}
for name_ in df.columns:
if name_.endswith("CAN_DataFrame.ID"):
dropped[name_] = pd.Series(
csv_int2hex(df[name_] & 0x1FFFFFFF),
index=df.index,
)
elif name_.endswith("CAN_DataFrame.DataBytes"):
dropped[name_] = pd.Series(
csv_bytearray2hex(df[name_]), index=df.index
)
df = df.drop(columns=list(dropped))
for name_, s in dropped.items():
df[name_] = s
names_row = [df.index.name, *df.columns]
writer.writerow(names_row)
if reduce_memory_usage:
vals = [df.index, *(df[name] for name in df)]
else:
vals = [
df.index.to_list(),
*(df[name].to_list() for name in df),
]
for i, row in enumerate(zip(*vals)):
writer.writerow(row)
if self._callback:
self._callback(i + 1, gp_count)
elif fmt == "mat":
filename = filename.with_suffix(".mat")
if not single_time_base:
mdict = {}
master_name_template = "DGM{}_{}"
channel_name_template = "DG{}_{}"
used_names = UniqueDB()
groups_nr = len(self.virtual_groups)
for i, (group_index, virtual_group) in enumerate(
self.virtual_groups.items()
):
if self._terminate:
return
channels = self.included_channels(group_index)[group_index]
if not channels:
continue
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in channels.items()
for ch_index in channel_indexes
]
if not channels:
continue
channels = self.select(
channels,
ignore_value2text_conversions=ignore_value2text_conversions,
raw=raw,
)
master = channels[0].copy()
master.samples = master.timestamps
channels.insert(0, master)
for j, sig in enumerate(channels):
if j == 0:
channel_name = master_name_template.format(i, "timestamps")
else:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = channel_name_template.format(i, channel_name)
channel_name = matlab_compatible(channel_name)
channel_name = used_names.get_unique_name(channel_name)
if sig.samples.dtype.names:
sig.samples.dtype.names = [
matlab_compatible(name)
for name in sig.samples.dtype.names
]
mdict[channel_name] = sig.samples
if self._callback:
self._callback(i + 1, groups_nr + 1)
else:
used_names = UniqueDB()
mdict = {}
count = len(df.columns)
for i, name in enumerate(df.columns):
channel_name = matlab_compatible(name)
channel_name = used_names.get_unique_name(channel_name)
mdict[channel_name] = df[name].values
if hasattr(mdict[channel_name].dtype, "categories"):
mdict[channel_name] = np.array(mdict[channel_name], dtype="S")
if self._callback:
self._callback(i + 1 + count, count * 2)
mdict["timestamps"] = df.index.values
if self._callback:
self._callback(80, 100)
if format == "7.3":
savemat(
str(filename),
mdict,
long_field_names=True,
format="7.3",
delete_unused_variables=False,
oned_as=oned_as,
structured_numpy_ndarray_as_struct=True,
)
else:
savemat(
str(filename),
mdict,
long_field_names=True,
oned_as=oned_as,
do_compression=bool(compression),
)
if self._callback:
self._callback(100, 100)
elif fmt == "parquet":
filename = filename.with_suffix(".parquet")
if compression:
write_parquet(filename, df, compression=compression)
else:
write_parquet(filename, df)
else:
message = (
'Unsupported export type "{}". '
'Please select "csv", "excel", "hdf5", "mat" or "pandas"'
)
message = message.format(fmt)
logger.warning(message)
def filter(self, channels, version=None):
"""return new *MDF* object that contains only the channels listed in
the *channels* argument
Parameters
----------
channels : list
list of items to be filtered; each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
'3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default *None* and in this
case the original file version is used
Returns
-------
mdf : MDF
new *MDF* file
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF()
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='SIG') for j in range(1,4)]
... mdf.append(sigs)
...
>>> filtered = mdf.filter(['SIG', ('SIG', 3, 1), ['SIG', 2], (None, 1, 2)])
>>> for gp_nr, ch_nr in filtered.channels_db['SIG']:
... print(filtered.get(group=gp_nr, index=ch_nr))
...
<Signal SIG:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
<Signal SIG:
samples=[ 31. 31. 31. 31. 31.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
<Signal SIG:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
<Signal SIG:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
# group channels by group index
gps = self.included_channels(channels=channels)
mdf = MDF(
version=version,
**self._kwargs,
)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
mdf.configure(from_other=self)
mdf.header.start_time = self.header.start_time
self.configure(copy_on_get=False)
if self.name:
origin = self.name.name
else:
origin = "New MDF"
groups_nr = len(gps)
if self._callback:
self._callback(0, groups_nr)
for i, (group_index, groups) in enumerate(gps.items()):
for idx, sigs in enumerate(
self._yield_selected_signals(
group_index, groups=groups, version=version
)
):
if not sigs:
break
if idx == 0:
if sigs:
cg = self.groups[group_index].channel_group
cg_nr = mdf.append(
sigs,
common_timebase=True,
)
MDF._transfer_channel_group_data(
mdf.groups[cg_nr].channel_group, cg
)
else:
break
else:
mdf.extend(cg_nr, sigs)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
self.configure(copy_on_get=True)
mdf._transfer_metadata(self, message=f"Filtered from {self.name}")
if self._callback:
mdf._callback = mdf._mdf._callback = self._callback
return mdf
def iter_get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
raw=False,
):
"""iterator over a channel
This is useful in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without applying the conversion rule; default
`False`
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
data = self._load_data(grp)
for fragment in data:
yield self.get(
group=gp_nr,
index=ch_nr,
raster=raster,
samples_only=samples_only,
data=fragment,
raw=raw,
)
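# Usage sketch (the file path and channel name are hypothetical): iterate a
# large channel fragment by fragment so only one data block is loaded at a time.
#
#     mdf = MDF("recording.mf4")
#     for sig in mdf.iter_get("EngineSpeed"):
#         if len(sig):
#             print(sig.timestamps[0], float(sig.samples.mean()))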
@staticmethod
def concatenate(
files,
version="4.10",
sync=True,
add_samples_origin=False,
direct_timestamp_continuation=False,
**kwargs,
):
"""concatenates several files. The files
must have the same internal structure (same number of groups, and same
channels in each group)
Parameters
----------
files : list | tuple
list of *MDF* file names or *MDF*, zipfile.ZipFile, bz2.BZ2File or gzip.GzipFile
instances
.. versionchanged:: 6.2.0
added support for zipfile.ZipFile, bz2.BZ2File and gzip.GzipFile
version : str
merged file version
sync : bool
sync the files based on the start of measurement, default *True*
add_samples_origin : bool
option to create a new "__samples_origin" channel that will hold
the index of the measurement from where each timestamp originated
direct_timestamp_continuation (False) : bool
the time stamps from the next file will be added right after the last
time stamp from the previous file; default False
.. versionadded:: 6.0.0
kwargs :
use_display_names (False) : bool
Examples
--------
>>> conc = MDF.concatenate(
[
'path/to/file.mf4',
MDF(BytesIO(data)),
MDF(zipfile.ZipFile('data.zip')),
MDF(bz2.BZ2File('path/to/data.bz2', 'rb')),
MDF(gzip.GzipFile('path/to/data.gzip', 'rb')),
],
version='4.00',
sync=False,
)
Returns
-------
concatenate : MDF
new *MDF* object with concatenated channels
Raises
------
MdfException : if there are inconsistencies between the files
"""
if not files:
raise MdfException("No files given for merge")
callback = kwargs.get("callback", None)
if callback:
callback(0, 100)
mdf_nr = len(files)
input_types = [isinstance(mdf, MDF) for mdf in files]
use_display_names = kwargs.get("use_display_names", False)
versions = []
if sync:
timestamps = []
for file in files:
if isinstance(file, MDF):
timestamps.append(file.header.start_time)
versions.append(file.version)
else:
if is_file_like(file):
ts, version = get_measurement_timestamp_and_version(file, "io")
timestamps.append(ts)
versions.append(version)
else:
with open(file, "rb") as mdf:
ts, version = get_measurement_timestamp_and_version(mdf, file)
timestamps.append(ts)
versions.append(version)
try:
oldest = min(timestamps)
except TypeError:
timestamps = [
timestamp.astimezone(timezone.utc) for timestamp in timestamps
]
oldest = min(timestamps)
offsets = [(timestamp - oldest).total_seconds() for timestamp in timestamps]
offsets = [offset if offset > 0 else 0 for offset in offsets]
else:
file = files[0]
if isinstance(file, MDF):
oldest = file.header.start_time
versions.append(file.version)
else:
if is_file_like(file):
ts, version = get_measurement_timestamp_and_version(file, "io")
versions.append(version)
else:
with open(file, "rb") as mdf:
ts, version = get_measurement_timestamp_and_version(mdf, file)
versions.append(version)
oldest = ts
offsets = [0 for _ in files]
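# Illustrative note on the offsets computed above: with sync=True and start
# times of 10:00:00, 10:00:05 and 10:00:12 the oldest file gets offset 0.0 and
# the other two get 5.0 and 12.0 seconds added to their timestamps; with
# sync=False every file keeps its own time base (all offsets are 0).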
included_channel_names = []
cg_map = {}
if add_samples_origin:
origin_conversion = {}
for i, mdf in enumerate(files):
origin_conversion[f"val_{i}"] = i
if isinstance(mdf, MDF):
origin_conversion[f"text_{i}"] = str(mdf.name)
else:
origin_conversion[f"text_{i}"] = str(mdf)
origin_conversion = from_dict(origin_conversion)
for mdf_index, (offset, mdf) in enumerate(zip(offsets, files)):
if not isinstance(mdf, MDF):
mdf = MDF(
mdf,
use_display_names=use_display_names,
)
if mdf_index == 0:
version = validate_version_argument(version)
kwargs = dict(mdf._kwargs)
kwargs.pop("callback", None)
merged = MDF(
version=version,
callback=callback,
**kwargs,
)
integer_interpolation_mode = mdf._integer_interpolation
float_interpolation_mode = mdf._float_interpolation
merged.configure(from_other=mdf)
merged.header.start_time = oldest
mdf.configure(copy_on_get=False)
if mdf_index == 0:
last_timestamps = [None for gp in mdf.virtual_groups]
groups_nr = len(last_timestamps)
else:
if len(mdf.virtual_groups) != groups_nr:
raise MdfException(
f"internal structure of file <{mdf.name}> is different; different channel groups count"
)
for i, group_index in enumerate(mdf.virtual_groups):
included_channels = mdf.included_channels(group_index)[group_index]
if mdf_index == 0:
included_channel_names.append(
[
mdf.groups[gp_index].channels[ch_index].name
for gp_index, channels in included_channels.items()
for ch_index in channels
]
)
different_channel_order = False
else:
names = [
mdf.groups[gp_index].channels[ch_index].name
for gp_index, channels in included_channels.items()
for ch_index in channels
]
different_channel_order = False
if names != included_channel_names[i]:
if sorted(names) != sorted(included_channel_names[i]):
raise MdfException(
f"internal structure of file {mdf_index} is different; different channels"
)
else:
original_names = included_channel_names[i]
different_channel_order = True
remap = [original_names.index(name) for name in names]
if not included_channels:
continue
idx = 0
last_timestamp = last_timestamps[i]
first_timestamp = None
original_first_timestamp = None
for idx, signals in enumerate(
mdf._yield_selected_signals(group_index, groups=included_channels)
):
if not signals:
break
if mdf_index == 0 and idx == 0:
first_signal = signals[0]
if len(first_signal):
if offset > 0:
timestamps = first_signal.timestamps + offset
for sig in signals:
sig.timestamps = timestamps
last_timestamp = first_signal.timestamps[-1]
first_timestamp = first_signal.timestamps[0]
original_first_timestamp = first_timestamp
if add_samples_origin:
signals.append(
Signal(
samples=np.ones(len(first_signal), dtype="<u2")
* mdf_index,
timestamps=first_signal.timestamps,
conversion=origin_conversion,
name="__samples_origin",
)
)
cg = mdf.groups[group_index].channel_group
cg_nr = merged.append(
signals,
common_timebase=True,
)
MDF._transfer_channel_group_data(
merged.groups[cg_nr].channel_group, cg
)
cg_map[group_index] = cg_nr
else:
if different_channel_order:
new_signals = [None for _ in signals]
if idx == 0:
for new_index, sig in zip(remap, signals):
new_signals[new_index] = sig
else:
for new_index, sig in zip(remap, signals[1:]):
new_signals[new_index + 1] = sig
new_signals[0] = signals[0]
signals = new_signals
if idx == 0:
signals = [(signals[0].timestamps, None)] + [
(sig.samples, sig.invalidation_bits) for sig in signals
]
master = signals[0][0]
_copied = False
if len(master):
if original_first_timestamp is None:
original_first_timestamp = master[0]
if offset > 0:
master = master + offset
_copied = True
if last_timestamp is None:
last_timestamp = master[-1]
else:
if (
last_timestamp >= master[0]
or direct_timestamp_continuation
):
if len(master) >= 2:
delta = master[1] - master[0]
else:
delta = 0.001
if _copied:
master -= master[0]
else:
master = master - master[0]
_copied = True
master += last_timestamp + delta
last_timestamp = master[-1]
signals[0] = master, None
if add_samples_origin:
signals.append(
(
np.ones(len(master), dtype="<u2") * mdf_index,
None,
)
)
cg_nr = cg_map[group_index]
merged.extend(cg_nr, signals)
if first_timestamp is None:
first_timestamp = master[0]
last_timestamps[i] = last_timestamp
mdf.configure(copy_on_get=True)
if mdf_index == 0:
merged._transfer_metadata(mdf)
if not input_types[mdf_index]:
mdf.close()
if callback:
callback(i + 1 + mdf_index * groups_nr, groups_nr * mdf_nr)
if MDF._terminate:
return
try:
merged._process_bus_logging()
except:
pass
return merged
@staticmethod
def stack(files, version="4.10", sync=True, **kwargs):
"""stack several files and return the stacked *MDF* object
Parameters
----------
files : list | tuple
list of *MDF* file names or *MDF*, zipfile.ZipFile, bz2.BZ2File or gzip.GzipFile
instances
.. versionchanged:: 6.2.0
added support for zipfile.ZipFile, bz2.BZ2File and gzip.GzipFile
version : str
merged file version
sync : bool
sync the files based on the start of measurement, default *True*
kwargs :
use_display_names (False) : bool
Examples
--------
>>> stacked = MDF.stack(
[
'path/to/file.mf4',
MDF(BytesIO(data)),
MDF(zipfile.ZipFile('data.zip')),
MDF(bz2.BZ2File('path/to/data.bz2', 'rb')),
MDF(gzip.GzipFile('path/to/data.gzip', 'rb')),
],
version='4.00',
sync=False,
)
Returns
-------
stacked : MDF
new *MDF* object with stacked channels
"""
if not files:
raise MdfException("No files given for stack")
version = validate_version_argument(version)
callback = kwargs.get("callback", None)
use_display_names = kwargs.get("use_display_names", False)
files_nr = len(files)
input_types = [isinstance(mdf, MDF) for mdf in files]
if callback:
callback(0, files_nr)
if sync:
timestamps = []
for file in files:
if isinstance(file, MDF):
timestamps.append(file.header.start_time)
else:
if is_file_like(file):
ts, version = get_measurement_timestamp_and_version(file, "io")
timestamps.append(ts)
else:
with open(file, "rb") as mdf:
ts, version = get_measurement_timestamp_and_version(mdf, file)
timestamps.append(ts)
try:
oldest = min(timestamps)
except TypeError:
timestamps = [
timestamp.astimezone(timezone.utc) for timestamp in timestamps
]
oldest = min(timestamps)
offsets = [(timestamp - oldest).total_seconds() for timestamp in timestamps]
else:
offsets = [0 for file in files]
for mdf_index, (offset, mdf) in enumerate(zip(offsets, files)):
if not isinstance(mdf, MDF):
mdf = MDF(mdf, use_display_names=use_display_names)
if mdf_index == 0:
version = validate_version_argument(version)
kwargs = dict(mdf._kwargs)
kwargs.pop("callback", None)
stacked = MDF(
version=version,
callback=callback, **kwargs,
)
integer_interpolation_mode = mdf._integer_interpolation
float_interpolation_mode = mdf._float_interpolation
stacked.configure(from_other=mdf)
if sync:
stacked.header.start_time = oldest
else:
stacked.header.start_time = mdf.header.start_time
mdf.configure(copy_on_get=False)
for i, group in enumerate(mdf.virtual_groups):
dg_cntr = None
included_channels = mdf.included_channels(group)[group]
if not included_channels:
continue
for idx, signals in enumerate(
mdf._yield_selected_signals(
group, groups=included_channels, version=version
)
):
if not signals:
break
if idx == 0:
if sync:
timestamps = signals[0].timestamps + offset
for sig in signals:
sig.timestamps = timestamps
cg = mdf.groups[group].channel_group
dg_cntr = stacked.append(
signals,
common_timebase=True,
)
MDF._transfer_channel_group_data(
stacked.groups[dg_cntr].channel_group, cg
)
else:
master = signals[0][0]
if sync:
master = master + offset
signals[0] = master, None
stacked.extend(dg_cntr, signals)
if dg_cntr is not None:
for index in range(dg_cntr, len(stacked.groups)):
stacked.groups[
index
].channel_group.comment = (
f'stacked from channel group {i} of "{mdf.name.parent}"'
)
if callback:
callback(mdf_index, files_nr)
mdf.configure(copy_on_get=True)
if mdf_index == 0:
stacked._transfer_metadata(mdf)
if not input_types[mdf_index]:
mdf.close()
if MDF._terminate:
return
try:
stacked._process_bus_logging()
except:
pass
return stacked
def iter_channels(self, skip_master=True, copy_master=True):
"""generator that yields a *Signal* for each non-master channel
Parameters
----------
skip_master : bool
do not yield master channels; default *True*
copy_master : bool
copy master for each yielded channel
"""
for index in self.virtual_groups:
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(index)[
index
].items()
for ch_index in channel_indexes
]
channels = self.select(channels, copy_master=copy_master)
yield from channels
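# Usage sketch (hypothetical, duplicate channel names would collapse in the
# dict): compute per-channel maxima without keeping every Signal at once.
#
#     maxima = {sig.name: sig.samples.max() for sig in mdf.iter_channels() if len(sig)}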
def iter_groups(
self,
raster=None,
time_from_zero=True,
empty_channels="skip",
keep_arrays=False,
use_display_names=False,
time_as_date=False,
reduce_memory_usage=False,
raw=False,
ignore_value2text_conversions=False,
only_basenames=False,
):
"""generator that yields channel groups as pandas DataFrames. If there
are multiple occurrences for the same channel name inside a channel
group, then a counter will be used to make the names unique
(<original_name>_<counter>)
Parameters
----------
use_display_names : bool
use display name instead of standard channel name, if available.
.. versionadded:: 5.21.0
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can represent the values found
in integer columns; default *False*
.. versionadded:: 5.21.0
raw (False) : bool
the dataframe will contain the raw channel values
.. versionadded:: 5.21.0
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.21.0
keep_arrays (False) : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
.. versionadded:: 5.21.0
empty_channels ("skip") : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
.. versionadded:: 5.21.0
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
.. versionadded:: 5.21.0
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
.. versionadded:: 5.21.0
"""
for i in self.virtual_groups:
yield self.get_group(
i,
raster=None,
time_from_zero=time_from_zero,
empty_channels=empty_channels,
keep_arrays=keep_arrays,
use_display_names=use_display_names,
time_as_date=time_as_date,
reduce_memory_usage=reduce_memory_usage,
raw=raw,
ignore_value2text_conversions=ignore_value2text_conversions,
only_basenames=only_basenames,
)
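# Usage sketch (output file names are hypothetical): write each channel group to
# its own CSV file.
#
#     for i, frame in enumerate(mdf.iter_groups(time_from_zero=True)):
#         frame.to_csv(f"channel_group_{i}.csv")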
def resample(self, raster, version=None, time_from_zero=False):
"""resample all channels using the given raster. See *configure* to select
the interpolation method for integer channels
Parameters
----------
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
'3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default *None* and
in this case the original file version is used
time_from_zero : bool
start time stamps from 0s in the cut measurement
Returns
-------
mdf : MDF
new *MDF* with resampled channels
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> mdf = MDF()
>>> sig = Signal(name='S1', samples=[1,2,3,4], timestamps=[1,2,3,4])
>>> mdf.append(sig)
>>> sig = Signal(name='S2', samples=[1,2,3,4], timestamps=[1.1, 3.5, 3.7, 3.9])
>>> mdf.append(sig)
>>> resampled = mdf.resample(raster=0.1)
>>> resampled.select(['S1', 'S2'])
[<Signal S1:
samples=[1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 4]
timestamps=[1. 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2. 2.1 2.2 2.3 2.4 2.5 2.6 2.7
2.8 2.9 3. 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 4. ]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
, <Signal S2:
samples=[1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 3 3 4 4]
timestamps=[1. 1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9 2. 2.1 2.2 2.3 2.4 2.5 2.6 2.7
2.8 2.9 3. 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 4. ]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
]
>>> resampled = mdf.resample(raster='S2')
>>> resampled.select(['S1', 'S2'])
[<Signal S1:
samples=[1 3 3 3]
timestamps=[1.1 3.5 3.7 3.9]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
, <Signal S2:
samples=[1 2 3 4]
timestamps=[1.1 3.5 3.7 3.9]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
]
>>> resampled = mdf.resample(raster=[1.9, 2.0, 2.1])
>>> resampled.select(['S1', 'S2'])
[<Signal S1:
samples=[1 2 2]
timestamps=[1.9 2. 2.1]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
, <Signal S2:
samples=[1 1 1]
timestamps=[1.9 2. 2.1]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
]
>>> resampled = mdf.resample(raster='S2', time_from_zero=True)
>>> resampled.select(['S1', 'S2'])
[<Signal S1:
samples=[1 3 3 3]
timestamps=[0. 2.4 2.6 2.8]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
, <Signal S2:
samples=[1 2 3 4]
timestamps=[0. 2.4 2.6 2.8]
invalidation_bits=None
unit=""
conversion=None
source=Source(name='Python', path='Python', comment='', source_type=4, bus_type=0)
comment=""
mastermeta="('time', 1)"
raw=True
display_name=
attachment=()>
]
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
mdf = MDF(
version=version,
**self._kwargs,
)
integer_interpolation_mode = self._integer_interpolation
float_interpolation_mode = self._float_interpolation
mdf.configure(from_other=self)
mdf.header.start_time = self.header.start_time
groups_nr = len(self.virtual_groups)
if self._callback:
self._callback(0, groups_nr)
try:
raster = float(raster)
assert raster > 0
except (TypeError, ValueError):
if isinstance(raster, str):
raster = self.get(raster).timestamps
else:
raster = np.array(raster)
else:
raster = master_using_raster(self, raster)
if time_from_zero and len(raster):
delta = raster[0]
new_raster = raster - delta
t_epoch = self.header.start_time.timestamp() + delta
mdf.header.start_time = datetime.fromtimestamp(t_epoch)
else:
delta = 0
new_raster = None
mdf.header.start_time = self.header.start_time
for i, (group_index, virtual_group) in enumerate(self.virtual_groups.items()):
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(group_index)[
group_index
].items()
for ch_index in channel_indexes
]
sigs = self.select(channels, raw=True)
sigs = [
sig.interp(
raster,
integer_interpolation_mode=integer_interpolation_mode,
float_interpolation_mode=float_interpolation_mode,
)
for sig in sigs
]
if new_raster is not None:
for sig in sigs:
if len(sig):
sig.timestamps = new_raster
cg = self.groups[group_index].channel_group
dg_cntr = mdf.append(
sigs,
common_timebase=True,
)
MDF._transfer_channel_group_data(mdf.groups[dg_cntr].channel_group, cg)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
if self._callback:
self._callback(groups_nr, groups_nr)
mdf._transfer_metadata(self, message=f"Resampled from {self.name}")
if self._callback:
mdf._callback = mdf._mdf._callback = self._callback
return mdf
def select(
self,
channels,
record_offset=0,
raw=False,
copy_master=True,
ignore_value2text_conversions=False,
record_count=None,
validate=False,
):
"""retrieve the channels listed in *channels* argument as *Signal*
objects
.. note:: the *dataframe* argument was removed in version 5.8.0
use the ``to_dataframe`` method instead
Parameters
----------
channels : list
list of items to be filtered; each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
record_offset : int
record number offset; optimization to get the last part of signal samples
raw : bool
get raw channel samples; default *False*
copy_master : bool
option to get a new timestamps array for each selected Signal or to
use a shared array for channels of the same channel group; default *True*
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
validate (False) : bool
consider the invalidation bits
.. versionadded:: 5.16.0
Returns
-------
signals : list
list of *Signal* objects based on the input channel list
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF()
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='SIG') for j in range(1,4)]
... mdf.append(sigs)
...
>>> # select SIG group 0 default index 1 default, SIG group 3 index 1, SIG group 2 index 1 default and channel index 2 from group 1
...
>>> mdf.select(['SIG', ('SIG', 3, 1), ['SIG', 2], (None, 1, 2)])
[<Signal SIG:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
, <Signal SIG:
samples=[ 31. 31. 31. 31. 31.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
, <Signal SIG:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
, <Signal SIG:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
]
"""
virtual_groups = self.included_channels(
channels=channels, minimal=False, skip_master=False
)
output_signals = {}
for virtual_group, groups in virtual_groups.items():
cycles_nr = self._mdf.virtual_groups[virtual_group].cycles_nr
pairs = [
(gp_index, ch_index)
for gp_index, channel_indexes in groups.items()
for ch_index in channel_indexes
]
if record_count is None:
cycles = cycles_nr - record_offset
else:
if cycles_nr < record_count + record_offset:
cycles = cycles_nr - record_offset
else:
cycles = record_count
signals = []
current_pos = 0
for idx, sigs in enumerate(
self._yield_selected_signals(
virtual_group,
groups=groups,
record_offset=record_offset,
record_count=record_count,
)
):
if not sigs:
break
if idx == 0:
next_pos = current_pos + len(sigs[0])
master = np.empty(cycles, dtype=sigs[0].timestamps.dtype)
master[current_pos:next_pos] = sigs[0].timestamps
for sig in sigs:
shape = (cycles,) + sig.samples.shape[1:]
signal = np.empty(shape, dtype=sig.samples.dtype)
signal[current_pos:next_pos] = sig.samples
sig.samples = signal
signals.append(sig)
if sig.invalidation_bits is not None:
inval = np.empty(cycles, dtype=sig.invalidation_bits.dtype)
inval[current_pos:next_pos] = sig.invalidation_bits
sig.invalidation_bits = inval
else:
sig, _ = sigs[0]
next_pos = current_pos + len(sig)
master[current_pos:next_pos] = sig
for signal, (sig, inval) in zip(signals, sigs[1:]):
signal.samples[current_pos:next_pos] = sig
if signal.invalidation_bits is not None:
signal.invalidation_bits[current_pos:next_pos] = inval
current_pos = next_pos
for signal, pair in zip(signals, pairs):
signal.timestamps = master
output_signals[pair] = signal
indexes = []
for item in channels:
if not isinstance(item, (list, tuple)):
item = [item]
indexes.append(self._validate_channel_selection(*item))
signals = [output_signals[pair] for pair in indexes]
if copy_master:
for signal in signals:
signal.timestamps = signal.timestamps.copy()
if not raw:
if ignore_value2text_conversions:
for signal in signals:
conversion = signal.conversion
if conversion:
samples = conversion.convert(signal.samples)
if samples.dtype.kind not in "US":
signal.samples = samples
signal.raw = True
signal.conversion = None
else:
for signal in signals:
conversion = signal.conversion
if conversion:
signal.samples = conversion.convert(signal.samples)
signal.raw = False
signal.conversion = None
if signal.samples.dtype.kind == "S":
signal.encoding = (
"utf-8" if self.version >= "4.00" else "latin-1"
)
if validate:
signals = [sig.validate() for sig in signals]
return signals
@staticmethod
def scramble(name, skip_attachments=False, **kwargs):
"""scramble text blocks and keep original file structure
Parameters
----------
name : str | pathlib.Path
file name
skip_attachments : bool
skip scrambling of attachments data if True
.. versionadded:: 5.9.0
Returns
-------
name : str
scrambled file name
"""
name = Path(name)
mdf = MDF(name)
texts = {}
callback = kwargs.get("callback", None)
if callback:
callback(0, 100)
count = len(mdf.groups)
if mdf.version >= "4.00":
try:
ChannelConversion = ChannelConversionV4
stream = mdf._file
if mdf.header.comment_addr:
stream.seek(mdf.header.comment_addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[mdf.header.comment_addr] = randomized_string(size)
for fh in mdf.file_history:
addr = fh.comment_addr
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
for ev in mdf.events:
for addr in (ev.comment_addr, ev.name_addr):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
for at in mdf.attachments:
for addr in (at.comment_addr, at.file_name_addr):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
if not skip_attachments and at.embedded_data:
texts[at.address + v4c.AT_COMMON_SIZE] = randomized_string(
at.embedded_size
)
for idx, gp in enumerate(mdf.groups, 1):
addr = gp.data_group.comment_addr
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
cg = gp.channel_group
for addr in (cg.acq_name_addr, cg.comment_addr):
if cg.flags & v4c.FLAG_CG_BUS_EVENT:
continue
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
source = cg.acq_source_addr
if source:
source = SourceInformation(
address=source, stream=stream, mapped=False, tx_map={}
)
for addr in (
source.name_addr,
source.path_addr,
source.comment_addr,
):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
for ch in gp.channels:
for addr in (ch.name_addr, ch.unit_addr, ch.comment_addr):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
source = ch.source_addr
if source:
source = SourceInformation(
address=source, stream=stream, mapped=False, tx_map={}
)
for addr in (
source.name_addr,
source.path_addr,
source.comment_addr,
):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
conv = ch.conversion_addr
if conv:
conv = ChannelConversion(
address=conv,
stream=stream,
mapped=False,
tx_map={},
si_map={},
)
for addr in (
conv.name_addr,
conv.unit_addr,
conv.comment_addr,
):
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
if conv.conversion_type == v4c.CONVERSION_TYPE_ALG:
addr = conv.formula_addr
if addr and addr not in texts:
stream.seek(addr + 8)
size = UINT64_u(stream.read(8))[0] - 24
texts[addr] = randomized_string(size)
if conv.referenced_blocks:
for key, block in conv.referenced_blocks.items():
if block:
if isinstance(block, bytes):
addr = conv[key]
if addr not in texts:
stream.seek(addr + 8)
size = len(block)
texts[addr] = randomized_string(size)
if callback:
callback(int(idx / count * 66), 100)
except:
print(
f"Error while scrambling the file: {format_exc()}.\nWill now use fallback method"
)
texts = MDF._fallback_scramble_mf4(name)
mdf.close()
dst = name.with_suffix(".scrambled.mf4")
copy(name, dst)
with open(dst, "rb+") as mdf:
count = len(texts)
chunk = max(count // 34, 1)
idx = 0
for index, (addr, bts) in enumerate(texts.items()):
mdf.seek(addr + 24)
mdf.write(bts)
if index % chunk == 0:
if callback:
callback(66 + idx, 100)
if callback:
callback(100, 100)
else:
ChannelConversion = ChannelConversionV3
stream = mdf._file
if mdf.header.comment_addr:
stream.seek(mdf.header.comment_addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[mdf.header.comment_addr + 4] = randomized_string(size)
texts[36 + 0x40] = randomized_string(32)
texts[68 + 0x40] = randomized_string(32)
texts[100 + 0x40] = randomized_string(32)
texts[132 + 0x40] = randomized_string(32)
for idx, gp in enumerate(mdf.groups, 1):
cg = gp.channel_group
addr = cg.comment_addr
if addr and addr not in texts:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
if gp.trigger:
addr = gp.trigger.text_addr
if addr:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
for ch in gp.channels:
for key in ("long_name_addr", "display_name_addr", "comment_addr"):
if hasattr(ch, key):
addr = getattr(ch, key)
else:
addr = 0
if addr and addr not in texts:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
texts[ch.address + 26] = randomized_string(32)
texts[ch.address + 58] = randomized_string(128)
source = ch.source_addr
if source:
source = ChannelExtension(address=source, stream=stream)
if source.type == v23c.SOURCE_ECU:
texts[source.address + 12] = randomized_string(80)
texts[source.address + 92] = randomized_string(32)
else:
texts[source.address + 14] = randomized_string(36)
texts[source.address + 50] = randomized_string(36)
conv = ch.conversion_addr
if conv:
texts[conv + 22] = randomized_string(20)
conv = ChannelConversion(address=conv, stream=stream)
if conv.conversion_type == v23c.CONVERSION_TYPE_FORMULA:
texts[conv.address + 36] = randomized_string(conv.block_len - 36)
if conv.referenced_blocks:
for key, block in conv.referenced_blocks.items():
if block:
if isinstance(block, bytes):
addr = conv[key]
if addr and addr not in texts:
stream.seek(addr + 2)
size = UINT16_u(stream.read(2))[0] - 4
texts[addr + 4] = randomized_string(size)
if callback:
callback(int(idx / count * 66), 100)
mdf.close()
dst = name.with_suffix(".scrambled.mdf")
copy(name, dst)
with open(dst, "rb+") as mdf:
chunk = count // 34
idx = 0
for index, (addr, bts) in enumerate(texts.items()):
mdf.seek(addr)
mdf.write(bts)
if chunk and index % chunk == 0:
if callback:
callback(66 + idx, 100)
if callback:
callback(100, 100)
return dst
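# Usage sketch (the input path is hypothetical): create an anonymized copy that
# keeps the original block layout but randomizes every text block.
#
#     scrambled_path = MDF.scramble("confidential_measurement.mf4")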
@staticmethod
def _fallback_scramble_mf4(name):
"""scramble text blocks and keep original file structure
Parameters
----------
name : str | pathlib.Path
file name
Returns
-------
name : pathlib.Path
scrambled file name
"""
name = Path(name)
pattern = re.compile(
rb"(?P<block>##(TX|MD))",
re.DOTALL | re.MULTILINE,
)
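# The pattern above matches the "##TX" and "##MD" block identifiers. Every MDF4
# block starts with a 24-byte header (id, reserved, length, link count), so the
# loop below reads the 8-byte length at offset +8 and randomizes everything
# after the header.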
texts = {}
with open(name, "rb") as stream:
stream.seek(0, 2)
file_limit = stream.tell()
stream.seek(0)
for match in re.finditer(pattern, stream.read()):
start = match.start()
if file_limit - start >= 24:
stream.seek(start + 8)
(size,) = UINT64_u(stream.read(8))
if start + size <= file_limit:
texts[start + 24] = randomized_string(size - 24)
return texts
def get_group(
self,
index,
channels=None,
raster=None,
time_from_zero=True,
empty_channels="skip",
keep_arrays=False,
use_display_names=False,
time_as_date=False,
reduce_memory_usage=False,
raw=False,
ignore_value2text_conversions=False,
only_basenames=False,
):
"""get channel group as pandas DataFrames. If there are multiple
occurrences for the same channel name, then a counter will be used to
make the names unique (<original_name>_<counter>)
Parameters
----------
index : int
channel group index
use_display_names : bool
use display name instead of standard channel name, if available.
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can represent the values found
in integer columns; default *False*
raw (False) : bool
the dataframe will contain the raw channel values
.. versionadded:: 5.7.0
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
keep_arrays (False) : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
.. versionadded:: 5.8.0
empty_channels ("skip") : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
.. versionadded:: 5.8.0
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
.. versionadded:: 5.13.0
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
Returns
-------
df : pandas.DataFrame
"""
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(index)[
index
].items()
for ch_index in channel_indexes
]
return self.to_dataframe(
channels=channels,
raster=raster,
time_from_zero=time_from_zero,
empty_channels=empty_channels,
keep_arrays=keep_arrays,
use_display_names=use_display_names,
time_as_date=time_as_date,
reduce_memory_usage=reduce_memory_usage,
raw=raw,
ignore_value2text_conversions=ignore_value2text_conversions,
only_basenames=only_basenames,
)
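# Usage sketch (the group index is illustrative): fetch channel group 0 as a
# DataFrame and inspect its columns.
#
#     frame = mdf.get_group(0, use_display_names=True)
#     print(frame.columns.tolist())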
def iter_to_dataframe(
self,
channels=None,
raster=None,
time_from_zero=True,
empty_channels="skip",
keep_arrays=False,
use_display_names=False,
time_as_date=False,
reduce_memory_usage=False,
raw=False,
ignore_value2text_conversions=False,
use_interpolation=True,
only_basenames=False,
chunk_ram_size=200 * 1024 * 1024,
interpolate_outwards_with_nan=False,
):
"""generator that yields pandas DataFrame's that should not exceed
200MB of RAM
.. versionadded:: 5.15.0
Parameters
----------
channels : list
list of items to be filtered (default None); each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
time_from_zero : bool
adjust time channel to start from 0; default *True*
empty_channels : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
use_display_names : bool
use display name instead of standard channel name, if available.
keep_arrays : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
time_as_date : bool
the dataframe index will contain the datetime timestamps
according to the measurement start time; default *False*. If
*True* then the argument ``time_from_zero`` will be ignored.
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can represent the values found
in integer columns; default *False*
raw (False) : bool
the columns will contain the raw values
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
use_interpolation (True) : bool
option to perform interpolations when multiple timestamp rasters are
present. If *False* then dataframe columns will be automatically
filled with NaN's where the dataframe index values are not found in
the current column's timestamps
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
interpolate_outwards_with_nan : bool
use NaN values for the samples that lie outside of the original
signal's timestamps
chunk_ram_size : int
desired data frame RAM usage in bytes; default 200 MB
Returns
-------
dataframe : pandas.DataFrame
yields pandas DataFrames that should not exceed 200 MB of RAM
"""
if channels:
mdf = self.filter(channels)
result = mdf.iter_to_dataframe(
raster=raster,
time_from_zero=time_from_zero,
empty_channels=empty_channels,
keep_arrays=keep_arrays,
use_display_names=use_display_names,
time_as_date=time_as_date,
reduce_memory_usage=reduce_memory_usage,
raw=raw,
ignore_value2text_conversions=ignore_value2text_conversions,
use_interpolation=use_interpolation,
only_basenames=only_basenames,
chunk_ram_size=chunk_ram_size,
interpolate_outwards_with_nan=interpolate_outwards_with_nan,
)
for df in result:
yield df
mdf.close()
df = {}
self._set_temporary_master(None)
if raster is not None:
try:
raster = float(raster)
assert raster > 0
except (TypeError, ValueError):
if isinstance(raster, str):
raster = self.get(
raster, raw=True, ignore_invalidation_bits=True
).timestamps
else:
raster = np.array(raster)
else:
raster = master_using_raster(self, raster)
master = raster
else:
masters = {index: self.get_master(index) for index in self.virtual_groups}
if masters:
master = reduce(np.union1d, masters.values())
else:
master = np.array([], dtype="<f4")
master_ = master
channel_count = sum(len(gp.channels) - 1 for gp in self.groups) + 1
# approximation with all float64 dtype
itemsize = channel_count * 8
# use 200MB DataFrame chunks
chunk_count = chunk_ram_size // itemsize or 1
chunks, r = divmod(len(master), chunk_count)
if r:
chunks += 1
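# Illustrative arithmetic for the chunking above: with 1000 channels the
# estimated row size is 1000 * 8 = 8000 bytes, so the default 200 MB budget
# yields 200 * 1024 * 1024 // 8000 = 26214 master samples per chunk.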
for i in range(chunks):
master = master_[chunk_count * i : chunk_count * (i + 1)]
start = master[0]
end = master[-1]
df = {}
self._set_temporary_master(None)
used_names = UniqueDB()
used_names.get_unique_name("timestamps")
groups_nr = len(self.virtual_groups)
for group_index, virtual_group in self.virtual_groups.items():
group_cycles = virtual_group.cycles_nr
if group_cycles == 0 and empty_channels == "skip":
continue
record_offset = max(
np.searchsorted(masters[group_index], start).flatten()[0] - 1, 0
)
stop = np.searchsorted(masters[group_index], end).flatten()[0]
record_count = min(stop - record_offset + 1, group_cycles)
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(
group_index
)[group_index].items()
for ch_index in channel_indexes
]
signals = [
signal
for signal in self.select(
channels,
raw=True,
copy_master=False,
record_offset=record_offset,
record_count=record_count,
validate=False,
)
]
if not signals:
continue
group_master = signals[0].timestamps
for sig in signals:
if len(sig) == 0:
if empty_channels == "zeros":
sig.samples = np.zeros(
len(master)
if virtual_group.cycles_nr == 0
else virtual_group.cycles_nr,
dtype=sig.samples.dtype,
)
sig.timestamps = (
master if virtual_group.cycles_nr == 0 else group_master
)
if not raw:
if ignore_value2text_conversions:
if self.version < "4.00":
text_conversion = 11
else:
text_conversion = 7
for signal in signals:
conversion = signal.conversion
if (
conversion
and conversion.conversion_type < text_conversion
):
signal.samples = conversion.convert(signal.samples)
else:
for signal in signals:
if signal.conversion:
signal.samples = signal.conversion.convert(
signal.samples
)
for s_index, sig in enumerate(signals):
sig = sig.validate(copy=False)
if len(sig) == 0:
if empty_channels == "zeros":
sig.samples = np.zeros(
len(master)
if virtual_group.cycles_nr == 0
else virtual_group.cycles_nr,
dtype=sig.samples.dtype,
)
sig.timestamps = (
master if virtual_group.cycles_nr == 0 else group_master
)
signals[s_index] = sig
if use_interpolation:
same_master = np.array_equal(master, group_master)
if not same_master and interpolate_outwards_with_nan:
idx = np.argwhere(
(master >= group_master[0]) & (master <= group_master[-1])
).flatten()
cycles = len(group_master)
signals = [
signal.interp(master, self._integer_interpolation)
if not same_master or len(signal) != cycles
else signal
for signal in signals
]
if not same_master and interpolate_outwards_with_nan:
for sig in signals:
sig.timestamps = sig.timestamps[idx]
sig.samples = sig.samples[idx]
group_master = master
signals = [sig for sig in signals if len(sig)]
if signals:
diffs = np.diff(group_master, prepend=-np.inf) > 0
if np.all(diffs):
index = pd.Index(group_master, tupleize_cols=False)
else:
idx = np.argwhere(diffs).flatten()
group_master = group_master[idx]
index = pd.Index(group_master, tupleize_cols=False)
for sig in signals:
sig.samples = sig.samples[idx]
sig.timestamps = sig.timestamps[idx]
size = len(index)
for k, sig in enumerate(signals):
sig_index = (
index
if len(sig) == size
else pd.Index(sig.timestamps, tupleize_cols=False)
)
# byte arrays
if len(sig.samples.shape) > 1:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
df[channel_name] = pd.Series(
list(sig.samples),
index=sig_index,
)
# arrays and structures
elif sig.samples.dtype.names:
for name, series in components(
sig.samples,
sig.name,
used_names,
master=sig_index,
only_basenames=only_basenames,
):
df[name] = series
# scalars
else:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
if reduce_memory_usage and sig.samples.dtype.kind in "SU":
unique = np.unique(sig.samples)
if len(sig.samples) / len(unique) >= 2:
df[channel_name] = pd.Series(
sig.samples,
index=sig_index,
dtype="category",
)
else:
df[channel_name] = pd.Series(
sig.samples,
index=sig_index,
fastpath=True,
)
else:
if reduce_memory_usage:
sig.samples = downcast(sig.samples)
df[channel_name] = pd.Series(
sig.samples,
index=sig_index,
fastpath=True,
)
if self._callback:
self._callback(group_index + 1, groups_nr)
strings, nonstrings = {}, {}
for col, series in df.items():
if series.dtype.kind == "S":
strings[col] = series
else:
nonstrings[col] = series
df = pd.DataFrame(nonstrings, index=master)
for col, series in strings.items():
df[col] = series
df.index.name = "timestamps"
if time_as_date:
new_index = np.array(df.index) + self.header.start_time.timestamp()
new_index = pd.to_datetime(new_index, unit="s")
df.set_index(new_index, inplace=True)
elif time_from_zero and len(master):
df.set_index(df.index - df.index[0], inplace=True)
yield df
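# Usage sketch (file names are hypothetical): stream the measurement to Parquet
# files while keeping each chunk near 100 MB of RAM.
#
#     for i, frame in enumerate(mdf.iter_to_dataframe(chunk_ram_size=100 * 1024 * 1024)):
#         frame.to_parquet(f"measurement_part_{i}.parquet")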
def to_dataframe(
self,
channels=None,
raster=None,
time_from_zero=True,
empty_channels="skip",
keep_arrays=False,
use_display_names=False,
time_as_date=False,
reduce_memory_usage=False,
raw=False,
ignore_value2text_conversions=False,
use_interpolation=True,
only_basenames=False,
interpolate_outwards_with_nan=False,
):
"""generate pandas DataFrame
Parameters
----------
channels : list
list of items to be filtered (default None); each item can be :
* a channel name string
* (channel name, group index, channel index) list or tuple
* (channel name, group index) list or tuple
* (None, group index, channel index) list or tuple
raster : float | np.array | str
new raster that can be
* a float step value
* a channel name whose timestamps will be used as raster (starting with asammdf 5.5.0)
* an array (starting with asammdf 5.5.0)
see `resample` for examples of using this argument
time_from_zero : bool
adjust time channel to start from 0; default *True*
empty_channels : str
behaviour for channels without samples; the options are *skip* or
*zeros*; default is *skip*
use_display_names : bool
use display name instead of standard channel name, if available.
keep_arrays : bool
keep arrays and structure channels as well as the
component channels. If *True* this can be very slow. If *False*
only the component channels are saved, and their names will be
prefixed with the parent channel.
time_as_date : bool
the dataframe index will contain the datetime timestamps
according to the measurement start time; default *False*. If
*True* then the argument ``time_from_zero`` will be ignored.
reduce_memory_usage : bool
reduce memory usage by converting all float columns to float32 and
searching for minimum dtype that can represent the values found
in integer columns; default *False*
raw (False) : bool
the columns will contain the raw values
.. versionadded:: 5.7.0
ignore_value2text_conversions (False) : bool
valid only for the channels that have value to text conversions and
if *raw=False*. If this is True then the raw numeric values will be
used, and the conversion will not be applied.
.. versionadded:: 5.8.0
use_interpolation (True) : bool
option to perform interpolations when multiple timestamp rasters are
present. If *False* then dataframe columns will be automatically
filled with NaN's where the dataframe index values are not found in
the current column's timestamps
.. versionadded:: 5.11.0
only_basenames (False) : bool
use just the field names, without prefix, for structures and channel
arrays
.. versionadded:: 5.13.0
interpolate_outwards_with_nan : bool
use NaN values for the samples that lie outside of the original
signal's timestamps
.. versionadded:: 5.15.0
Returns
-------
dataframe : pandas.DataFrame
"""
if channels is not None:
mdf = self.filter(channels)
result = mdf.to_dataframe(
raster=raster,
time_from_zero=time_from_zero,
empty_channels=empty_channels,
keep_arrays=keep_arrays,
use_display_names=use_display_names,
time_as_date=time_as_date,
reduce_memory_usage=reduce_memory_usage,
raw=raw,
ignore_value2text_conversions=ignore_value2text_conversions,
use_interpolation=use_interpolation,
only_basenames=only_basenames,
interpolate_outwards_with_nan=interpolate_outwards_with_nan,
)
mdf.close()
return result
df = {}
self._set_temporary_master(None)
if raster is not None:
try:
raster = float(raster)
assert raster > 0
except (TypeError, ValueError):
if isinstance(raster, str):
raster = self.get(raster).timestamps
else:
raster = np.array(raster)
else:
raster = master_using_raster(self, raster)
master = raster
else:
masters = {index: self.get_master(index) for index in self.virtual_groups}
if masters:
master = reduce(np.union1d, masters.values())
else:
master = np.array([], dtype="<f4")
del masters
idx = np.argwhere(np.diff(master, prepend=-np.inf) > 0).flatten()
master = master[idx]
used_names = UniqueDB()
used_names.get_unique_name("timestamps")
groups_nr = len(self.virtual_groups)
for group_index, (virtual_group_index, virtual_group) in enumerate(
self.virtual_groups.items()
):
if virtual_group.cycles_nr == 0 and empty_channels == "skip":
continue
channels = [
(None, gp_index, ch_index)
for gp_index, channel_indexes in self.included_channels(
virtual_group_index
)[virtual_group_index].items()
for ch_index in channel_indexes
if ch_index != self.masters_db.get(gp_index, None)
]
signals = [
signal
for signal in self.select(
channels, raw=True, copy_master=False, validate=False
)
]
if not signals:
continue
group_master = signals[0].timestamps
for sig in signals:
if len(sig) == 0:
if empty_channels == "zeros":
sig.samples = np.zeros(
len(master)
if virtual_group.cycles_nr == 0
else virtual_group.cycles_nr,
dtype=sig.samples.dtype,
)
sig.timestamps = (
master if virtual_group.cycles_nr == 0 else group_master
)
if not raw:
if ignore_value2text_conversions:
for signal in signals:
conversion = signal.conversion
if conversion:
samples = conversion.convert(signal.samples)
if samples.dtype.kind not in "US":
signal.samples = samples
else:
for signal in signals:
if signal.conversion:
signal.samples = signal.conversion.convert(signal.samples)
for s_index, sig in enumerate(signals):
sig = sig.validate(copy=False)
if len(sig) == 0:
if empty_channels == "zeros":
sig.samples = np.zeros(
len(master)
if virtual_group.cycles_nr == 0
else virtual_group.cycles_nr,
dtype=sig.samples.dtype,
)
sig.timestamps = (
master if virtual_group.cycles_nr == 0 else group_master
)
signals[s_index] = sig
if use_interpolation:
same_master = np.array_equal(master, group_master)
if not same_master and interpolate_outwards_with_nan:
idx = np.argwhere(
(master >= group_master[0]) & (master <= group_master[-1])
).flatten()
cycles = len(group_master)
signals = [
signal.interp(master, self._integer_interpolation)
if not same_master or len(signal) != cycles
else signal
for signal in signals
]
if not same_master and interpolate_outwards_with_nan:
for sig in signals:
sig.timestamps = sig.timestamps[idx]
sig.samples = sig.samples[idx]
group_master = master
signals = [sig for sig in signals if len(sig)]
if signals:
diffs = np.diff(group_master, prepend=-np.inf) > 0
if np.all(diffs):
index = pd.Index(group_master, tupleize_cols=False)
else:
idx = np.argwhere(diffs).flatten()
group_master = group_master[idx]
index = pd.Index(group_master, tupleize_cols=False)
for sig in signals:
sig.samples = sig.samples[idx]
sig.timestamps = sig.timestamps[idx]
size = len(index)
for k, sig in enumerate(signals):
sig_index = (
index
if len(sig) == size
else pd.Index(sig.timestamps, tupleize_cols=False)
)
# byte arrays
if len(sig.samples.shape) > 1:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
df[channel_name] = pd.Series(
list(sig.samples),
index=sig_index,
)
# arrays and structures
elif sig.samples.dtype.names:
for name, series in components(
sig.samples,
sig.name,
used_names,
master=sig_index,
only_basenames=only_basenames,
):
df[name] = series
# scalars
else:
if use_display_names:
channel_name = sig.display_name or sig.name
else:
channel_name = sig.name
channel_name = used_names.get_unique_name(channel_name)
if reduce_memory_usage and sig.samples.dtype.kind not in "SU":
sig.samples = downcast(sig.samples)
df[channel_name] = pd.Series(
sig.samples, index=sig_index, fastpath=True
)
if self._callback:
self._callback(group_index + 1, groups_nr)
strings, nonstrings = {}, {}
for col, series in df.items():
if series.dtype.kind == "S":
strings[col] = series
else:
nonstrings[col] = series
df = pd.DataFrame(nonstrings, index=master)
for col, series in strings.items():
df[col] = series
df.index.name = "timestamps"
if time_as_date:
new_index = np.array(df.index) + self.header.start_time.timestamp()
new_index = pd.to_datetime(new_index, unit="s")
df.set_index(new_index, inplace=True)
elif time_from_zero and len(master):
df.set_index(df.index - df.index[0], inplace=True)
return df
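# Usage sketch (the raster value is illustrative): build a single DataFrame on a
# fixed 10 ms raster using display names where available.
#
#     frame = mdf.to_dataframe(raster=0.01, use_display_names=True)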
def extract_bus_logging(
self,
database_files,
version=None,
ignore_invalid_signals=False,
consolidated_j1939=True,
ignore_value2text_conversion=True,
):
"""extract all possible CAN signal using the provided databases.
Changed in version 6.0.0 from `extract_can_logging`
Parameters
----------
database_files : dict
each key will contain an iterable of database files for that bus type. The
supported bus types are "CAN", "LIN". The iterables will contain the
databases as str, pathlib.Path or canmatrix.CanMatrix objects
.. versionchanged:: 6.0.0 added canmatrix.CanMatrix type
version (None) : str
output file version
ignore_invalid_signals (False) : bool
ignore signals that have all samples equal to their maximum value
.. versionadded:: 5.7.0
consolidated_j1939 (True) : bool
handle PGNs from all the messages as a single instance
.. versionadded:: 5.7.0
ignore_value2text_conversion (True): bool
ignore value to text conversions
.. versionadded:: 5.23.0
Returns
-------
mdf : MDF
            new MDF file that contains the successfully extracted signals
Examples
--------
>>> "extrac CAN and LIN bus logging"
>>> mdf = asammdf.MDF(r'bus_logging.mf4')
>>> databases = {
... "CAN": ["file1.dbc", "file2.arxml"],
... "LIN": ["file3.dbc"],
... }
        >>> extracted = mdf.extract_bus_logging(database_files=databases)
>>> ...
>>> "extrac just LIN bus logging"
>>> mdf = asammdf.MDF(r'bus_logging.mf4')
>>> databases = {
... "LIN": ["file3.dbc"],
... }
        >>> extracted = mdf.extract_bus_logging(database_files=databases)
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
out = MDF(
version=version,
encryption_function=self._encryption_function,
decryption_function=self._decryption_function,
callback=self._callback,
)
out.header.start_time = self.header.start_time
if self._callback:
out._callback = out._mdf._callback = self._callback
self.last_call_info = {}
if database_files.get("CAN", None):
out = self._extract_can_logging(
out,
database_files["CAN"],
ignore_invalid_signals,
consolidated_j1939,
ignore_value2text_conversion,
)
if database_files.get("LIN", None):
out = self._extract_lin_logging(
out,
database_files["LIN"],
ignore_invalid_signals,
ignore_value2text_conversion,
)
return out
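    # Hedged usage sketch (file names are placeholders): after extraction, the
    # matching summary stored in ``last_call_info`` can be inspected, e.g.
    #
    #     extracted = mdf.extract_bus_logging({"CAN": ["vehicle.dbc"]})
    #     info = mdf.last_call_info["CAN"]
    #     print(info["found_ids"], info["not_found_ids"], info["unknown_ids"])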
def _extract_can_logging(
self,
output_file,
dbc_files,
ignore_invalid_signals=False,
consolidated_j1939=True,
ignore_value2text_conversion=True,
):
out = output_file
max_flags = []
valid_dbc_files = []
unique_name = UniqueDB()
for dbc_name in dbc_files:
if isinstance(dbc_name, CanMatrix):
valid_dbc_files.append(
(dbc_name, unique_name.get_unique_name("UserProvidedCanMatrix"))
)
else:
dbc = load_can_database(Path(dbc_name))
if dbc is None:
continue
else:
valid_dbc_files.append((dbc, dbc_name))
count = sum(
1
for group in self.groups
if group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT
and group.channel_group.acq_source.bus_type == v4c.BUS_TYPE_CAN
)
count *= len(valid_dbc_files)
cntr = 0
total_unique_ids = set()
found_ids = defaultdict(set)
not_found_ids = defaultdict(list)
unknown_ids = defaultdict(list)
for dbc, dbc_name in valid_dbc_files:
is_j1939 = dbc.contains_j1939
if is_j1939:
messages = {message.arbitration_id.pgn: message for message in dbc}
else:
messages = {message.arbitration_id.id: message for message in dbc}
current_not_found_ids = {
(msg_id, message.name) for msg_id, message in messages.items()
}
msg_map = {}
for i, group in enumerate(self.groups):
if (
not group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT
or not group.channel_group.acq_source.bus_type == v4c.BUS_TYPE_CAN
or not "CAN_DataFrame" in [ch.name for ch in group.channels]
):
continue
parents, dtypes = self._prepare_record(group)
data = self._load_data(group, optimize_read=False)
for fragment_index, fragment in enumerate(data):
if dtypes.itemsize:
group.record = np.core.records.fromstring(
fragment[0], dtype=dtypes
)
else:
group.record = None
continue
self._set_temporary_master(None)
self._set_temporary_master(self.get_master(i, data=fragment))
bus_ids = self.get(
"CAN_DataFrame.BusChannel",
group=i,
data=fragment,
samples_only=True,
)[0].astype("<u1")
msg_ids = (
self.get("CAN_DataFrame.ID", group=i, data=fragment).astype(
"<u4"
)
& 0x1FFFFFFF
)
original_ids = msg_ids.samples.copy()
if is_j1939:
ps = (msg_ids.samples >> 8) & 0xFF
pf = (msg_ids.samples >> 16) & 0xFF
_pgn = pf << 8
msg_ids.samples = np.where(pf >= 240, _pgn + ps, _pgn)
data_bytes = self.get(
"CAN_DataFrame.DataBytes",
group=i,
data=fragment,
samples_only=True,
)[0]
buses = np.unique(bus_ids)
for bus in buses:
idx = np.argwhere(bus_ids == bus).ravel()
bus_t = msg_ids.timestamps[idx]
bus_msg_ids = msg_ids.samples[idx]
bus_data_bytes = data_bytes[idx]
original_msg_ids = original_ids[idx]
if is_j1939 and not consolidated_j1939:
unique_ids = np.unique(
                                # use the per-bus slice so both record arrays have the same length
                                np.core.records.fromarrays([bus_msg_ids, original_msg_ids])
)
else:
unique_ids = np.unique(
np.core.records.fromarrays([bus_msg_ids, bus_msg_ids])
)
total_unique_ids = total_unique_ids | set(
tuple(int(e) for e in f) for f in unique_ids
)
for msg_id_record in unique_ids:
msg_id = int(msg_id_record[0])
original_msg_id = int(msg_id_record[1])
message = messages.get(msg_id, None)
if message is None:
unknown_ids[msg_id].append(True)
continue
found_ids[dbc_name].add((msg_id, message.name))
try:
current_not_found_ids.remove((msg_id, message.name))
except KeyError:
pass
unknown_ids[msg_id].append(False)
if is_j1939 and not consolidated_j1939:
idx = np.argwhere(
(bus_msg_ids == msg_id)
& (original_msg_ids == original_msg_id)
).ravel()
else:
idx = np.argwhere(bus_msg_ids == msg_id).ravel()
payload = bus_data_bytes[idx]
t = bus_t[idx]
extracted_signals = extract_mux(
payload,
message,
msg_id,
bus,
t,
original_message_id=original_msg_id
if is_j1939 and not consolidated_j1939
else None,
ignore_value2text_conversion=ignore_value2text_conversion,
)
for entry, signals in extracted_signals.items():
if len(next(iter(signals.values()))["samples"]) == 0:
continue
if entry not in msg_map:
sigs = []
index = len(out.groups)
msg_map[entry] = index
for name_, signal in signals.items():
sig = Signal(
samples=signal["samples"],
timestamps=signal["t"],
name=signal["name"],
comment=signal["comment"],
unit=signal["unit"],
invalidation_bits=signal[
"invalidation_bits"
]
if ignore_invalid_signals
else None,
)
sig.comment = f"""\
<CNcomment>
<TX>{sig.comment}</TX>
<names>
<display>
CAN{bus}.{message.name}.{signal['name']}
</display>
</names>
</CNcomment>"""
sigs.append(sig)
cg_nr = out.append(
sigs,
acq_name=f"from CAN{bus} message ID=0x{msg_id:X}",
comment=f"{message} 0x{msg_id:X}",
common_timebase=True,
)
if ignore_invalid_signals:
max_flags.append([False])
for ch_index, sig in enumerate(sigs, 1):
max_flags[cg_nr].append(
np.all(sig.invalidation_bits)
)
else:
index = msg_map[entry]
sigs = []
for name_, signal in signals.items():
sigs.append(
(
signal["samples"],
signal["invalidation_bits"]
if ignore_invalid_signals
else None,
)
)
t = signal["t"]
if ignore_invalid_signals:
for ch_index, sig in enumerate(sigs, 1):
max_flags[index][ch_index] = max_flags[
index
][ch_index] or np.all(sig[1])
sigs.insert(0, (t, None))
out.extend(index, sigs)
self._set_temporary_master(None)
group.record = None
cntr += 1
if self._callback:
self._callback(cntr, count)
if current_not_found_ids:
not_found_ids[dbc_name] = list(current_not_found_ids)
unknown_ids = {
msg_id for msg_id, not_found in unknown_ids.items() if all(not_found)
}
self.last_call_info["CAN"] = {
"dbc_files": dbc_files,
"total_unique_ids": total_unique_ids,
"unknown_id_count": len(unknown_ids),
"not_found_ids": not_found_ids,
"found_ids": found_ids,
"unknown_ids": unknown_ids,
}
if ignore_invalid_signals:
to_keep = []
for i, group in enumerate(out.groups):
for j, channel in enumerate(group.channels[1:], 1):
if not max_flags[i][j]:
to_keep.append((None, i, j))
tmp = out.filter(to_keep, out.version)
out.close()
out = tmp
if self._callback:
self._callback(100, 100)
if not out.groups:
logger.warning(
                f'No CAN signals could be extracted from "{self.name}". The '
                "output file will be empty."
)
return out
def _extract_lin_logging(
self,
output_file,
dbc_files,
ignore_invalid_signals=False,
ignore_value2text_conversion=True,
):
out = output_file
max_flags = []
valid_dbc_files = []
unique_name = UniqueDB()
for dbc_name in dbc_files:
if isinstance(dbc_name, CanMatrix):
valid_dbc_files.append(
(dbc_name, unique_name.get_unique_name("UserProvidedCanMatrix"))
)
else:
dbc = load_can_database(Path(dbc_name))
if dbc is None:
continue
else:
valid_dbc_files.append((dbc, dbc_name))
count = sum(
1
for group in self.groups
if group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT
and group.channel_group.acq_source.bus_type == v4c.BUS_TYPE_LIN
)
count *= len(valid_dbc_files)
cntr = 0
total_unique_ids = set()
found_ids = defaultdict(set)
not_found_ids = defaultdict(list)
unknown_ids = defaultdict(list)
for dbc, dbc_name in valid_dbc_files:
messages = {message.arbitration_id.id: message for message in dbc}
current_not_found_ids = {
(msg_id, message.name) for msg_id, message in messages.items()
}
msg_map = {}
for i, group in enumerate(self.groups):
if (
not group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT
or not group.channel_group.acq_source.bus_type == v4c.BUS_TYPE_LIN
or not "LIN_Frame" in [ch.name for ch in group.channels]
):
continue
parents, dtypes = self._prepare_record(group)
data = self._load_data(group, optimize_read=False)
for fragment_index, fragment in enumerate(data):
if dtypes.itemsize:
group.record = np.core.records.fromstring(
fragment[0], dtype=dtypes
)
else:
group.record = None
continue
self._set_temporary_master(None)
self._set_temporary_master(self.get_master(i, data=fragment))
msg_ids = (
self.get("LIN_Frame.ID", group=i, data=fragment).astype("<u4")
& 0x1FFFFFFF
)
original_ids = msg_ids.samples.copy()
data_bytes = self.get(
"LIN_Frame.DataBytes",
group=i,
data=fragment,
samples_only=True,
)[0]
bus_t = msg_ids.timestamps
bus_msg_ids = msg_ids.samples
bus_data_bytes = data_bytes
original_msg_ids = original_ids
unique_ids = np.unique(
np.core.records.fromarrays([bus_msg_ids, bus_msg_ids])
)
total_unique_ids = total_unique_ids | set(
tuple(int(e) for e in f) for f in unique_ids
)
for msg_id_record in unique_ids:
msg_id = int(msg_id_record[0])
original_msg_id = int(msg_id_record[1])
message = messages.get(msg_id, None)
if message is None:
unknown_ids[msg_id].append(True)
continue
found_ids[dbc_name].add((msg_id, message.name))
try:
current_not_found_ids.remove((msg_id, message.name))
except KeyError:
pass
unknown_ids[msg_id].append(False)
idx = np.argwhere(bus_msg_ids == msg_id).ravel()
payload = bus_data_bytes[idx]
t = bus_t[idx]
extracted_signals = extract_mux(
payload,
message,
msg_id,
0,
t,
original_message_id=None,
ignore_value2text_conversion=ignore_value2text_conversion,
)
for entry, signals in extracted_signals.items():
if len(next(iter(signals.values()))["samples"]) == 0:
continue
if entry not in msg_map:
sigs = []
index = len(out.groups)
msg_map[entry] = index
for name_, signal in signals.items():
sig = Signal(
samples=signal["samples"],
timestamps=signal["t"],
name=signal["name"],
comment=signal["comment"],
unit=signal["unit"],
invalidation_bits=signal["invalidation_bits"]
if ignore_invalid_signals
else None,
)
sig.comment = f"""\
<CNcomment>
<TX>{sig.comment}</TX>
<names>
<display>
LIN.{message.name}.{signal['name']}
</display>
</names>
</CNcomment>"""
sigs.append(sig)
cg_nr = out.append(
sigs,
acq_name=f"from LIN message ID=0x{msg_id:X}",
comment=f"{message} 0x{msg_id:X}",
common_timebase=True,
)
if ignore_invalid_signals:
max_flags.append([False])
for ch_index, sig in enumerate(sigs, 1):
max_flags[cg_nr].append(
np.all(sig.invalidation_bits)
)
else:
index = msg_map[entry]
sigs = []
for name_, signal in signals.items():
sigs.append(
(
signal["samples"],
signal["invalidation_bits"]
if ignore_invalid_signals
else None,
)
)
t = signal["t"]
if ignore_invalid_signals:
for ch_index, sig in enumerate(sigs, 1):
max_flags[index][ch_index] = max_flags[index][
ch_index
] or np.all(sig[1])
sigs.insert(0, (t, None))
out.extend(index, sigs)
self._set_temporary_master(None)
group.record = None
cntr += 1
if self._callback:
self._callback(cntr, count)
if current_not_found_ids:
not_found_ids[dbc_name] = list(current_not_found_ids)
unknown_ids = {
msg_id for msg_id, not_found in unknown_ids.items() if all(not_found)
}
self.last_call_info["LIN"] = {
"dbc_files": dbc_files,
"total_unique_ids": total_unique_ids,
"unknown_id_count": len(unknown_ids),
"not_found_ids": not_found_ids,
"found_ids": found_ids,
"unknown_ids": unknown_ids,
}
if ignore_invalid_signals:
to_keep = []
for i, group in enumerate(out.groups):
for j, channel in enumerate(group.channels[1:], 1):
if not max_flags[i][j]:
to_keep.append((None, i, j))
tmp = out.filter(to_keep, out.version)
out.close()
out = tmp
if self._callback:
self._callback(100, 100)
if not out.groups:
logger.warning(
                f'No LIN signals could be extracted from "{self.name}". The '
                "output file will be empty."
)
return out
@property
def start_time(self):
"""getter and setter the measurement start timestamp
Returns
-------
timestamp : datetime.datetime
start timestamp
"""
return self.header.start_time
@start_time.setter
def start_time(self, timestamp):
self.header.start_time = timestamp
def cleanup_timestamps(
self, minimum, maximum, exp_min=-15, exp_max=15, version=None
):
"""convert *MDF* to other version
.. versionadded:: 5.22.0
Parameters
----------
minimum : float
minimum plausible time stamp
maximum : float
maximum plausible time stamp
exp_min (-15) : int
minimum plausible exponent used for the time stamps float values
exp_max (15) : int
maximum plausible exponent used for the time stamps float values
version : str
new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
'3.20', '3.30', '4.00', '4.10', '4.11', '4.20'); default the same as
the input file
Returns
-------
out : MDF
new *MDF* object
"""
if version is None:
version = self.version
else:
version = validate_version_argument(version)
out = MDF(version=version)
out.header.start_time = self.header.start_time
groups_nr = len(self.virtual_groups)
if self._callback:
self._callback(0, groups_nr)
cg_nr = None
self.configure(copy_on_get=False)
# walk through all groups and get all channels
for i, virtual_group in enumerate(self.virtual_groups):
for idx, sigs in enumerate(
self._yield_selected_signals(virtual_group, version=version)
):
if idx == 0:
if sigs:
t = sigs[0].timestamps
if len(t):
all_ok, idx = plausible_timestamps(
t, minimum, maximum, exp_min, exp_max
)
if not all_ok:
t = t[idx]
if len(t):
for sig in sigs:
sig.samples = sig.samples[idx]
sig.timestamps = t
if sig.invalidation_bits is not None:
sig.invalidation_bits = (
sig.invalidation_bits[idx]
)
cg = self.groups[virtual_group].channel_group
cg_nr = out.append(
sigs,
acq_name=getattr(cg, "acq_name", None),
acq_source=getattr(cg, "acq_source", None),
comment=f"Timestamps cleaned up and converted from {self.version} to {version}",
common_timebase=True,
)
else:
break
else:
t, _ = sigs[0]
if len(t):
all_ok, idx = plausible_timestamps(
t, minimum, maximum, exp_min, exp_max
)
if not all_ok:
t = t[idx]
if len(t):
for i, (samples, invalidation_bits) in enumerate(sigs):
if invalidation_bits is not None:
invalidation_bits = invalidation_bits[idx]
samples = samples[idx]
sigs[i] = (samples, invalidation_bits)
out.extend(cg_nr, sigs)
if self._callback:
self._callback(i + 1, groups_nr)
if self._terminate:
return
out._transfer_metadata(self)
self.configure(copy_on_get=True)
if self._callback:
out._callback = out._mdf._callback = self._callback
return out
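    # Hedged usage sketch (the bounds are placeholders): keep only samples whose
    # time stamps fall in a plausible window, converting to 4.10 on the way.
    #
    #     cleaned = mdf.cleanup_timestamps(minimum=0.0, maximum=10 * 3600,
    #                                      version="4.10")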
def whereis(self, channel, source_name=None, source_path=None):
"""get ocurrences of channel name in the file
Parameters
----------
channel : str
channel name string
source_name (None) : str
filter occurrences using source name
source_path (None) : str
filter occurrences using source path
.. versionadded:: 6.0.0
Returns
-------
        occurrences : tuple
Examples
--------
>>> mdf = MDF(file_name)
>>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file
((1, 2), (2, 4))
>>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file
()
"""
try:
occurrences = self._filter_occurrences(
self.channels_db[channel], source_name=source_name, source_path=source_path
)
        except KeyError:
occurrences = tuple()
return tuple(occurrences)
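    # Hedged sketch (channel and source names are placeholders): occurrences can
    # also be narrowed down by acquisition source, e.g.
    #
    #     mdf.whereis("VehicleSpeed", source_name="CAN1")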
if __name__ == "__main__":
pass
| lgpl-3.0 | 1,954,601,776,935,667,000 | 35.478159 | 139 | 0.448425 | false |
nathandaddio/puzzle_app | puzzle_app/puzzle_app/engine_api/hitori_engine_api.py | 1 | 1556 | from collections import namedtuple
import transaction
from marshmallow import (
fields,
Schema,
post_load
)
from puzzle_app.schemas.hitori import (
HitoriGameBoardCellSchema,
HitoriGameBoardSchema
)
from puzzle_app.models import (
db_session_maker
)
from puzzle_app.models.hitori import (
HitoriGameBoardCell,
HitoriGameBoard,
HitoriSolve,
HITORI_SOLVE_STATUS
)
def make_hitori_engine_data(hitori_game_board_id):
db_session = db_session_maker()
hitori_game_board = db_session.query(HitoriGameBoard).get(hitori_game_board_id)
return HitoriGameBoardSchema(strict=True).dump(hitori_game_board).data
HitoriSolution = namedtuple('HitoriSolution', ['board', 'cells_on', 'cells_off', 'feasible'])
def read_hitori_engine_data(hitori_engine_solution):
solution = HitoriSolution(**hitori_engine_solution)
with transaction.manager:
db_session = db_session_maker()
board = db_session.query(HitoriGameBoard).get(solution.board)
board.solved = True
board.feasible = solution.feasible
for cell_id in solution.cells_on:
db_session.query(HitoriGameBoardCell).get(cell_id).included_in_solution = True
for cell_id in solution.cells_off:
db_session.query(HitoriGameBoardCell).get(cell_id).included_in_solution = False
def update_hitori_solve_status(solve_id, status):
with transaction.manager:
db_session = db_session_maker()
db_session.query(HitoriSolve).get(solve_id).status = HITORI_SOLVE_STATUS[status]
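# Hedged usage sketch (``run_engine`` and the 'SOLVING' status key are assumed,
# not defined in this module): the helpers above are intended to be chained
# around an external solver run.
#
#     engine_input = make_hitori_engine_data(board_id)   # serialise the board
#     update_hitori_solve_status(solve_id, 'SOLVING')     # assumed status key
#     solution = run_engine(engine_input)                 # external solver call
#     read_hitori_engine_data(solution)                   # persist the result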
| mit | -4,959,705,911,207,896,000 | 26.298246 | 93 | 0.710797 | false |
danielrichman/snowball-ticketing | snowball_ticketing/tickets/__init__.py | 1 | 25949 | # Copyright 2013 Daniel Richman
#
# This file is part of The Snowball Ticketing System.
#
# The Snowball Ticketing System is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# The Snowball Ticketing System is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with The Snowball Ticketing System. If not, see
# <http://www.gnu.org/licenses/>.
"""tickets - tickets table manipulation functions"""
from __future__ import unicode_literals
import re
from datetime import datetime
import flask
from werkzeug.datastructures import ImmutableDict
from .. import utils, queries
__all__ = ["available", "quotas_per_person_sentence", "none_min",
"counts", "prices", "settings", "tickets",
"BuyFailed", "InsufficientSpare", "QPPAnyMet", "QPPTypeMet",
"FormRace", "IncorrectMode", "QuotaMet", "QuotaNotMet",
"WaitingQuotaMet",
"buy", "buy_lock", "user_pg_lock", "set_quota_met",
"FinaliseRace", "AlreadyFinalised", "NonExistantTicket",
"finalise", "outstanding_balance",
"mark_paid", "purge_unpaid", "waiting_release"]
logger = utils.getLogger("snowball_ticketing.tickets")
buy_pg_lock_num = (0x123abc, 100)
user_pg_lock_num = 0x124000
_null_settings_row = \
ImmutableDict({"quota": None, "quota_met": None,
"waiting_quota": None, "waiting_quota_met": None,
"waiting_smallquota": None, "quota_per_person": None,
"mode": 'available'})
def available(ticket_type, user_group=None, pg=utils.postgres):
"""
Find out whether `user_group` can buy `ticket_type` tickets
Returns a dict, with the following keys:
* `mode`: one of ``not-yet-open``, ``available`` or ``closed``
* `spare`: how many more can be bought
* `waiting_spare`: how many can be added to the waiting list
* `waiting_small`: Is `waiting count` < ``waiting_smallquota``?
* `qpp_any`: The smallest ``quota_per_person`` applying to any ticket type
* `qpp_type`: The smallest ``quota_per_person`` applying to this ticket type
The last five keys may be ``None``, corresponding to "no limit".
"""
if user_group is None:
user_group = _user_group_from_user(flask.session["user"])
s = settings(pg=pg)
c = counts(pg=pg)
result = {}
overall_mode = 'available'
overall_spare = None
overall_quota_met = None
overall_waiting_spare = None
overall_waiting_quota_met = None
overall_qpp_type = None
overall_qpp_any = None
waiting_small = None
for test in _test_keys(user_group, ticket_type):
limits = s.get(test, _null_settings_row)
mode = limits["mode"]
quota = limits["quota"]
quota_met = limits["quota_met"]
count = c["{0}_{1}".format(*test)]
waiting_quota = limits["waiting_quota"]
waiting_quota_met = limits["waiting_quota_met"]
waiting_smallquota = limits["waiting_smallquota"]
waiting_count = c["waiting_{0}_{1}".format(*test)]
overall_mode = _mode_precedence(overall_mode, mode)
if quota is not None:
spare = quota - count
if spare <= 0 and not quota_met:
logger.warning("quota_met should be set: %r", test)
quota_met = True
overall_quota_met = overall_quota_met or quota_met
else:
spare = None
if waiting_quota is not None:
waiting_spare = waiting_quota - waiting_count
if waiting_spare <= 0 and not waiting_quota_met:
logger.warning("waiting_quota_met should be set: %r", test)
waiting_quota_met = True
overall_waiting_quota_met = \
overall_waiting_quota_met or waiting_quota_met
else:
waiting_spare = None
overall_spare = none_min(overall_spare, spare)
overall_waiting_spare = none_min(overall_waiting_spare, waiting_spare)
waiting_small = \
_waiting_smallquota_test(waiting_small, waiting_smallquota,
waiting_count)
qpp = limits["quota_per_person"]
if test[1] == 'any':
overall_qpp_any = none_min(overall_qpp_any, qpp)
else:
overall_qpp_type = none_min(overall_qpp_type, qpp)
return {"mode": overall_mode,
"spare": overall_spare,
"quota_met": overall_quota_met,
"waiting_spare": overall_waiting_spare,
"waiting_quota_met": overall_waiting_quota_met,
"waiting_small": waiting_small,
"qpp_any": overall_qpp_any,
"qpp_type": overall_qpp_type}
def _test_keys(user_group, ticket_type):
return ((user_group, ticket_type), (user_group, "any"),
("all", ticket_type), ("all", "any"))
def _mode_precedence(a, b):
"""
return the 'largest' of `a` and `b`
order: closed > not-yet-open > available
"""
modes = ('closed', 'not-yet-open', 'available')
assert a in modes and b in modes
for o in modes:
if a == o or b == o:
return o
else:
raise AssertionError
def _waiting_smallquota_test(old_value, quota, waiting_count):
if old_value is False:
# another rule's smallquota was not met,
# which takes precedence (think Order allow, deny)
return False
if quota is None:
# no smallquota for this row
# None -> None; True -> True
return old_value
if waiting_count < quota:
# (None, True) -> True
return True
else:
# (None, True) -> False
return False
def quotas_per_person_sentence(user_group=None, pg=utils.postgres):
"""
Get the quotas-per-person sentence for `user_group` (or the current user)
"""
if user_group is None:
user_group = _user_group_from_user(flask.session["user"])
s = settings(pg=pg)
for g in (user_group, 'all'):
p = s.get((g, 'any'), {}).get('quota_per_person_sentence')
if p is not None:
return p
else:
raise AssertionError("qpp_sentence not set for {0}".format(user_group))
def mode_sentence(pg=utils.postgres):
"""Get the "mode sentence" (describes availability)"""
return settings(pg=pg).get(('all', 'any'), {}).get("mode_sentence")
def none_min(a, b):
"""Return min(a, b), treating ``None`` as infinity"""
if a is None:
return b
if b is None:
return a
else:
return min(a, b)
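# For example (``None`` means "no limit"): none_min(None, 3) == 3,
# none_min(2, None) == 2 and none_min(2, 3) == 2.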
def counts(drop_cache=False, pg=utils.postgres):
"""
Counts various types in the tickets table
Will used a cached result if `drop_cache` is ``False`` (and there is one
available)
Returns a dict, with keys
``{,waiting_}{all,members,alumni}_{any,standard,vip}`` and values being the
respective counts
"""
if not drop_cache and flask.current_app and \
hasattr(flask.g, '_snowball_tickets_counts'):
return flask.g._snowball_tickets_counts
with pg.cursor(True) as cur:
cur.execute(queries.unexpired_counts)
c = cur.fetchone()
if flask.current_app:
flask.g._snowball_tickets_counts = c
return c
def prices(user_group=None, pg=utils.postgres):
"""
Gets ticket prices for `user_group` (or the current user)
Returns a dict, ``{'standard': N, 'vip': N}``.
"""
if user_group is None:
user_group = _user_group_from_user(flask.session["user"])
s = settings(pg=pg)
r = {}
for type in ('standard', 'vip'):
for key in ((user_group, type), ('all', type),
(user_group, 'any'), ('all', 'any')):
p = s.get(key, {}).get('price')
if p is not None:
r[type] = p
break
else:
raise AssertionError("unset price for {0}"
.format((user_group, type)))
return r
def _user_group_from_user(user):
"""Return the user group for the user row `user`"""
pt = user["person_type"]
assert pt != 'non-cam'
if pt == 'alumnus':
return 'alumni'
else:
return 'members'
def settings(pg=utils.postgres):
"""Retrieve (with caching) the rows of the tickets_settings table"""
if flask.current_app and hasattr(flask.g, '_snowball_tickets_settings'):
return flask.g._snowball_tickets_settings
key = ("who", "what")
s = {}
with pg.cursor(True) as cur:
cur.execute("SELECT * FROM tickets_settings")
for row in cur:
row_key = tuple([row[k] for k in key])
s[row_key] = row
if flask.current_app:
flask.g._snowball_tickets_settings = s
return s
def tickets(user_id=None, vip=None, waiting_list=None, quota_exempt=None,
finalised=None, paid=None, entered_ball=None, expired=None,
pg=utils.postgres):
"""
Return tickets for a user `user_id`, with filtering.
    For each of `finalised`, `paid` and `entered_ball`: if the argument is
    ``True`` then that timestamp must be non-null; if ``False`` it must be
    null; if ``None`` no condition is applied.
    For `expired`, ``False`` demands the expires column be null or in the
    future; ``True`` requires a non-null timestamp in the past.
If `user_id` is ``None``, the current session's user is used.
"""
if user_id is None:
user_id = flask.session["user_id"]
cond1 = _nully_conditions(finalised=finalised, paid=paid,
entered_ball=entered_ball)
cond2 = _booly_conditions(vip=vip, waiting_list=waiting_list,
quota_exempt=quota_exempt)
if expired is True:
expires_cond = "(expires IS NOT NULL AND expires <= utcnow())"
elif expired is False:
expires_cond = "(expires IS NULL OR expires > utcnow())"
else:
expires_cond = "TRUE"
cond = " AND ".join(["user_id = %s", expires_cond, cond1, cond2])
query = "SELECT * FROM tickets WHERE " + cond
with pg.cursor(True) as cur:
cur.execute(query, (user_id, ))
return cur.fetchall()
def _nully_conditions(**kwargs):
"""
Generate IS [ NOT ] NULL conditions for the keys in `kwargs`
Several columns in the tickets table are timestamps,
where their being non-null indicates that something has happened;
for example, `tickets.finalised` being non-null means that a ticket
is finalised.
For each key, where `value` is ``kwargs[key]``:
    * If `value` is ``None``, no condition is generated - i.e., "don't care" or
      "all"
    * If `value` is ``True``, produces the condition "{name} IS NOT NULL"
    * If `value` is ``False``, produces the condition "{name} IS NULL"
The conditions are joined with ``AND`` and wrapped in parentheses
"""
conditions = []
for key, value in kwargs.items():
if value is True:
conditions.append(key + " IS NOT NULL")
elif value is False:
conditions.append(key + " IS NULL")
if not conditions:
return "TRUE"
else:
return "(" + ' AND '.join(conditions) + ")"
def _booly_conditions(**kwargs):
"""
Generate conditions for the keys in `kwargs`
For each key, where `value` is ``kwargs[key]``:
    * If `value` is ``None``, no condition is generated - i.e., "don't care" or
      "all"
    * If `value` is ``True``, produces the condition "{name}"
    * If `value` is ``False``, produces the condition "NOT {name}"
The conditions are joined with ``AND`` and wrapped in parentheses
"""
conditions = []
for key, value in kwargs.items():
if value is True:
conditions.append(key)
elif value is False:
conditions.append("NOT " + key)
if not conditions:
return "TRUE"
else:
return "(" + ' AND '.join(conditions) + ")"
class BuyFailed(Exception):
"""A call to :func:`buy` failed"""
class InsufficientSpare(BuyFailed):
"""Insufficient spare tickets"""
class QPPAnyMet(BuyFailed):
"""Quota per person limit (any ticket type) met"""
class QPPTypeMet(BuyFailed):
"""Quota per person limit (specific ticket type) met"""
class FormRace(BuyFailed):
"""Between displaying options and submitting things changed"""
class IncorrectMode(FormRace):
"""Tickets are not ``available``"""
class QuotaMet(FormRace):
"""The quota has been met"""
class QuotaNotMet(FormRace):
"""The quota has not been met (and so you may not join the waiting list)"""
class WaitingQuotaMet(QuotaMet):
"""The waiting quota has been met"""
def buy(ticket_type, waiting_list, number,
user=None, quota_exempt=False, pg=utils.postgres):
"""Buy `user` `number` `ticket_type` tickets / add to waiting list."""
if user is None:
user = flask.session["user"]
user_id = user["user_id"]
user_group = _user_group_from_user(user)
if not waiting_list:
verb = ('buying', '')
else:
verb = ('adding', ' to the waiting list')
log_prefix = "{0} {1} {2} tickets for user {3} ({4}){5}" \
.format(verb[0], number, ticket_type, user_id, user_group, verb[1])
# automatically released, recursive
buy_lock(pg=pg)
user_pg_lock(user["user_id"], pg=pg)
vip = ticket_type == 'vip'
# force a re-count having acquired the lock
ticket_counts = counts(drop_cache=True, pg=pg)
if not quota_exempt:
avail = available(ticket_type, user_group=user_group, pg=pg)
qpp_any_count = 0
qpp_type_count = 0
for ticket in tickets(user_id=user_id, expired=False,
quota_exempt=False, pg=pg):
qpp_any_count += 1
if ticket["vip"] == vip:
qpp_type_count += 1
qpp_any = avail["qpp_any"]
qpp_type = avail["qpp_type"]
if qpp_any is not None:
if qpp_any < qpp_any_count + number:
raise QPPAnyMet
if qpp_type is not None:
if qpp_type < qpp_type_count + number:
raise QPPTypeMet
if avail["mode"] != "available":
logger.info("%s: not available (form race)", log_prefix)
raise IncorrectMode
if waiting_list and not avail["quota_met"]:
logger.info("%s: wanted waiting list but quota not met (form race)",
log_prefix)
raise QuotaNotMet
if not waiting_list:
quota_met = avail["quota_met"]
spare = avail["spare"]
else:
quota_met = avail["waiting_quota_met"]
spare = avail["waiting_spare"]
if quota_met:
logger.info("%s: quota met (form race)", log_prefix)
if waiting_list:
raise WaitingQuotaMet
else:
raise QuotaMet
if spare is not None and spare < number:
logger.info("%s: insufficient spare (%s < %s)", log_prefix,
spare, number)
set_quota_met(user_group, ticket_type, waiting_list, number, pg=pg)
raise InsufficientSpare
elif spare == number:
logger.info("%s: exactly met quota", log_prefix)
set_quota_met(user_group, ticket_type, waiting_list, number, pg=pg)
# else... OK. Make some tickets
query = "INSERT INTO tickets (user_id, vip, waiting_list, price, "\
" created, expires, expires_reason, quota_exempt) "
values_row = "(%(user_id)s, %(vip)s, %(waiting_list)s, %(price)s, " \
"utcnow(), utcnow() + '10 minutes'::interval, " \
"'not-finalised', %(quota_exempt)s)"
values = ', '.join([values_row] * number)
query += "VALUES " + values + " RETURNING ticket_id"
price = prices(user_group=user_group, pg=pg)[ticket_type]
args = {"user_id": user_id, "vip": vip, "waiting_list": waiting_list,
"price": price, "quota_exempt": quota_exempt}
# if :func:`counts` is cached on flask.g, this will update it
if waiting_list:
p = "waiting_{0}_{1}"
else:
p = "{0}_{1}"
for test in _test_keys(user_group, ticket_type):
ticket_counts[p.format(*test)] += number
with pg.cursor() as cur:
cur.execute(query, args)
ids = [r[0] for r in cur]
logger.info("%s: inserted tickets %r", log_prefix, ids)
return ids
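# Hedged sketch (numbers are illustrative): a caller is expected to fall back to
# the waiting list once the quota is met, then finalise() before expiry.
#
#     try:
#         ticket_ids = buy('vip', waiting_list=False, number=1)
#     except QuotaMet:
#         ticket_ids = buy('vip', waiting_list=True, number=1)
#     except BuyFailed:
#         ticket_ids = []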
def buy_lock(pg=utils.postgres):
"""Take out the transaction level postgres advisory lock for buying"""
logger.debug("Acquiring buy lock")
with pg.cursor() as cur:
cur.execute("SELECT pg_advisory_xact_lock(%s, %s)", buy_pg_lock_num)
logger.debug("buy lock acquired")
def user_pg_lock(user_id, pg=utils.postgres):
"""
Take out the transaction level postgres advisory lock for `user_id`
This lock is acquired by :func:`buy`, :func:`finalise` and
:func:`receipt.send_update`.
"""
logger.debug("Acquiring user %s lock", user_id)
with pg.cursor() as cur:
cur.execute("SELECT pg_advisory_xact_lock(%s, %s)",
(user_pg_lock_num, user_id))
logger.debug("user %s lock acquired", user_id)
def set_quota_met(user_group, ticket_type, waiting_list, number,
pg=utils.postgres):
"""
Take action when a quota has been met.
Suppose:
* person A buys the last 4 tickets
* person B then has to go on the waiting list
* ten minutes later, A has not finalised their tickets and so they expire
    * person C could now come and buy those 4 tickets, jumping B in the queue
Hence: as soon as we `meet` the quota, change the mode.
Note: this is not as soon as we go `over` the quota - hitting it exactly
counts, since as soon as the quota is met the text will show
'waiting list' on the buy buttons.
"""
s = settings(pg=pg)
c = counts(pg=pg)
rows_quota_met = []
rows_waiting_quota_met = []
for test in _test_keys(user_group, ticket_type):
limits = s.get(test, _null_settings_row)
assert limits["mode"] == 'available'
just_set_quota_met = False
if not waiting_list:
quota = limits["quota"]
count = c["{0}_{1}".format(*test)]
if quota is not None and not limits["quota_met"] and \
count + number >= quota:
logger.warning("quota met: %r", test)
rows_quota_met.append(test)
# if `s` came from or was saved to the cache on flask.g,
# this will update it.
s.get(test, {})["quota_met"] = True
# now check if the waiting quota has been met; it could happen
# instantly
just_set_quota_met = True
if waiting_list or just_set_quota_met:
quota = limits["waiting_quota"]
count = c["waiting_{0}_{1}".format(*test)]
if just_set_quota_met:
number2 = 0
else:
number2 = number
if quota is not None and not limits["waiting_quota_met"] and \
count + number2 >= quota:
logger.warning("waiting quota met: %r", test)
rows_waiting_quota_met.append(test)
s.get(test, {})["waiting_quota_met"] = True
if rows_quota_met:
logger.info("set_quota_met: setting quota_met on rows %r", rows_quota_met)
with pg.cursor() as cur:
cur.execute("UPDATE tickets_settings SET quota_met = TRUE "
"WHERE (who, what) IN %s", (tuple(rows_quota_met), ))
if rows_waiting_quota_met:
logger.info("set_quota_met: setting waiting_quota_met on rows %r",
rows_waiting_quota_met)
with pg.cursor() as cur:
cur.execute("UPDATE tickets_settings SET waiting_quota_met = TRUE "
"WHERE (who, what) IN %s",
(tuple(rows_waiting_quota_met), ))
class FinaliseRace(Exception):
"""A race condition occured in :func:`finalise`"""
class AlreadyFinalised(FinaliseRace):
"""
The ticket was already finalised
.. attribute:: new_ticket
The ticket, as it now exists in the database finalised.
"""
def __init__(self, new_ticket):
self.new_ticket = new_ticket
class NonExistantTicket(FinaliseRace):
"""The ticket does not exist"""
def finalise(ticket, update, pg=utils.postgres):
"""
Finalise a ticket
Essentially does this::
ticket["finalised"] = datetime.utcnow()
ticket.update(update)
But also updates the corresponding database row, and will avoid a race.
Does not check if the ticket is expired. Loading the tickets at the start
of :func:`snowball_ticketing.tickets.views.details` should not race against
the ticket expiring since ``utcnow()`` remains constant in a transaction.
If the ticket is deleted between loading and updating,
:class:`NonExistantTicket` will be raised and `ticket` won't be modified.
If the ticket was already finalised, :class:`AlreadyFinalised` is raised.
"""
assert ticket["finalised"] is None
assert set(update) <= {"person_type", "surname", "othernames",
"college_id", "matriculation_year"}
logger.debug("finalising ticket %s", ticket["ticket_id"])
# protects against races with receipt
user_pg_lock(ticket["user_id"], pg=pg)
update = update.copy()
# special case finalise to use utcnow()
update["expires"] = None
update["expires_reason"] = None
sets = ', '.join("{0} = %({0})s".format(key) for key in update)
query1 = "UPDATE tickets " \
"SET finalised = utcnow(), " + sets + " " \
"WHERE ticket_id = %(ticket_id)s AND finalised IS NULL " \
"RETURNING *"
query2 = "SELECT * FROM tickets WHERE ticket_id = %s"
args = update
args["ticket_id"] = ticket["ticket_id"]
with pg.cursor(True) as cur:
cur.execute(query1, args)
assert cur.rowcount in (0, 1)
if cur.rowcount == 1:
# success
ticket.update(cur.fetchone())
else:
# some race
cur.execute(query2, (ticket["ticket_id"], ))
assert cur.rowcount in (0, 1)
if cur.rowcount == 1:
raise AlreadyFinalised(cur.fetchone())
else:
raise NonExistantTicket
_reference_first_cleaner = re.compile("[^a-zA-Z0-9]")
def reference(user=None):
"""Get the payment reference `user` should use"""
if user is None:
user = flask.session["user"]
if user["crsid"]:
first = user["crsid"]
else:
first = _reference_first_cleaner.sub("", user["email"])[:9]
# we're aiming for < 18 characters. The id isn't going to realistically
# be longer than 4 characters, so this will work just fine.
second = unicode(user["user_id"]).rjust(4, "0")
return "{0}/{1}".format(first, second)
def outstanding_balance(user_id=None, ids_too=False, pg=utils.postgres):
"""
Get the total unpaid for `user_id` (or the current user)
If `ids_too` is ``True``, returns ``total unpaid, ticket_ids``
"""
ts = tickets(user_id=user_id, finalised=True, paid=False,
waiting_list=False, pg=pg)
bal = sum(t["price"] for t in ts)
if ids_too:
ids = [t["ticket_id"] for t in ts]
return bal, ids
else:
return bal
def mark_paid(ticket_ids, add_note="", pg=utils.postgres):
"""Mark each of `ticket_ids` paid, optionally adding to `notes`"""
query = "UPDATE tickets " \
"SET paid = utcnow(), notes = notes || %s " \
"WHERE ticket_id IN %s AND paid IS NULL"
with pg.cursor() as cur:
cur.execute(query, (add_note, tuple(ticket_ids)))
assert cur.rowcount == len(ticket_ids)
def purge_unpaid(user_id, ticket_id, pg=utils.postgres):
"""
Mark `ticket_id` unpaid and unfinalised
`user_id` must match the `user_id` on the ticket (safety check).
"""
query = "UPDATE tickets " \
"SET finalised = NULL, expires = utcnow(), " \
" expires_reason = 'not-paid' " \
"WHERE finalised IS NOT NULL AND paid IS NULL AND " \
" NOT waiting_list AND " \
" user_id = %s AND ticket_id = %s"
logger.info("Purging ticket %s (not-paid)", ticket_id,
extra={"user_id": user_id})
with pg.cursor() as cur:
cur.execute(query, (user_id, ticket_id))
assert cur.rowcount == 1
def waiting_release(user_id, ticket_ids, ask_pay_within=7, pg=utils.postgres):
"""Release tickets to `user_id`, and send them an email"""
query = "UPDATE tickets SET waiting_list = FALSE, notes = notes || %s " \
"WHERE finalised IS NOT NULL AND waiting_list AND " \
" ticket_id IN %s AND user_id = %s"
notes = "Released from waiting list on {0}\n".format(datetime.utcnow())
logger.info("Releasing tickets %s from waiting list", list(ticket_ids),
extra={"user_id": user_id})
with pg.cursor() as cur:
cur.execute(query, (notes, tuple(ticket_ids), user_id))
assert cur.rowcount == len(ticket_ids)
# :-( :-( circular import.
from . import receipt
receipt.send_update(user_id, pg=pg, ask_pay_within=ask_pay_within,
waiting_release=True)
| gpl-3.0 | -6,056,314,305,332,957,000 | 31.68136 | 82 | 0.589002 | false |
OpenBEL/resource-generator | datasets.py | 1 | 33416 |
'''
datasets.py
Represent each parsed dataset as an object. This is
really just a wrapper around the underlying dictionaries,
but it also provides some useful functions that assist
in the namespacing and equivalencing process.
'''
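# Hedged usage sketch (the parsed dictionary and output directory are assumed to
# come from the surrounding build scripts): a dataset object wraps the parsed
# data and writes a BEL namespace, e.g.
#
#     hgnc = HGNCData(parsed_hgnc_dict)
#     for term_id in hgnc.get_values():
#         label = hgnc.get_label(term_id)
#     hgnc.write_ns_values(output_dir)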
import os.path
import time
from common import get_citation_info
from collections import defaultdict
class DataSet():
def __init__(self, dictionary={}, prefix='unnamed-data-object'):
self._dict = dictionary
self._prefix = prefix
def get_values(self):
''' Get all non-obsolete primary ids in dictionary.'''
for term_id in self._dict:
if self._dict.get(term_id).get('is_obsolete'):
continue
else:
yield term_id
def __str__(self):
return self._prefix
class OrthologyData(DataSet):
def __init__(self, dictionary={}, prefix='use-index-term-prefix'):
super().__init__(dictionary, prefix)
def get_orthologs(self, term_id):
orthologs = set()
mapping = self._dict.get(term_id)
mouse_orthologs = mapping.get('mouse_ortholog_id').split('|')
orthologs.update(mouse_orthologs)
if mapping.get('human_ortholog_id') is not '':
human_orthologs = mapping.get('human_ortholog_id').split('|')
human_orthologs = {'HGNC:' + ortho for ortho in human_orthologs}
orthologs.update(human_orthologs)
return orthologs
def __str__(self):
return self._prefix + '_ortho'
class HomologeneData(OrthologyData):
def __init__(self, dictionary={}, prefix='egid'):
super().__init__(dictionary, prefix)
def get_values(self):
for term_id in self._dict['gene_ids']:
yield term_id
def get_orthologs(self, term_id):
orthologs = set()
mapping = self._dict.get('gene_ids').get(term_id)
group = mapping.get('homologene_group')
species = mapping.get('tax_id')
for k, v in self._dict['homologene_groups'][group].items():
if k == species and len(v) > 1:
                return set()  # stop here; don't return any orthologs since the homologene group contains paralogs
            elif k == species:
                continue
elif k != species and len(v) == 1:
orthologs.update(v)
else:
print(
"WARNING! Missed case {0} - {1} - {2}".format(term_id, k, v))
orthologs = {'EGID:' + o for o in orthologs}
return orthologs
class HistoryDataSet(DataSet):
def __init__(self, dictionary={}, prefix='use-index-term-prefix'):
super().__init__(dictionary, prefix)
def get_id_update(self, term_id):
''' given an id, return the current value or "withdrawn". '''
mapping = self._dict.get(term_id)
if mapping is not None:
if mapping.get('status') == 'withdrawn':
value = 'withdrawn'
else:
value = mapping.get('new_id')
else:
value = None
return value
def get_obsolete_ids(self):
''' return dict with all obsolete ids, and current value.'''
value = None
replacement_dict = {}
for term_id in self._dict:
mapping = self._dict.get(term_id)
if mapping.get('status') == 'withdrawn':
value = 'withdrawn'
else:
value = self.get_id_update(term_id)
replacement_dict[term_id] = value
return replacement_dict
def __str__(self):
return self._prefix + '_history'
class NamespaceDataSet(DataSet):
ids = False # make .belns file containing labels (default = False)
labels = True # make .bels file containing ids (default = True)
# namespace ('ns') and/or annotation ('anno') concept scheme
scheme_type = ['ns']
def __init__(
self,
dictionary={},
name='namespace-name',
prefix='namespace-prefix',
domain=['other']):
self._name = name
self._domain = domain
super().__init__(dictionary, prefix)
def get_label(self, term_id):
''' Return the value to be used as the preferred
label for the associated term id. Use id as default,
but will generally be a name/symbol. '''
return term_id
def get_xrefs(self, term_id):
''' Return equivalences to other namespaces (or None). '''
return None
def get_name(self, term_id):
''' Return the term name to use as title (or None). '''
try:
name = self._dict.get(term_id).get('name')
return name
except:
return None
def get_species(self, term_id):
''' Return species as NCBI tax ID (or None, as applicable). '''
return None
def get_encoding(self, term_id):
''' Return encoding (allowed abundance types) for value.
Default = 'A' (Abundance). '''
return 'A'
def get_concept_type(self, term_id):
# TODO - merge with get_encoding
''' For Annotation Concept Schemes, return set of AnnotationConcept types.
Default = 'AnnotationConcept' (parent class) '''
if 'anno' not in self.scheme_type:
return None
else:
return {'AnnotationConcept'}
def get_alt_symbols(self, term_id):
''' Return set of symbol synonyms. Default = None. '''
return None
def get_alt_names(self, term_id):
''' Return set of name synonyms. Default = None. '''
return None
def get_alt_ids(self, term_id):
''' Returns set of alternative IDs. IDs should be
unique. '''
try:
alt_ids = self._dict.get(term_id).get('alt_ids')
except:
alt_ids = set()
if alt_ids:
            # str.lstrip() removes a set of characters, not a prefix, so strip
            # the namespace prefixes (e.g. 'PREFIX:' / 'PREFIXID:') explicitly
            prefixes = (self._prefix.upper() + 'ID:', self._prefix.upper() + ':')
            alt_ids = {a.split(':', 1)[1] if a.startswith(prefixes) else a
                       for a in alt_ids}
return alt_ids
def write_ns_values(self, dir):
data_names = {}
data_ids = {}
for term_id in self.get_values():
encoding = self.get_encoding(term_id)
label = self.get_label(term_id)
data_names[label] = encoding
data_ids[term_id] = encoding
if self.get_alt_ids(term_id):
for alt_id in self.get_alt_ids(term_id):
data_ids[alt_id] = encoding
if self.labels:
self.write_data(data_names, dir, self._name + '.belns')
if self.ids:
self.write_data(data_ids, dir, self._name + '-ids.belns')
def write_data(self, data, dir, name):
if len(data) == 0:
print(' WARNING: skipping writing ' +
name + '; no namespace data found.')
else:
with open(os.path.join(dir, name), mode='w', encoding='utf8') as f:
# insert header chunk
if os.path.exists(dir + '/templates/' + name):
tf = open(dir + '/templates/' + name, encoding="utf-8")
header = tf.read().rstrip()
tf.close()
# add Namespace, Citation and Author values
# source_file attribute added to object during parsing
header = get_citation_info(name, header, self.source_file)
else:
print(
'WARNING: Missing header template for {0}'.format(name))
header = '[Values]'
f.write(header + '\n')
# write data
for i in sorted(data.items()):
f.write('|'.join(i) + '\n')
def __str__(self):
return self._prefix
class StandardCustomData(NamespaceDataSet, HistoryDataSet):
def __init__(self, dictionary={}, *, name, prefix, domain):
super().__init__(dictionary, name, prefix, domain)
self._dict = {} # make unique dict for each instance of class
def get_values(self):
for term_id in self._dict:
if term_id is not None and self._dict.get(
term_id).get('OBSOLETE') != 1:
yield term_id
def get_label(self, term_id):
''' Return the value to be used as the preferred
label for the associated term id. '''
label = self._dict.get(term_id).get('LABEL')
return label
def get_xrefs(self, term_id):
xrefs = set(self._dict.get(term_id).get('XREF').split('|'))
xrefs = {x.strip() for x in xrefs if ':' in x}
return xrefs
def get_species(self, term_id):
species = self._dict.get(term_id).get('SPECIES')
return species
def get_encoding(self, term_id):
encoding = self._dict.get(term_id).get('TYPE')
return encoding
def get_alt_names(self, term_id):
synonyms = set()
synonyms.update(self._dict.get(term_id).get('SYNONYMS').split('|'))
synonyms = {s for s in synonyms if s}
return synonyms
def get_obsolete_ids(self):
''' return dict with all obsolete ids, and current value.'''
# TODO Add alt id handling,
value = None
replacement_dict = {}
for term_id in self._dict:
if self._dict.get(term_id).get('OBSOLETE') == 1:
mapping = self._dict.get(term_id)
value = 'withdrawn'
replacement_dict[term_id] = value
return replacement_dict
class EntrezInfoData(NamespaceDataSet):
ENC = {
'protein-coding': 'GRP', 'miscRNA': 'GR', 'ncRNA': 'GR',
'snoRNA': 'GR', 'snRNA': 'GR', 'tRNA': 'GR', 'scRNA': 'GR',
'other': 'G', 'pseudo': 'GR', 'unknown': 'GRP', 'rRNA': 'GR'
}
subject = "gene/RNA/protein"
description = "NCBI Entrez Gene identifiers for Homo sapiens, Mus musculus, and Rattus norvegicus."
def __init__(
self,
dictionary={},
*,
name='entrez-gene',
prefix='egid',
domain=['gene and gene product'],
ids=True,
labels=False):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
self.labels = labels
def get_label(self, term_id):
        ''' Return the value to be used as the preferred
label for the associated term id. For Entrez,
using the gene ID. '''
return term_id
def get_species(self, term_id):
''' Return species as NCBI tax ID (or None, as applicable). '''
species = self._dict.get(term_id).get('tax_id')
return species
def get_encoding(self, gene_id):
''' Return encoding (allowed abundance types) for value. '''
mapping = self._dict.get(gene_id)
gene_type = mapping.get('type_of_gene')
description = mapping.get('description')
encoding = EntrezInfoData.ENC.get(gene_type, 'G')
if gene_type == 'ncRNA' and 'microRNA' in description:
encoding = 'GRM'
if gene_type not in EntrezInfoData.ENC:
print(
'WARNING ' +
gene_type +
' not defined for Entrez. G assigned as default encoding.')
return encoding
def get_xrefs(self, term_id):
''' Returns xrefs to HGNC, MGI, RGD. '''
targets = ('MGI:', 'HGNC:', 'RGD:')
xrefs = set()
mapping = self._dict.get(term_id)
xrefs.update(mapping.get('dbXrefs').split('|'))
# normalize xrefs with duplicated prefix
# e.g., HGNC:HGNC:5
xrefs = {x.split(':', x.count(':') - 1)[-1] for x in xrefs}
xrefs = {x for x in xrefs if x.startswith(targets)}
return xrefs
def get_alt_symbols(self, gene_id):
''' Return set of symbol synonyms. '''
synonyms = set()
mapping = self._dict.get(gene_id)
        if mapping.get('Synonyms') != '-':
synonyms.update(mapping.get('Synonyms').split('|'))
synonyms.add(mapping.get('Symbol'))
return synonyms
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
        if mapping.get('Other_designations') != '-':
synonyms.update(mapping.get('Other_designations').split('|'))
if mapping.get('description') != '-':
synonyms.add(mapping.get('description'))
return synonyms
def get_name(self, term_id):
''' Get official term name. '''
mapping = self._dict.get(term_id)
name = mapping.get('Full_name_from_nomenclature_authority')
return name
class EntrezHistoryData(HistoryDataSet):
def __init__(self, dictionary={}, prefix='egid'):
super().__init__(dictionary, prefix)
class HGNCData(NamespaceDataSet, OrthologyData, HistoryDataSet):
ENC = {
'gene with protein product': 'GRP', 'RNA, cluster': 'GR',
'RNA, long non-coding': 'GR', 'RNA, micro': 'GRM',
'RNA, ribosomal': 'GR', 'RNA, small cytoplasmic': 'GR',
'RNA, small misc': 'GR', 'RNA, small nuclear': 'GR',
'RNA, small nucleolar': 'GR', 'RNA, transfer': 'GR',
'phenotype only': 'G', 'RNA, pseudogene': 'GR',
'T cell receptor pseudogene': 'GR',
'immunoglobulin pseudogene': 'GR', 'pseudogene': 'GR',
'T cell receptor gene': 'GRP',
'complex locus constituent': 'GRP',
'endogenous retrovirus': 'G', 'fragile site': 'G',
'immunoglobulin gene': 'GRP', 'protocadherin': 'GRP',
'readthrough': 'GR', 'region': 'G',
'transposable element': 'G', 'unknown': 'GRP',
'virus integration site': 'G', 'RNA, micro': 'GRM',
'RNA, misc': 'GR', 'RNA, Y': 'GR', 'RNA, vault': 'GR',
        'T-cell receptor gene': 'G', 'T-cell receptor pseudogene': 'G',
}
def __init__(
self,
dictionary={},
*,
name='hgnc-human-genes',
prefix='hgnc',
domain=['gene and gene product']):
super().__init__(dictionary, name, prefix, domain)
def get_values(self):
for term_id in self._dict:
if '~withdrawn' not in self._dict.get(term_id).get('Symbol'):
yield term_id
def get_id_update(self, term_id):
mapping = self._dict.get(term_id)
if mapping is None:
return None
else:
if mapping.get('Locus Type') == 'withdrawn':
name = self.get_name(term_id)
if 'entry withdrawn' in name:
return 'withdrawn'
elif 'symbol withdrawn' in name:
new_symbol = name.split('see ')[1]
new_id = None
for term_id in self._dict:
if new_symbol == self.get_label(term_id):
new_id = term_id
continue
return new_id
else:
return term_id
def get_obsolete_ids(self):
obsolete = {}
for term_id in self._dict:
if 'withdrawn' in self.get_label(term_id):
obsolete[term_id] = self.get_id_update(term_id)
return obsolete
def get_label(self, term_id):
''' Return preferred label associated with term id. '''
mapping = self._dict.get(term_id)
if mapping is None:
return None
else:
label = mapping.get('Symbol')
return label
def get_encoding(self, term_id):
mapping = self._dict.get(term_id)
locus_type = mapping.get('Locus Type')
encoding = HGNCData.ENC.get(locus_type, 'G')
if locus_type not in HGNCData.ENC:
print(
'WARNING ' +
locus_type +
' not defined for HGNC. G assigned as default encoding.')
return encoding
def get_species(self, term_id):
return '9606'
def get_alt_symbols(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
if mapping.get('Synonyms'):
symbol_synonyms = [s.strip()
for s in mapping.get('Synonyms').split(',')]
synonyms.update(symbol_synonyms)
if mapping.get('Previous Symbols'):
old_symbols = [s.strip()
for s in mapping.get('Previous Symbols').split(',')]
synonyms.update(old_symbols)
return synonyms
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
if mapping.get('Previous Names'):
old_names = [s.strip('" ') for s in mapping.get(
'Previous Names').split(', "')]
synonyms.update(old_names)
return synonyms
def get_name(self, term_id):
mapping = self._dict.get(term_id)
name = mapping.get('Approved Name')
return name
def get_orthologs(self, term_id):
orthologs = set()
mapping = self._dict.get(term_id)
mouse_orthologs = mapping.get('mouse_ortholog_id').split('|')
orthologs.update(mouse_orthologs)
rat_orthologs = mapping.get('rat_ortholog_id').split('|')
orthologs.update(rat_orthologs)
return orthologs
class MGIData(NamespaceDataSet):
ENC = {
'gene': 'GRP', 'protein coding gene': 'GRP',
'non-coding RNA gene': 'GR', 'rRNA gene': 'GR',
'tRNA gene': 'GR', 'snRNA gene': 'GR', 'snoRNA gene': 'GR',
'miRNA gene': 'GRM', 'scRNA gene': 'GR',
'lincRNA gene': 'GR', 'RNase P RNA gene': 'GR',
'RNase MRP RNA gene': 'GR', 'telomerase RNA gene': 'GR',
'unclassified non-coding RNA gene': 'GR',
'heritable phenotypic marker': 'G', 'gene segment': 'G',
'unclassified gene': 'GRP', 'other feature types': 'G',
'pseudogene': 'GR', 'transgene': 'G',
'other genome feature': 'G', 'pseudogenic region': 'GR',
'polymorphic pseudogene': 'GRP',
'pseudogenic gene segment': 'GR', 'SRP RNA gene': 'GR',
'antisense lncRNA gene': 'GR', 'lncRNA gene': 'GR',
'intronic lncRNA gene': 'GR', 'ribozyme gene': 'GR'
}
def __init__(
self,
dictionary={},
*,
name='mgi-mouse-genes',
prefix='mgi',
domain=['gene and gene product']):
super().__init__(dictionary, name, prefix, domain)
def get_values(self):
for term_id in self._dict:
mapping = self._dict.get(term_id)
marker_type = mapping.get('Marker Type')
if marker_type == 'Gene' or marker_type == 'Pseudogene':
yield term_id
def get_species(self, term_id):
return '10090'
def get_encoding(self, term_id):
feature_type = self._dict.get(term_id).get('Feature Type')
encoding = self.ENC.get(feature_type, 'G')
if feature_type not in self.ENC:
print(
'WARNING ' +
feature_type +
' not defined for MGI. G assigned as default encoding.')
return encoding
def get_label(self, term_id):
try:
label = self._dict.get(term_id).get('Symbol')
return label
except:
return None
def get_name(self, term_id):
mapping = self._dict.get(term_id)
name = mapping.get('Marker Name')
return name
def get_alt_symbols(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms = mapping.get('Marker Synonyms').split('|')
synonyms = {s for s in synonyms if s}
return synonyms
class RGDData(NamespaceDataSet):
ENC = {
'gene': 'GRP', 'miscrna': 'GR', 'predicted-high': 'GRP',
'predicted-low': 'GRP', 'predicted-moderate': 'GRP',
'protein-coding': 'GRP', 'pseudo': 'GR', 'snrna': 'GR',
'trna': 'GR', 'rrna': 'GR', 'ncrna': 'GR'
}
def __init__(
self,
dictionary={},
*,
name='rgd-rat-genes',
prefix='rgd',
domain=['gene and gene product']):
super().__init__(dictionary, name, prefix, domain)
def get_species(self, term_id):
''' Rat '''
return '10116'
def get_label(self, term_id):
''' Use Symbol as preferred label for RGD. '''
try:
label = self._dict.get(term_id).get('SYMBOL')
return label
except:
return None
def get_name(self, term_id):
name = self._dict.get(term_id).get('NAME')
return name
def get_encoding(self, term_id):
gene_type = self._dict.get(term_id).get('GENE_TYPE')
name = self.get_name(term_id)
encoding = RGDData.ENC.get(gene_type, 'G')
        # grouped so the microRNA name check applies to both RNA gene types
        if gene_type in ('miscrna', 'ncrna') and 'microRNA' in name:
encoding = 'GRM'
if gene_type not in RGDData.ENC:
print(
'WARNING ' +
gene_type +
' not defined for RGD. G assigned as default encoding.')
return encoding
def get_alt_symbols(self, term_id):
synonyms = set()
if self._dict.get(term_id).get('OLD_SYMBOL'):
old_symbols = self._dict.get(term_id).get('OLD_SYMBOL').split(';')
synonyms.update(old_symbols)
return synonyms
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
if mapping.get('OLD_NAME'):
old_names = mapping.get('OLD_NAME').split(';')
synonyms.update(old_names)
synonyms = {s for s in synonyms if s}
return synonyms
class RGDObsoleteData(HistoryDataSet):
def __init__(self, dictionary={}, prefix='rgd'):
super().__init__(dictionary, prefix)
class SwissProtData(NamespaceDataSet):
def __init__(
self,
dictionary=defaultdict(list),
*,
name='swissprot',
prefix='sp',
domain=['gene and gene product'],
ids=True):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
def get_encoding(self, term_id):
return 'GRP'
def get_label(self, term_id):
label = self._dict.get(term_id).get('name')
return label
def get_name(self, term_id):
mapping = self._dict.get(term_id)
name = mapping.get('recommendedFullName')
return name
def get_alt_ids(self, term_id):
alt_ids = self._dict.get(term_id).get('accessions')
alt_ids = set(alt_ids)
alt_ids = {alt_id for alt_id in alt_ids if alt_id != term_id}
return alt_ids
def get_alt_symbols(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms.update(mapping.get('alternativeShortNames'))
if mapping.get('recommendedShortName'):
            synonyms.add(mapping.get('recommendedShortName'))
if mapping.get('geneName'):
synonyms.add(mapping.get('geneName'))
if mapping.get('geneSynonyms'):
synonyms.update(mapping.get('geneSynonyms'))
return synonyms
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms.update(mapping.get('alternativeFullNames'))
return synonyms
def get_xrefs(self, term_id):
''' Returns GeneIDs or HGNC/MGI/RGD IDs. '''
mapping = self._dict.get(term_id)
xrefs = set()
xrefs_dict = mapping.get('dbreference')
for ns, values in xrefs_dict.items():
if ns == 'GeneId':
values = {('EGID:' + v) for v in values}
xrefs.update(values)
elif ns == 'HGNC' or ns == 'MGI':
xrefs.update(values)
elif ns == 'RGD':
values = {('RGD:' + v) for v in values}
xrefs.update(values)
return xrefs
def get_species(self, term_id):
species = self._dict.get(term_id).get('tax_id')
return species
class AffyData(NamespaceDataSet):
def __init__(
self,
dictionary=defaultdict(list),
*,
name='affy-probeset',
prefix='affx',
domain=['gene and gene product'],
ids=True,
labels=False):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
self.labels = labels
def get_species(self, term_id):
species = self._dict.get(term_id).get('Species')
species_dict = {'Homo sapiens': '9606',
'Mus musculus': '10090',
'Rattus norvegicus': '10116'}
tax_id = species_dict.get(species)
return tax_id
def get_encoding(self, term_id):
''' Return encoding (allowed abundance types) for value.
R - RNAAbundance. '''
return 'R'
def get_xrefs(self, term_id):
''' Returns equivalent Entrez Gene IDs for value . '''
entrez_ids = self._dict.get(term_id).get('Entrez Gene').split('///')
if entrez_ids[0] == '---':
return None
else:
entrez_ids = ['EGID:' + eid.strip() for eid in entrez_ids]
return set(entrez_ids)
class CHEBIData(NamespaceDataSet):
def __init__(
self,
dictionary={},
*,
name='chebi',
prefix='chebi',
domain=['chemical'],
ids=True):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
def get_label(self, term_id):
label = self._dict.get(term_id).get('name')
return label
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
if mapping.get('synonyms'):
synonyms.update(mapping.get('synonyms'))
return synonyms
class Gene2AccData(DataSet):
def __init__(self, dictionary={}, prefix='gene2acc'):
super().__init__(dictionary, prefix)
def get_eq_values(self):
for entrez_gene in self._dict:
mapping = self._dict.get(entrez_gene)
status = mapping.get('status')
taxid = mapping.get('tax_id')
yield entrez_gene, status, taxid
class GOData(NamespaceDataSet, HistoryDataSet):
    # dictionary is required, since the GO file is parsed into multiple objects
def __init__(self, dictionary, *, name, prefix, domain, ids=True):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
def get_values(self):
for term_id in self._dict:
if self._dict.get(term_id).get('is_obsolete'):
continue
else:
yield term_id
def get_obsolete_ids(self):
obsolete = {}
for term_id in self._dict:
if self._dict.get(term_id).get('is_obsolete'):
obsolete[term_id] = 'withdrawn'
return obsolete
def get_id_update(self, term_id):
if self._dict.get(term_id):
if self._dict.get(term_id).get('is_obsolete'):
return 'withdrawn'
else:
return term_id
else:
return None
def get_label(self, term_id):
label = self._dict.get(term_id).get('termname')
return label
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms.update(mapping.get('synonyms'))
return synonyms
def get_encoding(self, term_id):
if self._dict.get(term_id).get('complex'):
encoding = 'C'
elif self._prefix == 'gobp':
encoding = 'B'
else:
encoding = 'A'
return encoding
class MESHData(NamespaceDataSet):
    # NOTE dictionary and other arguments are required since the MeSH file is
    # parsed into multiple objects
def __init__(
self,
dictionary,
*,
name,
prefix,
domain,
ids=True,
scheme_type=['ns']):
super().__init__(dictionary, name, prefix, domain)
self.ids = ids
self.scheme_type = scheme_type
def get_label(self, term_id):
label = self._dict.get(term_id).get('mesh_header')
return label
def get_encoding(self, term_id):
if self._prefix == 'meshd':
return 'O'
elif self._prefix == 'meshpp':
return 'B'
else:
return 'A'
def get_concept_type(self, term_id):
# TODO - merge with get_encoding
''' For Annotation Concept Schemes, return set of AnnotationConcept types.
Default = 'AnnotationConcept' (parent class) '''
if 'anno' not in self.scheme_type:
return None
else:
if self._prefix == 'meshd':
return {'Disease'}
elif self._prefix == 'mesha':
return {'Anatomy'}
elif self._prefix == 'meshcs':
return {'Location'}
else:
return None
def get_alt_names(self, term_id):
synonyms = set()
mapping = self._dict.get(term_id)
synonyms.update(mapping.get('synonyms'))
return synonyms
class SwissWithdrawnData(HistoryDataSet):
def __init__(self, dictionary={}, prefix='sp'):
super().__init__(dictionary, prefix)
def get_obsolete_ids(self):
accessions = self._dict.get('accessions')
obsolete = {}
for a in accessions:
obsolete[a] = 'withdrawn'
return obsolete
def get_id_update(self, term_id):
if term_id in self._dict.get('accessions'):
return 'withdrawn'
else:
return None
class OWLData(NamespaceDataSet, HistoryDataSet):
def __init__(
self,
dictionary={},
*,
name,
prefix,
domain,
ids=True,
scheme_type):
super().__init__(dictionary, name, prefix, domain)
self._dict = {} # make unique dict for each instance of class
self.ids = ids
self.scheme_type = scheme_type
def get_label(self, term_id):
label = self._dict.get(term_id).get('name')
return label
def get_encoding(self, term_id):
return 'O'
def get_concept_type(self, term_id):
# TODO - merge with get_encoding
''' For Annotation Concept Schemes, return set of AnnotationConcept types.
Default = 'AnnotationConcept' (parent class) '''
concept_type = set()
if 'anno' not in self.scheme_type:
return None
elif self._prefix == 'clo':
concept_type = {'CellLine'}
elif self._prefix == 'cl':
concept_type = {'Cell'}
elif self._prefix == 'uberon':
concept_type = {'Anatomy'}
elif self._prefix == 'efo':
concept_type = self._dict.get(term_id).get("term_type")
elif self._prefix == 'do':
concept_type = {'Disease'}
return concept_type
def get_alt_names(self, term_id):
mapping = self._dict.get(term_id)
synonyms = set(mapping.get('synonyms'))
return synonyms
def find_xref(self, ref):
''' Used only in equiv module. '''
for term_id, mapping in self._dict.items():
dbxrefs = mapping.get('dbxrefs')
if ref in dbxrefs:
return term_id
def get_xrefs(self, term_id):
''' Returns MeSH (MSH) xrefs for a given DO ID . '''
xrefs = set()
mapping = self._dict.get(term_id)
xrefs.update(mapping.get('dbxrefs'))
if self._prefix == 'do':
xrefs = {x.replace('MSH:', 'MESHD:')
for x in xrefs if x.startswith('MSH:')}
return xrefs
def get_obsolete_ids(self):
obsolete = {}
for term_id in self._dict:
if self._dict.get(term_id).get('is_obsolete'):
obsolete[term_id] = 'withdrawn'
return obsolete
def get_id_update(self, term_id):
if self._dict.get(term_id):
if self._dict.get(term_id).get('is_obsolete'):
return 'withdrawn'
else:
return term_id
else:
return None
class NCBITaxonomyData(NamespaceDataSet):
def __init__(
self,
dictionary={},
*,
name,
prefix,
domain,
ids=True,
scheme_type):
super().__init__(dictionary, name, prefix, domain)
self.scheme_type = scheme_type
def get_label(self, term_id):
label = self._dict.get(term_id).get('name')
return label
def get_alt_names(self, term_id):
mapping = self._dict.get(term_id)
synonyms = set(mapping.get('synonyms'))
return synonyms
def get_concept_type(self, term_id):
# TODO - merge with get_encoding
''' For Annotation Concept Schemes, return set of AnnotationConcept types.
Default = 'AnnotationConcept' (parent class) '''
if 'anno' not in self.scheme_type:
return None
else:
return {'Species'}
| apache-2.0 | -6,744,172,609,000,880,000 | 31.792934 | 109 | 0.536599 | false |
phbradley/tcr-dist | make_mouse_table.py | 1 | 8705 | from basic import *
import html_colors
import util
with Parser(locals()) as p:
# p.str('args').unspecified_default().multiple().required()
p.str('clones_file').required()
p.str('outfile_prefix')
p.flag('horizontal_lines')
p.flag('show')
p.flag('include_counts_in_mouse_labels')
if not outfile_prefix:
outfile_prefix = clones_file[:-4]
import matplotlib
if not show: matplotlib.use('Agg')
import matplotlib.pyplot as plt
#import numpy as np
# all_tcrs = {}
# all_epitopes = []
# infields = []
# for line in open( clones_file,'r'):
# if not infields:
# if line[0] == '#':
# infields = line[1:-1].split('\t')
# else:
# infields = line[:-1].split('\t')
# continue
# assert infields
# l = parse_tsv_line( line[:-1], infields )
# mouse = l['mouse']
# epitope = l['epitope']
# clone_size = int(l['clone_size'])
# if mouse not in all_tcrs:
# all_tcrs[mouse] = {}
# if epitope not in all_tcrs[mouse]:
# all_tcrs[mouse][epitope] = []
# if epitope not in all_epitopes:
# all_epitopes.append( epitope )
# all_tcrs[mouse][epitope].append( clone_size ) ## just store the clone sizes
all_tcrs = parse_tsv_file( clones_file, ['subject','epitope'], ['clone_size'], False )
all_epitopes = list( reduce( set.union, ( set( x.keys() ) for x in all_tcrs.values() ) ) )
all_epitopes.sort()
all_mice= all_tcrs.keys()[:]
all_mice.sort()
counts = {}
for e in all_epitopes: counts[e] = [0,0]
for m in all_mice: counts[m] = [0,0]
for mouse in all_tcrs:
for epitope in all_tcrs[mouse]:
clone_sizes = [int(x[0]) for x in all_tcrs[mouse][epitope]]
total_reads = sum(clone_sizes)
for k in [mouse,epitope]:
counts[k][0] += len(clone_sizes)
counts[k][1] += total_reads
mouse_labels = {}
for mouse in all_mice:
if include_counts_in_mouse_labels:
mouse_labels[mouse] = '{} ({};{})'.format( mouse, counts[mouse][0], counts[mouse][1] )
else:
mouse_labels[mouse] = mouse
epitope_labels = {}
for epitope in all_epitopes:
epitope_labels[epitope] = '{} ({};{})'.format( epitope, counts[epitope][0], counts[epitope][1] )
nrows = len( all_mice )
ncols = len( all_epitopes )
preferred_plot_width = 12.0
preferred_plot_height = 12.0
preferred_cell_size = max( 0.5, min( preferred_plot_height/nrows, preferred_plot_width/ncols ) )
plot_width = ncols * preferred_cell_size
plot_height = nrows * preferred_cell_size
fontsize_small = 8.
fontsize_medium = 10.
fontsize_names = 12.
for repeat in range(3):
if plot_width <= 1.2 * preferred_plot_width and plot_height <= 1.2 * preferred_plot_height: break
if plot_width / preferred_plot_width > plot_height / preferred_plot_height: ## too wide
plot_width *= 0.75
plot_height *= 0.9
fontsize_small *= 0.9
fontsize_medium *= 0.9
else: ## too tall
plot_height *= 0.75
plot_width *= 0.9
fontsize_small *= 0.9
fontsize_medium *= 0.9
fontsize_small = max(5,int(floor(0.5+fontsize_small)))
fontsize_medium = max(6,int(floor(0.5+fontsize_medium)))
fudge = 1.2
bottom_spacer = 0.3 # inches
left_margin_inches = fudge * max( ( len(mouse_labels[x]) for x in all_mice ) ) * 0.6 * fontsize_names / 72.0
bottom_margin_inches = fudge * max( ( len(epitope_labels[x]) for x in all_epitopes ) ) * 0.75 * fontsize_names / 72.0 + bottom_spacer
top_margin_inches = 0.25
right_margin_inches = 0.25
fig_width = left_margin_inches + plot_width + right_margin_inches
fig_height = bottom_margin_inches + plot_height + top_margin_inches
top_margin = float( bottom_margin_inches + plot_height ) / fig_height
bottom_margin = float( bottom_margin_inches ) / fig_height
left_margin = float( left_margin_inches ) / fig_width
right_margin = float( left_margin_inches + plot_width ) / fig_width
print 'fig_width: {:.1f} fig_height: {:.1f}'.format(fig_width,fig_height)
fig = plt.figure(1,figsize=(fig_width,fig_height))
#fig = plt.figure(1,figsize=(23,8))
#fig1.add_line(Line2D([0.5,0.5], [0,1], linewidth=2, color='blue'))
#ax = fig.add_axes( [ left_margin, bottom_margin, right_margin,top_margin ] )
#ax.grid(True)
plotno=0
for mouse in all_mice:
for epitope in all_epitopes:
plotno += 1
if epitope not in all_tcrs[mouse]:
continue
plt.subplot( nrows, ncols, plotno )
clone_sizes = [int(x[0]) for x in all_tcrs[mouse][epitope]]
clone_sizes.sort()
clone_sizes.reverse()
colors = html_colors.get_rank_colors_no_lights(len(clone_sizes))
wedges, texts = plt.pie( clone_sizes )
for ii,w in enumerate(wedges):
w.set_edgecolor('none')
w.set_facecolor(colors[ii])
topsize = clone_sizes[0]
total_size = sum(clone_sizes)
## show the size of the largest wedge?
if len(wedges)>1:
w = wedges[0]
#print w.center, w.r, w.theta1, w.theta2
## show the size at radius distance in middle of edge
angle_degrees = w.theta2*0.5
if 65<=angle_degrees<=115: angle_degrees = 65. if angle_degrees < 90. else 115.
x=1.1*w.r*math.cos( math.pi * angle_degrees / 180.0 )
y=1.1*w.r*math.sin( math.pi * angle_degrees / 180.0 )
thresh = 0.3*w.r
ha = 'left' if x>thresh else ( 'center' if x>-thresh else 'right' )
va = 'bottom' if y>thresh else ( 'center' if y>-thresh else 'top' )
plt.text(x,y,`topsize`,fontdict={'fontsize':fontsize_small},color='r',
horizontalalignment=ha,verticalalignment=va)
## show the total number of reads
radius = wedges[0].r
plt.text(0,-1.1*radius,`total_size`,fontdict={'fontsize':fontsize_medium},
horizontalalignment='center',verticalalignment='top' )
#t = plt.title(`sum(clone_sizes)`,fontdict={'fontsize':8})
if False:
if epitope==all_epitopes[0]:
plt.title(mouse)
elif mouse==all_mice[0]:
plt.title(epitope)
#break
#break
#plt.hlines(0.5,0.0,1.0)
#plt.vlines(0.5,0.0,1.0)
epsilon = 0.0
plt.subplots_adjust(
left=left_margin+epsilon,
right=right_margin-epsilon,
bottom=bottom_margin+epsilon,
top=top_margin-epsilon
)
ywidth = (top_margin-bottom_margin) / ( len(all_mice) )
xwidth = (right_margin-left_margin) / ( len(all_epitopes) )
#ystep = (top_margin-bottom_margin) / ( len(all_epitopes)-1 )
lines = []
# if horizontal_lines:
# for ii in range(len(all_epitopes)):
# #for ii in range(len(all_epitopes)+1):
# y = bottom_margin + 1.02 * ii * ywidth
# lines.append( matplotlib.lines.Line2D( [0,1], [y,y],
# transform=fig.transFigure, figure=fig, c='k' ) )
if False:
for ii in range(len(all_mice)+1):
x = left_margin + ii*xwidth
lines.append( matplotlib.lines.Line2D( [x,x], [0,1],
transform=fig.transFigure, figure=fig, c='k' ) )
fig.lines.extend(lines)
for ii,mouse in enumerate( all_mice ):
plt.figtext( left_margin-0.005, top_margin - 3*ywidth/5 - ii * ywidth, mouse_labels[mouse], ha='right', va='center',
fontdict={'fontsize':fontsize_names})
#plt.figtext( right_margin+0.005, top_margin - 3*ywidth/5 - ii * ywidth, epitope,ha='left')
#xstep = (right_margin-left_margin) / ( len(all_mice)-1 )
for ii,epitope in enumerate( all_epitopes ):
#name = mouse[:]
# if name[0] == 'd' and 'Mouse' in name:
# name = name.replace('Mouse','_')
plt.figtext(left_margin + xwidth/2 + ii * xwidth, bottom_margin - (bottom_spacer)/fig_height,
epitope_labels[epitope],
rotation='vertical', ha='center', va='top',
fontdict={'fontsize':fontsize_names})
#plt.figtext(left_margin + xwidth/2 + ii * xwidth, 0.98, epitope, ha='center', va='top' )
pngfile = outfile_prefix+'_subject_table.png'
print 'making:',pngfile
plt.savefig(pngfile)
util.readme(pngfile,"""This subject-table plot shows all the successfully parsed, paired reads, split by mouse/subject (the rows)
and epitope (the columns, labeled at the bottom). The epitope column labels include in parentheses the number of clones followed by
the total number of TCRs. Each pie shows the paired reads for a single mouse/epitope combination, with each wedge corresponding to
a clone. The size of the top clone is shown in red near the red wedge, and the total number of reads is shown below the pie in black.
""")
if show:
plt.show()
| mit | -1,046,512,849,625,852,400 | 31.360595 | 133 | 0.611832 | false |
latture/dic | dic/dic_utils.py | 1 | 3905 | """
:mod:`dic_utils` contains several utility functions used when analyzing DIC data, e.g. determining the step size,
going from pixel to millimeter coordinates, and determining deformations.
"""
import numpy as np
import warnings
__all__ = ["get_step", "point_to_indices", "get_initial_position", "get_displacement", "point_to_position"]
def get_step(dic_data):
"""
Returns the step size of the DIC data
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
Returns
-------
int
Step size.
"""
return dic_data["x"][0, 1] - dic_data["x"][0, 0]
def point_to_indices(dic_data, pt):
"""
Transforms ``(x, y)`` in pixel coordinates into the corresponding ``(row, col)`` to access the closest data point
in the specified DIC data.
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
pt : (x, y)
Two-dimensional coordinates of the pixel in global space.
Returns
-------
(row, col) : (int, int)
The row and column in ``dic_data`` that corresponds to the given pixel point.
"""
step = get_step(dic_data)
keys = ("y", "x")
indices = [None, None]
for i, key in enumerate(keys):
min_key = '{}_min'.format(key)
if min_key in dic_data:
px_min = dic_data[min_key]
else:
px_min = dic_data[key].min()
px = pt[(i + 1) % 2]
indices[i] = int(round((px - px_min) / step))
return indices
def get_initial_position(dic_data, row, col):
"""
Retrieves the initial position (in mm if available, otherwise in pixels) held at the specified row and column.
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
row : int
Row in the DIC data to access.
col : int
Column in the DIC data to access.
Returns
-------
``numpy.ndarray``
Initial position ``(x, y, z)``.
"""
try:
return np.array([dic_data["X"][row, col], dic_data["Y"][row, col], dic_data["Z"][row, col]])
except KeyError:
warnings.warn("Position data in millimeters not provided. Falling back to position in pixels.")
return np.array([dic_data["x"][row, col], dic_data["y"][row, col], dic_data["z"][row, col]])
def get_displacement(dic_data, row, col):
"""
Retrieves the displacement (in mm if available, otherwise in pixels) held at the specified row and column.
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
row : int
Row in the DIC data to access.
col : int
Column in the DIC data to access.
Returns
-------
``numpy.ndarray``
Displacements ``(u, v, w)``.
"""
try:
return np.array([dic_data["U"][row, col], dic_data["V"][row, col], dic_data["W"][row, col]])
except KeyError:
warnings.warn("Displacement data in millimeters not provided. Falling back to displacement in pixels.")
return np.array([dic_data["u"][row, col], dic_data["v"][row, col], dic_data["w"][row, col]])
def point_to_position(dic_data, pt, add_displacement=True):
"""
Transforms a point in pixel space into its displaced coordinates (in mm if available, otherwise in pixels).
Parameters
----------
dic_data : dict
Dictionary containing the DIC data.
pt : (x, y)
Two-dimensional coordinates of the pixel in global space.
add_displacement : bool, optional
Whether to add deformation to the undeformed position. Default is ``True``.
Returns
-------
``numpy.ndarray``
``(x, y, z)`` position of the point.
"""
row, col = point_to_indices(dic_data, pt)
pos = get_initial_position(dic_data, row, col)
if add_displacement:
pos += get_displacement(dic_data, row, col)
return pos
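# Minimal usage sketch (an illustrative addition, not part of the original
# module): the tiny 2x2 grid below is a made-up example of the dictionary
# layout the helpers above expect, using pixel-only data so the millimetre
# fallback path (and its warnings) is exercised.
if __name__ == '__main__':
    example_dic_data = {
        'x': np.array([[0.0, 5.0], [0.0, 5.0]]),
        'y': np.array([[0.0, 0.0], [5.0, 5.0]]),
        'z': np.array([[0.0, 0.0], [0.0, 0.0]]),
        'u': np.array([[0.1, 0.2], [0.3, 0.4]]),
        'v': np.array([[0.0, 0.0], [0.0, 0.0]]),
        'w': np.array([[0.0, 0.0], [0.0, 0.0]]),
    }
    print(get_step(example_dic_data))                       # step size -> 5.0
    print(point_to_indices(example_dic_data, (5.0, 0.0)))   # closest grid point -> [0, 1]
    print(point_to_position(example_dic_data, (5.0, 0.0)))  # displaced position (pixels)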
| mit | -2,652,472,472,064,460,300 | 28.360902 | 117 | 0.595134 | false |
agabert/zeus | stages/mysql/fabfile.py | 1 | 1593 |
import os
from zeus.config import ConfigManager
from zeus.common import FabricManager
from zeus.common import PasswordManager
from zeus.ubuntu import RepoManager
from zeus.services import ServiceControl
from fabric.api import parallel, roles, run, env
metadata = ConfigManager(os.environ["CONFIGFILE"])
passwords = PasswordManager(os.environ["PASSWORDCACHE"]).passwords
FabricManager.setup(metadata.roles_ports)
@parallel
@roles('openstack_mysql')
def mysql():
RepoManager.install("mariadb-server")
RepoManager.install("python-pymysql")
this = env.host_string.split(":")[0]
run("""
IP="%s"
cat >/etc/mysql/mariadb.conf.d/51-openstack.cnf <<EOF
[mysqld]
bind-address = ${IP}
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
init-connect='SET NAMES utf8'
[client]
default-character-set = utf8
[mysql]
default-character-set = utf8
EOF
""" % metadata.servers[this]["ip"])
ServiceControl.relaunch("mysql")
ServiceControl.check("mysqld")
for database in ["keystone", "glance", "nova_api", "nova", "neutron"]:
run("""
echo 'create database if not exists %s;' | mysql -uroot
""" % database)
run("""
cat <<EOF | mysql -uroot
GRANT ALL PRIVILEGES ON %s.* TO '%s'@'localhost' IDENTIFIED BY '%s';
EOF
""" % (database, database, passwords["%s_DBPASS" % database.upper()]))
run("""
cat <<EOF | mysql -uroot
GRANT ALL PRIVILEGES ON %s.* TO '%s'@'%%' IDENTIFIED BY '%s';
EOF
""" % (database, database, passwords["%s_DBPASS" % database.upper()]))
| apache-2.0 | -7,195,580,750,168,113,000 | 22.086957 | 74 | 0.697426 | false |
wf4ever/ro-manager | src/MiscUtils/MockHttpResources.py | 1 | 2438 | # Utilities to mock HTTP resources for testing.
#
# with MockHttpFileResources(baseuri, path):
#     # test code here
# or
# with MockHttpDictResources(baseuri, resourcedict):
#     # test code here
#
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import urllib
import httpretty
import ScanDirectories
from FileMimeTypes import FileMimeTypes
FileType_MimeType = dict([ (ft,ct) for (ct, fts) in FileMimeTypes
for ft in fts ])
def HttpContentType(filename):
fsplit = filename.rsplit(".", 1)
if len(fsplit) == 2 and fsplit[1] in FileType_MimeType:
return FileType_MimeType[fsplit[1]]
return "application/octet-stream"
class MockHttpFileResources(object):
def __init__(self, baseuri, path):
self._baseuri = baseuri
self._path = path
return
def __enter__(self):
httpretty.enable()
# register stuff...
refs = ScanDirectories.CollectDirectoryContents(self._path, baseDir=self._path,
listDirs=False, listFiles=True, recursive=True)
for r in refs:
ru = self._baseuri + urllib.pathname2url(r)
rt = HttpContentType(r)
with open(self._path+r, 'r') as cf:
httpretty.register_uri(httpretty.GET, ru, status=200, content_type=rt,
body=cf.read())
httpretty.register_uri(httpretty.HEAD, ru, status=200, content_type=rt)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
suppress_exc = False
httpretty.disable()
return suppress_exc
class MockHttpDictResources(object):
def __init__(self, baseuri, resourcedict):
self._baseuri = baseuri
self._dict = resourcedict
return
def __enter__(self):
httpretty.enable()
# register stuff...
for r in self._dict.keys():
ru = self._baseuri + r
rt = HttpContentType(r)
httpretty.register_uri(httpretty.GET, ru, status=200, content_type=rt,
body=self._dict[r])
httpretty.register_uri(httpretty.HEAD, ru, status=200, content_type=rt)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
suppress_exc = False
httpretty.disable()
return suppress_exc
# End.
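# Illustrative usage sketch (an addition for clarity, not part of the original
# module); the base URI and resource body are invented for the example. It
# assumes httpretty's socket patching lets urllib2 see the registered URIs,
# which is what the classes above rely on.
if __name__ == "__main__":
    import urllib2
    with MockHttpDictResources("http://example.org/", {"data/test.txt": "hello"}):
        body = urllib2.urlopen("http://example.org/data/test.txt").read()
        print "Mocked response:", body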
| mit | 6,390,194,522,604,970,000 | 30.25641 | 88 | 0.599262 | false |
drdangersimon/lomb_scargle | numpy_imp.py | 1 | 2279 | import numpy as np
import numexpr as ne
'''Numpy implementation of the Lomb-Scargle periodogram'''
def lombscargle_num(x, y, freqs):
# Check input sizes
if x.shape[0] != y.shape[0]:
raise ValueError("Input arrays do not have the same size.")
# Create empty array for output periodogram
pgram = np.empty(freqs.shape[0], dtype=np.float64)
for i in xrange(freqs.shape[0]):
c = np.cos(freqs[i] * x)
s = np.sin(freqs[i] * x)
xc = np.sum(y * c)
xs = np.sum(y * s)
cc = np.sum(c**2)
ss = np.sum(s**2)
cs = np.sum(c * s)
tau = np.math.atan2(2 * cs, cc - ss) / (2 * freqs[i])
c_tau = np.cos(freqs[i] * tau)
s_tau = np.sin(freqs[i] * tau)
c_tau2 = c_tau * c_tau
s_tau2 = s_tau * s_tau
cs_tau = 2 * c_tau * s_tau
pgram[i] = 0.5 * (((c_tau * xc + s_tau * xs)**2 / \
(c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) + \
((c_tau * xs - s_tau * xc)**2 / \
(c_tau2 * ss - cs_tau * cs + s_tau2 * cc)))
return pgram
def lombscargle_ne(x, y, freqs):
    '''Uses numexpr to evaluate the numpy expressions.'''
# Check input sizes
if x.shape[0] != y.shape[0]:
raise ValueError("Input arrays do not have the same size.")
# Create empty array for output periodogram
pgram = np.empty(freqs.shape[0], dtype=np.float64)
for i in xrange(freqs.shape[0]):
f = freqs[i]
c = ne.evaluate('cos(f * x)')
s = ne.evaluate('sin(f * x)')
xc = ne.evaluate('sum(y * c)')
xs = ne.evaluate('sum(y * s)')
cc = ne.evaluate('sum(c**2)')
ss = ne.evaluate('sum(s**2)')
cs = ne.evaluate('sum(c * s)')
tau = ne.evaluate('arctan2(2 * cs, cc - ss) / (2. * f)')
c_tau = ne.evaluate('cos(f * tau)')
s_tau = ne.evaluate('sin(f * tau)')
c_tau2 = ne.evaluate('c_tau * c_tau')
s_tau2 = ne.evaluate('s_tau * s_tau')
cs_tau = ne.evaluate('2 * c_tau * s_tau')
pgram[i] = ne.evaluate('''0.5 * (((c_tau * xc + s_tau * xs)**2 /
(c_tau2 * cc + cs_tau * cs + s_tau2 * ss)) +
((c_tau * xs - s_tau * xc)**2 /
(c_tau2 * ss - cs_tau * cs + s_tau2 * cc)))''')
return pgram
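# Quick comparison sketch (an illustrative addition, not part of the original
# module): the sampling times, test signal and frequency grid below are
# arbitrary example values used to check that both implementations agree.
if __name__ == '__main__':
    t = np.sort(np.random.uniform(0, 10, 200))   # unevenly sampled times
    y = np.sin(9.4 * t)                          # sinusoid, angular frequency ~9.4
    freqs = np.linspace(0.1, 20, 500)            # angular frequencies to scan
    p_np = lombscargle_num(t, y, freqs)
    p_ne = lombscargle_ne(t, y, freqs)
    print 'max abs difference:', np.abs(p_np - p_ne).max()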
| mit | -4,128,462,303,800,078,000 | 31.557143 | 73 | 0.48925 | false |
x-web/social-network-user-influence-analysis | aminer/cralwer.py | 1 | 1916 | #!/usr/bin/python
# coding: utf-8
# crawling aminer.org data
__author__ = "x-web"
import sys
import urllib2
import json
import time
def fetchUser(start = 0, offset = 99, limit = 100, sleeptime = 3):
fwriter = open('user.json', 'w')
api = 'https://api.aminer.org/api/rank/person/h_index/'
errortime = 0
count = 1
while (start < limit):
curapi = api + str(start) + '/' + str(offset)
print 'fetch ' + curapi
try:
response = urllib2.urlopen(urllib2.Request(curapi))
data = response.read()
fwriter.write(data + "\n")
start = start + offset + 1
print str(count) + ' ok!'
except:
print str(count) + ' error!'
errortime += 1
if errortime > 3:
start = start + offset + 1
time.sleep(sleeptime)
count += 1
fwriter.close()
return
def fetchPub(sleeptime = 3, skip = 0):
freader = open('user.json', 'r')
fwriter = open('publication.json', 'w')
count = 0
for raw_data in freader:
json_array = json.loads(raw_data)
for user in json_array:
count += 1
            if count <= skip:
                print 'skip ' + str(count)
continue
uid = user['id']
n_pubs = user['n_pubs']
api = 'https://api.aminer.org/api/person/pubs/' + str(uid) +'/all/year/0/' + str(n_pubs)
print 'fetch ' + api
try:
response = urllib2.urlopen(urllib2.Request(api))
data = response.read()
fwriter.write(data + "\n")
print str(count) + ' ok!'
except:
print str(count) + ' error!'
time.sleep(sleeptime)
freader.close()
fwriter.close()
return
if __name__ == '__main__':
# fetchUser(limit = 10000)
fetchPub()
print 'Done!'
| mit | 1,649,795,766,065,398,500 | 27.597015 | 100 | 0.505219 | false |
abinit/abinit | tests/paral/__init__.py | 1 | 1109 | """Global variables associated with the test suite."""
#: List of CPP variables that should be defined in config.h in order to enable this suite.
need_cpp_vars = [
]
#: List of keywords that are automatically added to all the tests of this suite.
keywords = [
]
#: This suite contains tests executed with different numbers of MPI processes.
is_multi_parallel = True
#: List of input files
inp_files = [
"t01.abi",
"t02.abi",
"t03.abi",
"t05.abi",
"t06.abi",
"t07.abi",
"t08.abi",
"t09.abi",
"t21.abi",
"t22.abi",
"-t23.abi", # disabled
"t24.abi",
"t25.abi",
"t26.abi",
"t27.abi",
"t28.abi",
"t29.abi",
"t30.abi",
"t31.abi",
"t32.abi",
"t41.abi",
"t51.abi",
"t52.abi",
"t53.abi",
"t54.abi",
"t55.abi",
"t56.abi",
"t57.abi",
"-t58.abi", # disabled
"t59.abi",
"t60.abi",
"-t61.abi", # disabled for now
"t62.abi",
"t63.abi",
"t64.abi",
"t71.abi",
"t72.abi",
"t73.abi",
"t74.abi",
"t75.abi",
"t76.abi",
"t77.abi",
"t80.abi",
"t81.abi",
"t82.abi",
"t83.abi",
"t84.abi",
"-t85.abi", # disabled for now
"t86.abi",
"t91.abi",
"t92.abi",
"t93.abi",
"t94.abi",
"t95.abi",
"t96.abi",
"t97.abi",
"t98.abi",
"t99.abi"
]
| gpl-3.0 | 127,647,302,140,744,350 | 13.786667 | 90 | 0.622182 | false |
CodeScaleInc/log4django | log4django/views/logrecord/__init__.py | 1 | 2055 | from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from ...models import LogRecord, App
from ...settings import PAGE_SIZE
from ...decorators import authenticate
from .. import _filter_records
class LogRecordList(TemplateView):
template_name = 'log4django/bootstrap/logrecord/list.html'
http_method_names = ('get',)
@method_decorator(authenticate())
def get(self, request, *args, **kwargs):
logrecord_qs = _filter_records(request)
paginator = Paginator(logrecord_qs, PAGE_SIZE)
page = request.GET.get('page', None)
try:
records = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
records = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
records = paginator.page(paginator.num_pages)
# Getting filtering values.
apps = App.objects.all()
loggers = set([r.loggerName for r in records])
levels = LogRecord.LEVEL
return self.render_to_response(dict(
records=records, apps=apps, loggers=loggers, levels=levels,
filter_levels=[int(l) for l in request.GET.getlist('level')]
))
class LogRecordDetail(TemplateView):
template_name = 'log4django/bootstrap/logrecord/detail.html'
http_method_names = ('get',)
@method_decorator(authenticate())
def get(self, request, logrecord_id=None):
record = get_object_or_404(LogRecord, pk=logrecord_id)
related = None
if record.request_id:
related = LogRecord.objects.filter(
Q(request_id=record.request_id)
& ~Q(pk=record.pk)
)
return self.render_to_response(dict(
record=record, related=related
))
| bsd-3-clause | 2,427,831,677,460,595,000 | 35.052632 | 80 | 0.650122 | false |
amonmoce/corba_examples | omniORBpy-4.2.1/build/python/COS/CosTypedNotifyComm_idl.py | 1 | 6759 | # Python stubs generated by omniidl from /usr/local/share/idl/omniORB/COS/CosTypedNotifyComm.idl
# DO NOT EDIT THIS FILE!
import omniORB, _omnipy
from omniORB import CORBA, PortableServer
_0_CORBA = CORBA
_omnipy.checkVersion(4,2, __file__, 1)
try:
property
except NameError:
def property(*args):
return None
# #include "CosNotification.idl"
import CosNotification_idl
_0_CosNotification = omniORB.openModule("CosNotification")
_0_CosNotification__POA = omniORB.openModule("CosNotification__POA")
# #include "CosEventComm.idl"
import CosEventComm_idl
_0_CosEventComm = omniORB.openModule("CosEventComm")
_0_CosEventComm__POA = omniORB.openModule("CosEventComm__POA")
# #include "CosNotifyComm.idl"
import CosNotifyComm_idl
_0_CosNotifyComm = omniORB.openModule("CosNotifyComm")
_0_CosNotifyComm__POA = omniORB.openModule("CosNotifyComm__POA")
# #include "CosNotifyFilter.idl"
import CosNotifyFilter_idl
_0_CosNotifyFilter = omniORB.openModule("CosNotifyFilter")
_0_CosNotifyFilter__POA = omniORB.openModule("CosNotifyFilter__POA")
# #include "CosEventChannelAdmin.idl"
import CosEventChannelAdmin_idl
_0_CosEventChannelAdmin = omniORB.openModule("CosEventChannelAdmin")
_0_CosEventChannelAdmin__POA = omniORB.openModule("CosEventChannelAdmin__POA")
# #include "CosNotifyChannelAdmin.idl"
import CosNotifyChannelAdmin_idl
_0_CosNotifyChannelAdmin = omniORB.openModule("CosNotifyChannelAdmin")
_0_CosNotifyChannelAdmin__POA = omniORB.openModule("CosNotifyChannelAdmin__POA")
# #include "CosTypedEventComm.idl"
import CosTypedEventComm_idl
_0_CosTypedEventComm = omniORB.openModule("CosTypedEventComm")
_0_CosTypedEventComm__POA = omniORB.openModule("CosTypedEventComm__POA")
#
# Start of module "CosTypedNotifyComm"
#
__name__ = "CosTypedNotifyComm"
_0_CosTypedNotifyComm = omniORB.openModule("CosTypedNotifyComm", r"/usr/local/share/idl/omniORB/COS/CosTypedNotifyComm.idl")
_0_CosTypedNotifyComm__POA = omniORB.openModule("CosTypedNotifyComm__POA", r"/usr/local/share/idl/omniORB/COS/CosTypedNotifyComm.idl")
# interface TypedPushConsumer
_0_CosTypedNotifyComm._d_TypedPushConsumer = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosTypedNotifyComm/TypedPushConsumer:1.0", "TypedPushConsumer")
omniORB.typeMapping["IDL:omg.org/CosTypedNotifyComm/TypedPushConsumer:1.0"] = _0_CosTypedNotifyComm._d_TypedPushConsumer
_0_CosTypedNotifyComm.TypedPushConsumer = omniORB.newEmptyClass()
class TypedPushConsumer (_0_CosTypedEventComm.TypedPushConsumer, _0_CosNotifyComm.NotifyPublish):
_NP_RepositoryId = _0_CosTypedNotifyComm._d_TypedPushConsumer[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosTypedNotifyComm.TypedPushConsumer = TypedPushConsumer
_0_CosTypedNotifyComm._tc_TypedPushConsumer = omniORB.tcInternal.createTypeCode(_0_CosTypedNotifyComm._d_TypedPushConsumer)
omniORB.registerType(TypedPushConsumer._NP_RepositoryId, _0_CosTypedNotifyComm._d_TypedPushConsumer, _0_CosTypedNotifyComm._tc_TypedPushConsumer)
# TypedPushConsumer object reference
class _objref_TypedPushConsumer (_0_CosTypedEventComm._objref_TypedPushConsumer, _0_CosNotifyComm._objref_NotifyPublish):
_NP_RepositoryId = TypedPushConsumer._NP_RepositoryId
def __init__(self, obj):
_0_CosTypedEventComm._objref_TypedPushConsumer.__init__(self, obj)
_0_CosNotifyComm._objref_NotifyPublish.__init__(self, obj)
omniORB.registerObjref(TypedPushConsumer._NP_RepositoryId, _objref_TypedPushConsumer)
_0_CosTypedNotifyComm._objref_TypedPushConsumer = _objref_TypedPushConsumer
del TypedPushConsumer, _objref_TypedPushConsumer
# TypedPushConsumer skeleton
__name__ = "CosTypedNotifyComm__POA"
class TypedPushConsumer (_0_CosTypedEventComm__POA.TypedPushConsumer, _0_CosNotifyComm__POA.NotifyPublish):
_NP_RepositoryId = _0_CosTypedNotifyComm.TypedPushConsumer._NP_RepositoryId
_omni_op_d = {}
_omni_op_d.update(_0_CosTypedEventComm__POA.TypedPushConsumer._omni_op_d)
_omni_op_d.update(_0_CosNotifyComm__POA.NotifyPublish._omni_op_d)
TypedPushConsumer._omni_skeleton = TypedPushConsumer
_0_CosTypedNotifyComm__POA.TypedPushConsumer = TypedPushConsumer
omniORB.registerSkeleton(TypedPushConsumer._NP_RepositoryId, TypedPushConsumer)
del TypedPushConsumer
__name__ = "CosTypedNotifyComm"
# interface TypedPullSupplier
_0_CosTypedNotifyComm._d_TypedPullSupplier = (omniORB.tcInternal.tv_objref, "IDL:omg.org/CosTypedNotifyComm/TypedPullSupplier:1.0", "TypedPullSupplier")
omniORB.typeMapping["IDL:omg.org/CosTypedNotifyComm/TypedPullSupplier:1.0"] = _0_CosTypedNotifyComm._d_TypedPullSupplier
_0_CosTypedNotifyComm.TypedPullSupplier = omniORB.newEmptyClass()
class TypedPullSupplier (_0_CosTypedEventComm.TypedPullSupplier, _0_CosNotifyComm.NotifySubscribe):
_NP_RepositoryId = _0_CosTypedNotifyComm._d_TypedPullSupplier[1]
def __init__(self, *args, **kw):
raise RuntimeError("Cannot construct objects of this type.")
_nil = CORBA.Object._nil
_0_CosTypedNotifyComm.TypedPullSupplier = TypedPullSupplier
_0_CosTypedNotifyComm._tc_TypedPullSupplier = omniORB.tcInternal.createTypeCode(_0_CosTypedNotifyComm._d_TypedPullSupplier)
omniORB.registerType(TypedPullSupplier._NP_RepositoryId, _0_CosTypedNotifyComm._d_TypedPullSupplier, _0_CosTypedNotifyComm._tc_TypedPullSupplier)
# TypedPullSupplier object reference
class _objref_TypedPullSupplier (_0_CosTypedEventComm._objref_TypedPullSupplier, _0_CosNotifyComm._objref_NotifySubscribe):
_NP_RepositoryId = TypedPullSupplier._NP_RepositoryId
def __init__(self, obj):
_0_CosTypedEventComm._objref_TypedPullSupplier.__init__(self, obj)
_0_CosNotifyComm._objref_NotifySubscribe.__init__(self, obj)
omniORB.registerObjref(TypedPullSupplier._NP_RepositoryId, _objref_TypedPullSupplier)
_0_CosTypedNotifyComm._objref_TypedPullSupplier = _objref_TypedPullSupplier
del TypedPullSupplier, _objref_TypedPullSupplier
# TypedPullSupplier skeleton
__name__ = "CosTypedNotifyComm__POA"
class TypedPullSupplier (_0_CosTypedEventComm__POA.TypedPullSupplier, _0_CosNotifyComm__POA.NotifySubscribe):
_NP_RepositoryId = _0_CosTypedNotifyComm.TypedPullSupplier._NP_RepositoryId
_omni_op_d = {}
_omni_op_d.update(_0_CosTypedEventComm__POA.TypedPullSupplier._omni_op_d)
_omni_op_d.update(_0_CosNotifyComm__POA.NotifySubscribe._omni_op_d)
TypedPullSupplier._omni_skeleton = TypedPullSupplier
_0_CosTypedNotifyComm__POA.TypedPullSupplier = TypedPullSupplier
omniORB.registerSkeleton(TypedPullSupplier._NP_RepositoryId, TypedPullSupplier)
del TypedPullSupplier
__name__ = "CosTypedNotifyComm"
#
# End of module "CosTypedNotifyComm"
#
__name__ = "CosTypedNotifyComm_idl"
_exported_modules = ( "CosTypedNotifyComm", )
# The end.
| mit | 2,434,784,445,631,023,000 | 41.778481 | 152 | 0.79065 | false |
astrotuvi/pyplate | tests/test_db_pgconn.py | 1 | 1766 | import os
import sys
import re
import psycopg2
from collections import OrderedDict
tests_dir = os.path.dirname(__file__)
root_dir = os.path.abspath(os.path.join(tests_dir, '..'))
sys.path.insert(0, root_dir)
from pyplate.config.local import SCHEMAFILE, RDBMS, PGHOST, PGPORT, PGUSER, PGDATABASE, PGPASSWD
from pyplate.db_pgsql import PlateDB
def _precord(rec):
print("\n")
for r in rec:
row = ','.join(['{}'.format(k)
for k in r])
print(row)
print("\n")
## main ##
pdb = PlateDB()
pdb.open_connection(host=PGHOST,port=PGPORT,user=PGUSER,password=PGPASSWD,database=PGDATABASE)
print(pdb.database,'\n')
tbl='applause_dr4.archive'
sx = '*'
# try insert (only works once; perhaps 'not null' on the timestamp columns is too restrictive?)
## throws an error if run twice
cols="archive_id,archive_name,institute,timestamp_insert,timestamp_update"
vals= [None] * 5
vals[0]= "1000,'test_2dr4','aip_test',make_timestamp(2020,5,1,1,2,23.0),make_timestamp(2020,5,1,1,2,23.1)"
vals[1]= "1001,'test_1dr4','aip_test',make_timestamp(2020,5,2,1,2,23.0),make_timestamp(2020,5,1,2,2,23.1)"
vals[2]= "1002,'test_1dr4','aip_test',make_timestamp(2020,5,2,1,2,23.0),make_timestamp(2020,5,1,2,2,23.1)"
vals[3]= "1003,'test_dr4','aip_test',make_timestamp(2020,5,1,1,2,23.0),make_timestamp(2020,5,1,1,2,23.1)"
vals[4]= "1004,'test_dr4','aip_test',make_timestamp(2020,5,4,1,2,23.0),Null"
for v in vals:
qry2 = ("INSERT INTO %s (%s) VALUES(%s);" % (tbl,cols,v))
nrow = pdb.execute_query(qry2)
# try select
qry3 = ("SELECT %s FROM %s where archive_id > 1000;" % (sx, tbl))
nrow = pdb.execute_query(qry3)
print(nrow)
rec = pdb.cursor.fetchall()
if(rec):
_precord(rec)
print(pdb.dbversion,'\n')
pdb.close_connection()
| apache-2.0 | -6,782,719,617,402,637,000 | 31.703704 | 106 | 0.664779 | false |
XDocker/Engine | xdocker/job/views.py | 1 | 3609 | from flask import Blueprint
from flask.ext.login import current_user, login_required
from ..helpers import check_args, make_response
from .helpers import get_job_log, get_job_status
from ..app_exceptions import PermissionDenied
job = Blueprint('job', __name__)
@job.route("/getLog/<job_id>", methods=["POST"])
@login_required
def get_log(job_id):
"""Get log for job
**Example request**
.. sourcecode:: http
POST /getLog/<job_id> HTTP/1.1
{
"token": "<token>",
"line_num": 10
}
**Example response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Encoding: gzip
Content-Type: application/json
Server: nginx/1.1.19
Vary: Accept-Encoding
{
"status": "OK",
"log": "<log lines>"
}
:jsonparam string token: Authentication token
    :jsonparam integer line_num: Number of log lines to return (max 100, default 10)
:statuscode 200: no error
:statuscode 401: not authorized
:>json string log: Last logs
"""
data = check_args()
log = get_job_log(data['username'], job_id)
return make_response(log=log)
@job.route("/getDeploymentStatus/<job_id>", methods=["POST"])
@login_required
def job_status(job_id):
"""Get job status
**Example request**
.. sourcecode:: http
POST /getDeploymentStatus/<job_id> HTTP/1.1
{
"token": "<token>"
}
**Example response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Encoding: gzip
Content-Type: application/json
Server: nginx/1.1.19
Vary: Accept-Encoding
{
"status": "OK",
"job_status": "Completed"
}
:jsonparam string token: Authentication token
:statuscode 200: no error
:statuscode 401: not authorized
:>json string job_status: Job status
"""
res_dict = get_job_status(job_id)
return make_response(**res_dict)
@job.route("/getStatusOfAllDeployments", methods=["POST"])
@login_required
def get_all_deployments():
"""Get job ids
**Example request**
.. sourcecode:: http
POST /getStatusOfAllDeployments HTTP/1.1
{
"token": "<token>",
}
**Example response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"status": "OK",
"jobs": [
{
"job_id": "<job-id>",
"fail": true,
"fail_code": "BadPort",
"fail_message": "Wrong port: 20,",
"result": null,
"job_status": "failed"
}
]
}
:jsonparam string token: Authentication token
:statuscode 200: no error
:statuscode 401: not authorized
    :>json array jobs: Statuses of the user's jobs
    :>json string jobs.job_status: Status of the user's jobs (failed, Completed, started, null)
:>json boolean jobs.fail: whether it failed
:>json any jobs.result: job result
:>json string jobs.fail_code: fail code if failed
:>json string jobs.job_id: Job id
:>json string jobs.fail_message: fail message if failed
"""
statuses = []
for job_id in current_user.jobs:
try:
res_dict = get_job_status(job_id)
except PermissionDenied:
continue
if res_dict['job_status'] is None:
continue
res_dict['job_id'] = job_id
statuses.append(res_dict)
return make_response(jobs=statuses)
| apache-2.0 | 3,459,422,245,718,154,000 | 23.385135 | 90 | 0.563037 | false |
openstack/storlets | tests/unit/sbus/client/test_client.py | 1 | 6192 | # Copyright (c) 2015-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import os
import unittest
import errno
from contextlib import contextmanager
from storlets.sbus.client.exceptions import SBusClientSendError, \
SBusClientMalformedResponse
from storlets.sbus.client import SBusClient
@contextmanager
def _mock_sbus(send_status=0):
with mock.patch('storlets.sbus.client.client.SBus.send') as fake_send:
fake_send.return_value = send_status
yield
@contextmanager
def _mock_os_pipe(bufs):
class FakeFd(object):
def __init__(self, rbuf=''):
self.rbuf = rbuf
self.closed = False
def read(self, size):
size = min(len(self.rbuf), size)
ret = self.rbuf[:size]
self.rbuf = self.rbuf[size:]
return ret
def close(self):
if self.closed:
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
self.closed = True
def fake_os_read(fd, size):
return fd.read(size)
def fake_os_close(fd):
fd.close()
pipes = [(FakeFd(buf), FakeFd()) for buf in bufs]
pipe_generator = iter(pipes)
def mock_os_pipe():
try:
return next(pipe_generator)
except StopIteration:
raise AssertionError('pipe called more than expected')
with mock.patch('storlets.sbus.client.client.os.pipe', mock_os_pipe), \
mock.patch('storlets.sbus.client.client.os.read', fake_os_read), \
mock.patch('storlets.sbus.client.client.os.close', fake_os_close):
yield pipes
class TestSBusClient(unittest.TestCase):
def setUp(self):
self.pipe_path = 'pipe_path'
self.client = SBusClient(self.pipe_path, 4)
def test_parse_response(self):
raw_resp = json.dumps({'status': True, 'message': 'OK'})
resp = self.client._parse_response(raw_resp)
self.assertTrue(resp.status)
self.assertEqual('OK', resp.message)
self.assertIsNone(resp.task_id)
raw_resp = json.dumps({'status': True, 'message': 'OK',
'task_id': 'SOMEID'})
resp = self.client._parse_response(raw_resp)
self.assertTrue(resp.status)
self.assertEqual('OK', resp.message)
self.assertEqual('SOMEID', resp.task_id)
raw_resp = json.dumps({'status': False, 'message': 'ERROR'})
resp = self.client._parse_response(raw_resp)
self.assertFalse(resp.status)
self.assertEqual('ERROR', resp.message)
self.assertIsNone(resp.task_id)
raw_resp = json.dumps({'status': True, 'message': 'Sample:Message'})
resp = self.client._parse_response(raw_resp)
self.assertTrue(resp.status)
self.assertEqual('Sample:Message', resp.message)
self.assertIsNone(resp.task_id)
with self.assertRaises(SBusClientMalformedResponse):
self.client._parse_response('Foo')
raw_resp = json.dumps({'status': True})
with self.assertRaises(SBusClientMalformedResponse):
self.client._parse_response(raw_resp)
raw_resp = json.dumps({'message': 'foo'})
with self.assertRaises(SBusClientMalformedResponse):
self.client._parse_response(raw_resp)
def _check_all_pipes_closed(self, pipes):
# Make sure that pipes are not empty
self.assertGreater(len(pipes), 0)
for _pipe in pipes:
self.assertTrue(_pipe[0].closed)
self.assertTrue(_pipe[1].closed)
def _test_service_request(self, method, *args, **kwargs):
raw_resp = json.dumps(
{'status': True, 'message': 'OK'}).encode("utf-8")
with _mock_os_pipe([raw_resp]) as pipes, _mock_sbus(0):
resp = method(*args, **kwargs)
self.assertTrue(resp.status)
self.assertEqual('OK', resp.message)
self._check_all_pipes_closed(pipes)
raw_resp = json.dumps(
{'status': False, 'message': 'ERROR'}).encode("utf-8")
with _mock_os_pipe([raw_resp]) as pipes, _mock_sbus(0):
resp = method(*args, **kwargs)
self.assertFalse(resp.status)
self.assertEqual('ERROR', resp.message)
self._check_all_pipes_closed(pipes)
raw_resp = json.dumps(
{'status': True, 'message': 'OK'}).encode("utf-8")
with _mock_os_pipe([raw_resp]) as pipes, _mock_sbus(-1):
with self.assertRaises(SBusClientSendError):
method(*args, **kwargs)
self._check_all_pipes_closed(pipes)
# TODO(takashi): Add IOError case
with _mock_os_pipe([b'Foo']) as pipes, _mock_sbus(0):
with self.assertRaises(SBusClientMalformedResponse):
method(*args, **kwargs)
self._check_all_pipes_closed(pipes)
def test_ping(self):
self._test_service_request(self.client.ping)
def test_start_daemon(self):
self._test_service_request(
self.client.start_daemon, 'java', 'path/to/storlet',
'storleta', 'path/to/uds', 'INFO', '10', '11')
def test_stop_daemon(self):
self._test_service_request(self.client.stop_daemon, 'storleta')
def test_stop_daemons(self):
self._test_service_request(self.client.stop_daemons)
def test_halt(self):
self._test_service_request(self.client.halt)
def test_daemon_status(self):
self._test_service_request(self.client.daemon_status, 'storleta')
def test_cancel(self):
self._test_service_request(self.client.cancel, 'taskid')
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,656,226,593,670,562,600 | 33.4 | 78 | 0.619186 | false |
TRManderson/petersen | petersen/app/users/__init__.py | 1 | 1244 | import flask
from flask import request, abort
from petersen.app.base import app
from petersen.models import User, UserBadge, Tag, needs_db
from sqlalchemy import or_
@app.route('/users', methods=['GET'])
@needs_db
def user_filter(db_session):
data = request.args
if data is None:
abort(400)
filters = []
for (k, v) in data.items():
if k == 'name':
filters.append(
User.name.like("%{}%".format(v))
)
elif k == 'tags':
filters.append(
or_(
*[
Tag.tag == t
for t in v.split(',')
]
)
)
elif k == 'badges':
filters.append(
or_(
*[
UserBadge.badge_id == t
for t in v.split(',')
]
)
)
else:
abort(400)
peeps = db_session.query(
User
).join(UserBadge, Tag).filter(
*filters
)
    resp = [
        p.to_json()
        for p in peeps
    ]
return flask.jsonify(**{
'users': resp
})
| mit | -2,760,935,717,966,158,300 | 20.084746 | 58 | 0.39791 | false |
migonzalvar/mfs2011-practicum-saas | server/server.py | 1 | 13043 | """HTTP REST API server. """
from collections import OrderedDict
import calendar
import datetime
import json
import time
import pytz
from bottle import Bottle, run, request, response, debug, HTTPResponse
from agenda import ds, RedisDatastore, AgendaController, ShiftNotEmptyError, NotAvailableSlotError
# Settings
AUTH = ("user", "s3cr3ts3cr3t")
DEFAULT_TZ = "Europe/Madrid"
DEFAULT_PATH = "http://localhost:8008/agendas/shifts/%s"
UTCTIMEFORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOCALTIMEFORMAT = "%Y-%m-%d %H:%M:%S"
# Converters
def epoch(a_dtstring):
tt = time.strptime(a_dtstring, UTCTIMEFORMAT)
return int(calendar.timegm(tt))
def today(context=None):
context = context or {}
zone = context.get("zone", DEFAULT_TZ)
localtz = pytz.timezone(zone)
dt = datetime.date.today()
dt = datetime.datetime.combine(dt, datetime.time(0))
dt = dt.replace(tzinfo=localtz).astimezone(pytz.utc)
return calendar.timegm(dt.timetuple())
def tomorrow(context=None):
context = context or {}
zone = context.get("zone", DEFAULT_TZ)
localtz = pytz.timezone(zone)
dt = datetime.date.today() + datetime.timedelta(days=1)
dt = datetime.datetime.combine(dt, datetime.time(0))
dt = dt.replace(tzinfo=localtz).astimezone(pytz.utc)
return calendar.timegm(dt.timetuple())
def epoch2datetime(an_epoch):
return datetime.datetime.utcfromtimestamp(an_epoch).replace(tzinfo=pytz.utc)
def filter_request(form_dict, name, to_python, default=None):
try:
return to_python(form_dict[name])
except KeyError:
return None
except (ValueError, TypeError):
return default
# Authentication
def require_authentication(fn):
def new_function(*args, **kwargs):
if request.auth == AUTH:
return fn(*args, **kwargs)
else:
return render_to_error(403, "Incorrect credentials.")
return new_function
# Helpers
def dict_to_response(items, status=200, headers=None):
"""Returns a HTTPResponse with ``items`` as a JSON.
    The parameter ``items`` could be a mapping or a sequence, a container
that supports iteration, or an iterator object. The elements of the
argument must each also be of one of those kinds, and each must in
turn contain exactly two objects. The first is used as a key in the
new dictionary, and the second as the key's value.
"""
headers = headers or {}
payload = OrderedDict(items)
payload["status"] = status
output = json.dumps(payload, indent=2)
for key, value in headers.iteritems():
response.headers[key] = value
response.set_header('Content-Type', 'application/json')
headers = {'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*', }
return HTTPResponse(status=status, body=output, **headers)
def render_epoch(an_epoch, context=None):
context = context or {}
zone = context.get("zone", DEFAULT_TZ)
localtz = pytz.timezone(zone)
dt = epoch2datetime(an_epoch)
return OrderedDict((
("datetime", dt.strftime(UTCTIMEFORMAT)),
("timestamp", an_epoch),
("localtime", dt.astimezone(localtz).strftime(LOCALTIMEFORMAT)),
("timezone", zone)
))
def render_shift(shift, context=None):
context = context or {}
return OrderedDict((
("kind", "shift"),
("id", shift.key),
("name", "Testing"),
("href", context.get("href", "")),
("start", render_epoch(shift.interval.start, context)),
("end", render_epoch(shift.interval.end, context))
))
def render_shifts(shifts, context=None):
context = context or {}
return OrderedDict((
("kind", "shifts"),
("shifts", [render_shift(shift, context) for shift in shifts])
))
def render_agenda(agenda, context=None):
context = context or {}
return OrderedDict((
("id", agenda.key),
("name", "Testing"),
))
def render_slot(slot, context=None):
context = context or {}
path = context.get("path", DEFAULT_PATH)
return OrderedDict((
("kind", "freeslot"),
("href", path),
("start", render_epoch(slot.start, context)),
("end", render_epoch(slot.end, context))
))
def render_slots(slots, context=None):
context = context or {}
return OrderedDict((
("kind", "freeslots"),
("freeslots", [render_slot(slot, context) for slot in slots])
))
def render_appointments(appos, context=None):
context = context or {}
return OrderedDict((
("kind", "appointments"),
("appointments", [render_appointment(appo, context) for appo in appos])
))
def render_appointment(appo, context=None):
context = context or {}
return OrderedDict((
("kind", "appointment"),
("id", appo.key),
("shift_id", appo.parent_key),
("href", context.get("href", "")),
("start", render_epoch(appo.interval.start, context)),
("end", render_epoch(appo.interval.end, context))
))
def render_to_error(status, message):
headers = {'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*', }
output = json.dumps({"status": status, "message": message})
return HTTPResponse(status=status, body=output, **headers)
# Shortcuts
def get_agenda_or_404(aid):
try:
agenda = AgendaController(aid)
except KeyError:
raise render_to_error(404, "Agenda was not found.")
return agenda
class Context(dict):
def __init__(self, request, *args, **kwargs):
super(Context, self).__init__(*args, **kwargs)
self.process_request(request)
def __getattr__(self, key):
return self.__getitem__(key)
def __setattr__(self, key, value):
return self.__setitem__(key, value)
def process_request(self, request):
(scheme, host, _, _, _) = request.urlparts
self.update(url=scheme + "://" + host)
def error404(error):
return render_to_error(error.status, "The URL is not found ")
def error500(error):
return render_to_error(error.status, "Internal error.")
# Routes
def options():
response.headers["Allow"] = "GET,HEAD,POST,OPTIONS,DELETE"
return HTTPResponse(status=200, output="")
def test():
"""Test the server.
Echoes string and integer and converts a string datetime to epoch."""
if request.method == "GET":
my_string = filter_request(request.query, 'string', str)
my_integer = filter_request(request.query, 'integer', int)
my_epoch = filter_request(request.query, 'datetime', epoch)
return dict_to_response(dict(string=my_string, integer=my_integer, epoch=my_epoch))
elif request.method == "POST":
my_string = filter_request(request.forms, 'string', str)
my_integer = filter_request(request.forms, 'integer', int)
my_epoch = filter_request(request.forms, 'datetime', epoch)
return dict_to_response(dict(string=my_string, integer=my_integer, epoch=my_epoch))
def test_post():
"""Test the server.
Echoes string and integer and converts a string datetime to epoch."""
if request.content_type == "application/x-www-form-urlencoded":
my_string = filter_request(request.forms, 'string', str)
my_integer = filter_request(request.forms, 'integer', int)
my_epoch = filter_request(request.forms, 'datetime', epoch)
d = dict(string=my_string, integer=my_integer, epoch=my_epoch)
elif request.content_type == "application/json":
d = request.json
# Query
d["query"] = filter_request(request.query, 'query', str)
return dict_to_response(d)
@require_authentication
def get_agenda(aid):
agenda = get_agenda_or_404(aid)
return dict_to_response(render_agenda(agenda))
@require_authentication
def post_agenda():
agenda = AgendaController()
context = Context(request)
path = "/agendas/{agenda}".format(agenda=agenda.key)
href = context.url + path
context["href"] = href
headers = {"Location": href}
return dict_to_response(render_agenda(agenda, context), 201, headers)
@require_authentication
def get_slots(aid):
length = filter_request(request.query, "length", int)
start_from = filter_request(request.query, "start", epoch)
start_until = filter_request(request.query, "end", epoch)
agenda = get_agenda_or_404(aid)
intervals = agenda.get_slots(length, start_from, start_until)
return dict_to_response(render_slots(intervals))
@require_authentication
def get_free_slots(aid):
length = filter_request(request.query, "length", int)
start = filter_request(request.query, "start", epoch) or today()
end = filter_request(request.query, "end", epoch) or tomorrow()
agenda = get_agenda_or_404(aid)
intervals = agenda.get_free_slots(start, end, length)
return dict_to_response(render_slots(intervals))
@require_authentication
def get_appointment(aid, app_id):
agenda = get_agenda_or_404(aid)
appo = agenda.get_appointment(app_id)
return dict_to_response(render_appointment(appo))
@require_authentication
def get_appointments(aid):
agenda = get_agenda_or_404(aid)
appos = agenda.get_appointments_itervalues()
return dict_to_response(render_appointments(appos))
@require_authentication
def delete_appointment(aid, app_id):
agenda = get_agenda_or_404(aid)
try:
agenda.del_appointment(app_id)
except KeyError:
return render_to_error(404, "Appointment was not found.")
return dict_to_response((), 204)
@require_authentication
def get_shifts(aid):
agenda = get_agenda_or_404(aid)
shifts = agenda.get_shifts_itervalues()
return dict_to_response(render_shifts(shifts))
@require_authentication
def get_shift(aid, sid):
agenda = get_agenda_or_404(aid)
shift = agenda.get_shift(sid)
return dict_to_response(render_shift(shift))
@require_authentication
def post_shift(aid):
start = filter_request(request.forms, "start", epoch)
end = filter_request(request.forms, "end", epoch)
if start == None or end == None:
return render_to_error(403, "Incorrect parameter value.")
agenda = get_agenda_or_404(aid)
shift = agenda.add_shift(start, end)
context = Context(request)
path = "/agendas/{aid}/shifts/{sid}".format(aid=aid, sid=shift.key)
href = context.url + path
context["href"] = href
headers = {"Location": href}
return dict_to_response(render_shift(shift, context), 201, headers)
@require_authentication
def delete_shift(aid, sid):
agenda = get_agenda_or_404(aid)
try:
_ = agenda.del_shift(sid)
except KeyError:
return render_to_error(404, "Shift %s was not found." % sid)
except ShiftNotEmptyError:
return render_to_error(409, "Shift %s is not empty. Please, first delete all appointments." % sid)
return dict_to_response((), 204)
@require_authentication
def post_appointment(aid):
start = filter_request(request.forms, "start", epoch)
end = filter_request(request.forms, "end", epoch)
    if start is None or end is None:
return render_to_error(400, "Incorrect parameter value.")
agenda = get_agenda_or_404(aid)
try:
appo = agenda.add_appointment(start, end)
except NotAvailableSlotError:
return render_to_error(409, "Appointment overlaps. Please, choose another slot.")
context = Context(request)
path = "/agendas/{agenda}/shifts/{shift}/appointments/{appo}".format(
agenda=aid, shift=appo.parent_key, appo=appo.key)
href = context.url + path
context["href"] = href
headers = {"Location": href}
return dict_to_response(render_appointment(appo, context), 201, headers)
def setup_routing(app):
app.route('/test', ['GET', ], test)
app.route('/test', ['POST', ], test_post)
app.route("/*", "OPTIONS", options)
    app.route('/agendas/<aid:int>', "GET", get_agenda)
app.route('/agendas', "POST", post_agenda)
app.route('/agendas/<aid:int>/shifts', "GET", get_shifts)
app.route('/agendas/<aid:int>/shifts/<sid:int>', "GET", get_shift)
app.route('/agendas/<aid:int>/shifts', "POST", post_shift)
app.route('/agendas/<aid:int>/shifts/<sid:int>', "DELETE", delete_shift)
app.route('/agendas/<aid:int>/appointments/<app_id:int>', "GET", get_appointment)
app.route('/agendas/<aid:int>/appointments', "GET", get_appointments)
app.route('/agendas/<aid:int>/appointments/<app_id:int>', "DELETE", delete_appointment)
app.route('/agendas/<aid:int>/appointments', "POST", post_appointment)
app.route('/agendas/<aid:int>/slots', "GET", get_slots)
app.route('/agendas/<aid:int>/freeslots', "GET", get_free_slots)
def setup_error_handling(app):
app.error_handler[404] = error404
app.error_handler[500] = error500
# Main
debug(True)
setattr(ds, 'datastore', RedisDatastore())
app = Bottle()
setup_routing(app)
setup_error_handling(app)
if __name__ == '__main__':
run(app, host='localhost', port=8008, reloader=True)
| isc | -3,317,249,630,812,671,000 | 29.19213 | 106 | 0.655064 | false |
ActiveState/code | recipes/Python/578976_Objectify_of_a_XML_node/recipe-578976.py | 1 | 3154 | """
Tool for converting an XML node into an object instance.
.. module:: objectify
:platform: Unix, Windows
:synopsis: providing conversion for XML nodes.
.. moduleauthor:: Thomas Lehmann
License
=======
Copyright (c) 2014 Thomas Lehmann
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import re
def objectify(node, attributes=None):
"""
Converting XML node into an object instance.
Taking the tag name with first letter as upper case
    as the name for generating a class derived from object
with the node attributes as fields and the values as 'default'.
>>> import xml.etree.ElementTree as ET
>>> document = ET.fromstring('<test-obj int-val="1" str-val="hello" float-val="1.23"/>')
>>> instance = objectify(document, {"object-id": "1234"})
>>> print(instance.__class__.__name__)
TestObj
>>> print(instance.object_id)
1234
>>> print(instance.int_val)
1
>>> print(instance.str_val)
hello
>>> print(instance.float_val)
1.23
:param node: xml node (from lxml.etree or xml.etree)
:param attributes: allows providing fields and default values
which might be overwritten by the XML node attributes.
:returns: instance with node attributes as fields
"""
def convert(attribute_value):
"""
Convert string to float or int were possible.
:param attribute_value: string value
        :return: depending on the regex match, a float, an int or the original string value.
"""
if re.match(r"\d+\.\d+", attribute_value):
return float(attribute_value)
if re.match(r"\d+", attribute_value):
return int(attribute_value)
return attribute_value
    if attributes is None:
attributes = {}
else:
attributes = (dict([(key.replace("-", "_"), convert(value))
for key, value in attributes.items()]))
attributes.update(dict([(key.replace("-", "_"), convert(value))
for key, value in node.attrib.items()]))
class_name = "".join([entry.title() for entry in node.tag.split("-")])
return type(class_name, (object,), attributes)()
| mit | 4,937,893,628,476,440,000 | 37 | 92 | 0.677235 | false |
sinnwerkstatt/landmatrix | apps/grid/views/filter.py | 1 | 25004 | from collections import OrderedDict
from datetime import datetime
from bootstrap3_datetime.widgets import DateTimePicker
from django import forms
from django.utils.translation import ugettext as _
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from apps.api.filters import Filter, PresetFilter
from apps.grid.fields import TitleField, YearMonthDateField
from apps.grid.forms.investor_form import (
OperationalCompanyForm,
ParentInvestorForm,
ParentStakeholderForm,
)
from apps.grid.views.browse_filter_conditions import (
get_activity_field_by_key,
get_investor_field_by_key,
)
from apps.grid.views.utils import DEAL_FORMS
from apps.landmatrix.forms import ActivityFilterForm, InvestorFilterForm
from apps.landmatrix.models import Country, FilterPreset, FilterPresetGroup, Region
class FilterWidgetAjaxView(APIView):
renderer_classes = (JSONRenderer,)
TYPE_STRING = "string"
TYPE_NUMERIC = "numeric"
TYPE_BOOLEAN = "boolean"
TYPE_LIST = "list"
TYPE_AUTOCOMPLETE = "autocomplete"
TYPE_LIST_MULTIPLE = "multiple"
TYPE_DATE = "date"
FIELD_TYPE_MAPPING = OrderedDict(
(
(
YearMonthDateField,
TYPE_DATE,
), # Placed before CharField since it inherits from CharField
(forms.CharField, TYPE_STRING),
(forms.IntegerField, TYPE_NUMERIC),
(forms.BooleanField, TYPE_BOOLEAN),
(forms.ChoiceField, TYPE_LIST),
(forms.MultipleChoiceField, TYPE_LIST_MULTIPLE),
)
)
FIELD_NAME_TYPE_MAPPING = {
"activity_identifier": TYPE_NUMERIC,
"fully_updated": TYPE_DATE,
"fully_updated_date": TYPE_DATE,
"updated_date": TYPE_DATE,
"operational_stakeholder": TYPE_AUTOCOMPLETE,
"target_country": TYPE_AUTOCOMPLETE,
}
TYPE_OPERATION_MAPPING = {
TYPE_STRING: ("contains", "is", "is_empty"),
TYPE_NUMERIC: ("lt", "gt", "gte", "lte", "is", "is_empty"),
TYPE_BOOLEAN: ("is", "is_empty"),
TYPE_LIST: ("is", "not_in", "in", "is_empty"),
TYPE_LIST_MULTIPLE: ("is", "not_in", "in", "is_empty"),
TYPE_DATE: ("lt", "gt", "gte", "lte", "is", "is_empty"),
TYPE_AUTOCOMPLETE: ("is", "not_in", "in", "is_empty"),
}
OPERATION_WIDGET_MAPPING = {"is_empty": None}
TYPE_WIDGET_MAPPING = {
TYPE_STRING: [{"operations": ("contains", "is"), "widget": forms.TextInput}],
TYPE_NUMERIC: [
{
"operations": ("lt", "gt", "gte", "lte", "is"),
"widget": forms.NumberInput,
}
],
TYPE_BOOLEAN: [{"operations": ("is",), "widget": forms.Select}],
TYPE_LIST: [
{"operations": ("is",), "widget": forms.Select},
{"operations": ("not_in", "in"), "widget": forms.CheckboxSelectMultiple},
],
TYPE_LIST_MULTIPLE: [
{"operations": ("is",), "widget": forms.CheckboxSelectMultiple},
{"operations": ("not_in", "in"), "widget": forms.CheckboxSelectMultiple},
],
TYPE_DATE: [
{"operations": ("lt", "gt", "gte", "lte", "is"), "widget": DateTimePicker}
],
TYPE_AUTOCOMPLETE: [
{"operations": ("is",), "widget": forms.Select},
{"operations": ("not_in", "in"), "widget": forms.SelectMultiple},
],
}
FIELD_NAME_MAPPING = {"operational_stakeholder": "operating_company_id"}
field_name = ""
name = ""
operation = ""
doc_type = "deal"
def get(self, request, *args, **kwargs):
"""render form to enter values for the requested field in the filter widget for the grid view
form to select operations is updated by the javascript function update_widget() in /media/js/main.js
"""
self.doc_type = kwargs.get("doc_type", "deal")
self.field_name = self.request.GET.get("key_id", "")
self.name = self.request.GET.get("name", "")
self.operation = self.request.GET.get("operation", "")
return Response(
{
"allowed_operations": self.get_allowed_operations(),
"widget": self.render_widget(),
}
)
@property
def field(self):
if not hasattr(self, "_field"):
if self.field_name:
# Deprecated?
if "inv_" in self.field_name: # pragma: no cover
field = get_activity_field_by_key(self.field_name[4:])
elif self.doc_type == "investor":
field = get_investor_field_by_key(self.field_name)
else:
field = get_activity_field_by_key(self.field_name)
# MultiValueField?
if isinstance(field, forms.MultiValueField):
# Get first field instead
field = field.fields[0]
self._field = field
else:
return None
return self._field
@property
def type(self):
field = self.field
if not hasattr(self, "_type"):
# Get type by field class
for field_class, field_type in self.FIELD_TYPE_MAPPING.items():
if isinstance(field, field_class):
self._type = field_type
break
# Get type by field name
if self.field_name in self.FIELD_NAME_TYPE_MAPPING.keys():
self._type = self.FIELD_NAME_TYPE_MAPPING.get(self.field_name)
# Fallback to string
if not hasattr(self, "_type"):
self._type = self.TYPE_STRING
return self._type
@property
def value(self):
if not hasattr(self, "_value"):
value = self.request.GET.get("value", "")
if value:
# Date?
if self.type == self.TYPE_DATE:
value = datetime.strptime(value, "%Y-%m-%d")
else:
# Boolean?
if self.type == self.TYPE_BOOLEAN:
value = "True"
# Make list
if self.type in (self.TYPE_LIST, self.TYPE_LIST_MULTIPLE):
self._value = value and value.split(",") or []
else:
self._value = value
return self._value
def get_allowed_operations(self):
return self.TYPE_OPERATION_MAPPING[self.type]
def get_attrs(self):
# Merge custom with existing field attributes
attrs = {"id": "id_{}".format(self.name)}
if not self.field or not hasattr(
self.field.widget, "attrs"
): # pragma: no cover
return attrs
if not self.type == self.TYPE_LIST_MULTIPLE and not (
self.type == self.TYPE_LIST and self.operation in ("in", "not_in")
):
attrs["class"] = "valuefield form-control"
field_attrs = self.field.widget.attrs
for key, value in field_attrs.items(): # pragma: no cover
if key in ("readonly",):
continue
if key in attrs and key == "class":
attrs[key] += " %s" % field_attrs[key]
else:
attrs[key] = field_attrs[key]
return attrs
def get_widget_init_kwargs(self):
kwargs = {}
# Get boolean choices (Yes/No)
if self.type == self.TYPE_BOOLEAN:
kwargs["choices"] = [("True", _("Yes")), ("False", _("No"))]
# Get list choices
if self.type in (self.TYPE_LIST, self.TYPE_LIST_MULTIPLE):
kwargs["choices"] = self.field.choices
# Get date options
if self.type == self.TYPE_DATE:
kwargs["options"] = {"format": "YYYY-MM-DD", "inline": True}
return kwargs
def get_widget_render_kwargs(self):
return {"name": self.name, "value": self.value, "attrs": self.get_attrs()}
def get_widget_class(self):
operation_mappings = self.TYPE_WIDGET_MAPPING[self.type]
widget = None
for operation_mapping in operation_mappings:
if self.operation in operation_mapping["operations"]:
widget = operation_mapping["widget"]
return widget
def render_widget(self):
widget = self.get_widget_class()
if widget:
widget = widget(**self.get_widget_init_kwargs())
widget = self._pre_render_widget(widget)
widget = widget.render(**self.get_widget_render_kwargs())
widget = self._post_render_widget(widget)
return widget
def _pre_render_widget(self, widget):
if self.type == self.TYPE_DATE:
# See here: https://github.com/jorgenpt/django-bootstrap3-datetimepicker/commit/042dd1da3a7ff21010c1273c092cba108d95baeb#commitcomment-16877308
widget.js_template = """
<script>
$(function(){$("#%(picker_id)s:has(input:not([readonly],[disabled]))")
.datetimepicker(%(options)s);});
</script>
"""
return widget
def _post_render_widget(self, widget):
return widget
def get_activity_variable_table():
"""
Create an OrderedDict of group name keys with lists of dicts for each
variable in the group (each dict contains 'name' and 'label' keys).
This whole thing is static, and maybe should just be written out, but
    for now generate it dynamically on app load.
"""
# for formsets, we want form.form
deal_forms = [form.form if hasattr(form, "form") else form for form in DEAL_FORMS]
variable_table = OrderedDict()
group_items = []
group_title = ""
# Add Activity attributes
variable_table[str(_("Deal"))] = []
for field_name, field in ActivityFilterForm.base_fields.items():
if field_name == "id": # pragma: no cover
continue
variable_table[str(_("Deal"))].append(
{"name": field_name, "label": str(field.label)}
)
# Add deal attributes
exclude = ("intended_area", "contract_area", "production_area")
for form in deal_forms:
for field_name, field in form.base_fields.items():
if field_name in exclude:
continue
if isinstance(field, TitleField):
if group_title and group_items:
variable_table[group_title] = group_items
group_items = []
group_title = str(field.initial)
else:
group_items.append({"name": field_name, "label": field.label})
if group_title and group_items:
variable_table[group_title] = group_items
# Add operating company attributes
if _("Operating company") not in variable_table: # pragma: no cover
variable_table[str(_("Operating company"))] = []
for field_name, field in OperationalCompanyForm.base_fields.items():
if field_name == "id":
continue
variable_table[str(_("Operating company"))].append(
{
"name": "operating_company_%s" % field_name,
"label": "%s %s" % (str(_("Operating company")), str(field.label)),
}
)
# Add parent company attributes
variable_table[str(_("Parent company"))] = []
for field_name, field in ParentStakeholderForm.base_fields.items():
if field_name == "id":
continue
variable_table[str(_("Parent company"))].append(
{
"name": "parent_stakeholder_%s" % field_name,
"label": "%s %s" % (str(_("Parent company")), str(field.label)),
}
)
# Add tertiary investors/lenders attributes
variable_table[str(_("Tertiary investor/lender"))] = []
for field_name, field in ParentInvestorForm.base_fields.items():
if field_name == "id":
continue
variable_table[str(_("Tertiary investor/lender"))].append(
{
"name": "parent_investor_%s" % field_name,
"label": "%s %s"
% (str(_("Tertiary investor/lender")), str(field.label)),
}
)
return variable_table
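# Illustrative sketch: the tables built above/below are OrderedDicts keyed by
# group label, mapping to lists of {"name", "label"} dicts, e.g.
#
#   OrderedDict([
#       ("Deal", [{"name": "activity_identifier", "label": "Deal ID"}, ...]),
#       ("Operating company",
#        [{"name": "operating_company_name",
#          "label": "Operating company Name"}, ...]),
#   ])
#
# The group labels and variable names shown here are hypothetical examples,
# not the exact output of the forms configured in this project.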
def get_investor_variable_table():
"""
Create an OrderedDict of group name keys with lists of dicts for each
variable in the group (each dict contains 'name' and 'label' keys).
This whole thing is static, and maybe should just be written out, but
    for now generate it dynamically on app load.
"""
variable_table = OrderedDict()
group_items = []
group_title = ""
# Add investor attributes
investor_variables = []
for field_name, field in InvestorFilterForm.base_fields.items():
if field_name == "id": # pragma: no cover
continue
investor_variables.append({"name": field_name, "label": str(field.label)})
variable_table[str(_("Investor"))] = investor_variables
# Add parent company attributes
pc_variables = []
for field_name, field in ParentStakeholderForm.base_fields.items():
if field_name == "id":
continue
pc_variables.append(
{
"name": "parent_stakeholder_%s" % field_name,
"label": "%s %s" % (str(_("Parent company")), str(field.label)),
}
)
variable_table[str(_("Parent company"))] = pc_variables
# Add tertiary investors/lenders attributes
til_variables = []
for field_name, field in ParentInvestorForm.base_fields.items():
if field_name == "id":
continue
til_variables.append(
{
"name": "parent_investor_%s" % field_name,
"label": "%s %s"
% (str(_("Tertiary investor/lender")), str(field.label)),
}
)
variable_table[str(_("Tertiary investor/lender"))] = til_variables
return variable_table
class FilterWidgetMixin:
doc_type = "deal"
variable_table = get_activity_variable_table()
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.rules = []
#
# @property
# def filters(self):
# return self.get_filter_context(self.current_formset_conditions)
#
# @property
# def current_formset_conditions(self):
# data = self.request.GET.copy()
# filter_set = self._filter_set(data)
# conditions_formset = self.get_formset_conditions(filter_set, data)
#
# return conditions_formset
def get_context_data(self, **kwargs):
if hasattr(super(), "get_context_data"):
context = super().get_context_data(**kwargs)
else:
context = {}
data = self.request.GET.copy()
self.set_country_region_filter(data)
self.set_default_filters(data)
context.update(
{
# 'filters': self.filters,
# 'empty_form_conditions': self.current_formset_conditions,
# 'rules': self.rules,
"variables": self.variable_table,
"presets": FilterPresetGroup.objects.all(),
"set_default_filters": self.request.session.get(
"%s:set_default_filters" % self.doc_type
),
"status": self.status,
}
)
return context
# def get_filter_context(self, formset_conditions, order_by=None, group_by=None,
# group_value=None, starts_with=None):
# filters = BrowseFilterConditions(formset_conditions, [], 0).parse()
#
# filters['order_by'] = order_by # required for table group view
# filters['group_by'] = group_by
# filters['group_value'] = group_value
#
# filters['starts_with'] = starts_with
#
# return filters
def set_country_region_filter(self, data):
filter_values = {}
# Country or region filter set?
if data.get("country", None) or data.get("region", None):
stored_filters = self.request.session.get("%s:filters" % self.doc_type, {})
if not stored_filters:
stored_filters = {}
if data.get("country", None):
if self.doc_type == "deal":
filter_values["variable"] = "target_country"
filter_values["label"] = _("Target country")
else:
filter_values["variable"] = "fk_country"
filter_values["label"] = _("Country of registration/origin")
filter_values["operator"] = "is"
filter_values["value"] = data.get("country")
try:
country = Country.objects.defer("geom").get(pk=data.get("country"))
filter_values["display_value"] = country.name
except: # pragma: no cover
pass
filter_values["name"] = "country"
data.pop("country")
elif data.get("region", None):
if self.doc_type == "deal":
filter_values["variable"] = "target_region"
filter_values["label"] = str(_("Target region"))
else:
filter_values["variable"] = "region"
filter_values["label"] = str(_("Region of registration/origin"))
filter_values["operator"] = "is"
filter_values["value"] = data.get("region")
try:
region = Region.objects.get(pk=data.get("region"))
filter_values["display_value"] = region.name
except: # pragma: no cover
pass
filter_values["name"] = "region"
data.pop("region")
# Remove existing target country/region filters
filters = filter(
lambda f: f.get("name") in ("country", "region"),
stored_filters.values(),
)
for stored_filter in list(filters):
stored_filters.pop(stored_filter["name"], None)
if filter_values:
# Set filter
new_filter = Filter(
variable=filter_values["variable"],
operator=filter_values["operator"],
value=filter_values["value"],
name=filter_values.get("name", None),
label=filter_values["label"],
display_value=filter_values.get("display_value", None),
)
stored_filters[new_filter.name] = new_filter
self.request.session["%s:filters" % self.doc_type] = stored_filters
else:
self.remove_country_region_filter()
def remove_country_region_filter(self):
stored_filters = self.request.session.get("%s:filters" % self.doc_type, {})
if stored_filters:
stored_filters = dict(
filter(
lambda i: i[1].get("name", "") not in ("country", "region"),
stored_filters.items(),
)
)
self.request.session["%s:filters" % self.doc_type] = stored_filters
# stored_filters = self.request.session['filter_query_params']
# stored_filters = dict(filter(lambda i: i[1].get('variable', '') not in ('target_country', 'target_region'), stored_filters.items()))
self.request.session["%s:filter_query_params" % self.doc_type] = None
def set_default_filters(self, data, disabled_presets=[], enabled_presets=[]):
self.remove_default_filters()
        # Only apply default filters when the session flag explicitly requests them (e.g. set for statistics views).
if not self.request.session.get(
"%s:set_default_filters" % self.doc_type, False
):
return
if not disabled_presets:
if hasattr(self, "disabled_presets") and self.disabled_presets:
disabled_presets = self.disabled_presets
if not enabled_presets:
if hasattr(self, "enabled_presets") and self.enabled_presets:
enabled_presets = self.enabled_presets
stored_filters = self.request.session.get("%s:filters" % self.doc_type, {})
if not stored_filters:
stored_filters = {}
# Target country or region set?
filter_names = [v.get("name", "") for k, v in stored_filters.items()]
preset_ids = dict(
[(v.get("preset_id", ""), k) for k, v in stored_filters.items()]
)
if "country" in filter_names:
# Use national presets
for preset in FilterPreset.objects.filter(is_default_country=True):
if preset.id in preset_ids.keys(): # pragma: no cover
del stored_filters[preset_ids[preset.id]]
if preset.id in disabled_presets: # pragma: no cover
continue
if preset.id in enabled_presets: # pragma: no cover
del enabled_presets[enabled_presets.index(preset.id)]
filter_name = "default_preset_%i" % preset.id
stored_filters[filter_name] = PresetFilter(
preset, name=filter_name, hidden=preset.is_hidden
)
else:
# Use global presets
for preset in FilterPreset.objects.filter(is_default_global=True):
if preset.id in preset_ids.keys(): # pragma: no cover
del stored_filters[preset_ids[preset.id]]
if preset.id in disabled_presets: # pragma: no cover
continue
filter_name = "default_preset_%i" % preset.id
stored_filters[filter_name] = PresetFilter(
preset, name=filter_name, hidden=preset.is_hidden
)
# Add enabled filters (if not already set)
for preset_id in enabled_presets:
if "default_preset_%i" % preset_id not in stored_filters.keys():
preset = FilterPreset.objects.get(pk=preset_id)
if preset.id in preset_ids.keys(): # pragma: no cover
del stored_filters[preset_ids[preset.id]]
if preset.id in disabled_presets:
continue
filter_name = "default_preset_%i" % preset.id
stored_filters[filter_name] = PresetFilter(
preset, name=filter_name, hidden=preset.is_hidden
)
self.request.session["%s:filters" % self.doc_type] = stored_filters
def remove_default_filters(self):
stored_filters = self.request.session.get("%s:filters" % self.doc_type, {})
if stored_filters:
stored_filters = dict(
filter(lambda i: "default_preset" not in i[0], stored_filters.items())
)
self.request.session["%s:filters" % self.doc_type] = stored_filters
# def get_formset_conditions(self, filter_set, data, group_by=None):
# self.set_country_region_filter(data)
# self.set_default_filters(data)
#
# if filter_set:
# # set given filters
# result = ConditionFormset(data, prefix="conditions_empty")
# else:
# if group_by == "database":
# result = None
# else:
# result = ConditionFormset(self._get_filter_dict(self.rules), prefix="conditions_empty")
# return result
#
# def _filter_set(self, data):
# return data and data.get("filtered") and not data.get("reset", None)
#
# def _get_filter_dict(self, browse_rules):
# filter_dict = MultiValueDict()
# for record, c in enumerate(browse_rules):
# rule_dict = MultiValueDict({
# "conditions_empty-%i-variable" % record: [c.variable],
# "conditions_empty-%i-operator" % record: [c.operator]
# })
# # pass comma separated list as multiple values for operators in/not in
# if c.operator in ("in", "not_in"):
# rule_dict.setlist("conditions_empty-%i-value" % record, c.value.split(","))
# else:
# rule_dict["conditions_empty-%i-value" % record] = c.value
# filter_dict.update(rule_dict)
# filter_dict["conditions_empty-INITIAL_FORMS"] = len(browse_rules)
# filter_dict["conditions_empty-TOTAL_FORMS"] = len(browse_rules)
# filter_dict["conditions_empty-MAX_NUM_FORMS"] = ""
# return filter_dict
@property
def status(self):
if self.request.user.is_authenticated and "status" in self.request.GET:
return self.request.GET.getlist("status")
return [
"2",
"3",
] # FIXME: Use Activity.STATUS_ACTIVE + Activity.STATUS_OVERWRITTEN
| agpl-3.0 | -274,360,651,482,665,100 | 39.459547 | 155 | 0.552352 | false |
Sodel-the-Vociferous/early-code | camden-office-appt-reminder/send_reminders.py | 1 | 16094 | #!/usr/bin/env python
# ############################## #
# (C)2007 Daniel Ralston #
# Appointment Reminder Software #
# #
# callback.py #
# ############################## #
import shelve
import sys
import calendar
import sys
import time
import datetime
import os
import send_mail
internal = True
class Database:
#Provides a central storage unit to keep track of any and all our data
def __init__(self):
self.appointments = {}
self.clients = {}
self.preferences = {"save at close": True,
"send reminders": True,
"send at login": True,
"company": "",
"email": ""}
self.possible_times = { 1:"7:00",
2:"7:15",
3:"7:30",
4:"7:45",
5:"8:00",
6:"8:15",
7:"8:30",
8:"8:45",
9:"9:00",
10:"9:15",
11:"9:30",
12:"9:45",
13:"10:00",
14:"10:15",
15:"10:30",
16:"10:45",
17:"11:00",
18:"11:15",
19:"11:30",
20:"11:45",
21:"12:00pm",
22:"12:15pm",
23:"12:30pm",
24:"12:45pm",
25:"1:00pm",
26:"1:15pm",
27:"1:30pm",
28:"1:45pm",
29:"2:00pm",
30:"2:15pm",
31:"2:30pm",
32:"2:45pm",
33:"3:00pm",
34:"3:15pm",
35:"3:30pm",
36:"3:45pm",
37:"4:00pm",
38:"4:15pm",
39:"4:30pm",
40:"4:45pm",
41:"5:00pm",
42:"5:15pm",
43:"5:30pm",
44:"5:45pm",
45:"6:00pm",
46:"6:15pm",
47:"6:30pm",
48:"6:45pm",
49:"7:00pm",
50:"7:15pm",
51:"7:30pm",
52:"7:45pm",
53:"8:00pm",
54:"8:15pm",
55:"8:30pm",
56:"8:45pm",
57:"9:00pm"}
self.day_names = {1: "Monday",
2: "Tuesday",
3: "Wednesday",
4: "Thursday",
5: "Friday",
6: "Saturday",
7: "Sunday"}
self.current_user = ""
self.close_program = False
def new_appointment(self, year, month, day, time, length, email, email_bool, client, notes = None, force = False):
# length is the length in minutes divided by 15
if client != "":
if force == True:
                if (year, month, day, time) in self.appointments:
                    self.remove_appointment(year, month, day, time)
if (year, month, day, time) not in self.appointments:
i = 1
exists = 0
while i < length:
if (year, month, day, time+i) not in self.appointments:
pass
else:
error_handler.error(None, "Prior Appointment Exists In Specified Time Range")
return 1
i = i + 1
else:
                    self.appointments[(year, month, day, time)] = Appointment(year, month, day, time, length, email, email_bool, client, notes=notes)
i = 1
while (i < length) and (time + i in self.possible_times):
self.appointments[(year, month, day, time + i)] = client
i = i + 1
return 0
else:
error_handler.error(None, "Prior Appointment Exists In Specified Timeslot")
return 1
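    # Illustrative example (hypothetical values): with the slot table above,
    # time index 5 is "8:00" and length counts 15-minute slots, so
    # new_appointment(2007, 6, 1, 5, 4, email, True, "Jane Doe") would book a
    # one-hour appointment from 8:00 to 9:00 by filling slots 5, 6, 7 and 8.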
def remove_appointment(self, year, month, day, time):
#where time is the length of the appointment divided by 15(minutes)
if (year, month, day, time) in self.appointments:
length = self.appointments[(year, month, day, time)].length
del self.appointments[(year, month, day, time)]
i = 1
while (i < length) and (time + i in self.possible_times):
del self.appointments[(year, month, day, time + i)]
i = i + 1
        else:
            error_handler.error(None, "No Appointment At Specified Timeslot")
return
def new_client(self, name, email, email_bool, notes = None, force = False):
if name not in self.clients:
            self.clients[name] = Client(name, email, email_bool, notes)
else:
if force == False:
error_handler.error(None, "Client Of That Name In Record")
else:
del self.clients[name]
self.new_client(name, email, email_bool, notes)
return
    def remove_client(self, widget, name):
        appts = self.appointments
        if name in self.clients:
            del self.clients[name]
            # Iterate over a copy of the keys: entries are deleted inside the
            # loop, and continuation slots store the client name as a string.
            for entry in appts.keys():
                value = appts[entry]
                if value == name or getattr(value, "client", None) == name:
                    del appts[entry]
        return
def save_data(self, widget = None, user = None):
preferences = shelve.open("preferences")
key_base = shelve.open("key_base")
appointments = shelve.open("appointments")
clients = shelve.open("clients")
for i in key_base:
del key_base[i]
for i in appointments:
del appointments[i]
for i in preferences:
del preferences[i]
for i in self.preferences:
preferences[i] = self.preferences[i]
for i in clients:
del clients[i]
for i in self.clients:
clients[i] = self.clients[i]
iteration = 0
for i in self.appointments:
appointments[str(iteration)] = self.appointments[i]
key_base[str(iteration)] = i
iteration = iteration + 1
appointments.close()
clients.close()
preferences.close()
return
def get_data(self, widget = None, user = None):
preferences = shelve.open("preferences")
appointments = shelve.open("appointments")
key_base = shelve.open("key_base")
clients = shelve.open("clients")
for i in preferences:
self.preferences[i] = preferences[i]
for i in clients:
self.clients[i] = clients[i]
iteration = 0
for i in appointments:
if appointments[str(iteration)] != "":
self.appointments[key_base[str(iteration)]] = appointments[str(iteration)]
iteration = iteration + 1
appointments.close()
clients.close()
preferences.close()
return
class Client:
def __init__(self, name, email, email_bool, notes = None):
self.name = name
self.email = email
self.email_bool = email_bool
        self.notes = []
        if notes is not None:
            for note in notes:
                self.notes.append(note) #Special notes can be added easily
class Appointment:
def __init__(self, year, month, day, time, length, email, email_bool, client, auto_blocked = 0, notes = None):
self.year = year
self.month = month
self.day = day
self.time = time
self.length = length
self.email = email
self.email_bool = email_bool
self.client = client
self.auto_blocked = auto_blocked
self.notes = []
self.sent = False
        if notes is not None:
            for note in notes:
                self.notes.append(note)
class Error_Handler:
def error(self, widget = None, message = None, type = "ok", positive = None, negative = None, parameter1 = None, parameter2 = None, size_x = 320, size_y = 200, prev_window = None):
#Error "hub" where the appropraite dialogs are dispatched from.
#"positive" is the appropriate function to call if the type is "yes/no", and the anser is affirmative
#"parameter1" is the "positive" function's parameter
#"negative" and "parameter2"hold the call if the type is "yes/no", and the answer is negative
if prev_window != None:
prev_window.hide_all()
self.error_window = gtk.Window()
self.error_window.set_title('Error')
self.error_window.set_border_width(5)
self.error_window.connect("destroy", self.destroy_error_dialog, prev_window)
self.error_window.set_resizable(False)
error_box = gtk.VBox(False, 10)
error_box.set_size_request(size_x, size_y)
self.error_window.add(error_box)
error_box.add(gtk.Label(message))
if type == "ok":
ok_button = gtk.Button("OK")
ok_button.connect("clicked", self.destroy_error_dialog)
error_box.add(ok_button)
elif type == "yes/no":
prev_window.hide_all()
yes_button = gtk.Button("Okay")
error_box.add(yes_button)
no_button = gtk.Button("Cancel")
error_box.add(no_button)
if positive != None:
yes_button.connect("clicked", self.exec_positive, prev_window, positive, parameter1)
if negative != None:
no_button.connect("clicked", negative, parameter2)
self.error_window.show_all()
def destroy_error_dialog(self, widget = None, prev_window = None):
if prev_window != None:
prev_window.show_all()
self.error_window.destroy()
pass
def exec_positive(self, widget, prev_window, positive, parameter1):
if prev_window != None:
prev_window.show_all()
        self.destroy_error_dialog()
positive(None, parameter1)
class Date:
def __init__(self, year, month, day):
self.year = year
self.month = month
self.day = day
self.time = time
def increment_days(self, days):
        if (days >= 0):
            target_year = self.year
            target_month = self.month
            target_day = self.day
            iterations = 0
            while (iterations < days):
                # Recompute the month length for the month currently being
                # walked, so increments stay correct across month boundaries.
                month_length = self.month_length(target_month, target_year)
                if target_day == month_length:
                    target_day = 1
                    if target_month == 12:
                        target_month = 1
                        target_year = target_year + 1
                    else:
                        target_month = target_month + 1
                else:
                    target_day = target_day + 1
                iterations = iterations + 1
            return (target_year, target_month, target_day)
        else:
            error_handler.error(None, "increment_days(self, days): Error, negative input")
def increment_month(self, months = 1):
if months >= 0:
if self.month == 12:
return (1, self.year + 1)
else:
return (self.month + 1, self.year)
else:
error_handler.error("increment_months(self.months): Error, negative input")
def month_length(self, month, year):
if month == 1:
return 31
elif month == 2:
            if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
                return 29
            else:
                return 28
elif month == 3:
return 31
elif month == 4:
return 30
elif month == 5:
return 31
elif month == 6:
return 30
elif month == 7:
return 31
elif month == 8:
return 31
elif month == 9:
return 30
elif month == 10:
return 31
elif month == 11:
return 30
elif month == 12:
return 31
class Sender:
def get_today(self):
year, month, day, a, b, c, d, e, f = time.localtime()
return year, month, day
def query(self):
print ("Querying...")
for year, month, day, time in database.appointments:
            if not isinstance(database.appointments[year, month, day, time], str):
if database.appointments[year, month, day, time].sent == False:
if database.appointments[year, month, day, time].email_bool == True:
company = database.preferences["company"]
sender = database.current_user
sender_email = "[email protected]"
password = "password"
recipient_name = database.appointments[year, month, day, time].client
recipient_email = database.clients[recipient_name].email
                        ntime = database.possible_times[time]
if send_mail.send_message(company, sender, sender_email, password, recipient_email, recipient_name, year, month, day, ntime) == 0:
database.appointments[year, month, day, time].sent = True
print ("Sent message to "+recipient_name+" for appointment "+str(year)+", "+str(month)+", "+str(day)+str(time))
else:
print ("Error sending message to "+recipient_name+" for appointment "+str(year)+", "+str(month)+", "+str(day)+str(ntime))
if __name__ == "__main__":
yn = ""
print "This program automatically checks for pending e-mail reminders."
print "Do you want to send pending e-mails now?"
yn = str(raw_input("[y/n]>"))
if yn == "y":
error_handler = Error_Handler()
database = Database()
today = Sender().get_today()
database.current_user = "JRandomUser"
os.chdir((str(sys.path[0])+"/databases/"+"JRandomUser"))
database.get_data()
Sender().query()
database.save_data(user = database.current_user)
elif yn == "n":
print "Closing..."
else:
print "Unrecognized Command.\nClosing..." | gpl-2.0 | 6,182,198,387,813,300,000 | 40.375321 | 184 | 0.456568 | false |
happz/ducky | tests/hdt.py | 1 | 2496 | import ducky.config
import ducky.boot
import ducky.mm
from hypothesis import given
from hypothesis.strategies import integers
from ctypes import sizeof
from . import common_run_machine, LOGGER
from functools import partial
def setup_machine(cpus, cores, memory):
machine_config = ducky.config.MachineConfig()
machine_config.add_section('memory')
machine_config.set('memory', 'size', memory)
M = common_run_machine(machine_config = machine_config, cpus = cpus, cores = cores, post_boot = [lambda _M: False])
return M
@given(cpus = integers(min_value = 0, max_value = 0xF), cores = integers(min_value = 0, max_value = 0xF), memory = integers(min_value = ducky.mm.MINIMAL_SIZE * ducky.mm.PAGE_SIZE, max_value = 0xFFFFFF00))
def test_sanity(cpus, cores, memory):
memory &= ducky.mm.PAGE_MASK
LOGGER.debug('TEST: cpus=%d, cores=%d, memory=0x%08X', cpus, cores, memory)
M = setup_machine(cpus, cores, memory)
assert M.nr_cpus == cpus
assert M.nr_cores == cores
S = M.capture_state()
memory_node = S.get_child('machine').get_child('memory')
hdt_page = ducky.boot.DEFAULT_HDT_ADDRESS // ducky.mm.PAGE_SIZE
hdt_page = [pg_node for pg_node in memory_node.get_page_states() if pg_node.index == hdt_page][0]
def __base_assert(size, page, offset, value):
for i, byte_offset, byte_shift in [(1, 0, 0), (2, 1, 8), (3, 2, 16), (4, 3, 24)]:
expected = (value >> byte_shift) & 0xFF
actual = page.content[offset + byte_offset]
assert expected == actual, 'Byte at offset %d + %d expected 0x%02X, 0x%02X found instead' % (offset, byte_offset, expected, actual)
if i == size:
break
__assert_u16 = partial(__base_assert, 2, hdt_page)
__assert_u32 = partial(__base_assert, 4, hdt_page)
from ducky.mm import u16_t, u32_t
ptr = 0
# HDT header - magic
__assert_u32(ptr, ducky.hdt.HDT_MAGIC); ptr += sizeof(u32_t)
# HDT header - entries count
__assert_u32(ptr, 2); ptr += sizeof(u32_t)
# HDT header - length
__assert_u32(ptr, 28); ptr += sizeof(u32_t)
# Memory
__assert_u16(ptr, ducky.hdt.HDTEntryTypes.MEMORY); ptr += sizeof(u16_t)
__assert_u16(ptr, sizeof(ducky.hdt.HDTEntry_Memory)); ptr += sizeof(u16_t)
__assert_u32(ptr, memory); ptr += sizeof(u32_t)
# CPU
__assert_u16(ptr, ducky.hdt.HDTEntryTypes.CPU); ptr += sizeof(u16_t)
__assert_u16(ptr, sizeof(ducky.hdt.HDTEntry_CPU)); ptr += sizeof(u16_t)
__assert_u16(ptr, cpus); ptr += sizeof(u16_t)
__assert_u16(ptr, cores); ptr += sizeof(u16_t)
| mit | 549,640,371,277,279,550 | 33.191781 | 204 | 0.665865 | false |
EnderCheng/pyspider | tests/test_fetcher.py | 1 | 10998 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-02-15 22:10:35
import os
import json
import copy
import time
import httpbin
import umsgpack
import subprocess
import unittest2 as unittest
from multiprocessing import Queue
import logging
import logging.config
logging.config.fileConfig("pyspider/logging.conf")
try:
from six.moves import xmlrpc_client
except ImportError:
import xmlrpclib as xmlrpc_client
from pyspider.libs import utils
from pyspider.libs.response import rebuild_response
from pyspider.fetcher.tornado_fetcher import Fetcher
class TestFetcher(unittest.TestCase):
sample_task_http = {
'taskid': 'taskid',
'project': 'project',
'url': '',
'fetch': {
'method': 'GET',
'headers': {
'Cookie': 'a=b',
'a': 'b'
},
'cookies': {
'c': 'd',
},
'timeout': 60,
'save': 'abc',
},
'process': {
'callback': 'callback',
'save': [1, 2, 3],
},
}
@classmethod
def setUpClass(self):
self.inqueue = Queue(10)
self.outqueue = Queue(10)
self.fetcher = Fetcher(self.inqueue, self.outqueue)
self.fetcher.phantomjs_proxy = '127.0.0.1:25555'
self.rpc = xmlrpc_client.ServerProxy('http://localhost:%d' % 24444)
self.xmlrpc_thread = utils.run_in_thread(self.fetcher.xmlrpc_run, port=24444)
self.httpbin_thread = utils.run_in_subprocess(httpbin.app.run, port=14887)
self.httpbin = 'http://127.0.0.1:14887'
self.thread = utils.run_in_thread(self.fetcher.run)
try:
self.phantomjs = subprocess.Popen(['phantomjs',
os.path.join(os.path.dirname(__file__),
'../pyspider/fetcher/phantomjs_fetcher.js'),
'25555'])
except OSError:
self.phantomjs = None
time.sleep(0.5)
@classmethod
def tearDownClass(self):
if self.phantomjs:
self.phantomjs.kill()
self.phantomjs.wait()
self.httpbin_thread.terminate()
self.rpc._quit()
self.thread.join()
time.sleep(1)
def test_10_http_get(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertEqual(response.json['headers'].get('Cookie'), 'c=d', response.json)
def test_15_http_post(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
request['fetch']['data'] = 'binux'
request['fetch']['cookies'] = {'c': 'd'}
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
self.assertIsNotNone(response.json, response.content)
self.assertEqual(response.json['form'].get('binux'), '')
self.assertEqual(response.json['headers'].get('A'), 'b', response.json)
self.assertEqual(response.json['headers'].get('Cookie'), 'c=d', response.json)
def test_e010_redirect(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/redirect-to?url=/get'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.url, self.httpbin+'/get')
def test_e020_too_much_redirect(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/redirect/10'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 599, result)
self.assertIn('redirects followed', response.error)
# FIXME: test failed
#def test_e030_cookie(self):
#request = copy.deepcopy(self.sample_task_http)
#request['url'] = self.httpbin+'/cookies/set?k1=v1&k2=v2'
#result = self.fetcher.sync_fetch(request)
#response = rebuild_response(result)
#self.assertEqual(response.status_code, 200, result)
#self.assertEqual(response.cookies, {'k1': 'v', 'k2': 'v2'}, result)
def test_20_dataurl_get(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_30_with_queue(self):
        request = copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_40_with_rpc(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'data:,hello'
result = umsgpack.unpackb(self.rpc.fetch(request).data)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.text, 'hello')
def test_50_base64_data(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
# utf8 encoding 中文
request['fetch']['data'] = "[BASE64-DATA]5Lit5paH[/BASE64-DATA]"
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, response.error)
self.assertIsNotNone(response.json, response.content)
self.assertIn(u'中文', response.json['form'], response.json)
def test_55_base64_data(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/post'
request['fetch']['method'] = 'POST'
# gbk encoding 中文
request['fetch']['data'] = "[BASE64-DATA]1tDOxA==[/BASE64-DATA]"
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, response.error)
self.assertIsNotNone(response.json, response.content)
def test_60_timeout(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/delay/5'
request['fetch']['timeout'] = 3
start_time = time.time()
self.inqueue.put(request)
task, result = self.outqueue.get()
end_time = time.time()
self.assertGreater(end_time - start_time, 1.5)
self.assertLess(end_time - start_time, 4.5)
def test_65_418(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/status/418'
self.inqueue.put(request)
task, result = self.outqueue.get()
response = rebuild_response(result)
self.assertEqual(response.status_code, 418)
self.assertIn('teapot', response.text)
def test_70_phantomjs_url(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin + '/get'
request['fetch']['fetch_type'] = 'js'
result = self.fetcher.sync_fetch(request)
response = rebuild_response(result)
self.assertEqual(response.status_code, 200, result)
self.assertEqual(response.orig_url, request['url'])
self.assertEqual(response.save, request['fetch']['save'])
data = json.loads(response.doc('pre').text())
self.assertIsNotNone(data, response.content)
self.assertEqual(data['headers'].get('A'), 'b', response.json)
self.assertEqual(data['headers'].get('Cookie'), 'c=d', response.json)
def test_80_phantomjs_timeout(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin+'/delay/5'
request['fetch']['fetch_type'] = 'js'
request['fetch']['timeout'] = 3
start_time = time.time()
result = self.fetcher.sync_fetch(request)
end_time = time.time()
self.assertGreater(end_time - start_time, 2)
self.assertLess(end_time - start_time, 5)
def test_90_phantomjs_js_script(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = self.httpbin + '/html'
request['fetch']['fetch_type'] = 'js'
request['fetch']['js_script'] = 'function() { document.write("binux") }'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 200)
self.assertIn('binux', result['content'])
@unittest.skipIf(os.environ.get('IGNORE_GOOGLE'), "can't connect to google.")
def test_a100_phantomjs_sharp_url(self):
if not self.phantomjs:
raise unittest.SkipTest('no phantomjs')
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'https://groups.google.com/forum/#!forum/pyspider-users'
request['fetch']['fetch_type'] = 'js'
request['fetch']['headers']['User-Agent'] = 'Mozilla/5.0'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 200)
self.assertIn('pyspider-users', result['content'])
def test_a110_dns_error(self):
request = copy.deepcopy(self.sample_task_http)
request['url'] = 'http://www.not-exists-site.com/'
result = self.fetcher.sync_fetch(request)
self.assertEqual(result['status_code'], 599)
self.assertIn('error', result)
self.assertIn('resolve', result['error'])
self.inqueue.put(request)
task, result = self.outqueue.get()
self.assertEqual(result['status_code'], 599)
self.assertIn('error', result)
self.assertIn('resolve', result['error'])
| apache-2.0 | -5,261,693,020,750,345,000 | 37.547368 | 86 | 0.616421 | false |
mdrohmann/txtemplates | txtemplates/dist.py | 1 | 2685 | # encoding: utf-8
"""
Package for configuration of version numbers.
"""
class IncomparableVersions(TypeError):
"""
Two versions could not be compared.
"""
class Version(object):
def __init__(self, package, major, minor, patch, prerelease=None):
"""
Args:
package (str): Package name
major (int): Major version number
minor (int): Minor version number
patch (int): Patch number
Kwargs:
prerelease (str): pre-release specifier
"""
self.package = package
self.major = major
self.minor = minor
self.patch = patch
self.prerelease = prerelease
def short(self):
"""
Return a string in short version format,
<major>.<minor>
"""
return "{major}.{minor}".format(**self.__dict__)
def long(self):
"""
Return a string in version format,
<major>.<minor>.<patch>[-prerelease]
"""
s = "{major}.{minor}.{patch}".format(**self.__dict__)
if self.prerelease:
s = "{}-{}".format(s, self.prerelease)
return s
def __repr__(self):
return "[{}, version {}]".format(self.package, self.long())
def __str__(self):
return "[{}, version {}]".format(self.package, self.long())
def __cmp__(self, other):
"""
        Compare two versions, considering major versions, minor versions, patch
        versions, then prereleases.
A version with a prerelease is always less than a version without a
prerelease. All prerelease string are considered identical in value.
Args:
other (Version): Another version.
Returns:
one of -1, 0, or 1.
Raises:
- NotImplementedError: when the other version is not a Version
object
- IncomparableVersions: when the package names of the versions
differ.
"""
if not isinstance(other, self.__class__):
raise NotImplementedError
if self.package != other.package:
raise IncomparableVersions(
"{} != {}".format(self.package, other.package))
if self.prerelease:
pre = 0
else:
pre = 1
if other.prerelease:
otherpre = 0
else:
otherpre = 1
x = cmp(
(self.major,
self.minor,
self.patch,
pre),
(other.major,
other.minor,
other.patch,
otherpre))
return x
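# Minimal usage sketch (illustrative only; the helper below is not part of the
# original API surface and relies on the Python 2 cmp() builtin):
def _example_version_usage():
    released = Version("txtemplates", 1, 2, 1)
    candidate = Version("txtemplates", 1, 2, 1, prerelease="rc1")
    # candidate.long() == "1.2.1-rc1", released.short() == "1.2", and a
    # prerelease compares lower than the same version without one, so
    # cmp(candidate, released) == -1.
    return released.short(), candidate.long(), cmp(candidate, released)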
# vim:set ft=python sw=4 et spell spelllang=en:
| bsd-3-clause | -734,281,179,381,470,200 | 24.330189 | 79 | 0.52514 | false |
France-ioi/taskgrader | tools/genJson/scripts/defaultChecker.py | 1 | 10582 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Default checking program: checks the output of the solution is the given
# expected output (test.out).
# Note that it can be used as a library, via the diff function.
# Takes three arguments on command-line:
# ./defaultChecker.py test.solout test.in test.out
# where
# test.solout is the solution output
# test.in is the test input given to the solution (not used)
# test.out is the expected output (if given by the task, else an empty file)
from json import dumps
from subprocess import Popen, PIPE
from sys import argv, exit
DEFAULT_OPTIONS = {
'ignoreSpaceChange': True,
'ignoreBlankLines': True,
'maxChars': 500,
'diffContext': 3
}
def utf8safe(s):
"""Remove characters invalid in UTF-8."""
return s.decode('utf-8', errors='replace').encode('utf-8')
def readRealLine(handle, options):
l = "\n"
if options['ignoreBlankLines']:
if options['ignoreSpaceChange']:
while l != '' and l.strip() == '':
l = handle.readline()
else:
while l != '' and len(l) == 1:
l = handle.readline()
else:
l = handle.readline()
return l
def diff(solPath, outPath, options=None):
"""Generate a diff report of two files.
The arguments are:
-solPath: path to the solution output
-outPath: path to the expected output
-options: dict with the following options:
      ignoreSpaceChange (bool): ignore consecutive whitespaces
      ignoreBlankLines (bool): ignore blank lines
      maxChars (int): maximum chars in the displayed output
      diffContext (int): number of context lines kept around the difference
Returns a tuple (grade, result), where grade is the grade from 0 to 100,
and result is a dict containing the diff information."""
# Read options
if options:
opt = {}
opt.update(DEFAULT_OPTIONS)
opt.update(options)
else:
opt = DEFAULT_OPTIONS
# Prepare diff
# Options:
# -u for unified (expected display)
# -a to always treat as text (avoid special messages when the output
# doesn't resemble text)
diffOptions = '-a'
if opt['ignoreSpaceChange']: diffOptions += 'b'
if opt['ignoreBlankLines']:
diffOptions += 'B'
# diff -B has an error, and doesn't compare the very last character of
# the last line if it's not a newline
# We add a space and a newline at the end of both files as a workaround
open(solPath, 'a').write(' \n')
open(outPath, 'a').write(' \n')
# Execute diff
diffProc = Popen(['/usr/bin/env', 'diff', diffOptions, '-U', '%d' % opt['diffContext'],
solPath, outPath], stdout=PIPE)
# Ignore first two lines
do = diffProc.stdout
do.readline()
o = do.readline()
# We cannot rely on returncode as diff can have closed its output without
# being finished yet
if not o:
# The files are identical
return (100, {})
# The files aren't identical, analyze diff output
# Import only because we need them
from collections import OrderedDict
from os.path import getsize
from string import whitespace
result = OrderedDict()
# The chunk line is always the same for the two files, but if one file is
# empty, diff will give a line number of 0
chunkSplit = do.readline().split()
chunkLineSol = max(-int(chunkSplit[1].split(',')[0]), 1)
chunkLineExp = max(int(chunkSplit[2].split(',')[0]), 1)
chunkLine = chunkLineExp
# Start reading the actual files
# stderr=PIPE is generally not good, but tail will never fill it
solReadProc = Popen(['/usr/bin/env', 'tail', '-q', '-n', '+%d' % chunkLineSol,
solPath], stdout=PIPE, stderr=PIPE)
solRead = solReadProc.stdout
expReadProc = Popen(['/usr/bin/env', 'tail', '-q', '-n', '+%d' % chunkLineExp,
outPath], stdout=PIPE, stderr=PIPE)
expRead = expReadProc.stdout
solLines = []
expLines = []
truncatedAfter = False
# Read diff output
curLine = chunkLine
diffLine = None
lastLine = do.readline()
    # Read at most opt['diffContext'] lines after the diff line
    # (these variables are still incremented past that limit but the lines
    # aren't actually added)
solPostDiff = 0
expPostDiff = 0
while lastLine:
if lastLine[0] == ' ':
if solPostDiff < opt['diffContext']:
solLines.append(readRealLine(solRead, opt))
else:
truncatedAfter = True
if expPostDiff < opt['diffContext']:
expLines.append(readRealLine(expRead, opt))
else:
truncatedAfter = True
if diffLine is not None:
solPostDiff += 1
expPostDiff += 1
elif lastLine[0] == '-':
if opt['ignoreBlankLines'] and lastLine[1:].strip() == '':
lastLine = do.readline()
continue
if solPostDiff < opt['diffContext']:
solLines.append(readRealLine(solRead, opt))
else:
truncatedAfter = True
if diffLine is None:
diffLine = curLine
if diffLine is not None:
solPostDiff += 1
elif lastLine[0] == '+':
if opt['ignoreBlankLines'] and lastLine[1:].strip() == '':
lastLine = do.readline()
continue
if expPostDiff < opt['diffContext']:
expLines.append(readRealLine(expRead, opt))
else:
truncatedAfter = True
if diffLine is None:
diffLine = curLine
if diffLine is not None:
expPostDiff += 1
curLine += 1
lastLine = do.readline()
        # We read at most opt['diffContext'] lines after the first difference
if diffLine is not None and solPostDiff > opt['diffContext'] and expPostDiff > opt['diffContext']:
truncatedAfter = truncatedAfter or (lastLine != '') and (lastLine != '\ No newline at end of file\n')
break
# Put a single line in the expected answer if it was empty
if len(expLines) == 0:
expLines = ["\n"]
# Find difference in the diff line
relLine = diffLine-chunkLine
while relLine >= len(solLines):
solLines.append("\n")
solDLine = solLines[relLine]
while relLine >= len(expLines):
expLines.append("\n")
expDLine = expLines[relLine]
solCur = 0
expCur = 0
while True:
if solCur >= len(solDLine) or expCur >= len(expDLine):
break
if opt['ignoreSpaceChange']:
# We ignore consecutive whitespaces
# It's a line so the character before the first one is a newline
if solDLine[solCur] in whitespace:
if solCur == len(solDLine)-1:
break
elif solCur == 0 or solDLine[solCur+1] in whitespace:
solCur += 1
continue
if expDLine[expCur] in whitespace:
if expCur == len(expDLine)-1:
break
elif expCur == 0 or expDLine[expCur+1] in whitespace:
expCur += 1
continue
if solDLine[solCur] != expDLine[expCur]:
break
else:
solCur += 1
expCur += 1
# Start building report
result['msg'] = "Answer mismatch at line %d, character %d" % (diffLine, solCur+1)
result['solutionOutputLength'] = getsize(solPath)
result['diffRow'] = diffLine
result['diffCol'] = solCur+1
# Select lines to display
maxChars = opt['maxChars']
if len(solDLine) > maxChars or len(expDLine) > maxChars:
# We only display the differing line because it's already too long
if solCur < maxChars/2:
colStart = 0
colEnd = maxChars
elif len(solDLine) - solCur < maxChars/2:
colStart = len(solDLine)-maxChars
colEnd = max(len(solDLine), len(expDLine))
else:
colStart = solCur - maxChars/2
colEnd = solCur + maxChars/2
result['displayedSolutionOutput'] = utf8safe(solDLine[colStart:colEnd])
result['displayedExpectedOutput'] = utf8safe(expDLine[colStart:colEnd])
result['truncatedBefore'] = (diffLine > 1)
result['truncatedAfter'] = True
result['excerptRow'] = diffLine
result['excerptCol'] = colStart+1
else:
# We add lines before and/or after as long as we stay within maxChars
remChars = maxChars - max(len(solDLine), len(expDLine))
dispStartLine = relLine
dispSolEndLine = relLine
dispExpEndLine = relLine
# Add lines before from both solution and expected output
while dispStartLine > 0:
if len(solLines[dispStartLine-1]) > remChars:
break
else:
remChars -= len(solLines[dispStartLine-1])
dispStartLine -= 1
# Separately add lines from solution and expected output, as it's
# possible they don't have the same lines/number of lines
while dispSolEndLine < len(solLines)-1:
if len(solLines[dispSolEndLine+1]) > remChars:
break
else:
remChars -= len(solLines[dispSolEndLine+1])
dispSolEndLine += 1
while dispExpEndLine < len(expLines)-1:
if len(expLines[dispExpEndLine+1]) > remChars:
break
else:
remChars -= len(expLines[dispExpEndLine+1])
dispExpEndLine += 1
result['displayedSolutionOutput'] = utf8safe(''.join(solLines[dispStartLine:dispSolEndLine+1]))
result['displayedExpectedOutput'] = utf8safe(''.join(expLines[dispStartLine:dispExpEndLine+1]))
result['truncatedBefore'] = (dispStartLine + chunkLine > 1)
result['truncatedAfter'] = truncatedAfter
result['excerptRow'] = dispStartLine + chunkLine
result['excerptCol'] = 1
# Return a grade of 0 (answer mismatch) and the results info
return (0, result)
if __name__ == '__main__':
if len(argv) != 4:
print "Error: invalid number of arguments."
exit(1) # Exit code of 1 means a checker error
try:
grade, result = diff(argv[1], argv[3])
except:
print '0'
print 'Error during solution check, please contact an administrator.'
from traceback import print_exc
print_exc()
exit(1)
print grade
if grade != 100:
print dumps(result)
| mit | 1,921,150,540,335,551,700 | 33.809211 | 113 | 0.589964 | false |
eyeofhell/pyuser | pyuser/grid_wx.py | 1 | 1446 | #!/usr/bin/env python
# coding:utf-8 vi:et:ts=2
# PyUser grid widget for wxWidgets backend.
# Copyright 2013 Grigory Petrov
# See LICENSE for details.
import wx
from support_widget import Widget
import pyuser as pu
##c Grid layout, default for 2 columns (settings etc).
class Grid( Widget ):
def __init__( self, s_name = None, o_parent = 'auto', n_columns = 2 ):
Widget.__init__( self, s_name = s_name, o_parent = o_parent )
self._context_o = wx.FlexGridSizer( cols = n_columns )
self._columns_n = n_columns
## 0-based index of next column being added.
self._nextColumn_n = 0
## 0-based index of current row being added.
self._row_n = 0
def context( self ):
return self._context_o
##x Overloads |Widget|.
def dadd( self, o_widget ):
Widget.dadd( self, o_widget )
mCfg = { 'proportion': 0, 'item': o_widget }
if o_widget.grow().cx() and o_widget.grow().cy():
mCfg[ 'proportion' ] = 1
      mCfg[ 'flag' ] = wx.EXPAND
elif o_widget.grow().cx():
mCfg[ 'proportion' ] = 1
elif o_widget.grow().cy():
      mCfg[ 'flag' ] = wx.EXPAND
self.Add( ** mCfg )
self._nextColumn_n += 1
if self._nextColumn_n >= self._columns_n:
self._nextColumn_n = 0
self._row_n += 1
##x Overloads |Whoami|.
def isGrid( self ): return True
##x Overloads |Whoami|.
def isLayout( self ): return True
| gpl-3.0 | 1,094,700,796,064,712,600 | 23.821429 | 72 | 0.587828 | false |
googleinterns/deep-stabilization | dvs/warp/read_write.py | 1 | 3803 | import numpy as np
import cv2
import os
from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
import ffmpeg
import json
import torch
def load_video(path, save_dir = None, resize = None, length = -1): # N x H x W x C
vidcap = cv2.VideoCapture(path)
fps = vidcap.get(cv2.CAP_PROP_FPS)
success,image = vidcap.read()
print(image.shape)
height, width, layers = image.shape
if resize is None:
size = (width,height)
elif type(resize) is int:
size = (width//resize,height//resize)
else:
size = resize
count = 0
frames = []
while success:
if resize is not None:
image = cv2.resize(image, size, interpolation = cv2.INTER_LINEAR)
if save_dir != None:
path = os.path.join(save_dir, "frame_" + str(count).zfill(4) + ".png")
cv2.imwrite(path, image)
frames.append(image)
success,image = vidcap.read()
count += 1
if length > 0 and count >= length:
break
print("Video length: ", len(frames))
return frames, fps, size
def video2frame(path, resize = None):
data_name = sorted(os.listdir(path))
for i in range(len(data_name)):
print(str(i+1)+" / " + str(len(data_name)))
data_folder = os.path.join(path, data_name[i])
print(data_folder)
files = os.listdir(data_folder)
for f in files:
if f[-4:] == ".mp4":
video_name = f
video_path = os.path.join(data_folder, video_name)
frame_folder = os.path.join(data_folder, "frames")
if not os.path.exists(frame_folder):
os.makedirs(frame_folder)
load_video(video_path, save_dir = frame_folder, resize=resize)
def video2frame_one_seq(path, save_dir = None, resize = None): # N x H x W x C
vidcap = cv2.VideoCapture(path)
fps = vidcap.get(cv2.CAP_PROP_FPS)
success,image = vidcap.read()
print(path)
print(image.shape)
height, width, layers = image.shape
if resize is None:
size = (width,height)
elif type(resize) is int:
size = (width//resize,height//resize)
else:
size = resize
count = 0
while success:
if resize is not None:
image = cv2.resize(image, size, interpolation = cv2.INTER_LINEAR)
if save_dir != None:
path = os.path.join(save_dir, "frame_" + str(count).zfill(5) + ".png")
cv2.imwrite(path, image)
success,image = vidcap.read()
count += 1
return fps, size
def save_video(path,frame_array, fps, size, losses = None, frame_number = False, writer = None):
if writer is None:
if path[-3:] == "mp4":
out = cv2.VideoWriter(path,cv2.VideoWriter_fourcc(*'mp4v'), fps, size)
else:
out = cv2.VideoWriter(path,cv2.VideoWriter_fourcc('M','J','P','G'), fps, size)
else:
out = writer
for i in range(len(frame_array)):
        # writing to an image array
if frame_number:
frame_array[i] = draw_number(np.asarray(frame_array[i]), i)
if losses is not None:
frame_array[i] = draw_number(np.asarray(frame_array[i]), losses[i], x = 900, message = "Loss: ")
out.write(frame_array[i])
if writer is None:
out.release()
def draw_number(frame, num, x = 10, y = 10, message = "Frame: "):
image=Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype("./data/arial.ttf", 45)
message = message + str(num)
color = 'rgb(0, 0, 0)' # black color
draw.text((x, y), message, fill=color, font=font)
return cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
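# Illustrative usage of draw_number (values are made up): save_video uses it to
# overlay the frame index and, optionally, a per-frame loss onto each frame.
#   frame = draw_number(frame, 42)                               # draws "Frame: 42"
#   frame = draw_number(frame, 0.173, x=900, message="Loss: ")   # draws "Loss: 0.173"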
if __name__ == "__main__":
video2frame("./video", resize = 4) | apache-2.0 | 6,814,465,245,670,962,000 | 34.222222 | 108 | 0.58822 | false |
RasmusWL/generate-lstlisting | outputter.py | 1 | 2053 | from classes import *
latexBegin = r'''
\newcommand{\includecodelang}[2]{\lstinputlisting[escapechar=, language=#2]{#1}}
\newcommand{\includecode}[1]{\lstinputlisting[escapechar=]{#1}}
'''
latexIncludeCode = "\\includecode{%s}"
latexIncludeCodeLang = "\\includecodelang{%s}{%s}"
latexFileHeading = "\\%s{%s\label{%s:%s}}"
latexFileHeadingNoLabel = "\\%s{%s}"
latexReplacements = {
'\t': '\\ ' * 4,
'&': '\\&',
'%': '\\%',
'$': '\\$',
'#': '\\#',
'_': '\\_',
'{': '\\{',
'}': '\\}',
'~': '\\textasciitilde ',
'^': '\\textasciicircum '
}
def escapeForLatex(text):
text = text.replace('\\', '\\textbackslash')
text = text.replace(' ', '\\ ')
text = text.replace('\\textbackslash', '\\textbackslash ')
for i, j in latexReplacements.items():
text = text.replace(i, j)
text = text.replace('"', '\char`\"{}')
return text
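# Illustrative example (not used at runtime): escapeForLatex escapes the
# LaTeX-active characters and turns spaces into forced '\ ' spaces, e.g.
#   escapeForLatex('50% of $x')  ->  '50\%\ of\ \$x'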
def output_start(out_file):
out_file.write(latexBegin)
def output(filename, rel_path, out_file):
out_file.write("%" * 80)
out_file.write("\n")
out_file.write("%% %s\n\n" % rel_path)
if settings.shouldAddLabel:
# apparently, no escape in labels
heading = latexFileHeading % (settings.headingStyle, escapeForLatex(rel_path), settings.labelPrefix, rel_path)
else:
heading = latexFileHeadingNoLabel % (settings.headingStyle, escapeForLatex(rel_path) )
out_file.write(heading)
out_file.write("\n")
language = None
for key in fileExtensionMap:
if filename.endswith(key):
language = fileExtensionMap[key]
break
if language is None:
include_line = latexIncludeCode % (filename)
else:
include_line = latexIncludeCodeLang % (filename, language)
out_file.write(include_line)
out_file.write("\n")
out_file.write("\n")
fileExtensionMap = {
'.erl' : 'erlang'
, '.hs' : 'Haskell'
, '.py' : 'Python'
, '.java' : 'Java'
, '.sh' : 'sh'
, '.bash' : 'bash'
, '.sml' : 'ML'
, '.sig' : 'ML'
}
| mit | 2,184,732,010,869,205,800 | 23.152941 | 118 | 0.566001 | false |
escherba/marx | setup.py | 1 | 1259 | import os
from setuptools import setup, find_packages
import glob
import sys
README = "README.md"
base = os.path.dirname(__file__)
local = lambda x: os.path.join(base, x)
def read(fname):
return open(local(fname)).read()
def hydrate_examples():
examples = {}
for f in glob.glob(local('examples/*')) + glob.glob(local('tests/*')) + glob.glob(local('tests/*/*')):
if os.path.isdir(f):
continue
examples[os.path.basename(f)] = "\n ".join(read(f).split("\n"))
readme = read(README + ".in") % examples
with open(local(README), "w") as f:
f.write(readme)
hydrate_examples()
if "test" in sys.argv or "nosetests" in sys.argv:
TEST_REQS = ['nose>=1.0', 'coverage==3.6', 'nosexcover', 'mock']
else:
TEST_REQS = []
setup(
name="marx-workflows",
version='0.0.9',
author="Nino Walker",
author_email="[email protected]",
description=read(README).split("\n", 1)[0],
url='https://github.com/ninowalker/marx',
license="BSD",
packages=find_packages(exclude=["tests.*", "tests"]),
long_description=read(README),
setup_requires=TEST_REQS + ['wheel'],
test_suite='nose.collector',
classifiers=[
"License :: OSI Approved :: BSD License",
],
)
| bsd-2-clause | -4,530,123,679,348,356,600 | 26.369565 | 106 | 0.610802 | false |
rishubil/sqlalchemy-fulltext-search | setup.py | 1 | 1279 | """
SQLAlchemy FullText Search
"""
from setuptools import setup, Command
setup(
name='SQLAlchemy-FullText-Search',
version='0.2.3',
url='https://github.com/mengzhuo/sqlalchemy-fulltext-search',
license='BSD',
author='Meng Zhuo, Alejandro Mesa',
author_email='[email protected], [email protected]',
description=('Provide FullText for MYSQL & SQLAlchemy model'),
long_description = __doc__,
packages=['sqlalchemy_fulltext'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=['SQLAlchemy>=0.8',],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules' ]
)
| mit | -265,112,061,610,332,200 | 38.96875 | 99 | 0.52932 | false |
hazybluedot/manager_review | util.py | 1 | 1200 | def num_or_string(value):
try:
return float(value)
except ValueError:
return value
def num_or_none(fn, value):
try:
return fn(value)
except ValueError:
return None
def flatten_list(l):
return [ item for sublist in l for item in sublist ] # flatten list of lists
def issumable(thing):
try:
1.0 + thing
except TypeError:
return False
else:
return True
def label_to_attr(string):
return string.lower().replace(' ','_')
def levenshtein(s1, s2):
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer
deletions = current_row[j] + 1 # than s2
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
| gpl-2.0 | 2,454,069,128,475,677,000 | 25.666667 | 127 | 0.5775 | false |
jackwluo/py-quantmod | quantmod/auth.py | 1 | 1238 | """Functions that manage configuration writing
Refactored from Plotly's 'auth.py'.
"""
from __future__ import absolute_import
import os
package = 'quantmod'
AUTH_DIR = os.path.join(os.path.expanduser('~'), '.' + package)
TEST_DIR = os.path.join(AUTH_DIR, 'test')
TEST_FILE = os.path.join(AUTH_DIR, 'permission_test')
CONFIG_FILE = os.path.join(AUTH_DIR, 'config.json')
FILE_CONTENT = {
CONFIG_FILE: {
'sharing': 'public',
'dimensions': None,
'theme': 'light',
'source': 'yahoo',
'offline': False,
'offline_url': '',
'offline_show_link': True,
'offline_link_text': 'Edit Chart',
}
}
def _permissions():
"""Check for write access."""
try:
os.mkdir(TEST_DIR)
os.rmdir(TEST_DIR)
if not os.path.exists(AUTH_DIR):
os.mkdir(AUTH_DIR)
with open(TEST_FILE, 'w') as f:
f.write('Testing\n')
os.remove(TEST_FILE)
return True
except:
return False
_file_permissions = _permissions()
def check_file_permissions():
"""Return True if write permissions, else return False."""
return _file_permissions
def get_path():
"""Get path of AUTH_DIR."""
return AUTH_DIR
| mit | 3,377,181,442,729,121,300 | 20.719298 | 63 | 0.588045 | false |
OfficialMan/Sark | sark/data.py | 1 | 2198 | from collections import namedtuple
import idc
import idaapi
import itertools
import struct
from awesome.iterator import irange as range
from .core import fix_addresses
def Bytes(start=None, end=None):
start, end = fix_addresses(start, end)
return itertools.imap(idc.Byte, range(start, end))
def Words(start=None, end=None):
start, end = fix_addresses(start, end)
return itertools.imap(idc.Word, range(start, end, 2))
def Dwords(start=None, end=None):
start, end = fix_addresses(start, end)
return itertools.imap(idc.Dword, range(start, end, 4))
def Qwords(start=None, end=None):
start, end = fix_addresses(start, end)
return itertools.imap(idc.Qword, range(start, end, 4))
def bytes_until(byte=0, start=None, end=None):
return iter(Bytes(start, end).next, byte)
def words_until(word=0, start=None, end=None):
return iter(Words(start, end).next, word)
def dwords_until(dword=0, start=None, end=None):
return iter(Dwords(start, end).next, dword)
def Chars(start=None, end=None):
return itertools.imap(chr, Bytes(start, end))
def chars_until(char='\0', start=None, end=None):
return iter(Chars(start, end).next, char)
def read_ascii_string(ea, max_length=None):
if max_length is None:
end = None
else:
end = ea + max_length
return "".join(chars_until(start=ea, end=end))
def dword_to_bytes(dword):
return struct.pack(">L", dword)
def read_memory(start, end):
size = end - start
return idaapi.get_many_bytes(start, size)
def write_memory(start, data, destructive=False):
if destructive:
idaapi.put_many_bytes(start, data)
else:
idaapi.patch_many_bytes(start, data)
PatchedByte = namedtuple("PatchedByte", "ea fpos original patched")
def get_patched_bytes(start=None, end=None):
start, end = fix_addresses(start, end)
patched_bytes = dict()
def collector(ea, fpos, original, patched):
patched_bytes[ea] = PatchedByte(ea, fpos, original, patched)
return 0
idaapi.visit_patched_bytes(start, end, collector)
return patched_bytes
def undefine(start, end):
idc.MakeUnknown(start, end - start, idc.DOUNK_SIMPLE) | mit | 8,412,659,711,819,700,000 | 21.438776 | 68 | 0.682439 | false |
ri23/FISHmodel | 3Dseg.py | 1 | 6210 | """Segment 3D tissue without cell walls."""
import os
import argparse
import numpy as np
import scipy.ndimage
import scipy.misc
from scipy.ndimage.filters import laplace
from skimage.exposure import equalize_hist
from skimage.filters import gaussian_filter
from skimage.measure import label
from skimage.morphology import watershed, remove_small_objects
from jicbioimage.core.io import FileBackend
from jicbioimage.core.image import DataManager
from jicbioimage.core.image import SegmentedImage
from jicbioimage.transform import (
max_intensity_projection
)
from jicbioimage.illustrate import AnnotatedImage
HERE = os.path.dirname(__file__)
UNPACK = os.path.join(HERE, '..', 'data', 'unpack')
OUTPUT = os.path.join(HERE, '..', 'output')#'/group-share','ietswaar','test','output')#HERE, '..', 'output') RI edit 1
if not os.path.isdir(OUTPUT):
os.mkdir(OUTPUT)
DEBUG = False
def collection_from_filename(stack_filename):
file_backend = FileBackend(UNPACK)
data_manager = DataManager(file_backend)
microscopy_collection = data_manager.load(stack_filename)
return microscopy_collection
def save_sample(filename, stack, sample_z=25):
full_path = os.path.join(OUTPUT, filename)
if DEBUG:
scipy.misc.imsave(full_path, stack[:,:,sample_z])
def save_stack(stack, stack_name='stack'):
if not DEBUG:
return
stack_dir = os.path.join(OUTPUT, stack_name + '.stack')
if not os.path.isdir(stack_dir):
os.mkdir(stack_dir)
xdim, ydim, zdim = stack.shape
for z in range(zdim):
filename = 'z{}.png'.format(z)
full_name = os.path.join(stack_dir, filename)
scipy.misc.imsave(full_name, stack[:,:,z])
def blank_layers(input_array, n_layers=2, blank=1):
"""Return a copy of the input array with the top and bottom
n_layers set to a particular value."""
_, _, zdim = input_array.shape
start_z = n_layers
stop_z = zdim - n_layers
blanked = input_array.copy()
blanked[:,:,0:start_z] = blank
blanked[:,:,stop_z:] = blank
return blanked
def find_seeds(zstack):
"""Return array containing segmentation seeds."""
smooth_sigma = 10
seed_threshold = 0.13
min_size = 40000#10000 RI edit 5
xdim, ydim, zdim = zstack.shape
save_sample('start.png', zstack)
smoothed = gaussian_filter(zstack, sigma=smooth_sigma)
save_sample('smoothed.png', smoothed)
edges = laplace(smoothed)
edges = edges + np.min(edges)
save_sample('laplace.png', edges)
equalised = equalize_hist(edges)
save_sample('equalised.png', equalised)
blanked = blank_layers(equalised)
thresholded = blanked < seed_threshold
save_sample('thresholded.png', thresholded)
save_stack(thresholded, 'thresh')
connected = label(thresholded)
save_sample('connected.png', connected)
save_stack(connected, 'connected')
#rids = np.unique(connected)
#print [len(np.where(connected==rid)[0]) for rid in rids[1:]]
filtered_connected = remove_small_objects(connected, min_size=min_size)
save_stack(filtered_connected, 'filtered_connected')
return filtered_connected
def segment_from_seeds(zstack, seeds, watershed_cutoff):
    smooth_sigma = 5  # 15 RI edit 4
size_threshold = 10000
smoothed2 = scipy.ndimage.filters.gaussian_filter(zstack,
sigma=smooth_sigma)
save_sample('smoothed2.png', smoothed2)
inverted = np.max(smoothed2) - smoothed2
save_sample('inverted.png', inverted)
# Now normalised
equalised2 = equalize_hist(inverted)
save_sample('equalised2.png', equalised2)
save_stack(equalised2, 'equalised')
mask = equalised2 < watershed_cutoff
save_sample('mask.png', mask)
segmented = watershed(equalised2, seeds, mask=mask)
save_sample('segmented.png', segmented)
save_stack(segmented, 'segmented')
# region_ids = np.unique(segmented)
# sizes = [len(np.where(segmented == rid)[0]) for rid in region_ids]
nosmall = remove_small_objects(segmented, min_size=size_threshold)
save_stack(nosmall, 'nosmall')
reseg = watershed(equalised2, nosmall, mask=mask)
save_stack(reseg, 'reseg')
return reseg
def uint8ify(input_array):
max_val = float(np.max(input_array))
min_val = float(np.min(input_array))
val_range = max_val - min_val
return 255 * ((input_array.astype(np.float) - min_val) / val_range)
def generate_annotated_image(collection, cell_level_threshold):
zstack = collection.zstack_array(s=0, c=2)
probe_stack = collection.zstack_array(s=0, c=0)
max_intensity_projection(probe_stack)
seeds = find_seeds(zstack)
#probe_stack2 = collection.zstack_array(s=0, c=1) #RI edit 2
zstack = zstack + probe_stack #+ probe_stack2#RI edit 3
segmentation = segment_from_seeds(zstack, seeds, cell_level_threshold)
projection = max_intensity_projection(zstack)
projection_as_uint8 = uint8ify(projection)
annotated_projection = AnnotatedImage.from_grayscale(projection_as_uint8)
rids = np.unique(segmentation)
for rid in rids[1:]:
x, y, z = map(np.mean, np.where(segmentation == rid))
size = len(np.where(segmentation == rid)[0])
annotated_projection.text_at(str(size), y-10, x)
annotation_filename = 'annotated_image.png'
with open(annotation_filename, 'wb') as f:
f.write(annotated_projection.png())
def main():
global DEBUG
parser = argparse.ArgumentParser(__doc__)
parser.add_argument('image_filename', help="Image filename")
parser.add_argument('--cell-level-threshold',
type=float,
default=0.3,
help="Threshold (in range 0 < t < 1) defining cell")
parser.add_argument('--verbose',
type=bool,
default=False,
help="Whether processing stages should be output")
args = parser.parse_args()
DEBUG = args.verbose
collection = collection_from_filename(args.image_filename)
generate_annotated_image(collection, args.cell_level_threshold)
if __name__ == "__main__":
main()
| mit | -6,816,007,855,000,496,000 | 27.356164 | 118 | 0.663768 | false |
petebachant/actuatorLine-2D-turbinesFoam | plot.py | 1 | 1090 | #!/usr/bin/env python
"""
This script plots results from `paramsweep.py`.
"""
from __future__ import division, print_function
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
import seaborn as sns
U_infty = 1.0
if __name__ == "__main__":
sns.set(style="white", context="paper", font_scale=1.5,
rc={"axes.grid": True, "legend.frameon": True})
df = pd.read_csv("processed/alpha_sweep.csv")
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(7.5, 3))
ax1.plot(df.alpha_geom_deg, df.alpha_deg, "o", label="Detected")
ax1.plot(df.alpha_geom_deg, df.alpha_geom_deg, "--", label="Geometric")
ax1.set_xlabel(r"$\alpha$ (geometric, degrees)")
ax1.set_ylabel(r"$\alpha$ (detected, degrees)")
ax1.legend(loc="lower right")
ax2.plot(df.alpha_deg, df.rel_vel_mag, "o", label="Detected")
ax2.plot(df.alpha_geom_deg, np.ones(len(df)), "--", label="Geometric",
lw=2)
ax2.set_xlabel(r"$\alpha$ (detected, degrees)")
ax2.set_ylabel(r"$|U_\mathrm{rel}|$")
fig.tight_layout()
plt.show()
| mit | -2,037,779,283,774,665,700 | 34.16129 | 75 | 0.631193 | false |
unapiedra/BBChop | BBChop/BBChop.py | 1 | 6208 | # Copyright 2008 Ealdwulf Wuffinga
# This file is part of BBChop.
#
# BBChop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# BBChop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BBChop. If not, see <http://www.gnu.org/licenses/>.
from .listUtils import *
from .evidence import entropiesFast
from . import numberType
import copy
from . import skipProbability
#import plot
debug=False
#debug=True
############ ABBREVIATIONS
#
#
# E : Evidence
# L : Location
# d : number of detections at location
# t : number of non-detections at location
#
#
# strategies
# greedy strategy: always choose the location where the expected gain in entropy
# for the next observation is highest, ie, the expected entropy after the
#next observation is smallest.
def greedyStrat(counts,locPrior,likelihoodsObj,dag,skipProbs):
(currEntropy,entropyResults,findProbs)=entropiesFast(counts,locPrior,likelihoodsObj,dag)
# test where expected entropy is smallest
expectedGain = [(currEntropy-entropyResults[i])*(numberType.one-skipProbs[i]) for
i in range(len(entropyResults))]
(next,nextp)=findMax(expectedGain)
return next
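# Illustrative sketch only (never called by BBChop): the greedy rule above boils down
# to "argmax over (entropy drop) * (1 - skip probability)", shown here with plain floats.
def _greedyRuleExample():
    currEntropy = 1.0
    entropyAfter = [0.9, 0.6, 0.7]   # expected entropy if we observe at location i
    skipProbs = [0.0, 0.5, 0.0]      # probability that an observation at location i is skipped
    gains = [(currEntropy - e) * (1.0 - s) for e, s in zip(entropyAfter, skipProbs)]
    # gains == [0.1, 0.2, 0.3]: location 2 wins even though location 1 has the lowest
    # expected entropy, because observations there are skipped half the time.
    return gains.index(max(gains))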
# nearly greedy strategy: like greedy, but if we have a detection, see if observing there again
# would be expected to improve the next gain in entropy.
def nearlyGreedyStrat(counts,locPrior,likelihoodsObj,dag,skipProbs):
dlocs=[i for i in range(len(counts)) if counts[i][1]]
(currEntropy,entropyResults,findProbs)=entropiesFast(counts,locPrior,likelihoodsObj,dag)
(next,nextE)=findMin(entropyResults)
if len(dlocs):
# if there is a detection, calculate the expected entropy after making another observation
# there and then making a 'greedy' observation.
dloc=dlocs[-1]
(t,d)=counts[dloc]
dcounts=copy.copy(counts)
tcounts=copy.copy(counts)
dcounts[dloc]=(t,d+1)
tcounts[dloc]=(t+1,d)
(currEntropyD,entropyResultsD,findProbsD)=entropiesFast(dcounts,locPrior,likelihoodsObj,dag)
(currEntropyT,entropyResultsT,findProbsT)=entropiesFast(tcounts,locPrior,likelihoodsObj,dag)
(nextD,nextED)=findMin(entropyResultsD)
(nextT,nextET)=findMin(entropyResultsT)
expectedEntropy=findProbs[dloc]*nextED+(1-findProbs[dloc])*nextET
# print "c %1.2f n %1.02f c-n %1.04f c-e %1.04f fp %1.02f nf %1.02f nt %1.02f" %(currEntropy,nextE,currEntropy-nextE,currEntropy-expectedEntropy,findProbs[dloc],nextED,nextET)
if (currEntropy-nextE)<(currEntropy-expectedEntropy)/2.0:
return dloc
else:
return next
else:
return next
class BBChop:
def __init__(self,
locPrior,
certainty,
interactor,
likelihoodsObj,
dag,
strategy=greedyStrat,
skipProbsFunc=skipProbability.skipProbsSimple):
self.locPrior=numberType.copyList(locPrior)
self.certainty=numberType.const(certainty)
self.counts=[(0,0) for p in locPrior]
self.skipProbsFunc=skipProbsFunc
self.skipped=[False for p in locPrior]
self.dag=dag
self.skipProbs = self.skipProbsFunc(self.skipped,self.dag)
self.interactor=interactor
self.total=0
self.likelihoodsObj=likelihoodsObj
self.strategy=strategy
def addPriorKnowlege(self,knowlege):
(positives,negatives)=knowlege
(t,d)=self.counts[-1]
t+=negatives
d+=positives
self.counts[-1]=(t,d)
def addResult(self,location,observation):
(t,d)=self.counts[location]
# 'None' means we've decided that this location is invalid (eg, won't compile)
if observation is None:
self.skipped[location]=True
# set prior to zero because otherwise termination probability
# cannot always be achieved. This means that
# the probabilities we calculate are conditional on the bug not being located
# at a skipped location.
self.locPrior[location]=numberType.zero
self.skipProbs = self.skipProbsFunc(self.skipped,self.dag)
elif observation is True:
self.counts[location]=(t,d+1)
else:
self.counts[location]=(t+1,d)
if debug:
print(("ct",self.counts))
def search(self):
(locProbs,evProb)=self.likelihoodsObj.probs(self.counts,self.locPrior,self.dag)
(whereabouts,maxp) = findMax(locProbs)
if debug:
print("lp",list(map(float,locProbs)))
print("ct",self.counts)
while(maxp<self.certainty):
            # decide where to search next
self.interactor.statusCallback(False,whereabouts,maxp,locProbs,self.counts)
next=self.strategy(self.counts,
self.locPrior,
self.likelihoodsObj,
self.dag,
self.skipProbs)
observation=self.interactor.test(next)
self.total+=1
# update evidence
self.addResult(next,observation)
(locProbs,evProb)=self.likelihoodsObj.probs(self.counts,self.locPrior,self.dag)
print(locProbs)
if debug:
print("lp",list(map(float,locProbs)))
print("e",float(entropy(locProbs)),list(map(float,entropyResults)))
print("fp",list(map(float,findProbs)))
(whereabouts,maxp) = findMax(locProbs)
self.interactor.statusCallback(True,whereabouts,maxp,locProbs,self.counts)
return whereabouts
| gpl-2.0 | -4,055,131,030,520,003,000 | 31.673684 | 182 | 0.643686 | false |
ikeeip/passholder | src/twisted/plugins/passholder_plugin.py | 1 | 2217 | from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from twisted.application import internet
from passholder.service import PassHolderService, IPassHolderFactory, ServerContextFactory
import txredisapi as redis
import os, string
class Options(usage.Options):
optParameters = [
['password', 'p'],
['port', 'P', 8123, "port number to listen on", int],
['listen', None, "127.0.0.1", "interface to listen on"],
["redis-host", None, "127.0.0.1", "hostname or ip address of the redis server"],
["redis-port", None, 6379, "port number of the redis server", int],
["redis-pool", None, 10, "connection pool size", int],
["redis-db", None, 0, "redis database", int],
['server-cert', 'c', 'keys/server-cert.pem'],
['server-key', 'k', 'keys/server-key.pem'],
['ca-cert', 'a', 'keys/ca-cert.pem'],
['scrypt-enctime', 't', 0.1, None, float],
]
class PassHolderServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = 'passholder'
description = "Secure Password Holder service"
options = Options
def makeService(self, config):
if config['password'] is None:
raise usage.UsageError, "--password is required."
if string.find(config['password'], 'env:') == 0:
env = string.replace(config['password'], 'env:', '', 1)
pwd = os.getenv(env)
if pwd is None:
raise usage.UsageError, "invalid environment variable in --password option"
else:
config['password'] = pwd
db = redis.lazyConnectionPool(config['redis-host'], config['redis-port'], poolsize=config['redis-pool'], dbid=config['redis-db'])
passHolderService = PassHolderService(config['password'], config['scrypt-enctime'], db)
return internet.SSLServer(config['port'],
IPassHolderFactory(passHolderService),
ServerContextFactory(config['server-cert'], config['server-key'], config['ca-cert']),
interface=config["listen"]
)
serviceMaker = PassHolderServiceMaker()
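# Illustrative invocation (paths and values are placeholders, adjust to your setup):
#   twistd -n passholder --password env:PASSHOLDER_PWD --port 8123 \
#          --server-cert keys/server-cert.pem --server-key keys/server-key.pem --ca-cert keys/ca-cert.pem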
| mit | -829,845,813,388,175,200 | 39.309091 | 137 | 0.635092 | false |
fiji370/ostentatious-palaverer | bot_prototype 2.0/convert.py | 1 | 3005 | __all__ = ['convert']
# Don't look below, you will not understand this Python code :) I don't.
from js2py.pyjs import *
# setting scope
var = Scope( JS_BUILTINS )
set_global_object(var)
# Code follows:
var.registers([u'a', u'feach', u'g', u'checnum', u'n', u'Magnitude', u'text2num', u'Small'])
@Js
def PyJsHoisted_feach_(w, this, arguments, var=var):
var = Scope({u'this':this, u'arguments':arguments, u'w':w}, var)
var.registers([u'x', u'w'])
var.put(u'x', var.get(u'Small').get(var.get(u'w')))
if (var.get(u'x')!=var.get(u"null")):
var.put(u'g', (var.get(u'g')+var.get(u'x')))
else:
if (var.get(u'w')==Js(u'hundred')):
var.put(u'g', (var.get(u'g')*Js(100.0)))
else:
var.put(u'x', var.get(u'Magnitude').get(var.get(u'w')))
if (var.get(u'x')!=var.get(u"null")):
var.put(u'n', (var.get(u'n')+(var.get(u'g')*var.get(u'x'))))
var.put(u'g', Js(0.0))
else:
pass
PyJsHoisted_feach_.func_name = u'feach'
var.put(u'feach', PyJsHoisted_feach_)
@Js
def PyJsHoisted_checnum_(PyJsArg_6173_, this, arguments, var=var):
var = Scope({u'this':this, u'as':PyJsArg_6173_, u'arguments':arguments}, var)
var.registers([u'a', u'as'])
var.put(u'a', var.get(u'as').get(u'value'))
var.get(u'as').put(u'value', var.get(u'a').callprop(u'replace', JsRegExp(u'/[^\\d.]/g'), Js(u'')))
PyJsHoisted_checnum_.func_name = u'checnum'
var.put(u'checnum', PyJsHoisted_checnum_)
@Js
def PyJsHoisted_text2num_(s, this, arguments, var=var):
var = Scope({u'this':this, u's':s, u'arguments':arguments}, var)
var.registers([u's'])
var.put(u'a', var.get(u's').callprop(u'toString').callprop(u'split', JsRegExp(u'/[\\s-]+/')))
var.put(u'n', Js(0.0))
var.put(u'g', Js(0.0))
var.get(u'a').callprop(u'forEach', var.get(u'feach'))
return (var.get(u'n')+var.get(u'g'))
PyJsHoisted_text2num_.func_name = u'text2num'
var.put(u'text2num', PyJsHoisted_text2num_)
PyJs_Object_0_ = Js({u'zero':Js(0.0),u'one':Js(1.0),u'two':Js(2.0),u'three':Js(3.0),u'four':Js(4.0),u'five':Js(5.0),u'six':Js(6.0),u'seven':Js(7.0),u'eight':Js(8.0),u'nine':Js(9.0),u'ten':Js(10.0),u'eleven':Js(11.0),u'twelve':Js(12.0),u'thirteen':Js(13.0),u'fourteen':Js(14.0),u'fifteen':Js(15.0),u'sixteen':Js(16.0),u'seventeen':Js(17.0),u'eighteen':Js(18.0),u'nineteen':Js(19.0),u'twenty':Js(20.0),u'thirty':Js(30.0),u'forty':Js(40.0),u'fifty':Js(50.0),u'sixty':Js(60.0),u'seventy':Js(70.0),u'eighty':Js(80.0),u'ninety':Js(90.0)})
var.put(u'Small', PyJs_Object_0_)
PyJs_Object_1_ = Js({u'thousand':Js(1000.0),u'million':Js(1000000.0),u'billion':Js(1000000000.0),u'trillion':Js(1000000000000.0),u'quadrillion':Js(1000000000000000.0),u'quintillion':Js(1e+18),u'sexillion':Js(1e+21),u'septillion':Js(1e+24),u'octillion':Js(1e+27),u'nonillion':Js(1e+30),u'decillion':Js(1e+33)})
var.put(u'Magnitude', PyJs_Object_1_)
pass
pass
pass
pass
pass
pass
pass
# Add lib to the module scope
convert = var.to_python() | gpl-3.0 | 3,958,432,273,705,937,000 | 45.96875 | 532 | 0.610982 | false |
llvm-mirror/lldb | scripts/Python/finishSwigPythonLLDB.py | 3 | 13786 | """ Python SWIG post process script for each language
--------------------------------------------------------------------------
File: finishSwigPythonLLDB.py
Overview: Python script(s) to post process SWIG Python C++ Script
Bridge wrapper code on the Windows/LINUX/OSX platform.
The Python scripts are equivalent to the shell script (.sh)
files.
For the Python script interpreter (external to liblldb) to
be able to import and use the lldb module, there must be
two files, lldb.py and _lldb.so, that it can find. lldb.py
is generated by SWIG at the same time it generates the C++
file. _lldb.so is actually a symlink file that points to
the LLDB shared library/framework.
The Python script interpreter needs to be able to
automatically find these two files. On Darwin systems it
searches in the LLDB.framework, as well as in all the normal
Python search paths. On non-Darwin systems these files will
need to be put some place where Python will find them.
This shell script creates the _lldb.so symlink in the
appropriate place, and copies the lldb.py (and
embedded_interpreter.py) file to the correct directory.
    Gotchas:   Python debug compiled pythonXX_d.lib is required for SWIG
to build correct LLDBWrapperPython.cpp in order for Visual
Studio to compile successfully. The release version of the
Python lib will not work (20/12/2013).
LLDB (dir) CMakeLists.txt uses windows environmental
variables $PYTHON_INCLUDE and $PYTHON_LIB to locate
Python files required for the build.
Copyright: None.
--------------------------------------------------------------------------
"""
# Python modules:
import os # Provide directory and file handling, determine OS information
import sys # System specific parameters and functions
import shutil # High-level operations on files and collections of files
import ctypes # Invoke Windows API for creating symlinks
# Third party modules:
# In-house modules:
import utilsOsType # Determine the OS type this script is running on
import utilsDebug # Debug Python scripts
# User facing text:
strMsgOsVersion = "The current OS is %s"
strMsgPyVersion = "The Python version is %d.%d"
strErrMsgProgFail = "Program failure: "
strErrMsgLLDBPyFileNotNotFound = "Unable to locate lldb.py at path '%s'"
strMsgCopyLLDBPy = "Copying lldb.py from '%s' to '%s'"
strErrMsgFrameWkPyDirNotExist = "Unable to find the LLDB.framework directory '%s'"
strMsgCreatePyPkgCopyPkgFile = "create_py_pkg: Copied file '%s' to folder '%s'"
strMsgCreatePyPkgInitFile = "create_py_pkg: Creating package init file '%s'"
strMsgCreatePyPkgMkDir = "create_py_pkg: Created folder '%s'"
strMsgConfigBuildDir = "Configuration build directory located at '%s'"
strMsgFoundLldbFrameWkDir = "Found '%s'"
strMsgPyFileLocatedHere = "Python file will be put in '%s'"
strMsgFrameWkPyExists = "Python output folder '%s' already exists"
strMsgFrameWkPyMkDir = "Python output folder '%s' will be created"
strErrMsgCreateFrmWkPyDirFailed = "Unable to create directory '%s' error: %s"
strMsgSymlinkExists = "Symlink for '%s' already exists"
strMsgSymlinkMk = "Creating symlink for %s (%s -> %s)"
strErrMsgCpLldbpy = "copying lldb to lldb package directory"
strErrMsgCreatePyPkgMissingSlash = "Parameter 3 fn create_py_pkg() missing slash"
strErrMsgMkLinkExecute = "Command mklink failed: %s"
strErrMsgMakeSymlink = "creating symbolic link"
strErrMsgUnexpected = "Unexpected error: %s"
strMsgCopySixPy = "Copying six.py from '%s' to '%s'"
strErrMsgCopySixPyFailed = "Unable to copy '%s' to '%s'"
#++---------------------------------------------------------------------------
# Details: Create Python packages and Python __init__ files.
# Args: vDictArgs - (R) Program input parameters.
# vstrFrameworkPythonDir - (R) Python framework directory.
# vstrPkgDir - (R) Destination for copied Python files.
# vListPkgFiles - (R) List of source Python files.
# Returns: Bool - True = function success, False = failure.
# Str - Error description on task failure.
# Throws: None.
#--
def create_py_pkg(
vDictArgs,
vstrFrameworkPythonDir,
vstrPkgDir,
vListPkgFiles):
dbg = utilsDebug.CDebugFnVerbose("Python script create_py_pkg()")
dbg.dump_object("Package file(s):", vListPkgFiles)
bDbg = "-d" in vDictArgs
bOk = True
strMsg = ""
if vstrPkgDir.__len__() != 0 and vstrPkgDir[0] != "/":
bOk = False
strMsg = strErrMsgCreatePyPkgMissingSlash
return (bOk, strMsg)
strPkgName = vstrPkgDir
strPkgName = "lldb" + strPkgName.replace("/", ".")
strPkgDir = vstrFrameworkPythonDir
strPkgDir += vstrPkgDir
strPkgDir = os.path.normcase(strPkgDir)
if not(os.path.exists(strPkgDir) and os.path.isdir(strPkgDir)):
if bDbg:
print((strMsgCreatePyPkgMkDir % strPkgDir))
os.makedirs(strPkgDir)
for strPkgFile in vListPkgFiles:
if os.path.exists(strPkgFile) and os.path.isfile(strPkgFile):
if bDbg:
print((strMsgCreatePyPkgCopyPkgFile % (strPkgFile, strPkgDir)))
shutil.copy(strPkgFile, strPkgDir)
    # Create a package init file if there wasn't one
strPkgIniFile = os.path.normpath(os.path.join(strPkgDir, "__init__.py"))
if os.path.exists(strPkgIniFile) and os.path.isfile(strPkgIniFile):
return (bOk, strMsg)
strPyScript = "__all__ = ["
strDelimiter = ""
for strPkgFile in vListPkgFiles:
if os.path.exists(strPkgFile) and os.path.isfile(strPkgFile):
strBaseName = os.path.basename(strPkgFile)
nPos = strBaseName.find(".")
if nPos != -1:
strBaseName = strBaseName[0: nPos]
strPyScript += "%s\"%s\"" % (strDelimiter, strBaseName)
strDelimiter = ","
strPyScript += "]\n"
strPyScript += "for x in __all__:\n"
strPyScript += "\t__import__('%s.' + x)" % strPkgName
if bDbg:
print((strMsgCreatePyPkgInitFile % strPkgIniFile))
file = open(strPkgIniFile, "w")
file.write(strPyScript)
file.close()
return (bOk, strMsg)
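# Illustrative call (paths are hypothetical): copy one module into the lldb/utils
# sub-package and generate its __init__.py:
#   create_py_pkg(vDictArgs, "/opt/lldb/python", "/utils",
#                 ["/src/lldb/examples/python/symbolication.py"])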
#++---------------------------------------------------------------------------
# Details: Retrieve the directory path for Python's dist_packages/
# site_package folder depending on the type of OS platform being
# used.
# Args: vDictArgs - (R) Program input parameters.
# Returns: Bool - True = function success, False = failure.
# Str - Python Framework directory path.
# strErrMsg - Error description on task failure.
# Throws: None.
#--
def get_framework_python_dir(vDictArgs):
dbg = utilsDebug.CDebugFnVerbose(
"Python script get_framework_python_dir()")
bOk = True
strErrMsg = ""
strWkDir = os.path.normpath(vDictArgs["--lldbPythonPath"])
return (bOk, strWkDir, strErrMsg)
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
""" Details: Program main entry point fn. Called by another Python script.
--------------------------------------------------------------------------
Details: This script is to be called by another Python script. It is not
intended to be called directly i.e from the command line.
Args: vDictArgs - (R) Map of parameter names to values.
-d (optional) Determines whether or not this script
outputs additional information when running.
-m (optional) Specify called from Makefile system. If given locate
the LLDBWrapPython.cpp in --srcRoot/source folder
else in the --targetDir folder.
--srcRoot The root of the lldb source tree.
--targetDir Where the lldb framework/shared library gets put.
--cfgBlddir Where the buildSwigPythonLLDB.py program will
(optional) put the lldb.py file it generated from running
SWIG.
--prefix Is the root directory used to determine where
(optional) third-party modules for scripting languages should
be installed. Where non-Darwin systems want to put
the .py and .so files so that Python can find them
automatically. Python install directory.
--lldbLibDir The name of the directory containing liblldb.so.
(optional) "lib" by default.
Results: 0 Success
-100+ Error from this script to the caller script.
-100 Error program failure with optional message.
--------------------------------------------------------------------------
"""
def main(vDictArgs):
dbg = utilsDebug.CDebugFnVerbose("Python script main()")
bOk = True
strMsg = ""
strErrMsgProgFail = ""
bDbg = "-d" in vDictArgs
eOSType = utilsOsType.determine_os_type()
if bDbg:
pyVersion = sys.version_info
print((strMsgOsVersion % utilsOsType.EnumOsType.name_of(eOSType)))
print((strMsgPyVersion % (pyVersion[0], pyVersion[1])))
bOk, strFrameworkPythonDir, strMsg = get_framework_python_dir(vDictArgs)
strRoot = os.path.normpath(vDictArgs["--srcRoot"])
if bOk:
# lldb
listPkgFiles = [
os.path.join(
strRoot,
"source",
"Interpreter",
"embedded_interpreter.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "", listPkgFiles)
if bOk:
# lldb/formatters/cpp
listPkgFiles = [
os.path.join(
strRoot,
"examples",
"synthetic",
"gnu_libstdcpp.py"),
os.path.join(
strRoot,
"examples",
"synthetic",
"libcxx.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/formatters/cpp", listPkgFiles)
if bOk:
# Make an empty __init__.py in lldb/runtime as this is required for
# Python to recognize lldb.runtime as a valid package (and hence,
# lldb.runtime.objc as a valid contained package)
listPkgFiles = []
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/runtime", listPkgFiles)
if bOk:
# lldb/formatters
# Having these files copied here ensure that lldb/formatters is a
# valid package itself
listPkgFiles = [
os.path.join(
strRoot, "examples", "summaries", "cocoa", "cache.py"), os.path.join(
strRoot, "examples", "summaries", "synth.py"), os.path.join(
strRoot, "examples", "summaries", "cocoa", "metrics.py"), os.path.join(
strRoot, "examples", "summaries", "cocoa", "attrib_fromdict.py"), os.path.join(
strRoot, "examples", "summaries", "cocoa", "Logger.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/formatters", listPkgFiles)
if bOk:
# lldb/utils
listPkgFiles = [
os.path.join(
strRoot,
"examples",
"python",
"symbolication.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/utils", listPkgFiles)
if bOk and (eOSType == utilsOsType.EnumOsType.Darwin):
# lldb/macosx
listPkgFiles = [
os.path.join(
strRoot,
"examples",
"python",
"crashlog.py"),
os.path.join(
strRoot,
"examples",
"darwin",
"heap_find",
"heap.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/macosx", listPkgFiles)
if bOk and (eOSType == utilsOsType.EnumOsType.Darwin):
# lldb/diagnose
listPkgFiles = [
os.path.join(
strRoot,
"examples",
"python",
"diagnose_unwind.py"),
os.path.join(
strRoot,
"examples",
"python",
"diagnose_nsstring.py")]
bOk, strMsg = create_py_pkg(
vDictArgs, strFrameworkPythonDir, "/diagnose", listPkgFiles)
if bOk:
return (0, strMsg)
else:
strErrMsgProgFail += strMsg
return (-100, strErrMsgProgFail)
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# This script can be called by another Python script by calling the main()
# function directly
if __name__ == "__main__":
print("Script cannot be called directly, called by finishSwigWrapperClasses.py")
| apache-2.0 | 5,337,334,831,581,804,000 | 40.518072 | 99 | 0.558546 | false |
pprofpc/generadorCalendario | xlsx.py | 1 | 7335 | # -*- coding: utf-8 -*-
#Para el excel
import xlsxwriter
try:
    import cStringIO as StringIO
except ImportError:
    import StringIO
# printIva below also relies on Django's HttpResponse and on this project's
# RegistroIva model (the model import is app-specific and assumed available).
from django.http import HttpResponse
#Descarga Libro iva
def printIva(request, idIva):
iva = RegistroIva.objects.get(id=idIva)
# Create the HttpResponse object with the appropriate PDF headers.
# create a workbook in memory
output = StringIO.StringIO()
workbook = xlsxwriter.Workbook(output)
arrayContenido = {
'border': 1,
'align': 'center',
'valign': 'vcenter'}
arrayMoney = {
'border': 1,
        'align': 'right',
'valign': 'vcenter',
'num_format': '[$$-2C0A] #.#0'}
contenidoTabla = workbook.add_format(arrayContenido)
money = workbook.add_format(arrayMoney)
def addHoja(worksheet, tipoLibro):
negrita = workbook.add_format()
negrita.set_bold()
worksheet.set_column('A:C', 15)
worksheet.set_column('D:D', 40)
worksheet.set_column('E:S', 15)
worksheet.write('A1', 'IMPRESORA DEL CENTRO S.R.L.', negrita)
worksheet.write('A2', u'DOMICILIO: JULIO CESAR LASTRA 2220 - Bº SANTA ISABEL 1º SECCIÓN - CÓRDOBA', negrita)
worksheet.write('A3', 'CUIT: 30-71103466-4', negrita)
worksheet.write('A4', 'IVA RESPONSABLE INSCRIPTO', negrita)
worksheet.write('E4', 'IVA %s' % tipoLibro, negrita)
worksheet.write('E6', 'PERIODO: ', negrita)
worksheet.write('F6', '%s' % iva.periodo(), negrita)
##CREANDO TITULOS TABLA
tituloTabla = workbook.add_format({
'border': 2,
'align': 'center',
'valign': 'vcenter'})
worksheet.merge_range('A8:A9', 'FECHA', tituloTabla)
worksheet.merge_range('B8:C8', 'COMPROBANTE', tituloTabla)
worksheet.write('B9', 'TIPO',tituloTabla)
worksheet.write('C9', u'NÚMERO',tituloTabla)
worksheet.merge_range('D8:D9', u'NOMBRE Y APELLIDO O RAZÓN SOCIAL', tituloTabla)
worksheet.merge_range('E8:E9', u'C.U.I.T.', tituloTabla)
if tipoLibro == 'COMPRAS':
worksheet.merge_range('F8:F9', u'TOTAL\nFACTURADO', tituloTabla)
worksheet.merge_range('G8:J8', u'NETO GRAVADO', tituloTabla)
worksheet.write('G9', '21%',tituloTabla)
worksheet.write('H9', '27%',tituloTabla)
worksheet.write('I9', '17,355%',tituloTabla)
worksheet.write('J9', '10,50%',tituloTabla)
worksheet.merge_range('K8:N8', u'IVA LOQUIDADO', tituloTabla)
worksheet.write('K9', '21%',tituloTabla)
worksheet.write('L9', '27%',tituloTabla)
worksheet.write('M9', '17,355%',tituloTabla)
worksheet.write('N9', '10,50%',tituloTabla)
worksheet.merge_range('O8:O9', u'COMPRAS\nFACT. C/B', tituloTabla)
worksheet.merge_range('P8:P9', u'CONCEPTO\nNO GRAV.', tituloTabla)
worksheet.merge_range('Q8:Q9', u'RETENCIÓN\nIVA', tituloTabla)
worksheet.merge_range('R8:R9', u'RETENCIÓN\nGANANCIAS', tituloTabla)
worksheet.merge_range('S8:S9', u'IMP. CTA', tituloTabla)
else:
worksheet.merge_range('F8:F9', u'COND', tituloTabla)
worksheet.merge_range('G8:G9', u'TOTAL\nFACTURA', tituloTabla)
worksheet.merge_range('H8:I8', u'NETO GRAVADO', tituloTabla)
worksheet.write('H9', '21%',tituloTabla)
worksheet.write('I9', '10,5%',tituloTabla)
worksheet.merge_range('J8:K8', u'IVA LIQUIDADO', tituloTabla)
worksheet.write('J9', '21%',tituloTabla)
worksheet.write('K9', '10,5%',tituloTabla)
worksheet.merge_range('L8:L9', u'EXENTOS', tituloTabla)
worksheet.merge_range('M8:M9', u'RETEN.', tituloTabla)
return worksheet
#CARGO LIBRO COMPRAS
compras = addHoja(workbook.add_worksheet('LIBRO IVA COMPRAS'), 'COMPRAS')
count = 10
for fc in iva.facturasCompra():
compras.write('A%d' % count, str(fc.fecha.strftime('%d/%m/%Y')),contenidoTabla)
compras.write('B%d' % count, str(fc.letra),contenidoTabla)
compras.write('C%d' % count, str(fc.numero),contenidoTabla)
compras.write('D%d' % count, str(fc.proveedor.nombre),contenidoTabla)
compras.write('E%d' % count, str(fc.proveedor.cuit),contenidoTabla)
compras.write('F%d' % count, fc.total(),money)
if (fc.iva=='21'):
compras.write('G%d' % count, fc.subtotal(),money)
else:
compras.write('G%d' % count, '',contenidoTabla)
if (fc.iva=='27'):
compras.write('H%d' % count, fc.subtotal(),money)
else:
compras.write('H%d' % count, '',contenidoTabla)
if (fc.iva=='17.355'):
compras.write('I%d' % count, fc.subtotal(),money)
else:
compras.write('I%d' % count, '',contenidoTabla)
if (fc.iva=='10.5'):
compras.write('J%d' % count, fc.subtotal(),money)
else:
compras.write('J%d' % count, '',contenidoTabla)
if (fc.iva=='21' and fc.letra=='A'):
compras.write('K%d' % count, fc.subtotal(),money)
else:
compras.write('K%d' % count, '',contenidoTabla)
if (fc.iva=='27' and fc.letra=='A'):
compras.write('L%d' % count, fc.subtotal(),money)
else:
compras.write('L%d' % count, '',contenidoTabla)
if (fc.iva=='17.355' and fc.letra=='A'):
compras.write('M%d' % count, fc.subtotal(),money)
else:
compras.write('M%d' % count, '',contenidoTabla)
if (fc.iva=='10.5' and fc.letra=='A'):
compras.write('N%d' % count, fc.subtotal(),money)
else:
compras.write('N%d' % count, '',contenidoTabla)
if (fc.letra=='B' or fc.letra=='C'):
compras.write('O%d' % count, fc.total(),money)
else:
compras.write('O%d' % count, '',contenidoTabla)
if (fc.noGravado>0):
compras.write('P%d' % count, fc.noGravado,money)
else:
compras.write('P%d' % count, '',contenidoTabla)
if (fc.retIva>0):
compras.write('Q%d' % count, fc.retIva,money)
else:
compras.write('Q%d' % count, '',contenidoTabla)
if (fc.retGanancias>0):
compras.write('R%d' % count, fc.retGanancias,money)
else:
compras.write('R%d' % count, '',contenidoTabla)
if (fc.retImpCta>0):
compras.write('S%d' % count, fc.retImpCta,money)
else:
compras.write('S%d' % count, '',contenidoTabla)
count = count + 1
#CARGO LIBRO VENTAS
ventas = addHoja(workbook.add_worksheet('LIBRO IVA VENTAS'), 'VENTAS')
factVentas = iva.facturasVenta()
workbook.close()
#Creando El response
output.seek(0)
response = HttpResponse(output.read(), mimetype="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
response['Content-Disposition'] = "attachment; filename=RegistroIva%s.xlsx" % (iva.periodo())
print response
return response
| gpl-2.0 | -4,199,780,018,633,320,400 | 40.630682 | 120 | 0.564215 | false |
helium/helium-commander | tests/test_options.py | 1 | 2099 | import click
from click.testing import CliRunner
from click import BadParameter
import json
from helium_commander.options import (
device_sort_option,
device_mac_option,
ResourceParamType,
JSONParamType
)
import pytest
def cli_run(func, args, output):
runner = CliRunner()
result = runner.invoke(func, args,
catch_exceptions=False,
standalone_mode=False)
assert result.exit_code == 0
if output is not None:
assert result.output == output
return result.output
def test_sort():
@click.command()
@device_sort_option
def func(reverse, sort):
click.echo('{} {}'.format(sort, reverse))
cli_run(func, ['--sort', 'name', '--reverse'],
'{} {}\n'.format('name', 'True'))
cli_run(func, ['--sort', 'seen'],
'{} {}\n'.format('seen', 'False'))
cli_run(func, ['--sort', 'created'],
'{} {}\n'.format('created', 'False'))
cli_run(func, ['--reverse'],
'{} {}\n'.format('None', 'True'))
def test_mac():
@click.command()
@device_mac_option
def func(mac):
click.echo('{}'.format(mac))
cli_run(func, ['--mac'],
'{}\n'.format('True'))
cli_run(func, None,
'{}\n'.format('False'))
def test_resource(tmpdir):
@click.command()
@click.option('--add',
type=ResourceParamType(metavar='SENSOR'))
def func(add):
click.echo(','.join(add))
output = cli_run(func, ['--help'], None)
assert '--add SENSOR[,SENSOR,...]* | @filename' in output
output = cli_run(func, ['--add', '234,567'],
'234,567\n')
file = tmpdir.join('ids.txt')
file.write('123\n456\n')
output = cli_run(func, ['--add', '@{}'.format(file)],
'123,456\n')
def test_json():
@click.command()
@click.argument('value', type=JSONParamType())
def func(value):
click.echo(json.dumps(value))
cli_run(func, ['42'], '42\n')
with pytest.raises(BadParameter):
cli_run(func, ['abc'], 'abc')
| bsd-3-clause | -9,219,948,403,709,405,000 | 24.597561 | 61 | 0.54121 | false |
algenon/poc | src/zombie_test.py | 1 | 3397 | from poc_simpletest import TestSuite
from zombie import Zombie
class ZombieTest(TestSuite):
def __init__(self):
TestSuite.__init__(self)
def run(self):
self.test_zombie()
self.test_distance_human()
self.test_distance_zombie()
self.test_move_human()
self.test_move_zombie()
self.report_results()
def create_game(self):
return Zombie(
grid_height = 4,
grid_width = 4,
obstacle_list = [(2,2)],
zombie_list = [(3,0)],
human_list = [(3,1)]
)
def get_human(self, game, human_index):
cnt = 0
for human in game.humans():
if (cnt == human_index):
return human
cnt += 1
return None
def get_zombie(self, game, zombie_index):
cnt = 0
for zombie in game.zombies():
if (cnt == zombie_index):
return zombie
cnt += 1
return None
def test_zombie(self):
game = self.create_game()
game.add_human(1,1)
self.run_test(game.num_zombies(), 1, 'One zombie')
self.run_test(game.num_humans(), 2, 'Two humans')
self.run_test(self.get_human(game, 0), (3, 1), 'Human #1')
self.run_test(self.get_human(game, 1), (1, 1), 'Human #2')
def test_distance_human(self):
game = self.create_game()
distances = game.compute_distance_field('human')
self.run_test(distances,
[[4, 3, 4, 5],
[3, 2, 3, 4],
[2, 1, 16, 3],
[1, 0, 1, 2]],
'Distance to one human')
game.add_human(1,1)
distances = game.compute_distance_field('human')
self.run_test(distances,
[[2, 1, 2, 3],
[1, 0, 1, 2],
[2, 1, 16, 3],
[1, 0, 1, 2]],
'Distance to two humans')
def test_distance_zombie(self):
game = self.create_game()
distances = game.compute_distance_field('zombie')
self.run_test(distances,
[[3, 4, 5, 6],
[2, 3, 4, 5],
[1, 2, 16, 4],
[0, 1, 2, 3]],
'Distance to one zombie')
game.add_zombie(1, 2)
game.add_zombie(1, 2)
distances = game.compute_distance_field('zombie')
self.run_test(distances,
[[3, 2, 1, 2],
[2, 1, 0, 1],
[1, 2, 16, 2],
[0, 1, 2, 3]],
'Distance to three zombies')
def test_move_human(self):
game = self.create_game()
distances = game.compute_distance_field('zombie')
game.move_humans(distances)
human = self.get_human(game, 0)
self.run_test(human in [(2,1), (3,2)], True, 'Human moved')
def test_move_zombie(self):
game = self.create_game()
distances = game.compute_distance_field('human')
game.move_zombies(distances)
self.run_test(self.get_zombie(game, 0), (3,1), 'Zombie moved')
if __name__ == '__main__':
ZombieTest().run()
| unlicense | -6,144,511,313,204,583,000 | 32.303922 | 70 | 0.449514 | false |
DistrictDataLabs/yellowbrick | yellowbrick/style/utils.py | 1 | 2199 | # yellowbrick.style.utils
# Utility functions for styles
#
# Author: Neal Humphrey
# Created: Wed Mar 22 12:39:35 2017 -0400
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: utils.py [45268fc] [email protected] $
"""
Utility functions for styles
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
def find_text_color(base_color, dark_color="black", light_color="white", coef_choice=0):
"""
Takes a background color and returns the appropriate light or dark text color.
Users can specify the dark and light text color, or accept the defaults of 'black' and 'white'
base_color: The color of the background. This must be
specified in RGBA with values between 0 and 1 (note, this is the default
return value format of a call to base_color = cmap(number) to get the
color corresponding to a desired number). Note, the value of `A` in RGBA
is not considered in determining light/dark.
dark_color: Any valid matplotlib color value.
Function will return this value if the text should be colored dark
light_color: Any valid matplotlib color value.
        Function will return this value if the text should be colored light.
coef_choice: slightly different approaches to calculating brightness. Currently two options in
a list, user can enter 0 or 1 as list index. 0 is default.
"""
# Coefficients:
# option 0: http://www.nbdtech.com/Blog/archive/2008/04/27/Calculating-the-Perceived-Brightness-of-a-Color.aspx
# option 1: http://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color
coef_options = [
np.array((0.241, 0.691, 0.068, 0)),
np.array((0.299, 0.587, 0.114, 0)),
]
coefs = coef_options[coef_choice]
rgb = np.array(base_color) * 255
brightness = np.sqrt(np.dot(coefs, rgb ** 2))
# Threshold from option 0 link; determined by trial and error.
# base is light
if brightness > 130:
return dark_color
return light_color
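# Illustrative example (colour value is made up): an RGBA background of (0.2, 0.4, 0.8, 1.0)
# scales to RGB (51, 102, 204); with the default coefficients its perceived brightness is
# sqrt(0.241*51**2 + 0.691*102**2 + 0.068*204**2) ~= 103, below the 130 threshold, so
# find_text_color returns light_color ("white" by default).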
| apache-2.0 | 9,137,326,317,813,900,000 | 35.65 | 115 | 0.644384 | false |
mediatum/mediatum | schema/bibtex.py | 1 | 15854 | # coding=utf8
"""
mediatum - a multimedia content repository
Copyright (C) 2008 Matthias Kramm <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
""" We want to parse even badly broken bibtex files, no longer adhering to
the "official" bibtex grammar. In particular, we need to handle
curly brace misleveling, missing quotation marks, missing attributes,
missing ids, etc.
Hence, we don't use a lex+yacc approach but rather a heuristic approach,
which extracts records from the source file only by looking into
"@doctype" records and "field = " fields, ignoring all in between (and
not dealing with curly braces at all)
"""
import re
import os
import shutil
import sys
import codecs
import logging
import unicodedata
import time
from bibtexparser import load as bibtex_load
from bibtexparser.bparser import BibTexParser
import bibtexparser.customization
from core import db, Node
from .schema import Metadatatype
import core.users as users
from contenttypes import Directory
from contenttypes.document import Document
from utils.utils import u, u2, utf8_decode_escape
from utils.date import parse_date
q = db.query
logg = logging.getLogger(__name__)
ESCAPE_BIBTEX_KEY = False
def normchar(char_descriptor):
return unicodedata.lookup(char_descriptor).lower()
din5007_variant2_translation = [
[normchar('LATIN CAPITAL LETTER A WITH DIAERESIS'), 'ae'], # Auml
[normchar('LATIN CAPITAL LETTER O WITH DIAERESIS'), 'oe'], # Ouml
[normchar('LATIN CAPITAL LETTER U WITH DIAERESIS'), 'ue'], # Uuml
[normchar('LATIN SMALL LETTER A WITH DIAERESIS'), 'ae'], # auml
[normchar('LATIN SMALL LETTER O WITH DIAERESIS'), 'oe'], # ouml
[normchar('LATIN SMALL LETTER U WITH DIAERESIS'), 'ue'], # uuml
[normchar('LATIN SMALL LETTER SHARP S'), 'ss'], # szlig
[normchar('LATIN SMALL LETTER E WITH GRAVE'), 'e'], # egrave
[normchar('LATIN SMALL LETTER E WITH ACUTE'), 'e'], # eacute
]
d_escape = dict(din5007_variant2_translation)
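# Map an arbitrary string to a safe bibtex key: ASCII letters, digits and
# "-_+:" pass through unchanged, German umlauts, sharp s and accented e are
# transliterated via the DIN 5007 variant 2 table above, and every other
# character is replaced by `default_char`.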
def escape_bibtexkey(s, default_char="_"):
import string
res = ""
for c in s:
if c in string.ascii_letters + string.digits + "-_+:":
res = res + c
continue
elif c in d_escape:
res = res + d_escape[c]
else:
res = res + default_char
return res
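# Helper patterns for the heuristic scan described in the module docstring:
# `token` matches an "@doctype{" head or a `field = ` opening (brace, quote or
# bare value), `comment` strips TeX-style "%" comment lines, and the remaining
# delimiter/garbage helpers trim whitespace, quotes, braces and commas around
# extracted values, with `counterpiece` mapping each opening delimiter to its
# closing counterpart.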
token = re.compile(r'@\w+\s*{\s*|[a-zA-Z-_]+\s*=\s*{?["\'{]|[a-zA-Z-]+\s*=\s+[0-9a-zA-Z_]')
comment = re.compile(r'%[^\n]*\n')
delim = re.compile(r'\W')
delim2 = re.compile(r'^(?u)\s*[\w+_\-\:]*\s*\,')
frontgarbage = re.compile(r'^\W*', re.UNICODE)
backgarbage = re.compile(r'[ \n\t}"\',]*$')
xspace = re.compile(r'\s+')
counterpiece = {"{": "}", '"': '"', "'": "'"}
class MissingMapping(Exception):
def __init__(self, message=""):
self.message = message
def __str__(self):
return self.message
def getNow():
import datetime
now = datetime.datetime.now().isoformat()
now = now.replace('T', '_').replace(':', '-')
now = now.split('.')[0]
return now
def save_import_file(filename):
import core.config as config
temppath = config.get("paths.tempdir")
_filename_only = filename.split(os.path.sep)[-1]
# leave following in for windows: "/" in path representation possible there
_filename_only = filename.split("/")[-1]
destname = os.path.join(temppath, "bibtex_import_saved_" + getNow() + "_" + _filename_only)
logg.info("bibtex import: going to copy/save import file %s -> %s", filename, destname)
shutil.copyfile(filename, destname)
return
article_types = [
("article", "An article from a journal or magazine.",
("author", "title", "journal", "year"),
("volume", "number", "pages", "month", "note", "key")),
("misc", "Use this type when nothing else seems appropriate.",
(),
("author", "title", "howpublished", "month", "year", "note", "key")),
("unpublished", "A document with an author and title, but not formally published. ",
("author", "title", "note"),
("month", "year", "key")),
("book", "A book with an explicit publisher. ",
("author or editor", "title", "publisher", "year"),
("volume", "series", "address", "edition", "month", "note", "key")),
("booklet", "A work that is printed and bound, but without a named publisher or sponsoring institution.",
("title",),
("author", "howpublished", "address", "month", "year", "note", "key")),
("inbook", "A part of a book, which may be a chapter and/or a range of pages. ",
("author or editor", "title", "chapter and/or pages", "publisher", "year"),
("volume", "series", "address", "edition", "month", "note", "key")),
("manual", "Technical documentation. ",
("title"),
("author", "organization", "address", "edition", "month", "year", "note", "key")),
("techreport", "A report published by a school or other institution, usually numbered within a series. ",
("author", "title", "institution", "year"),
("type", "number", "address", "month", "note", "key")),
("conference",
"An article in the proceedings of a conference. This entry is identical to the 'inproceedings' entry and is included for compatibility with another text formatting system. ",
("author", "title", "booktitle", "year"),
("editor", "pages", "organization", "publisher", "address", "month", "note", "key")),
("proceedings", " The proceedings of a conference.",
("title", "year"),
("editor", "publisher", "organization", "address", "month", "note", "key")),
("inproceedings", "An article in the proceedings of a conference. ",
("author", "title", "booktitle", "year"),
("editor", "pages", "organization", "publisher", "address", "month", "note", "key")),
("incollection", "A part of a book with its own title.",
("author", "title", "booktitle", "year"),
("editor", "pages", "organization", "publisher", "address", "month", "note", "key")),
("phdthesis", "A PhD thesis.",
("author", "title", "school", "year"),
("address", "month", "note", "key")),
("mastersthesis", "A Master's thesis.",
("author", "title", "school", "year"),
("address", "month", "note", "key"))]
from . import schema as schema
def getAllBibTeXTypes():
return [bibname for bibname, description, required, optional in article_types]
def getbibtexmappings():
bibtextypes = {}
for metatype in schema.loadTypesFromDB():
for bibtextype in metatype.get("bibtexmapping").split(";"):
if bibtextype:
metatype_name = metatype.getName()
bibtextypes[bibtextype] = bibtextypes.get(bibtextype, []) + [metatype_name]
for bibtextype in bibtextypes:
if len(bibtextypes[bibtextype]) == 1:
bibtextypes[bibtextype] = bibtextypes[bibtextype][-1]
elif len(bibtextypes[bibtextype]) > 1:
logg.error("bibtex import: ambiguous mapping for bibtex type '%s': %s - choosing last one",
bibtextype, bibtextypes[bibtextype])
bibtextypes[bibtextype] = bibtextypes[bibtextype][-1]
return bibtextypes
def checkMappings():
s = getbibtexmappings()
for bibname, description, required, optional in article_types:
if bibname not in s:
print bibname, "is not associated with any metatype"
else:
print bibname, "->", s[bibname]
def detecttype(doctype, fields):
results = []
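    # Score every known bibtex type: +120 for an exact type-name match,
    # +20 per required field present, +10 per optional field present, minus
    # the number of required fields; the best-scoring type wins, provided it
    # reaches the minimum score of 30.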
for bibname, description, required, optional in article_types:
score = 0
if doctype.lower() == bibname.lower():
score += 120
score -= len(required)
for field in required:
if field in fields:
score += 20
for field in optional:
if field in fields:
score += 10
results += [(score, bibname)]
if not results:
# no mapping types defined
raise ValueError("no bibtex mappings defined")
score, bibname = max(results)
if score >= 30:
return bibname
else:
return None
def _bibteximport_customize(record):
"""
Sanitize bibtex records (unicode, name lists).
"""
record = bibtexparser.customization.convert_to_unicode(record)
record = bibtexparser.customization.author(record)
record = bibtexparser.customization.editor(record)
# editor function adds "ids" (s.th. like hashes), we don't need them
if record.get("editor"):
record["editor"] = list(v["name"] for v in record["editor"])
# convert author/editor lists into semicolon-separated strings
for key in ("author", "editor"):
if key in record:
record[key] = ";".join(", ".join(n for n in name.split(", ") if n.strip()) for name in record[key])
for key in ("title", "booktitle"):
if key in record:
record[key] = record[key].replace('\n', ' ')
return record
def getentries(filename):
try:
save_import_file(filename)
except IOError as e:
logg.error("bibtex import: save import file failed: {}".format(e))
raise IOError("save import file failed")
# use utf-8-sig instead of utf-8 to get rid of BOM_UTF8, which confuses bibtex parser
for encoding in ('utf-8-sig', 'utf-16', None):
try:
error = None
fi = codecs.open(filename, "r", encoding=encoding)
parser = BibTexParser(common_strings=True)
# accept also non standard records like @SCIENCEREPORT
parser.ignore_nonstandard_types = False
parser.customization = _bibteximport_customize
bibtex = bibtex_load(fi, parser=parser)
# seems to be the correct encoding, don't try other encodings
break
except Exception as e:
# check if there is a utf-encoding error, then try other encoding
            if (encoding == 'utf-8-sig' and str(e).lower().find('utf8') >= 0) or \
                    (encoding == 'utf-16' and str(e).lower().find('utf-16') >= 0):
continue
error = e
break
if error:
logg.error("bibtex import: bibtexparser failed: {}".format(e))
raise ValueError("bibtexparser failed")
return bibtex.entries
def importBibTeX(infile, node=None, req=None):
user = None
if req:
try:
user = users.getUserFromRequest(req)
msg = "bibtex import: import started by user '%s'" % (user.name)
except:
msg = "bibtex import: starting import (unable to identify user)"
else:
msg = "bibtex import: starting import (%s)" % ustr(sys.argv)
logg.info(msg)
bibtextypes = getbibtexmappings()
result = []
entries = []
if isinstance(infile, list):
entries = infile
else:
node = node or Directory(utf8_decode_escape(os.path.basename(infile)))
try:
entries = getentries(infile)
except:
# XXX TODO This reports *everything* as encoding error
# XXX TODO (even things like full disk or other parsing errors).
# XXX TODO We should at least reformulate the error message,
# XXX TODO and -- even better -- only catch errors that are to be expected.
logg.error("getentries failed", exc_info=1)
msg = "bibtex import: getentries failed, import stopped (encoding error)"
logg.error(msg)
raise ValueError("bibtex_unspecified_error")
logg.info("bibtex import: %d entries", len(entries))
for count, fields in enumerate(entries):
docid_utf8 = fields["ID"]
fields[u"key"] = fields.pop("ID")
doctype = fields.pop("ENTRYTYPE")
mytype = detecttype(doctype, fields)
if mytype:
fieldnames = {}
datefields = {}
if mytype not in bibtextypes:
logg.error("bibtex mapping of bibtex type '%s' not defined - import stopped", mytype)
msg = "bibtex mapping of bibtex type '%s' not defined - import stopped" % mytype
raise MissingMapping(msg)
result += [(mytype.lower(), fields)]
metatype = bibtextypes[mytype]
# check for mask configuration
metadatatype = q(Metadatatype).filter_by(name=metatype).one()
mask = metadatatype.get_mask(u"bibtex_import") or metadatatype.get_mask(u"bibtex")
if mask:
for f in mask.all_maskitems:
try:
_bib_name = q(Node).get(f.get(u"mappingfield")).name
_mfield = q(Node).get(f.get(u"attribute"))
_med_name = _mfield.name
if _mfield.get(u"type") == u"date":
datefields[_med_name] = _mfield.get(u"valuelist")
except AttributeError as e:
msg = "bibtex import docid='{}': field error for bibtex mask for type {} and bibtex-type '{}': {}"
msg = msg.format(docid_utf8, metatype, mytype, e)
logg.error(msg)
else:
fieldnames[_bib_name] = _med_name
doc = Document(docid_utf8,schema=metatype)
for k, v in fields.items():
if k in fieldnames.keys():
k = fieldnames[k] # map bibtex name
if k in datefields.keys(): # format date field
try:
v = str(parse_date(v, datefields[k]))
# if date format does not contains '%' the valid digit of the result must not be longer than the date format
# e.g. if datefields[k] is 'yyyy' then the result v must be clipped after 4 characters
# afterwards the result is expanded again (without the invalid digits)
if datefields[k].find('%') < 0:
v = v[:len(datefields[k])]
v = str(parse_date(v, datefields[k]))
except ValueError as e:
logg.exception("bibtex exception: %s: %s", k, v)
raise ValueError("bibtex_date_error")
doc.set(k, v)
# because the bibtex import contains only a subset of the metadata defined in metadatatype,
# all other metadata are created and set to default values.
# this will be done in the same manner as if the document is loaded in editor and saved without
# any changes (required fields are not considered)
editmask = metadatatype.get_mask(u"editmask")
if editmask and hasattr(editmask, 'set_default_metadata'):
editmask.set_default_metadata(doc)
try:
node.children.append(doc)
if user:
doc.set("creator", user.login_name)
doc.set("creationtime", unicode(time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(time.time()))))
except Exception as e:
logg.exception("bibtex exception")
raise ValueError()
logg.debug("bibtex import: finished import")
print msg
return node
| gpl-3.0 | 7,457,262,981,495,890,000 | 38.242574 | 179 | 0.599218 | false |
pombreda/py2neo | test/ext/calendar_test.py | 1 | 10352 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from py2neo import Graph, neo4j, legacy
from py2neo.ext.calendar import GregorianCalendar
@pytest.fixture(autouse=True)
def setup(request, graph):
if request.instance:
# Grab a handle to an index for linking to time data
graph = Graph()
try:
graph.legacy.delete_index(neo4j.Node, "TIME")
except LookupError:
pass
time = graph.legacy.get_or_create_index(neo4j.Node, "TIME")
request.instance.calendar = GregorianCalendar(time)
def test_can_create_date():
date = GregorianCalendar.Date(2000, 12, 25)
assert date.year == 2000
assert date.month == 12
assert date.day == 25
assert str(date) == "2000-12-25"
def test_can_create_date_with_short_numbers():
date = GregorianCalendar.Date(2000, 1, 2)
assert date.year == 2000
assert date.month == 1
assert date.day == 2
assert str(date) == "2000-01-02"
def test_can_create_month_year():
month_year = GregorianCalendar.Date(2000, 12)
assert month_year.year == 2000
assert month_year.month == 12
assert month_year.day is None
assert str(month_year) == "2000-12"
def test_can_create_year():
year = GregorianCalendar.Date(2000)
assert year.year == 2000
assert year.month is None
assert year.day is None
assert str(year) == "2000"
class TestExampleCode(object):
def test_example_code_runs(self):
from py2neo import Graph
from py2neo.ext.calendar import GregorianCalendar
graph = Graph()
time_index = graph.legacy.get_or_create_index(neo4j.Node, "TIME")
calendar = GregorianCalendar(time_index)
alice, birth, death = graph.create(
{"name": "Alice"},
(0, "BORN", calendar.day(1800, 1, 1)),
(0, "DIED", calendar.day(1900, 12, 31)),
)
assert birth.end_node["year"] == 1800
assert birth.end_node["month"] == 1
assert birth.end_node["day"] == 1
assert death.end_node["year"] == 1900
assert death.end_node["month"] == 12
assert death.end_node["day"] == 31
class TestDays(object):
def test_can_get_day_node(self):
christmas = self.calendar.day(2000, 12, 25)
assert isinstance(christmas, neo4j.Node)
assert christmas["year"] == 2000
assert christmas["month"] == 12
assert christmas["day"] == 25
def test_will_always_get_same_day_node(self):
first_christmas = self.calendar.day(2000, 12, 25)
for i in range(40):
next_christmas = self.calendar.day(2000, 12, 25)
assert next_christmas == first_christmas
def test_can_get_different_day_nodes(self):
christmas = self.calendar.day(2000, 12, 25)
boxing_day = self.calendar.day(2000, 12, 26)
assert christmas != boxing_day
class TestMonths(object):
def test_can_get_month_node(self):
december = self.calendar.month(2000, 12)
assert isinstance(december, neo4j.Node)
assert december["year"] == 2000
assert december["month"] == 12
def test_will_always_get_same_month_node(self):
first_december = self.calendar.month(2000, 12)
for i in range(40):
next_december = self.calendar.month(2000, 12)
assert next_december == first_december
def test_can_get_different_month_nodes(self):
december = self.calendar.month(2000, 12)
january = self.calendar.month(2001, 1)
assert december != january
class TestYears(object):
def test_can_get_year_node(self):
millennium = self.calendar.year(2000)
assert isinstance(millennium, neo4j.Node)
assert millennium["year"] == 2000
def test_will_always_get_same_month_node(self):
first_millennium = self.calendar.year(2000)
for i in range(40):
next_millennium = self.calendar.year(2000)
assert next_millennium == first_millennium
def test_can_get_different_year_nodes(self):
millennium_2000 = self.calendar.year(2000)
millennium_2001 = self.calendar.year(2001)
assert millennium_2000 != millennium_2001
class TestDateRanges(object):
def test_can_get_date_range(self):
xmas_year = self.calendar.date_range((2000, 12, 25), (2001, 12, 25))
assert isinstance(xmas_year, neo4j.Node)
assert xmas_year["start_date"] == "2000-12-25"
assert xmas_year["end_date"] == "2001-12-25"
rels = list(xmas_year.match_outgoing("START_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.date((2000, 12, 25))
assert rels[0].end_node == self.calendar.day(2000, 12, 25)
rels = list(xmas_year.match_outgoing("END_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.date((2001, 12, 25))
assert rels[0].end_node == self.calendar.day(2001, 12, 25)
def test_will_always_get_same_date_range_node(self):
range1 = self.calendar.date_range((2000, 12, 25), (2001, 12, 25))
range2 = self.calendar.date_range((2000, 12, 25), (2001, 12, 25))
assert range1 == range2
def test_can_get_different_date_range_nodes(self):
range1 = self.calendar.date_range((2000, 12, 25), (2001, 12, 25))
range2 = self.calendar.date_range((2000, 1, 1), (2000, 12, 31))
assert range1 != range2
def test_single_day_range(self):
range_ = self.calendar.date_range((2000, 12, 25), (2000, 12, 25))
assert range_ == self.calendar.day(2000, 12, 25)
def test_range_within_month(self):
advent = self.calendar.date_range((2000, 12, 1), (2000, 12, 24))
rels = list(advent.match_incoming("DATE_RANGE"))
assert len(rels) == 1
assert rels[0].start_node == self.calendar.month(2000, 12)
rels = list(advent.match_outgoing("START_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.date((2000, 12, 1))
assert rels[0].end_node == self.calendar.day(2000, 12, 1)
rels = list(advent.match_outgoing("END_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.date((2000, 12, 24))
assert rels[0].end_node == self.calendar.day(2000, 12, 24)
def test_range_within_year(self):
range_ = self.calendar.date_range((2000, 4, 10), (2000, 12, 24))
rels = list(range_.match_incoming("DATE_RANGE"))
assert len(rels) == 1
assert rels[0].start_node == self.calendar.year(2000)
rels = list(range_.match_outgoing("START_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.date((2000, 4, 10))
assert rels[0].end_node == self.calendar.day(2000, 4, 10)
rels = list(range_.match_outgoing("END_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.date((2000, 12, 24))
assert rels[0].end_node == self.calendar.day(2000, 12, 24)
def test_open_start_range(self):
range_ = self.calendar.date_range(None, (2000, 12, 25))
rels = list(range_.match_outgoing("START_DATE"))
assert len(rels) == 0
rels = list(range_.match_outgoing("END_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.date((2000, 12, 25))
assert rels[0].end_node == self.calendar.day(2000, 12, 25)
def test_open_end_range(self):
range_ = self.calendar.date_range((2000, 12, 25), None)
rels = list(range_.match_outgoing("START_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.date((2000, 12, 25))
assert rels[0].end_node == self.calendar.day(2000, 12, 25)
rels = list(range_.match_outgoing("END_DATE"))
assert len(rels) == 0
def test_no_fully_open_date_range(self):
try:
self.calendar.date_range(None, None)
except ValueError:
return True
else:
return False
def test_first_quarter(self):
range_ = self.calendar.quarter(2000, 1)
rels = list(range_.match_outgoing("START_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.day(2000, 1, 1)
rels = list(range_.match_outgoing("END_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.day(2000, 3, 31)
def test_second_quarter(self):
range_ = self.calendar.quarter(2000, 2)
rels = list(range_.match_outgoing("START_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.day(2000, 4, 1)
rels = list(range_.match_outgoing("END_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.day(2000, 6, 30)
def test_third_quarter(self):
range_ = self.calendar.quarter(2000, 3)
rels = list(range_.match_outgoing("START_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.day(2000, 7, 1)
rels = list(range_.match_outgoing("END_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.day(2000, 9, 30)
def test_fourth_quarter(self):
range_ = self.calendar.quarter(2000, 4)
rels = list(range_.match_outgoing("START_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.day(2000, 10, 1)
rels = list(range_.match_outgoing("END_DATE"))
assert len(rels) == 1
assert rels[0].end_node == self.calendar.day(2000, 12, 31)
def test_no_fifth_quarter(self):
try:
self.calendar.quarter(2000, 5)
except ValueError:
return True
else:
return False
| apache-2.0 | 5,594,756,984,559,983,000 | 36.507246 | 76 | 0.614664 | false |
sujaymansingh/random_cricket_profiles | random_cricket_profiles/player_generator.py | 1 | 3275 | """Generates a player profile using markov chains!
"""
import random
import sujmarkov
from random_cricket_profiles import countries, players
class PlayerGenerator():
def __init__(self, sample_players, min_profile_length):
self.min_profile_length = min_profile_length
self.profile_markov = sujmarkov.Markov(n=3)
self.surname_markovs = {}
self.firstname_markovs = {}
for country in countries.COUNTRIES:
self.surname_markovs[country.country_id] = sujmarkov.Markov(n=4)
self.firstname_markovs[country.country_id] = sujmarkov.Markov(n=4)
for player in sample_players:
self.add_player(player)
def add_player(self, player):
for line in player.profile:
if line:
sentence = line.split(" ")
self.profile_markov.add(sentence)
country_id = player.country_id
self.surname_markovs[country_id].add(player.surname)
firstnames = player.firstnames
if firstnames:
for name in firstnames.split(" "):
if name:
self.firstname_markovs[country_id].add(name)
def generate(self, country_code=None, seed=None):
"""Returns a tuple (player, seed).
If country_code is not passed, a random one is chosed.
seed is used to seed the random number generator.
This means that the same seed will always generate the same player.
"""
if seed is None:
seed = random.getrandbits(64)
random_ = random.Random(seed)
if country_code:
country = countries.get_country_by_code(country_code)
else:
country = random_.choice(countries.COUNTRIES)
surname_markov = self.surname_markovs[country.country_id]
surname = "".join(surname_markov.generate(random_=random_))
firstname_markov = self.firstname_markovs[country.country_id]
firstnames_as_list = []
for i in range(random_.choice([1, 2, 3])):
firstname = "".join(firstname_markov.generate(random_=random_))
firstnames_as_list.append(firstname)
firstnames = " ".join(firstnames_as_list)
profile = []
while get_total_length(profile) < self.min_profile_length:
line = " ".join(self.profile_markov.generate(random_=random_))
for item in [
("$fullname", surname),
("$known_as", surname),
("$surname", surname),
("$firstnames", firstnames),
("$team", country.name),
]:
placeholder, value = item
line = line.replace(placeholder, value)
profile.append(line)
player = players.Player(
country_id=country.country_id,
firstnames=firstnames,
surname=surname,
profile=profile,
fullname=firstnames + " " + surname,
known_as=""
)
return (player, seed)
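# Illustrative usage (assumes sample_players expose country_id, surname,
# firstnames and profile, and that "NZ" is a known country code):
#
#   generator = PlayerGenerator(sample_players, min_profile_length=200)
#   player, seed = generator.generate(country_code="NZ")
#   same_player, _ = generator.generate(country_code="NZ", seed=seed)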
def get_total_length(profile):
"""Return the sum of lengths of each string in this list.
>>> get_total_length(["This is a conversation", "Yes"])
25
"""
lengths = [len(line) for line in profile]
return sum(lengths)
| mit | 8,664,843,146,164,601,000 | 32.080808 | 78 | 0.584427 | false |
dpa-newslab/livebridge | livebridge/base/sources.py | 1 | 4313 | # -*- coding: utf-8 -*-
#
# Copyright 2016 dpa-infocom GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from livebridge.components import get_db_client
logger = logging.getLogger(__name__)
class BaseSource(object):
"""Base class for sources."""
__module__ = "livebridge.base"
type = ""
mode = ""
def __init__(self, *, config={}, **kwargs):
"""Base constructor for sources.
:param config: Configuration passed from the control file.
"""
pass
@property
def _db(self):
"""Database client for accessing storage.
:returns: :class:`livebridge.storages.base.BaseStorage` """
if not hasattr(self, "_db_client") or getattr(self, "_db_client") is None:
self._db_client = get_db_client()
return self._db_client
async def filter_new_posts(self, source_id, post_ids):
"""Filters ist of post_id for new ones.
:param source_id: id of the source
:type string:
:param post_ids: list of post ids
:type list:
:returns: list of unknown post ids."""
new_ids = []
try:
db_client = self._db
posts_in_db = await db_client.get_known_posts(source_id, post_ids)
new_ids = [p for p in post_ids if p not in posts_in_db]
except Exception as exc:
logger.error("Error when filtering for new posts {} {}".format(source_id, post_ids))
logger.exception(exc)
return new_ids
async def get_last_updated(self, source_id):
"""Returns latest update-timestamp from storage for source.
:param source_id: id of the source (source_id, ticker_id, blog_id pp)
:type string:
:returns: :py:class:`datetime.datetime` object of latest update datetime in db."""
last_updated = await self._db.get_last_updated(source_id)
logger.info("LAST UPDATED: {} {}".format(last_updated, self))
return last_updated
class PollingSource(BaseSource):
"""Base class for sources which are getting polled. Any custom adapter source, which \
should get polled, has to be inherited from this base class."""
mode = "polling"
async def poll(self):
"""Method has to be implemented by the concrete inherited source class.
:func:`poll` gets called by the interval defined by environment var *POLLING_INTERVALL*.
The inheriting class has to implement the actual poll request for the source in this method.
:return: list of new posts"""
raise NotImplementedError("Method 'poll' not implemented.")
async def stop(self):
"""Method can be implemented by the concrete inherited source class.
By implementing this method, the source class is able to handle the shutdown event explicitly."""
pass
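# Minimal polling adapter sketch (illustrative only; the endpoint handling is
# an assumption, not part of livebridge itself). A StreamingSource adapter
# would implement listen(callback) and stop() analogously.
#
#   class ExampleFeedSource(PollingSource):
#
#       type = "examplefeed"
#
#       def __init__(self, *, config={}, **kwargs):
#           self.endpoint = config.get("endpoint")
#
#       async def poll(self):
#           # fetch self.endpoint here and return a list of new posts
#           return []
#
#       async def stop(self):
#           pass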
class StreamingSource(BaseSource):
"""Base class for streaming sources. Any custom adapter source, which is using a websocket, SSE or\
any other stream as source has to be inherited from this base class."""
mode = "streaming"
async def listen(self, callback):
"""Method has to be implemented by the concrete inherited source class.
A websocket connection has to be opened and given *callback* method has to be
called with the new post as argument.
:param callback: Callback method which has to be called with list of new posts.
:return: True"""
raise NotImplementedError("Method 'listen' not implemented.")
async def stop(self):
"""Method has to be implemented by the concrete inherited source class.
By calling this method, the websocket-connection has to be stopped.
:return: True"""
raise NotImplementedError("Method 'stop' not implemented.")
| apache-2.0 | 8,024,353,077,062,878,000 | 35.243697 | 108 | 0.657083 | false |
isharacomix/rules-of-war | code/core/storage.py | 1 | 2639 | # The storage module is a platform-independent way of saving and loading
# files to certain locations. This module is global and functional. It contains
# NO state information.
#
# There are two data stores. The local data stored in the user's home directory
# and the global data stored in /data/ in the game's runtime location. The data
# directory must be two sublevels above this file. Information should never
# be saved in data... only in home.
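# Illustrative usage (module import path assumed from this file's location):
#
#   from core import storage
#   storage.save("hello", "saves", "slot1.txt")   # writes ~/.rules-of-war/saves/slot1.txt
#   text = storage.read("saves", "slot1.txt")     # -> "hello", or None if missing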
import os
GAME_DIR = ".rules-of-war"
# This reads the text from a file in the home directory. Each arg is a path
# component (the last one being the filename); they are joined as appropriate.
# Returns None if the file does not exist.
def read(*args):
home = os.path.join(os.path.expanduser("~"),GAME_DIR)
target = os.path.join(home, *args)
if not os.path.exists(target):
return None
try:
f = open(target,"r")
s = f.read()
f.close()
return s
except:
return None
# This returns a list of filenames under the provided directory.
def list_files(*args):
home = os.path.join(os.path.expanduser("~"),GAME_DIR)
target = os.path.join(home, *args)
if not os.path.exists(target):
return []
return [ f for f in os.listdir(target)
if os.path.isfile(os.path.join(target,f)) ]
# This saves a file to the home directory, overwriting if appropriate.
# Returns False if something goes wrong.
def save(data, *args):
home = os.path.join(os.path.expanduser("~"),GAME_DIR)
targetdir = os.path.join(home, *(args[:-1]))
target = os.path.join(home, *args)
if not os.path.exists(targetdir):
os.makedirs(targetdir)
try:
f = open(target,"w")
f.write(data)
f.close()
return True
except:
return False
# This reads a file from the provided data directory.
def read_data(*args):
data = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..","..","data")
target = os.path.join(data, *args)
if not os.path.exists(target):
return None
try:
f = open(target,"r")
s = f.read()
f.close()
return s
except:
return None
# This returns a list of filenames under the provided data directory. These
# files should be considered READ ONLY.
def list_datafiles(*args):
data = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"..","..","data")
target = os.path.join(data, *args)
if not os.path.exists(target):
return []
return [ f for f in os.listdir(target)
if os.path.isfile(os.path.join(target,f)) ]
| gpl-3.0 | -2,016,188,181,290,425,600 | 31.9875 | 79 | 0.628268 | false |
MaartenGr/BERTopic | bertopic/plotting/_topics_over_time.py | 1 | 3528 | import pandas as pd
from typing import List
import plotly.graph_objects as go
def visualize_topics_over_time(topic_model,
topics_over_time: pd.DataFrame,
top_n_topics: int = None,
topics: List[int] = None,
width: int = 1250,
height: int = 450) -> go.Figure:
""" Visualize topics over time
Arguments:
topic_model: A fitted BERTopic instance.
topics_over_time: The topics you would like to be visualized with the
corresponding topic representation
top_n_topics: To visualize the most frequent topics instead of all
topics: Select which topics you would like to be visualized
width: The width of the figure.
height: The height of the figure.
Returns:
A plotly.graph_objects.Figure including all traces
Usage:
To visualize the topics over time, simply run:
```python
topics_over_time = topic_model.topics_over_time(docs, topics, timestamps)
topic_model.visualize_topics_over_time(topics_over_time)
```
Or if you want to save the resulting figure:
```python
fig = topic_model.visualize_topics_over_time(topics_over_time)
fig.write_html("path/to/file.html")
```
<iframe src="../../tutorial/visualization/trump.html"
style="width:1000px; height: 680px; border: 0px;""></iframe>
"""
colors = ["#E69F00", "#56B4E9", "#009E73", "#F0E442", "#D55E00", "#0072B2", "#CC79A7"]
# Select topics
if topics:
selected_topics = topics
elif top_n_topics:
selected_topics = topic_model.get_topic_freq().head(top_n_topics + 1)[1:].Topic.values
else:
selected_topics = topic_model.get_topic_freq().Topic.values
# Prepare data
topic_names = {key: value[:40] + "..." if len(value) > 40 else value
for key, value in topic_model.topic_names.items()}
topics_over_time["Name"] = topics_over_time.Topic.map(topic_names)
data = topics_over_time.loc[topics_over_time.Topic.isin(selected_topics), :]
# Add traces
fig = go.Figure()
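    # One line trace per topic: colors cycle through the 7-color palette above,
    # and the topic's words per timestamp are attached as hover text.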
for index, topic in enumerate(data.Topic.unique()):
trace_data = data.loc[data.Topic == topic, :]
topic_name = trace_data.Name.values[0]
words = trace_data.Words.values
fig.add_trace(go.Scatter(x=trace_data.Timestamp, y=trace_data.Frequency,
mode='lines',
marker_color=colors[index % 7],
hoverinfo="text",
name=topic_name,
hovertext=[f'<b>Topic {topic}</b><br>Words: {word}' for word in words]))
# Styling of the visualization
fig.update_xaxes(showgrid=True)
fig.update_yaxes(showgrid=True)
fig.update_layout(
yaxis_title="Frequency",
title={
'text': "<b>Topics over Time",
'y': .95,
'x': 0.40,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(
size=22,
color="Black")
},
template="simple_white",
width=width,
height=height,
hoverlabel=dict(
bgcolor="white",
font_size=16,
font_family="Rockwell"
),
legend=dict(
title="<b>Global Topic Representation",
)
)
return fig
| mit | 3,429,931,196,606,199,000 | 34.28 | 105 | 0.553288 | false |
tmolteno/python-necpp | necpp/setup.py | 1 | 2166 | #!/usr/bin/env python
"""
setup.py file for necpp Python module.
"""
from setuptools import setup, Extension
from glob import glob
import os
nec_sources = []
nec_sources.extend([fn for fn in glob('necpp_src/src/*.cpp')
if not os.path.basename(fn).endswith('_tb.cpp')
if not os.path.basename(fn).startswith('net_solve.cpp')
if not os.path.basename(fn).startswith('nec2cpp.cpp')
if not os.path.basename(fn).startswith('necDiff.cpp')])
nec_sources.extend(glob("necpp_wrap.c"))
nec_headers = []
nec_headers.extend(glob("necpp_src/src/*.h"))
nec_headers.extend(glob("necpp_src/config.h"))
# At the moment, the config.h file is needed, and this should be generated from the ./configure
# command in the parent directory. Use ./configure --without-lapack to avoid dependence on LAPACK
#
necpp_module = Extension('_necpp',
sources=nec_sources,
include_dirs=['necpp_src/src/', 'necpp_src/'],
depends=nec_headers,
define_macros=[('BUILD_PYTHON', '1')]
)
with open('README.md') as f:
readme = f.read()
setup (name = 'necpp',
version = '1.7.3.5',
author = "Tim Molteno",
author_email = "[email protected]",
url = "http://github.com/tmolteno/necpp",
keywords = "nec2 nec2++ antenna electromagnetism radio",
description = "Python Antenna Simulation Module (nec2++) C-style interface",
long_description=readme,
long_description_content_type="text/markdown",
include_package_data=True,
data_files=[('examples', ['necpp_src/example/test.py'])],
ext_modules = [necpp_module],
py_modules = ["necpp"],
license='GPLv2',
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering",
"Topic :: Communications :: Ham Radio",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
"Intended Audience :: Science/Research"]
)
| gpl-2.0 | -8,857,182,121,933,700,000 | 33.380952 | 97 | 0.656971 | false |
chrismattmann/tika-python | tika/parser.py | 1 | 4869 | #!/usr/bin/env python
# encoding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .tika import parse1, callServer, ServerEndpoint
import os
import json
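# Typical usage (illustrative; the module talks to a Tika REST server):
#
#   from tika import parser
#   parsed = parser.from_file('/path/to/file.pdf')
#   print(parsed["metadata"])
#   print(parsed["content"])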
def from_file(filename, serverEndpoint=ServerEndpoint, service='all', xmlContent=False, headers=None, config_path=None, requestOptions={}):
'''
Parses a file for metadata and content
:param filename: path to file which needs to be parsed or binary file using open(path,'rb')
:param serverEndpoint: Server endpoint url
:param service: service requested from the tika server
Default is 'all', which results in recursive text content+metadata.
'meta' returns only metadata
'text' returns only content
:param xmlContent: Whether or not XML content be requested.
Default is 'False', which results in text content.
    :param headers: Request headers to be sent to the Tika REST server, should
be a dictionary. This is optional
:return: dictionary having 'metadata' and 'content' keys.
'content' has a str value and metadata has a dict type value.
'''
if not xmlContent:
output = parse1(service, filename, serverEndpoint, headers=headers, config_path=config_path, requestOptions=requestOptions)
else:
output = parse1(service, filename, serverEndpoint, services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta/xml'},
headers=headers, config_path=config_path, requestOptions=requestOptions)
return _parse(output, service)
def from_buffer(string, serverEndpoint=ServerEndpoint, xmlContent=False, headers=None, config_path=None, requestOptions={}):
'''
Parses the content from buffer
:param string: Buffer value
:param serverEndpoint: Server endpoint. This is optional
:param xmlContent: Whether or not XML content be requested.
Default is 'False', which results in text content.
    :param headers: Request headers to be sent to the Tika REST server, should
                    be a dictionary. This is optional
    :return: dictionary having 'metadata' and 'content' keys.
             'content' has a str value and metadata has a dict type value.
'''
headers = headers or {}
headers.update({'Accept': 'application/json'})
if not xmlContent:
status, response = callServer('put', serverEndpoint, '/rmeta/text', string, headers, False, config_path=config_path, requestOptions=requestOptions)
else:
status, response = callServer('put', serverEndpoint, '/rmeta/xml', string, headers, False, config_path=config_path, requestOptions=requestOptions)
return _parse((status,response))
def _parse(output, service='all'):
'''
Parses response from Tika REST API server
:param output: output from Tika Server
:param service: service requested from the tika server
Default is 'all', which results in recursive text content+metadata.
'meta' returns only metadata
'text' returns only content
:return: a dictionary having 'metadata' and 'content' values
'''
parsed={'metadata': None, 'content': None}
if not output:
return parsed
parsed["status"] = output[0]
if output[1] == None or output[1] == "":
return parsed
if service == "text":
parsed["content"] = output[1]
return parsed
realJson = json.loads(output[1])
parsed["metadata"] = {}
if service == "meta":
for key in realJson:
parsed["metadata"][key] = realJson[key]
return parsed
content = ""
for js in realJson:
if "X-TIKA:content" in js:
content += js["X-TIKA:content"]
if content == "":
content = None
parsed["content"] = content
for js in realJson:
for n in js:
if n != "X-TIKA:content":
if n in parsed["metadata"]:
if not isinstance(parsed["metadata"][n], list):
parsed["metadata"][n] = [parsed["metadata"][n]]
parsed["metadata"][n].append(js[n])
else:
parsed["metadata"][n] = js[n]
return parsed
| apache-2.0 | 3,369,326,313,456,085,500 | 40.262712 | 155 | 0.648388 | false |