Dataset columns: repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments

brahamcosoX3/TheIoTLearningInitiative | InternetOfThings101/main.py | apache-2.0 |
#!/usr/bin/python
# Libraries
import paho.mqtt.client as paho
import psutil
import pywapi
import signal
import sys
import time
import dweepy
import random
import plotly.plotly as py
import pyupm_i2clcd as lcd
import pyupm_grove as grove
from threading import Thread
from flask import Flask
from flask_restful import Api, Resource
from plotly.graph_objs import Scatter, Layout, Figure
from Adafruit_IO import MQTTClient
# Global variables
# Display config
myLcd = lcd.Jhd1313m1(0, 0x3E, 0x62)
myLcd.setColor(255, 255, 255)
# Light sensor config
light = grove.GroveLight(0)
# Relay
relay = grove.GroveRelay(4)
# Restful init
#app = Flask(__name__)
#api = Api(app)
# Adafruit variables
ADAFRUIT_IO_KEY = 'cd6bfee245bd4b2c9e14fe2eb882643a'
ADAFRUIT_IO_USERNAME = 'brahamcoso'
# Plotly variables
username = 'brahamcosoX3'
api_key = '2no5uo7af9'
stream_token = 'npg3mqqj85'
# Classes
class Network(Resource):
def get(self):
data = 'Network Data: %i' % dataNetwork()
return data
# Functions
def interruptHandler(signal, frame):
sys.exit(0)
def on_publish(mosq, obj, msg):
pass
def dataNetwork():
netdata = psutil.net_io_counters()
return netdata.packets_sent + netdata.packets_recv
def getMac(interface):
try:
mac = open('/sys/class/net/' + interface +
'/address').readline()
except:
mac = "00:00:00:00:00:00"
return mac[0:17]
def dataWeatherHandler():
weather = pywapi.get_weather_from_weather_com('MXJO0043', 'metric')
message = "Weather Report in " + weather['location']['name']
message = message + ", Temperature "
message = message + (weather['current_conditions']['temperature'] +
" C")
message = message + ", Atmospheric Pressure "
message = message + (weather['current_conditions']
['barometer']['reading'][:-3] + " mbar")
dataLcd = "%s-%s C, %s mbar" % ( weather['location']['name'],
weather['current_conditions']['temperature'],weather['current_conditions']['barometer']['reading'][:-3])
#print message
return dataLcd
def connected(client):
print 'Connected to Adafruit IO! Listening for DemoFeed changes...'
client.subscribe('my-data')
def disconnected(client):
print 'Disconnected from Adafruit IO!'
sys.exit(1)
def message(client, feed_id, payload):
print 'Feed {0} received new value: {1}'.format(feed_id, payload)
# Adafruit IO Thread
def dataAdafruitHandler():
client = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
client.on_connect = connected
client.on_disconnect = disconnected
client.on_message = message
client.connect()
client.loop_background()
while True:
value = random.randint(0, 100)
print 'Publishing {0} to my-data.'.format(value)
client.publish('my-data', value)
time.sleep(5)
# Network Thread
def dataNetworkHandler():
idDevice = "Charles: " + getMac("wlan0")
while True:
packets = dataNetwork()
message = idDevice + " " + str(packets)
#print "MQTT dataNetworkHandler " + message
mqttclient.publish("IoT101/Network", message)
time.sleep(2)
# Message Thread
def on_message(mosq, obj, msg):
print "MQTT dataMessageHandler %s %s" % (msg.topic, msg.payload)
if "78:4b:87:9f:39:35/Actuator" in msg.topic:
if msg.payload == '1':
relay.on()
elif msg.payload == '0':
relay.off()
def dataMessageHandler():
mqttclient.subscribe("IoT101/#", 0)
#mqttclient.subscribe("IoT101/78:4b:87:9f:39:35/Actuator", 0)
while mqttclient.loop() == 0:
pass
# Plotly Thread
def dataPlotly():
return dataNetwork()
def dataPlotlyHandler():
py.sign_in(username, api_key)
trace1 = Scatter(
x=[],
y=[],
stream = dict(
token = stream_token,
maxpoints = 200))
layout = Layout(
title='Hello Internet of Things 101 Data')
fig = Figure(data = [trace1], layout = layout)
print py.plot(fig, filename = 'Hello Internet of Things 101 Plotly', auto_open=False)
i = 0
stream = py.Stream(stream_token)
stream.open()
while True:
stream_data = dataPlotly()
stream.write({'x': i, 'y': stream_data})
i += 1
time.sleep(0.25)
# Light Thread
def dataLightHandler():
while True:
dweepy.dweet_for('brahamcosoIoT101',
{'value': str(light.value())})
time.sleep(2)
#api.add_resource(Network, '/network')
if __name__ == '__main__':
signal.signal(signal.SIGINT, interruptHandler)
# Mosquitto config
mqttclient = paho.Client()
mqttclient.on_publish = on_publish
mqttclient.on_message = on_message
mqttclient.connect("test.mosquitto.org", 1883, 60)
# Run Restful site
#app.run(host='0.0.0.0', debug=True)
# Threads
threadv = Thread(target=dataAdafruitHandler)
threadv.start()
threadw = Thread(target=dataLightHandler)
threadw.start()
threadx = Thread(target=dataNetworkHandler)
threadx.start()
thready = Thread(target=dataMessageHandler)
thready.start()
threadz = Thread(target=dataPlotlyHandler)
threadz.start()
while True:
myLcd.setCursor(0, 0)
toString = dataWeatherHandler()
a,b = toString.split("-")
myLcd.write(str(a))
myLcd.setCursor(1, 0)
myLcd.write(str(b))
time.sleep(5)
# End of File

antonev/django-handlers | django_handlers.py | mit |
from itertools import chain
from collections import (
defaultdict,
Iterable,
)
from django.http import HttpResponseNotAllowed
__version__ = '0.1.1'
class Handler(object):
"""Container for views.
:param decorators: (optional) list of decorators that will be applied
to each endpoint.
"""
def __init__(self, decorators=None):
self._decorators = decorators or []
self._views = defaultdict(dict)
self._pre_hooks = defaultdict(list)
self._post_hooks = defaultdict(list)
self._invalid_endpoint_names = dir(self)
def add_view(self, method, endpoint_name, view):
"""Adds a view to handler.
:param method: HTTP method to be handled by the view
:param endpoint_name: name of endpoint to associate the view with
:param view: function to be used for requests handling
"""
self._ensure_endpoint_exists(endpoint_name)
self._views[endpoint_name][method.upper()] = view
def _ensure_endpoint_exists(self, endpoint_name):
self._validate_endpoint_name(endpoint_name)
if endpoint_name not in self._views:
self._add_endpoint(endpoint_name)
def _validate_endpoint_name(self, endpoint_name):
if endpoint_name in self._invalid_endpoint_names:
raise ValueError('Invalid endpoint name {}'.format(endpoint_name))
def _add_endpoint(self, endpoint_name):
def endpoint(request, *args, **kwargs):
for hook in self._get_pre_hooks(endpoint_name):
hook(request, *args, **kwargs)
try:
view = self._views[endpoint_name][request.method]
except KeyError:
allowed_methods = self._views[endpoint_name].keys()
response = HttpResponseNotAllowed(allowed_methods)
else:
response = view(request, *args, **kwargs)
for hook in self._get_post_hooks(endpoint_name):
hook(request, *args, **kwargs)
return response
for decorator in reversed(self._decorators):
endpoint = decorator(endpoint)
setattr(self, endpoint_name, endpoint)
def _get_pre_hooks(self, endpoint_name):
return chain(self._pre_hooks[None], self._pre_hooks[endpoint_name])
def _get_post_hooks(self, endpoint_name):
return chain(self._post_hooks[None], self._post_hooks[endpoint_name])
def _register(self, method, endpoint_name):
def decorator(view):
self.add_view(method, endpoint_name, view)
return view
return decorator
def get(self, endpoint_name):
"""Decorates a view to use it for handling of GET requests.
:param endpoint_name: name of endpoint for given view.
"""
return self._register('GET', endpoint_name)
def head(self, endpoint_name):
"""Decorates a view to use it for handling of HEAD requests.
:param endpoint_name: name of endpoint for given view.
"""
return self._register('HEAD', endpoint_name)
def options(self, endpoint_name):
"""Decorates a view to use it for handling of OPTIONS requests.
:param endpoint_name: name of endpoint for given view.
"""
return self._register('OPTIONS', endpoint_name)
def post(self, endpoint_name):
"""Decorates a view to use it for handling of POST requests.
:param endpoint_name: name of endpoint.
"""
return self._register('POST', endpoint_name)
def put(self, endpoint_name):
"""Decorates a view to use it for handling of PUT requests.
:param endpoint_name: name of endpoint.
"""
return self._register('PUT', endpoint_name)
def patch(self, endpoint_name):
"""Decorates a view to use it for handling of PATCH requests.
:param endpoint_name: name of endpoint.
"""
return self._register('PATCH', endpoint_name)
def delete(self, endpoint_name):
"""Decorates a view to use it for handling of DELETE requests.
:param endpoint_name: name of endpoint.
"""
return self._register('DELETE', endpoint_name)
def before(self, target):
"""Decorates a function to call it before views.
:param target: (optional) name of endpoint. Without it the
hook will be added for all endpoints.
"""
if callable(target):
endpoint_name = None
else:
endpoint_name = target
def decorator(view):
self.add_pre_hook(endpoint_name, view)
return view
if endpoint_name is None:
return decorator(target)
return decorator
def add_pre_hook(self, endpoint_name, hook):
"""Adds a function to call it before endpoint's views.
:param endpoint_name: name of handler endpoint
:param hook: function that should be called after endpoint's views
"""
self._pre_hooks[endpoint_name].append(hook)
def after(self, target):
"""Decorates a function to call it after views.
:param target: (optional) name of endpoint. Without it the
hook will be added for all endpoints.
"""
if callable(target):
endpoint_name = None
else:
endpoint_name = target
def decorator(view):
self.add_post_hook(endpoint_name, view)
return view
if endpoint_name is None:
return decorator(target)
return decorator
def add_post_hook(self, endpoint_name, hook):
"""Adds a function to call it after endpoint's views.
:param endpoint_name: name of handler endpoint
:param hook: function that should be called after endpoint's views
"""
self._post_hooks[endpoint_name].append(hook)
def decorate(self, endpoint_name, decorator):
"""Decorates an endpoint.
:param endpoint_name: an endpoint to decorate.
:param decorator: one decorator or iterable with decorators.
"""
endpoint = getattr(self, endpoint_name)
if isinstance(decorator, Iterable):
for dec in reversed(decorator):
endpoint = dec(endpoint)
else:
endpoint = decorator(endpoint)
setattr(self, endpoint_name, endpoint)
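
# ---------------------------------------------------------------------------
# Example usage (illustrative sketch, not part of the original module). It
# relies only on the API defined above; the import path `django_handlers`,
# the endpoint name 'index' and the URL pattern are assumptions.
#
#     from django_handlers import Handler
#
#     articles = Handler()
#
#     @articles.get('index')
#     def show_articles(request):
#         ...  # build and return an HttpResponse listing articles
#
#     @articles.post('index')
#     def create_article(request):
#         ...  # create an article and return an HttpResponse
#
#     @articles.before('index')
#     def check_auth(request):
#         ...  # runs before every view of the 'index' endpoint
#
#     # Each endpoint becomes an attribute of the handler and can be used as
#     # an ordinary view, e.g. url(r'^articles/$', articles.index).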

wintermind/pypedal | PyPedal/examples/sets/sets.py | gpl-2.0 |
from PyPedal import *
import copy
def main():
options1 = {
'pedname': 'Fake Pedigree 1',
'messages': 'verbose',
'renumber': 1,
'pedfile': 'set1.ped',
'pedformat': 'asd',
'debug_messages': True,
}
options2 = copy.copy(options1)
options2['pedname'] = 'Fake Pedigree 2'
options2['pedfile'] = 'set2.ped'
set1 = pyp_newclasses.loadPedigree(options1, debugLoad=True)
print 'Animals in set1.ped:'
print set1.idmap.keys()
set2 = pyp_newclasses.loadPedigree(options2, debugLoad=True)
print 'Animals in set2.ped:'
print set2.idmap.keys()
print 'Testing the "+" operator...'
added = set1 + set2
print added.idmap.keys()
print '='*80
options3 = copy.copy(options1)
options3['pedname'] = 'Fake Pedigree 3'
options3['pedfile'] = 'set3.ped'
set3 = pyp_newclasses.loadPedigree(options3, debugLoad=True)
print 'Animals in set3.ped:'
print set3.idmap.keys()
print 'Testing the "+" operator...'
added2 = set1 + set3
print added2.idmap.keys()
if __name__ == '__main__':
main()

lotcom/automateBoringstuffPython | Chapter13PracBruteForcePDF.py | cc0-1.0 |
#! /usr/bin/env python3
# Chapter 13 Practice Brute-Force PDF Password Breaker
# USAGE: Change the pdfFile varible below and run the script to try 44,000 English words
# from the dictionary.txt file to decrypt the encrypted PDF.
import PyPDF2
pdfFile = open('bruteForce.pdf', 'rb') #Change this file name and location
pdfReader = PyPDF2.PdfFileReader(pdfFile)
dictionaryFile = open('dictionary.txt')
passwordList = dictionaryFile.readlines()
for word in range(len(passwordList)):
passWord = passwordList[word].strip()
passWorkedUpper = pdfReader.decrypt(passWord.upper())
if passWorkedUpper == 1:
print('The password is: ' + passWord.upper())
break
else:
print(passWord.upper() + ' did NOT work...')
passWorkedLower = pdfReader.decrypt(passWord.lower())
if passWorkedLower == 1:
print('The password is: ' + passWord.lower())
break
else:
print(passWord.lower() + ' did NOT work...')
dictionaryFile.close()
pdfFile.close()

MaxTyutyunnikov/lino | obsolete/tests/27.py | gpl-3.0 |
# coding: latin1
## Copyright 2005 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import codecs
from lino.adamo.dbds.sqlite_dbd import sqlite
#import pysqlite2.dbapi2 as sqlite
from unittest import TestCase, main
filename=os.path.join(os.path.dirname(__file__),"27b.sql")
class Case(TestCase):
def test01(self):
conn = sqlite.connect(':memory:')
csr = conn.cursor()
f=codecs.open(filename,encoding="cp1252")
sql=""
lengths=[]
inserts=0
for ln in f:
ln=ln.strip()
if not ln.startswith('#'):
if ln.endswith(";"):
sql += ln[:-1]
csr.execute(sql)
#conn.commit()
#print sql
#print
if sql.startswith("SELECT "):
# use the cursor up to avoid work around
# pysqlite bug
#for t in csr:
# print t
lengths.append(len(csr.fetchall()))
#print "--> %d rows" % len(csr.fetchall())
elif sql.startswith("INSERT "):
inserts+=1
csr.close()
#else:
# conn.commit()
# print "(conn.commit())"
sql=""
else:
sql+=ln
conn.close()
#print lengths
#print "%d INSERT statements" % inserts
## self.assertEqual(lengths,
## [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
## 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
## 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 7])
self.assertEqual(
lengths,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 15, 1, 5]
)
self.assertEqual(inserts,5191)
if __name__ == '__main__':
main()

GregDMeyer/dynamite | dynamite/subspaces.py | mit |
'''
Classes that define the various subspaces on which operators can be defined.
The methods generally are just an interface to the backend, so that there is only
one implementation of each of the functions.
'''
import numpy as np
from copy import deepcopy
from zlib import crc32
from . import validate, states
from ._backend import bsubspace
class Subspace:
'''
Base subspace class.
'''
def __init__(self):
self._L = None
self._chksum = None
def __eq__(self, s):
'''
Returns true if the two subspaces correspond to the same mapping, even if they
are different classes.
'''
if not isinstance(s, Subspace):
raise ValueError('Cannot compare Subspace to non-Subspace type')
if self.get_dimension() != s.get_dimension():
return False
return self.get_checksum() == s.get_checksum()
@property
def L(self):
'''
The spin chain length corresponding to this space.
'''
return self._L
def check_L(self, value):
# by default, any L that passes our normal validation checks works
return value
@L.setter
def L(self, value):
# check that this value of L is compatible with the subspace
value = validate.L(value)
value = self.check_L(value)
if value != self._L:
self._chksum = None
self._L = value
def get_dimension(self):
"""
Get the dimension of the subspace.
"""
raise NotImplementedError()
@classmethod
def _numeric_to_array(cls, x):
'''
Convert numeric values of any type to the type expected by the backend
functions.
'''
x = np.array(x, copy = False, dtype = bsubspace.dnm_int_t).reshape((-1,))
return np.ascontiguousarray(x)
def idx_to_state(self, idx):
"""
Maps an index to an integer that in binary corresponds to the spin configuration.
Vectorized implementation allows passing a numpy array of indices as idx.
"""
raise NotImplementedError()
def state_to_idx(self, state):
"""
The inverse mapping of :meth:`idx_to_state`.
"""
raise NotImplementedError()
def copy(self):
return deepcopy(self)
def get_checksum(self):
'''
Get a checksum of the state mapping for this subspace. This allows subspaces to
be compared quickly.
'''
if self._chksum is None:
BLOCK = 2**14
chksum = 0
for start in range(0, self.get_dimension(), BLOCK):
stop = min(start+BLOCK, self.get_dimension())
smap = self.idx_to_state(np.arange(start, stop))
chksum = crc32(smap, chksum)
self._chksum = chksum
return self._chksum
def __hash__(self):
return self.get_checksum()
def get_cdata(self):
'''
Returns an object containing the subspace data accessible by the backend C.
'''
raise NotImplementedError()
def to_enum(self):
'''
Convert the class types used in the Python frontend to the enum values
used in the C backend.
'''
raise NotImplementedError()
class Full(Subspace):
def __init__(self):
Subspace.__init__(self)
# Full is a special case
def __eq__(self, s):
if isinstance(s, Full):
return s.L == self.L
return Subspace.__eq__(self, s)
# overriding __eq__ causes this to get unset. :(
__hash__ = Subspace.__hash__
def get_dimension(self):
"""
Get the dimension of the subspace.
"""
return self._get_dimension(self.L)
@classmethod
def _get_dimension(cls, L):
return bsubspace.get_dimension_Full(cls._get_cdata(L))
def idx_to_state(self, idx):
"""
Maps an index to an integer that in binary corresponds to the spin configuration.
Vectorized implementation allows passing a numpy array of indices as idx.
"""
return self._idx_to_state(idx, self.L)
def state_to_idx(self, state):
"""
The inverse mapping of :meth:`idx_to_state`.
"""
return self._state_to_idx(state, self.L)
@classmethod
def _idx_to_state(cls, idx, L):
idx = cls._numeric_to_array(idx)
return bsubspace.idx_to_state_Full(idx, cls._get_cdata(L))
@classmethod
def _state_to_idx(cls, state, L):
state = cls._numeric_to_array(state)
return bsubspace.state_to_idx_Full(state, cls._get_cdata(L))
def get_cdata(self):
'''
Returns an object containing the subspace data accessible by the C backend.
'''
return self._get_cdata(self.L)
@classmethod
def _get_cdata(cls, L):
return bsubspace.CFull(L)
def to_enum(self):
'''
Convert the class types used in the Python frontend to the enum values
used in the C backend.
'''
return bsubspace.SubspaceType.FULL
class Parity(Subspace):
'''
The subspaces of states in which the number of up spins is even or odd.
Parameters
----------
space : int
Either 0 or 'even' for the even subspace, or 1 or 'odd' for the other.
'''
def __init__(self, space):
Subspace.__init__(self)
self._space = self._check_space(space)
@property
def space(self):
return self._space
@classmethod
def _check_space(cls, value):
if value in [0,'even']:
return 0
elif value in [1,'odd']:
return 1
else:
raise ValueError('Invalid parity space "'+str(value)+'" '
'(valid choices are 0, 1, "even", or "odd")')
def get_dimension(self):
"""
Get the dimension of the subspace.
"""
return self._get_dimension(self.L, self.space)
@classmethod
def _get_dimension(cls, L, space):
return bsubspace.get_dimension_Parity(cls._get_cdata(L, space))
def idx_to_state(self, idx):
"""
Maps an index to an integer that in binary corresponds to the spin configuration.
Vectorized implementation allows passing a numpy array of indices as idx.
"""
idx = self._numeric_to_array(idx)
return self._idx_to_state(idx, self.L, self.space)
def state_to_idx(self, state):
"""
The inverse mapping of :meth:`idx_to_state`.
"""
state = self._numeric_to_array(state)
return self._state_to_idx(state, self.L, self.space)
@classmethod
def _idx_to_state(cls, idx, L, space):
return bsubspace.idx_to_state_Parity(idx, cls._get_cdata(L, space))
@classmethod
def _state_to_idx(cls, state, L, space):
return bsubspace.state_to_idx_Parity(state, cls._get_cdata(L, space))
def get_cdata(self):
'''
Returns an object containing the subspace data accessible by the C backend.
'''
return self._get_cdata(self.L, self.space)
@classmethod
def _get_cdata(cls, L, space):
return bsubspace.CParity(L, space)
def to_enum(self):
'''
Convert the class types used in the Python frontend to the enum values
used in the C backend.
'''
return bsubspace.SubspaceType.PARITY
class Auto(Subspace):
'''
Automatically generate a mapping that takes advantage of any possible spin conservation
law, by performing a breadth-first search of the graph of possible states using the operator
as an adjacency matrix. The subspace is defined by providing a "start" state; the returned
subspace will be whatever subspace contains that state.
Currently the actual computation of the ordering only can occur on process 0, limiting
the scalability of this subspace.
Parameters
----------
H : dynamite.operators.Operator
The operator for which this custom subspace will be defined.
state : int or string
An integer whose binary representation corresponds to the spin configuration of the "start"
state mentioned above, or string representing the same. See
:meth:`dynamite.states.State.str_to_state` for more information.
size_guess : int
A guess for the dimension of the subspace. By default, memory is allocated for the full
space, and then trimmed off if not used.
sort : bool
Whether to reorder the mapping after computing it. In some cases this may
cause a speedup.
'''
def __init__(self, H, state, size_guess=None, sort=True):
Subspace.__init__(self)
self._L = H.get_length()
self.state = states.State.str_to_state(state, self.L)
if size_guess is None:
size_guess = 2**H.get_length()
self.state_map = np.ndarray((size_guess,), dtype=bsubspace.dnm_int_t)
H.reduce_msc()
dim = bsubspace.compute_rcm(H.msc['masks'], H.msc['signs'], H.msc['coeffs'],
self.state_map, self.state, H.get_length())
self.state_map = self.state_map[:dim]
self.rmap_indices = np.argsort(self.state_map).astype(bsubspace.dnm_int_t, copy=False)
self.rmap_states = self.state_map[self.rmap_indices]
if sort:
self.state_map = self.rmap_states
self.rmap_indices = np.arange(self.state_map.size, dtype=bsubspace.dnm_int_t)
def check_L(self, value):
if value != self.L:
raise TypeError('Cannot change L for Auto subspace type.')
return value
def get_dimension(self):
"""
Get the dimension of the subspace.
"""
return bsubspace.get_dimension_Auto(self.get_cdata())
def idx_to_state(self, idx):
"""
Maps an index to an integer that in binary corresponds to the spin configuration.
Vectorized implementation allows passing a numpy array of indices as idx.
"""
idx = self._numeric_to_array(idx)
return bsubspace.idx_to_state_Auto(idx, self.get_cdata())
def state_to_idx(self, state):
"""
The inverse mapping of :meth:`idx_to_state`.
"""
state = self._numeric_to_array(state)
return bsubspace.state_to_idx_Auto(state, self.get_cdata())
def get_cdata(self):
'''
Returns an object containing the subspace data accessible by the C backend.
'''
return bsubspace.CAuto(
self.L,
np.ascontiguousarray(self.state_map),
np.ascontiguousarray(self.rmap_indices),
np.ascontiguousarray(self.rmap_states)
)
def to_enum(self):
'''
Convert the class types used in the Python frontend to the enum values
used in the C backend.
'''
return bsubspace.SubspaceType.AUTO
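
# ---------------------------------------------------------------------------
# Example usage (illustrative sketch, not part of the original module). It
# uses only the classes defined above; the chain length 10 is an arbitrary
# choice for illustration.
#
#     from dynamite.subspaces import Full, Parity
#
#     sp = Parity('even')          # sector with an even number of up spins
#     sp.L = 10                    # spin chain length
#     dim = sp.get_dimension()     # dimension of the sector
#     conf = sp.idx_to_state(0)    # integer bit string of basis state 0
#     idx = sp.state_to_idx(conf)  # inverse mapping, returns index 0
#
#     full = Full()
#     full.L = 10                  # the full 2**10-dimensional space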

ccpgames/eve-metrics | web2py/applications/welcome/languages/hu.py | mit |
# coding: utf8
{
'!langcode!': 'hu',
'!langname!': 'Magyar',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s sorok törlődtek',
'%s %%{row} updated': '%s sorok frissítődtek',
'%s selected': '%s kiválasztott',
'%Y-%m-%d': '%Y.%m.%d.',
'%Y-%m-%d %H:%M:%S': '%Y.%m.%d. %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'az adminisztrációs felületért kattints ide',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'az appadmin a biztonságtalan csatorna miatt letiltva',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'Elérhető adatbázisok és táblák',
'Buy this book': 'Buy this book',
'cache': 'gyorsítótár',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Nem lehet üres',
'change password': 'jelszó megváltoztatása',
'Check to delete': 'Törléshez válaszd ki',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Jelenlegi lekérdezés',
'Current response': 'Jelenlegi válasz',
'Current session': 'Jelenlegi folyamat',
'customize me!': 'változtass meg!',
'data uploaded': 'adat feltöltve',
'Database': 'adatbázis',
'Database %s select': 'adatbázis %s kiválasztás',
'db': 'db',
'DB Model': 'DB Model',
'Delete:': 'Töröl:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'kész!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Szerkeszt',
'Edit current record': 'Aktuális bejegyzés szerkesztése',
'edit profile': 'profil szerkesztése',
'Edit This App': 'Alkalmazást szerkeszt',
'Email and SMS': 'Email and SMS',
'Errors': 'Errors',
'export as csv file': 'exportál csv fájlba',
'FAQ': 'FAQ',
'First name': 'First name',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group ID': 'Group ID',
'Groups': 'Groups',
'Hello World': 'Hello Világ',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Import/Export',
'Index': 'Index',
'insert new': 'új beillesztése',
'insert new %s': 'új beillesztése %s',
'Internal State': 'Internal State',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Invalid Query': 'Hibás lekérdezés',
'invalid request': 'hibás kérés',
'Key': 'Key',
'Last name': 'Last name',
'Layout': 'Szerkezet',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'login': 'belép',
'logout': 'kilép',
'lost password': 'elveszett jelszó',
'Lost Password': 'Lost Password',
'Main Menu': 'Főmenü',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menü model',
'My Sites': 'My Sites',
'Name': 'Name',
'New Record': 'Új bejegyzés',
'new record inserted': 'új bejegyzés felvéve',
'next 100 rows': 'következő 100 sor',
'No databases in this application': 'Nincs adatbázis ebben az alkalmazásban',
'Online examples': 'online példákért kattints ide',
'or import from csv file': 'vagy betöltés csv fájlból',
'Origin': 'Origin',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'előző 100 sor',
'Python': 'Python',
'Query:': 'Lekérdezés:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'bejegyzés',
'record does not exist': 'bejegyzés nem létezik',
'Record ID': 'Record ID',
'Record id': 'bejegyzés id',
'Register': 'Register',
'register': 'regisztráció',
'Registration key': 'Registration key',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Rows in Table': 'Sorok a táblában',
'Rows selected': 'Kiválasztott sorok',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'állapot',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Biztos törli ezt az objektumot?',
'Table': 'tábla',
'Table name': 'Table name',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'unable to parse csv file': 'nem lehet a csv fájlt beolvasni',
'Update:': 'Frissít:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User ID': 'User ID',
'Videos': 'Videos',
'View': 'Nézet',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Isten hozott a web2py-ban',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}

shincling/MemNN_and_Varieties | DataCoupus/list_document/timelist_answer.py | bsd-3-clause |
# -*- coding: utf8 -*-
__author__ = 'shin'
import jieba
timelist_answer=[]
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天')
timelist_answer.append('明天。')
timelist_answer.append('明天。')
timelist_answer.append('明天。')
timelist_answer.append('明天。')
timelist_answer.append('明天。')
timelist_answer.append('时间是明天。')
timelist_answer.append('帮我预订明天的机票。')
timelist_answer.append('出行时间是明天。')
timelist_answer.append('订明天的机票。')
timelist_answer.append('明天走。')
timelist_answer.append('明天出发。')
timelist_answer.append('明天之前。')
timelist_answer.append('在明天出发就行。')
timelist_answer.append('需要明天出发。')
timelist_answer.append('我要订明天的飞机。')
timelist_answer.append('订购明天的机票。')
timelist_answer.append('出行时间应该是明天。')
timelist_answer.append('于明天出行。')
timelist_answer.append('在明天走。')
timelist_answer.append('明天出发')
timelist_answer.append('明天走')
timelist_answer.append('出发时间明天')
timelist_answer.append('时间明天')
timelist_answer.append('我打算明天出发')
timelist_answer.append('我想明天出发')
timelist_answer.append('明天出发的票')
timelist_answer.append('明天出发的机票')
timelist_answer.append('明天走的票')
timelist_answer.append('明天走的机票')
timelist_answer.append('明天的机票')
timelist_answer.append('明天的票')
timelist_answer_cut=[]
for ans in timelist_answer:
w_sent=''
sent=jieba._lcut(ans)
for word in (sent):
w_sent +=' '
w_sent +=word
w_sent += '\n'
w_sent=w_sent.replace('明天'.decode('utf8'),'[slot_time]')
timelist_answer_cut.append(w_sent)
    pass

endlessm/chromium-browser | third_party/angle/scripts/generate_stats.py |
#!/usr/bin/env vpython
#
# [VPYTHON:BEGIN]
# wheel: <
# name: "infra/python/wheels/google-auth-py2_py3"
# version: "version:1.2.1"
# >
#
# wheel: <
# name: "infra/python/wheels/pyasn1-py2_py3"
# version: "version:0.4.5"
# >
#
# wheel: <
# name: "infra/python/wheels/pyasn1_modules-py2_py3"
# version: "version:0.2.4"
# >
#
# wheel: <
# name: "infra/python/wheels/six"
# version: "version:1.10.0"
# >
#
# wheel: <
# name: "infra/python/wheels/cachetools-py2_py3"
# version: "version:2.0.1"
# >
# wheel: <
# name: "infra/python/wheels/rsa-py2_py3"
# version: "version:4.0"
# >
#
# wheel: <
# name: "infra/python/wheels/requests"
# version: "version:2.13.0"
# >
#
# wheel: <
# name: "infra/python/wheels/google-api-python-client-py2_py3"
# version: "version:1.6.2"
# >
#
# wheel: <
# name: "infra/python/wheels/httplib2-py2_py3"
# version: "version:0.12.1"
# >
#
# wheel: <
# name: "infra/python/wheels/oauth2client-py2_py3"
# version: "version:3.0.0"
# >
#
# wheel: <
# name: "infra/python/wheels/uritemplate-py2_py3"
# version: "version:3.0.0"
# >
#
# wheel: <
# name: "infra/python/wheels/google-auth-oauthlib-py2_py3"
# version: "version:0.3.0"
# >
#
# wheel: <
# name: "infra/python/wheels/requests-oauthlib-py2_py3"
# version: "version:1.2.0"
# >
#
# wheel: <
# name: "infra/python/wheels/oauthlib-py2_py3"
# version: "version:3.0.1"
# >
#
# wheel: <
# name: "infra/python/wheels/google-auth-httplib2-py2_py3"
# version: "version:0.0.3"
# >
# [VPYTHON:END]
#
# Copyright 2019 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# generate_deqp_stats.py:
# Checks output of deqp testers and generates stats using the GDocs API
#
# prerequisites:
# https://devsite.googleplex.com/sheets/api/quickstart/python
# Follow the quickstart guide.
#
# usage: generate_deqp_stats.py [-h] [--auth_path [AUTH_PATH]] [--spreadsheet [SPREADSHEET]]
# [--verbosity [VERBOSITY]]
#
# optional arguments:
# -h, --help show this help message and exit
# --auth_path [AUTH_PATH]
# path to directory containing authorization data (credentials.json and
# token.pickle). [default=<home>/.auth]
# --spreadsheet [SPREADSHEET]
# ID of the spreadsheet to write stats to. [default
# ='1D6Yh7dAPP-aYLbX3HHQD8WubJV9XPuxvkKowmn2qhIw']
# --verbosity [VERBOSITY]
# Verbosity of output. Valid options are [DEBUG, INFO, WARNING, ERROR].
# [default=INFO]
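#
# example invocation (illustrative; the spreadsheet ID is the one listed above):
#   vpython generate_stats.py --auth_path ~/.auth \
#       --spreadsheet '1D6Yh7dAPP-aYLbX3HHQD8WubJV9XPuxvkKowmn2qhIw' --verbosity DEBUG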
import argparse
import datetime
import logging
import os
import pickle
import re
import subprocess
import sys
import urllib
from google.auth.transport.requests import Request
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
####################
# Global Constants #
####################
HOME_DIR = os.path.expanduser('~')
SCRIPT_DIR = sys.path[0]
ROOT_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, '..'))
LOGGER = logging.getLogger('generate_stats')
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
BOT_NAMES = [
'Win10 FYI x64 dEQP Release (NVIDIA)',
'Win10 FYI x64 dEQP Release (Intel HD 630)',
'Win7 FYI dEQP Release (AMD)',
'Win7 FYI x64 dEQP Release (NVIDIA)',
'Mac FYI dEQP Release Intel',
'Mac FYI dEQP Release AMD',
'Linux FYI dEQP Release (Intel HD 630)',
'Linux FYI dEQP Release (NVIDIA)',
'Android FYI dEQP Release (Nexus 5X)',
'Android FYI 32 dEQP Vk Release (Pixel 2)',
'Android FYI 64 dEQP Vk Release (Pixel 2)',
]
BOT_NAME_PREFIX = 'chromium/ci/'
BUILD_LINK_PREFIX = 'https://ci.chromium.org/p/chromium/builders/ci/'
REQUIRED_COLUMNS = ['build_link', 'time', 'date', 'revision', 'angle_revision', 'duplicate']
MAIN_RESULT_COLUMNS = ['Passed', 'Failed', 'Skipped', 'Not Supported', 'Exception', 'Crashed']
INFO_TAG = '*RESULT'
WORKAROUND_FORMATTING_ERROR_STRING = "Still waiting for the following processes to finish:"
######################
# Build Info Parsing #
######################
# Returns a struct with info about the latest successful build given a bot name. Info contains the
# build_name, time, date, angle_revision, and chrome revision.
# Uses: bb ls '<botname>' -n 1 -status success -p
def get_latest_success_build_info(bot_name):
bb = subprocess.Popen(['bb', 'ls', bot_name, '-n', '1', '-status', 'success', '-p'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
LOGGER.debug("Ran [bb ls '" + bot_name + "' -n 1 -status success -p]")
out, err = bb.communicate()
if err:
raise ValueError("Unexpected error from bb ls: '" + err + "'")
if not out:
raise ValueError("Unexpected empty result from bb ls of bot '" + bot_name + "'")
# Example output (line 1):
# ci.chromium.org/b/8915280275579996928 SUCCESS 'chromium/ci/Win10 FYI dEQP Release (NVIDIA)/26877'
# ...
if 'SUCCESS' not in out:
raise ValueError("Unexpected result from bb ls: '" + out + "'")
info = {}
for line in out.splitlines():
# The first line holds the build name
if 'build_name' not in info:
info['build_name'] = line.strip().split("'")[1]
# Remove the bot name and prepend the build link
info['build_link'] = BUILD_LINK_PREFIX + urllib.quote(
info['build_name'].split(BOT_NAME_PREFIX)[1])
if 'Created' in line:
# Example output of line with 'Created':
# ...
# Created today at 12:26:39, waited 2.056319s, started at 12:26:41, ran for 1h16m48.14963s, ended at 13:43:30
# ...
info['time'] = re.findall(r'[0-9]{1,2}:[0-9]{2}:[0-9]{2}', line.split(',', 1)[0])[0]
# Format today's date in US format so Sheets can read it properly
info['date'] = datetime.datetime.now().strftime('%m/%d/%y')
if 'got_angle_revision' in line:
# Example output of line with angle revision:
# ...
# "parent_got_angle_revision": "8cbd321cafa92ffbf0495e6d0aeb9e1a97940fee",
# ...
info['angle_revision'] = filter(str.isalnum, line.split(':')[1])
if '"revision"' in line:
# Example output of line with chromium revision:
# ...
# "revision": "3b68405a27f1f9590f83ae07757589dba862f141",
# ...
info['revision'] = filter(str.isalnum, line.split(':')[1])
if 'build_name' not in info:
raise ValueError("Could not find build_name from bot '" + bot_name + "'")
return info
# Returns a list of step names that we're interested in given a build name. We are interested in
# step names starting with 'angle_'. May raise an exception.
# Uses: bb get '<build_name>' -steps
def get_step_names(build_name):
bb = subprocess.Popen(['bb', 'get', build_name, '-steps'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
LOGGER.debug("Ran [bb get '" + build_name + "' -steps]")
out, err = bb.communicate()
if err:
raise ValueError("Unexpected error from bb get: '" + err + "'")
step_names = []
# Example output (relevant lines to a single step):
# ...
# Step "angle_deqp_egl_vulkan_tests on (nvidia-quadro-p400-win10-stable) GPU on Windows on Windows-10" SUCCESS 4m12s Logs: "stdout", "chromium_swarming.summary", "Merge script log", "Flaky failure: dEQP.EGL/info_version (status CRASH,SUCCESS)", "step_metadata"
# Run on OS: 'Windows-10'<br>Max shard duration: 0:04:07.309848 (shard \#1)<br>Min shard duration: 0:02:26.402128 (shard \#0)<br/>flaky failures [ignored]:<br/>dEQP.EGL/info\_version<br/>
# * [shard #0 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=9a5999a59d332e55f54f495948d0c9f959e60ed2)
# * [shard #0 (128.3 sec)](https://chromium-swarm.appspot.com/user/task/446903ae365b8110)
# * [shard #1 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=d71e1bdd91dee61b536b4057a9222e642bd3809f)
# * [shard #1 (229.3 sec)](https://chromium-swarm.appspot.com/user/task/446903b7b0d90210)
# * [shard #2 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=ac9ba85b1cca77774061b87335c077980e1eef85)
# * [shard #2 (144.5 sec)](https://chromium-swarm.appspot.com/user/task/446903c18e15a010)
# * [shard #3 isolated out](https://isolateserver.appspot.com/browse?namespace=default-gzip&hash=976d586386864abecf53915fbac3e085f672e30f)
# * [shard #3 (138.4 sec)](https://chromium-swarm.appspot.com/user/task/446903cc8da0ad10)
# ...
for line in out.splitlines():
if 'Step "angle_' not in line:
continue
step_names.append(line.split('"')[1])
return step_names
# Performs some heuristic validation of the step_info struct returned from a single step log.
# Returns True if valid, False if invalid. May write to stderr
def validate_step_info(step_info, build_name, step_name):
print_name = "'" + build_name + "': '" + step_name + "'"
if not step_info:
LOGGER.warning('Step info empty for ' + print_name + '\n')
return False
if 'Total' in step_info:
partial_sum_keys = MAIN_RESULT_COLUMNS
partial_sum_values = [int(step_info[key]) for key in partial_sum_keys if key in step_info]
computed_total = sum(partial_sum_values)
if step_info['Total'] != computed_total:
LOGGER.warning('Step info does not sum to total for ' + print_name + ' | Total: ' +
str(step_info['Total']) + ' - Computed total: ' + str(computed_total) +
'\n')
return True
# Returns a struct containing parsed info from a given step log. The info is parsed by looking for
# lines with the following format in stdout:
# '*RESULT: <key>: <value>'
# May write to stderr
# Uses: bb log '<build_name>' '<step_name>'
def get_step_info(build_name, step_name):
bb = subprocess.Popen(['bb', 'log', build_name, step_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
LOGGER.debug("Ran [bb log '" + build_name + "' '" + step_name + "']")
out, err = bb.communicate()
if err:
LOGGER.warning("Unexpected error from bb log '" + build_name + "' '" + step_name + "': '" +
err + "'")
return None
step_info = {}
# Example output (relevant lines of stdout):
# ...
# *RESULT: Total: 155
# *RESULT: Passed: 11
# *RESULT: Failed: 0
# *RESULT: Skipped: 12
# *RESULT: Not Supported: 132
# *RESULT: Exception: 0
# *RESULT: Crashed: 0
# *RESULT: Unexpected Passed: 12
# ...
append_errors = []
# Hacky workaround to fix issue where messages are dropped into the middle of lines by another
# process:
# eg.
# *RESULT: <start_of_result>Still waiting for the following processes to finish:
# "c:\b\s\w\ir\out\Release\angle_deqp_gles3_tests.exe" --deqp-egl-display-type=angle-vulkan --gtest_flagfile="c:\b\s\w\itlcgdrz\scoped_dir7104_364984996\8ad93729-f679-406d-973b-06b9d1bf32de.tmp" --single-process-tests --test-launcher-batch-limit=400 --test-launcher-output="c:\b\s\w\itlcgdrz\7104_437216092\test_results.xml" --test-launcher-summary-output="c:\b\s\w\iosuk8ai\output.json"
# <end_of_result>
#
# Removes the message and skips the line following it, and then appends the <start_of_result>
# and <end_of_result> back together
workaround_prev_line = ""
workaround_prev_line_count = 0
for line in out.splitlines():
# Skip lines if the workaround still has lines to skip
if workaround_prev_line_count > 0:
workaround_prev_line_count -= 1
continue
# If there are no more lines to skip and there is a previous <start_of_result> to append,
# append it and finish the workaround
elif workaround_prev_line != "":
line = workaround_prev_line + line
workaround_prev_line = ""
workaround_prev_line_count = 0
LOGGER.debug("Formatting error workaround rebuilt line as: '" + line + "'\n")
if INFO_TAG not in line:
continue
# When the workaround string is detected, start the workaround with 1 line to skip and save
# the <start_of_result>, but continue the loop until the workaround is finished
if WORKAROUND_FORMATTING_ERROR_STRING in line:
workaround_prev_line = line.split(WORKAROUND_FORMATTING_ERROR_STRING)[0]
workaround_prev_line_count = 1
continue
found_stat = True
line_columns = line.split(INFO_TAG, 1)[1].split(':')
if len(line_columns) is not 3:
LOGGER.warning("Line improperly formatted: '" + line + "'\n")
continue
key = line_columns[1].strip()
# If the value is clearly an int, sum it. Otherwise, concatenate it as a string
isInt = False
intVal = 0
try:
intVal = int(line_columns[2])
if intVal is not None:
isInt = True
except Exception as error:
isInt = False
if isInt:
if key not in step_info:
step_info[key] = 0
step_info[key] += intVal
else:
if key not in step_info:
step_info[key] = line_columns[2].strip()
else:
append_string = '\n' + line_columns[2].strip()
# Sheets has a limit of 50000 characters per cell, so make sure to stop appending
# below this limit
if len(step_info[key]) + len(append_string) < 50000:
step_info[key] += append_string
else:
if key not in append_errors:
append_errors.append(key)
LOGGER.warning("Too many characters in column '" + key +
"'. Output capped.")
return step_info
# Returns the info for each step run on a given bot_name.
def get_bot_info(bot_name):
info = get_latest_success_build_info(bot_name)
info['step_names'] = get_step_names(info['build_name'])
broken_step_names = []
for step_name in info['step_names']:
LOGGER.info("Parsing step '" + step_name + "'...")
step_info = get_step_info(info['build_name'], step_name)
if validate_step_info(step_info, info['build_name'], step_name):
info[step_name] = step_info
else:
broken_step_names += step_name
for step_name in broken_step_names:
info['step_names'].remove(step_name)
return info
#####################
# Sheets Formatting #
#####################
# Get an individual spreadsheet based on the spreadsheet id. Returns the result of
# spreadsheets.get(), or throws an exception if the sheet could not open.
def get_spreadsheet(service, spreadsheet_id):
LOGGER.debug("Called [spreadsheets.get(spreadsheetId='" + spreadsheet_id + "')]")
request = service.get(spreadsheetId=spreadsheet_id)
spreadsheet = request.execute()
if not spreadsheet:
raise Exception("Did not open spreadsheet '" + spreadsheet_id + "'")
return spreadsheet
# Returns a nicely formatted string based on the bot_name and step_name
def format_sheet_name(bot_name, step_name):
# Some tokens should be ignored for readability in the name
unneccesary_tokens = ['FYI', 'Release', 'Vk', 'dEQP', '(', ')']
for token in unneccesary_tokens:
bot_name = bot_name.replace(token, '')
bot_name = ' '.join(bot_name.strip().split()) # Remove extra spaces
step_name = re.findall(r'angle\w*', step_name)[0] # Separate test name
# Test names are formatted as 'angle_deqp_<frontend>_<backend>_tests'
new_step_name = ''
# Put the frontend first
if '_egl_' in step_name:
step_name = step_name.replace('_egl_', '_')
new_step_name += ' EGL'
if '_gles2_' in step_name:
step_name = step_name.replace('_gles2_', '_')
new_step_name += ' GLES 2.0 '
if '_gles3_' in step_name:
step_name = step_name.replace('_gles3_', '_')
new_step_name += ' GLES 3.0 '
if '_gles31_' in step_name:
step_name = step_name.replace('_gles31_', '_')
new_step_name += ' GLES 3.1 '
# Put the backend second
if '_d3d9_' in step_name:
step_name = step_name.replace('_d3d9_', '_')
new_step_name += ' D3D9 '
if '_d3d11' in step_name:
step_name = step_name.replace('_d3d11_', '_')
new_step_name += ' D3D11 '
if '_gl_' in step_name:
step_name = step_name.replace('_gl_', '_')
new_step_name += ' Desktop OpenGL '
if '_gles_' in step_name:
step_name = step_name.replace('_gles_', '_')
new_step_name += ' OpenGLES '
if '_vulkan_' in step_name:
step_name = step_name.replace('_vulkan_', '_')
new_step_name += ' Vulkan '
# Add any remaining keywords from the step name into the formatted name (formatted nicely)
step_name = step_name.replace('angle_', '_')
step_name = step_name.replace('_deqp_', '_')
step_name = step_name.replace('_tests', '_')
step_name = step_name.replace('_', ' ').strip()
new_step_name += ' ' + step_name
new_step_name = ' '.join(new_step_name.strip().split()) # Remove extra spaces
return new_step_name + ' ' + bot_name
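
# Illustration (not from the original source): with the inputs
#   format_sheet_name('Win10 FYI x64 dEQP Release (NVIDIA)',
#                     'angle_deqp_egl_vulkan_tests on (nvidia-quadro-p400-win10-stable) GPU on Windows on Windows-10')
# the formatting above yields the sheet name 'EGL Vulkan Win10 x64 NVIDIA'.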
# Returns the full list of sheet names that should be populated based on the info struct
def get_sheet_names(info):
sheet_names = []
for bot_name in info:
for step_name in info[bot_name]['step_names']:
sheet_name = format_sheet_name(bot_name, step_name)
sheet_names.append(sheet_name)
return sheet_names
# Returns True if the sheet is found in the spreadsheets object
def sheet_exists(spreadsheet, step_name):
for sheet in spreadsheet['sheets']:
if sheet['properties']['title'] == step_name:
return True
return False
# Validates the spreadsheets object against the list of sheet names which should appear. Returns a
# list of sheets that need creation.
def validate_sheets(spreadsheet, sheet_names):
create_sheets = []
for sheet_name in sheet_names:
if not sheet_exists(spreadsheet, sheet_name):
create_sheets.append(sheet_name)
return create_sheets
# Performs a batch update with a given service, spreadsheet id, and list <object(Request)> of
# updates to do.
def batch_update(service, spreadsheet_id, updates):
batch_update_request_body = {
'requests': updates,
}
LOGGER.debug("Called [spreadsheets.batchUpdate(spreadsheetId='" + spreadsheet_id + "', body=" +
str(batch_update_request_body) + ')]')
request = service.batchUpdate(spreadsheetId=spreadsheet_id, body=batch_update_request_body)
request.execute()
# Creates sheets given a service and spreadsheed id based on a list of sheet names input
def create_sheets(service, spreadsheet_id, sheet_names):
updates = [{'addSheet': {'properties': {'title': sheet_name,}}} for sheet_name in sheet_names]
batch_update(service, spreadsheet_id, updates)
# Calls a values().batchGet() on the service to find the list of column names from each sheet in
# sheet_names. Returns a dictionary with one list per sheet_name.
def get_headers(service, spreadsheet_id, sheet_names):
header_ranges = [sheet_name + '!A1:Z' for sheet_name in sheet_names]
LOGGER.debug("Called [spreadsheets.values().batchGet(spreadsheetId='" + spreadsheet_id +
', ranges=' + str(header_ranges) + "')]")
request = service.values().batchGet(spreadsheetId=spreadsheet_id, ranges=header_ranges)
response = request.execute()
headers = {}
for k, sheet_name in enumerate(sheet_names):
if 'values' in response['valueRanges'][k]:
# Headers are in the first row of values
headers[sheet_name] = response['valueRanges'][k]['values'][0]
else:
headers[sheet_name] = []
return headers
# Calls values().batchUpdate() with supplied list of data <object(ValueRange)> to update on the
# service.
def batch_update_values(service, spreadsheet_id, data):
batch_update_values_request_body = {
'valueInputOption': 'USER_ENTERED', # Helps with formatting of dates
'data': data,
}
LOGGER.debug("Called [spreadsheets.values().batchUpdate(spreadsheetId='" + spreadsheet_id +
"', body=" + str(batch_update_values_request_body) + ')]')
request = service.values().batchUpdate(
spreadsheetId=spreadsheet_id, body=batch_update_values_request_body)
request.execute()
# Get the sheetId of a sheet based on its name
def get_sheet_id(spreadsheet, sheet_name):
for sheet in spreadsheet['sheets']:
if sheet['properties']['title'] == sheet_name:
return sheet['properties']['sheetId']
return -1
# Update the filters on sheets with a 'duplicate' column. Filter out any duplicate rows
def update_filters(service, spreadsheet_id, headers, info, spreadsheet):
updates = []
for bot_name in info:
for step_name in info[bot_name]['step_names']:
sheet_name = format_sheet_name(bot_name, step_name)
duplicate_found = 'duplicate' in headers[sheet_name]
if duplicate_found:
sheet_id = get_sheet_id(spreadsheet, sheet_name)
if sheet_id > -1:
updates.append({
"setBasicFilter": {
"filter": {
"range": {
"sheetId": sheet_id,
"startColumnIndex": 0,
"endColumnIndex": len(headers[sheet_name])
},
"sortSpecs": [{
"dimensionIndex": headers[sheet_name].index('date'),
"sortOrder": "ASCENDING"
}],
"criteria": {
str(headers[sheet_name].index('duplicate')): {
"hiddenValues":
["1"] # Hide rows when duplicate is 1 (true)
}
}
}
}
})
if updates:
LOGGER.info('Updating sheet filters...')
batch_update(service, spreadsheet_id, updates)
# Populates the headers with any missing/desired rows based on the info struct, and calls
# batch update to update the corresponding sheets if necessary.
def update_headers(service, spreadsheet_id, headers, info):
data = []
sheet_names = []
for bot_name in info:
for step_name in info[bot_name]['step_names']:
if not step_name in info[bot_name]:
LOGGER.error("Missing info for step name: '" + step_name + "'")
sheet_name = format_sheet_name(bot_name, step_name)
headers_stale = False
# Headers should always contain the following columns
for req in REQUIRED_COLUMNS:
if req not in headers[sheet_name]:
headers_stale = True
headers[sheet_name].append(req)
# Headers also must contain all the keys seen in this step
for key in info[bot_name][step_name].keys():
if key not in headers[sheet_name]:
headers_stale = True
headers[sheet_name].append(key)
# Update the Gdoc headers if necessary
if headers_stale:
sheet_names.append(sheet_name)
header_range = sheet_name + '!A1:Z'
data.append({
'range': header_range,
'majorDimension': 'ROWS',
'values': [headers[sheet_name]]
})
if data:
LOGGER.info('Updating sheet headers...')
batch_update_values(service, spreadsheet_id, data)
# Calls values().append() to append a list of values to a given sheet.
def append_values(service, spreadsheet_id, sheet_name, values):
header_range = sheet_name + '!A1:Z'
insert_data_option = 'INSERT_ROWS'
value_input_option = 'USER_ENTERED' # Helps with formatting of dates
append_values_request_body = {
'range': header_range,
'majorDimension': 'ROWS',
'values': [values],
}
LOGGER.debug("Called [spreadsheets.values().append(spreadsheetId='" + spreadsheet_id +
"', body=" + str(append_values_request_body) + ", range='" + header_range +
"', insertDataOption='" + insert_data_option + "', valueInputOption='" +
value_input_option + "')]")
request = service.values().append(
spreadsheetId=spreadsheet_id,
body=append_values_request_body,
range=header_range,
insertDataOption=insert_data_option,
valueInputOption=value_input_option)
request.execute()
# Formula to determine whether a row is a duplicate of the previous row based on checking the
# columns listed in filter_columns.
# Eg.
# date | pass | fail
# Jan 1 100 50
# Jan 2 100 50
# Jan 3 99 51
#
# If we want to filter based on only the "pass" and "fail" columns, we generate the following
# formula in the 'duplicate' column: IF(B1=B0, IF(C1=C0,1,0), 0).
# This formula is recursively generated for each column in filter_columns, using the column
# position as determined by headers. The formula uses a more generalized form with
# 'INDIRECT(ADDRESS(<row>, <col>))'' instead of 'B1', where <row> is Row() and Row()-1, and col is
# determined by the column's position in headers
def generate_duplicate_formula(headers, filter_columns):
# No more columns, put a 1 in the IF statement true branch
if len(filter_columns) == 0:
return '1'
# Next column is found, generate the formula for duplicate checking, and remove from the list
# for recursion
for i in range(len(headers)):
if headers[i] == filter_columns[0]:
col = str(i + 1)
formula = "IF(INDIRECT(ADDRESS(ROW(), " + col + "))=INDIRECT(ADDRESS(ROW() - 1, " + \
col + "))," + generate_duplicate_formula(headers, filter_columns[1:]) + ",0)"
return formula
# Next column not found, remove from recursion but just return whatever the next one is
return generate_duplicate_formula(headers, filter_columns[1:])
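
# Worked example (added for illustration): with headers = ['date', 'Passed', 'Failed']
# and filter_columns = ['Passed', 'Failed'], the recursion above yields
#   IF(INDIRECT(ADDRESS(ROW(), 2))=INDIRECT(ADDRESS(ROW() - 1, 2)),IF(INDIRECT(ADDRESS(ROW(), 3))=INDIRECT(ADDRESS(ROW() - 1, 3)),1,0),0)
# i.e. a row is flagged as a duplicate (1) only when every filtered column matches
# the row directly above it. generate_duplicate_formula_helper() below prepends '='
# so Sheets evaluates the formula.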
# Helper function to start the recursive call to generate_duplicate_formula
def generate_duplicate_formula_helper(headers):
filter_columns = MAIN_RESULT_COLUMNS
formula = generate_duplicate_formula(headers, filter_columns)
if (formula == "1"):
return ""
else:
# Final result needs to be prepended with =
return "=" + formula
# Uses the list of headers and the info struct to come up with a list of values for each step
# from the latest builds.
def update_values(service, spreadsheet_id, headers, info):
data = []
for bot_name in info:
for step_name in info[bot_name]['step_names']:
sheet_name = format_sheet_name(bot_name, step_name)
values = []
# For each key in the list of headers, either add the corresponding value or add a blank
# value. It's necessary for the values to match the order of the headers
for key in headers[sheet_name]:
if key in info[bot_name] and key in REQUIRED_COLUMNS:
values.append(info[bot_name][key])
elif key in info[bot_name][step_name]:
values.append(info[bot_name][step_name][key])
elif key == "duplicate" and key in REQUIRED_COLUMNS:
values.append(generate_duplicate_formula_helper(headers[sheet_name]))
else:
values.append('')
LOGGER.info("Appending new rows to sheet '" + sheet_name + "'...")
try:
append_values(service, spreadsheet_id, sheet_name, values)
except Exception as error:
LOGGER.warning('%s\n' % str(error))
# Updates the given spreadsheet_id with the info struct passed in.
def update_spreadsheet(service, spreadsheet_id, info):
LOGGER.info('Opening spreadsheet...')
spreadsheet = get_spreadsheet(service, spreadsheet_id)
LOGGER.info('Parsing sheet names...')
sheet_names = get_sheet_names(info)
new_sheets = validate_sheets(spreadsheet, sheet_names)
if new_sheets:
LOGGER.info('Creating new sheets...')
create_sheets(service, spreadsheet_id, new_sheets)
LOGGER.info('Parsing sheet headers...')
headers = get_headers(service, spreadsheet_id, sheet_names)
update_headers(service, spreadsheet_id, headers, info)
update_filters(service, spreadsheet_id, headers, info, spreadsheet)
update_values(service, spreadsheet_id, headers, info)
#####################
# Main/helpers #
#####################
# Loads or creates credentials and connects to the Sheets API. Returns a Spreadsheets object with
# an open connection.
def get_sheets_service(auth_path):
credentials_path = auth_path + '/credentials.json'
token_path = auth_path + '/token.pickle'
creds = None
if not os.path.exists(auth_path):
LOGGER.info("Creating auth dir '" + auth_path + "'")
os.makedirs(auth_path)
if not os.path.exists(credentials_path):
raise Exception('Missing credentials.json.\n'
'Go to: https://developers.google.com/sheets/api/quickstart/python\n'
"Under Step 1, click 'ENABLE THE GOOGLE SHEETS API'\n"
"Click 'DOWNLOAD CLIENT CONFIGURATION'\n"
'Save to your auth_path (' + auth_path + ') as credentials.json')
if os.path.exists(token_path):
with open(token_path, 'rb') as token:
creds = pickle.load(token)
LOGGER.info('Loaded credentials from ' + token_path)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
LOGGER.info('Refreshing credentials...')
creds.refresh(Request())
else:
LOGGER.info('Could not find credentials. Requesting new credentials.')
flow = InstalledAppFlow.from_client_secrets_file(credentials_path, SCOPES)
creds = flow.run_local_server()
with open(token_path, 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
sheets = service.spreadsheets()
return sheets
# Parse the input to the script
def parse_args():
parser = argparse.ArgumentParser(os.path.basename(sys.argv[0]))
parser.add_argument(
'--auth_path',
default=HOME_DIR + '/.auth',
nargs='?',
help='path to directory containing authorization data '
'(credentials.json and token.pickle). '
'[default=<home>/.auth]')
parser.add_argument(
'--spreadsheet',
default='1uttk1z8lJ4ZsUY7wMdFauMzUxb048nh5l52zdrAznek',
nargs='?',
help='ID of the spreadsheet to write stats to. '
"[default='1uttk1z8lJ4ZsUY7wMdFauMzUxb048nh5l52zdrAznek']")
parser.add_argument(
'--verbosity',
default='INFO',
nargs='?',
help='Verbosity of output. Valid options are '
'[DEBUG, INFO, WARNING, ERROR]. '
'[default=INFO]')
return parser.parse_args()
# Set up the logging with the right verbosity and output.
def initialize_logging(verbosity):
handler = logging.StreamHandler()
formatter = logging.Formatter(fmt='%(levelname)s: %(message)s')
handler.setFormatter(formatter)
LOGGER.addHandler(handler)
if 'DEBUG' in verbosity:
LOGGER.setLevel(level=logging.DEBUG)
elif 'INFO' in verbosity:
LOGGER.setLevel(level=logging.INFO)
elif 'WARNING' in verbosity:
LOGGER.setLevel(level=logging.WARNING)
elif 'ERROR' in verbosity:
LOGGER.setLevel(level=logging.ERROR)
else:
LOGGER.setLevel(level=logging.INFO)
def main():
os.chdir(ROOT_DIR)
args = parse_args()
verbosity = args.verbosity.strip().upper()
initialize_logging(verbosity)
auth_path = args.auth_path.replace('\\', '/')
try:
service = get_sheets_service(auth_path)
except Exception as error:
LOGGER.error('%s\n' % str(error))
exit(1)
info = {}
LOGGER.info('Building info struct...')
for bot_name in BOT_NAMES:
LOGGER.info("Parsing bot '" + bot_name + "'...")
try:
info[bot_name] = get_bot_info(BOT_NAME_PREFIX + bot_name)
except Exception as error:
LOGGER.error('%s\n' % str(error))
LOGGER.info('Updating sheets...')
try:
update_spreadsheet(service, args.spreadsheet, info)
except Exception as error:
LOGGER.error('%s\n' % str(error))
quit(1)
LOGGER.info('Info was successfully parsed to sheet: https://docs.google.com/spreadsheets/d/' +
args.spreadsheet)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 4,829,267,775,993,492,000 | 40.213936 | 391 | 0.605671 | false | 3.636001 | true | false | false |
datalyze-solutions/pandas-qt | pandasqt/views/CSVDialogs.py | 1 | 23796 | # -*- coding: utf-8 -*-
import os
from encodings.aliases import aliases as _encodings
import pandas
from pandasqt.compat import Qt, QtCore, QtGui, Slot, Signal
from pandasqt.encoding import Detector
from pandasqt.models.DataFrameModel import DataFrameModel
from pandasqt.views.CustomDelegates import DtypeComboDelegate
from pandasqt.views._ui import icons_rc
from pandasqt.utils import fillNoneValues, convertTimestamps
class DelimiterValidator(QtGui.QRegExpValidator):
"""A Custom RegEx Validator.
    The validator checks if the input has a length of 1.
The input may contain any non-whitespace-character
as denoted by the RegEx term `\S`.
"""
def __init__(self, parent=None):
"""Constructs the object with the given parent.
Args:
            parent (QObject, optional): Causes the object to be owned
by `parent` instead of Qt. Defaults to `None`.
"""
super(DelimiterValidator, self).__init__(parent)
re = QtCore.QRegExp('\S{1}')
self.setRegExp(re)
class DelimiterSelectionWidget(QtGui.QGroupBox):
"""A custom widget with different text delimiter signs.
A user can choose between 3 predefined and one user defined
text delimiter characters. Default delimiters include `semicolon`,
`colon` and `tabulator`. The user defined delimiter may only have
a length of 1 and may not include any whitespace character.
Attributes:
delimiter (QtCore.pyqtSignal): This signal is emitted, whenever a
delimiter character is selected by the user.
semicolonRadioButton (QtGui.QRadioButton): A radio button to
select the `semicolon` character as delimiter.
commaRadioButton (QtGui.QRadioButton): A radio button to select
the `comma` character as delimiter.
tabRadioButton (QtGui.QRadioButton): A radio button to select
the `tabulator` character as delimiter.
otherRadioButton (QtGui.QRadioButton): A radio button to select
the given input text as delimiter.
otherSeparatorLineEdit (QtGui.QLineEdit): An input line to let the
user enter one character only, which may be used as delimiter.
"""
delimiter = Signal('QString')
def __init__(self, parent=None):
"""Constructs the object with the given parent.
Args:
            parent (QObject, optional): Causes the object to be owned
by `parent` instead of Qt. Defaults to `None`.
"""
super(DelimiterSelectionWidget, self).__init__(parent)
self.semicolonRadioButton = None
self.commaRadioButton = None
self.tabRadioButton = None
self.otherRadioButton = None
self.otherSeparatorLineEdit = None
self._initUI()
def _initUI(self):
"""Creates the inital layout with all subwidgets.
The layout is a `QHBoxLayout`. Each time a radio button is
selected or unselected, a slot
`DelimiterSelectionWidget._delimiter` is called.
Furthermore the `QLineEdit` widget has a custom regex validator
`DelimiterValidator` enabled.
"""
#layout = QtGui.QHBoxLayout(self)
self.semicolonRadioButton = QtGui.QRadioButton(u'Semicolon')
self.commaRadioButton = QtGui.QRadioButton(u'Comma')
self.tabRadioButton = QtGui.QRadioButton(u'Tab')
self.otherRadioButton = QtGui.QRadioButton(u'Other')
self.semicolonRadioButton.setChecked(True)
self.otherSeparatorLineEdit = QtGui.QLineEdit(self)
self.otherSeparatorLineEdit.setEnabled(False)
self.semicolonRadioButton.toggled.connect(self._delimiter)
self.commaRadioButton.toggled.connect(self._delimiter)
self.tabRadioButton.toggled.connect(self._delimiter)
self.otherRadioButton.toggled.connect(self._enableLine)
self.otherSeparatorLineEdit.textChanged.connect(lambda: self._delimiter(True))
self.otherSeparatorLineEdit.setValidator(DelimiterValidator(self))
currentLayout = self.layout()
# unset and delete the current layout in order to set a new one
if currentLayout is not None:
del currentLayout
layout = QtGui.QHBoxLayout()
layout.addWidget(self.semicolonRadioButton)
layout.addWidget(self.commaRadioButton)
layout.addWidget(self.tabRadioButton)
layout.addWidget(self.otherRadioButton)
layout.addWidget(self.otherSeparatorLineEdit)
self.setLayout(layout)
@Slot('QBool')
def _enableLine(self, toggled):
self.otherSeparatorLineEdit.setEnabled(toggled)
def currentSelected(self):
"""Returns the currently selected delimiter character.
Returns:
str: One of `,`, `;`, `\t`, `*other*`.
"""
if self.commaRadioButton.isChecked():
return ','
elif self.semicolonRadioButton.isChecked():
return ';'
elif self.tabRadioButton.isChecked():
return '\t'
elif self.otherRadioButton.isChecked():
return self.otherSeparatorLineEdit.text()
return
@Slot('QBool')
def _delimiter(self, checked):
if checked:
if self.commaRadioButton.isChecked():
self.delimiter.emit(',')
elif self.semicolonRadioButton.isChecked():
self.delimiter.emit(';')
elif self.tabRadioButton.isChecked():
self.delimiter.emit('\t')
elif self.otherRadioButton.isChecked():
ret = self.otherSeparatorLineEdit.text()
if len(ret) > 0:
self.delimiter.emit(ret)
def reset(self):
"""Resets this widget to its initial state.
"""
self.semicolonRadioButton.setChecked(True)
self.otherSeparatorLineEdit.setText('')
class CSVImportDialog(QtGui.QDialog):
"""A dialog to import any csv file into a pandas data frame.
This modal dialog enables the user to enter any path to a csv
file and parse this file with or without a header and with special
delimiter characters.
On a successful load, the data can be previewed and the column data
types may be edited by the user.
After all configuration is done, the dataframe and the underlying model
may be used by the main application.
Attributes:
load (QtCore.pyqtSignal): This signal is emitted, whenever the
dialog is successfully closed, e.g. when the ok button is
pressed. Returns DataFrameModel and path of chosen csv file.
"""
load = Signal('QAbstractItemModel', str)
def __init__(self, parent=None):
"""Constructs the object with the given parent.
Args:
            parent (QObject, optional): Causes the object to be owned
by `parent` instead of Qt. Defaults to `None`.
"""
super(CSVImportDialog, self).__init__(parent)
self._modal = True
self._windowTitle = u'Import CSV'
self._encodingKey = None
self._filename = None
self._delimiter = None
self._header = None
self._detector = Detector()
self._initUI()
def _initUI(self):
"""Initiates the user interface with a grid layout and several widgets.
"""
self.setModal(self._modal)
self.setWindowTitle(self._windowTitle)
layout = QtGui.QGridLayout()
self._filenameLabel = QtGui.QLabel(u'Choose File', self)
self._filenameLineEdit = QtGui.QLineEdit(self)
self._filenameLineEdit.textEdited.connect(self._updateFilename)
chooseFileButtonIcon = QtGui.QIcon(QtGui.QPixmap(':/icons/document-open.png'))
self._chooseFileAction = QtGui.QAction(self)
self._chooseFileAction.setIcon(chooseFileButtonIcon)
self._chooseFileAction.triggered.connect(self._openFile)
self._chooseFileButton = QtGui.QToolButton(self)
self._chooseFileButton.setDefaultAction(self._chooseFileAction)
layout.addWidget(self._filenameLabel, 0, 0)
layout.addWidget(self._filenameLineEdit, 0, 1, 1, 2)
layout.addWidget(self._chooseFileButton, 0, 3)
self._encodingLabel = QtGui.QLabel(u'File Encoding', self)
encoding_names = map(lambda x: x.upper(), sorted(list(set(_encodings.viewvalues()))))
self._encodingComboBox = QtGui.QComboBox(self)
self._encodingComboBox.addItems(encoding_names)
self._encodingComboBox.activated.connect(self._updateEncoding)
layout.addWidget(self._encodingLabel, 1, 0)
layout.addWidget(self._encodingComboBox, 1, 1, 1, 1)
self._hasHeaderLabel = QtGui.QLabel(u'Header Available?', self)
self._headerCheckBox = QtGui.QCheckBox(self)
self._headerCheckBox.toggled.connect(self._updateHeader)
layout.addWidget(self._hasHeaderLabel, 2, 0)
layout.addWidget(self._headerCheckBox, 2, 1)
self._delimiterLabel = QtGui.QLabel(u'Column Delimiter', self)
self._delimiterBox = DelimiterSelectionWidget(self)
self._delimiter = self._delimiterBox.currentSelected()
self._delimiterBox.delimiter.connect(self._updateDelimiter)
layout.addWidget(self._delimiterLabel, 3, 0)
layout.addWidget(self._delimiterBox, 3, 1, 1, 3)
self._tabWidget = QtGui.QTabWidget(self)
self._previewTableView = QtGui.QTableView(self)
self._datatypeTableView = QtGui.QTableView(self)
self._tabWidget.addTab(self._previewTableView, u'Preview')
self._tabWidget.addTab(self._datatypeTableView, u'Change Column Types')
layout.addWidget(self._tabWidget, 4, 0, 3, 4)
self._datatypeTableView.horizontalHeader().setDefaultSectionSize(200)
self._datatypeTableView.setItemDelegateForColumn(1, DtypeComboDelegate(self._datatypeTableView))
self._loadButton = QtGui.QPushButton(u'Load Data', self)
#self.loadButton.setAutoDefault(False)
self._cancelButton = QtGui.QPushButton(u'Cancel', self)
# self.cancelButton.setDefault(False)
# self.cancelButton.setAutoDefault(True)
self._buttonBox = QtGui.QDialogButtonBox(self)
self._buttonBox.addButton(self._loadButton, QtGui.QDialogButtonBox.AcceptRole)
self._buttonBox.addButton(self._cancelButton, QtGui.QDialogButtonBox.RejectRole)
self._buttonBox.accepted.connect(self.accepted)
self._buttonBox.rejected.connect(self.rejected)
layout.addWidget(self._buttonBox, 9, 2, 1, 2)
self._loadButton.setDefault(False)
self._filenameLineEdit.setFocus()
self._statusBar = QtGui.QStatusBar(self)
self._statusBar.setSizeGripEnabled(False)
layout.addWidget(self._statusBar, 8, 0, 1, 4)
self.setLayout(layout)
@Slot('QString')
def updateStatusBar(self, message):
"""Updates the status bar widget of this dialog with the given message.
This method is also a `SLOT()`.
The message will be shown for only 5 seconds.
Args:
message (QString): The new message which will be displayed.
"""
self._statusBar.showMessage(message, 5000)
@Slot()
def _openFile(self):
"""Opens a file dialog and sets a value for the QLineEdit widget.
This method is also a `SLOT`.
"""
ret = QtGui.QFileDialog.getOpenFileName(self, self.tr(u'open file'), filter='Comma Separated Values (*.csv)')
if ret:
self._filenameLineEdit.setText(ret)
self._updateFilename()
@Slot('QBool')
def _updateHeader(self, toggled):
"""Changes the internal flag, whether the csv file contains a header or not.
This method is also a `SLOT`.
In addition, after toggling the corresponding checkbox, the
`_previewFile` method will be called.
Args:
toggled (boolean): A flag indicating the status of the checkbox.
The flag will be used to update an internal variable.
"""
self._header = 0 if toggled else None
self._previewFile()
@Slot()
def _updateFilename(self):
"""Calls several methods after the filename changed.
This method is also a `SLOT`.
It checks the encoding of the changed filename and generates a
preview of the data.
"""
self._filename = self._filenameLineEdit.text()
self._guessEncoding(self._filename)
self._previewFile()
def _guessEncoding(self, path):
"""Opens a file from the given `path` and checks the file encoding.
        The file must exist on the file system and end with the extension
        `.csv`. The file is read line by line until the encoding can be
        guessed.
        On a successful identification, the widgets of this dialog will be
        updated.
Args:
path (string): Path to a csv file on the file system.
"""
if os.path.exists(path) and path.lower().endswith('csv'):
encoding = self._detector.detect(path)
if encoding is not None:
if encoding.startswith('utf'):
encoding = encoding.replace('-', '')
encoding = encoding.replace('-','_')
viewValue = _encodings.get(encoding)
self._encodingKey = encoding
index = self._encodingComboBox.findText(viewValue.upper())
self._encodingComboBox.setCurrentIndex(index)
@Slot('int')
def _updateEncoding(self, index):
"""Changes the value of the encoding combo box to the value of given index.
This method is also a `SLOT`.
After the encoding is changed, the file will be reloaded and previewed.
Args:
            index (int): A valid index of the combo box.
"""
encoding = self._encodingComboBox.itemText(index)
encoding = encoding.lower()
self._encodingKey = _calculateEncodingKey(encoding)
self._previewFile()
@Slot('QString')
def _updateDelimiter(self, delimiter):
"""Changes the value of the delimiter for the csv file.
This method is also a `SLOT`.
Args:
delimiter (string): The new delimiter.
"""
self._delimiter = delimiter
self._previewFile()
def _previewFile(self):
"""Updates the preview widgets with new models for both tab panes.
"""
dataFrame = self._loadCSVDataFrame()
dataFrameModel = DataFrameModel(dataFrame)
dataFrameModel.enableEditing(True)
self._previewTableView.setModel(dataFrameModel)
columnModel = dataFrameModel.columnDtypeModel()
columnModel.changeFailed.connect(self.updateStatusBar)
self._datatypeTableView.setModel(columnModel)
def _loadCSVDataFrame(self):
"""Loads the given csv file with pandas and generate a new dataframe.
The file will be loaded with the configured encoding, delimiter
and header.git
If any execptions will occur, an empty Dataframe is generated
and a message will appear in the status bar.
Returns:
pandas.DataFrame: A dataframe containing all the available
information of the csv file.
"""
if self._filename and os.path.exists(self._filename) and self._filename.endswith('.csv'):
# default fallback if no encoding was found/selected
            encoding = self._encodingKey or 'utf8'
try:
dataFrame = pandas.read_csv(self._filename,
sep=self._delimiter, encoding=encoding,
header=self._header)
dataFrame = dataFrame.apply(fillNoneValues)
dataFrame = dataFrame.apply(convertTimestamps)
except Exception, err:
self.updateStatusBar(str(err))
return pandas.DataFrame()
self.updateStatusBar('Preview generated.')
return dataFrame
self.updateStatusBar('File does not exists or does not end with .csv')
return pandas.DataFrame()
def _resetWidgets(self):
"""Resets all widgets of this dialog to its inital state.
"""
self._filenameLineEdit.setText('')
self._encodingComboBox.setCurrentIndex(0)
self._delimiterBox.reset()
self._headerCheckBox.setChecked(False)
self._statusBar.showMessage('')
self._previewTableView.setModel(None)
self._datatypeTableView.setModel(None)
@Slot()
def accepted(self):
"""Successfully close the widget and return the loaded model.
This method is also a `SLOT`.
The dialog will be closed, when the `ok` button is pressed. If
a `DataFrame` was loaded, it will be emitted by the signal `load`.
"""
model = self._previewTableView.model()
if model is not None:
df = model.dataFrame().copy()
dfModel = DataFrameModel(df)
self.load.emit(dfModel, self._filename)
self._resetWidgets()
self.accept()
@Slot()
def rejected(self):
"""Close the widget and reset its inital state.
This method is also a `SLOT`.
The dialog will be closed and all changes reverted, when the
`cancel` button is pressed.
"""
self._resetWidgets()
self.reject()
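# Minimal usage sketch (illustrative only; the slot and widget names below are
# hypothetical, not part of pandas-qt):
#
#   def onCsvLoaded(model, filename):
#       tableView.setModel(model)
#
#   importDialog = CSVImportDialog(parent=mainWindow)
#   importDialog.load.connect(onCsvLoaded)
#   importDialog.show()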
class CSVExportDialog(QtGui.QDialog):
"""An widget to serialize a `DataFrameModel` to a `CSV-File`.
"""
exported = Signal('QBool')
def __init__(self, model=None, parent=None):
super(CSVExportDialog, self).__init__(parent)
self._model = model
self._modal = True
self._windowTitle = u'Export to CSV'
self._idx = -1
self._initUI()
def _initUI(self):
"""Initiates the user interface with a grid layout and several widgets.
"""
self.setModal(self._modal)
self.setWindowTitle(self._windowTitle)
layout = QtGui.QGridLayout()
self._filenameLabel = QtGui.QLabel(u'Output File', self)
self._filenameLineEdit = QtGui.QLineEdit(self)
chooseFileButtonIcon = QtGui.QIcon(QtGui.QPixmap(':/icons/document-save-as.png'))
self._chooseFileAction = QtGui.QAction(self)
self._chooseFileAction.setIcon(chooseFileButtonIcon)
self._chooseFileAction.triggered.connect(self._createFile)
self._chooseFileButton = QtGui.QToolButton(self)
self._chooseFileButton.setDefaultAction(self._chooseFileAction)
layout.addWidget(self._filenameLabel, 0, 0)
layout.addWidget(self._filenameLineEdit, 0, 1, 1, 2)
layout.addWidget(self._chooseFileButton, 0, 3)
self._encodingLabel = QtGui.QLabel(u'File Encoding', self)
encoding_names = map(lambda x: x.upper(), sorted(list(set(_encodings.viewvalues()))))
self._encodingComboBox = QtGui.QComboBox(self)
self._encodingComboBox.addItems(encoding_names)
self._idx = encoding_names.index('UTF_8')
self._encodingComboBox.setCurrentIndex(self._idx)
#self._encodingComboBox.activated.connect(self._updateEncoding)
layout.addWidget(self._encodingLabel, 1, 0)
layout.addWidget(self._encodingComboBox, 1, 1, 1, 1)
self._hasHeaderLabel = QtGui.QLabel(u'Header Available?', self)
self._headerCheckBox = QtGui.QCheckBox(self)
#self._headerCheckBox.toggled.connect(self._updateHeader)
layout.addWidget(self._hasHeaderLabel, 2, 0)
layout.addWidget(self._headerCheckBox, 2, 1)
self._delimiterLabel = QtGui.QLabel(u'Column Delimiter', self)
self._delimiterBox = DelimiterSelectionWidget(self)
layout.addWidget(self._delimiterLabel, 3, 0)
layout.addWidget(self._delimiterBox, 3, 1, 1, 3)
self._exportButton = QtGui.QPushButton(u'Export Data', self)
self._cancelButton = QtGui.QPushButton(u'Cancel', self)
self._buttonBox = QtGui.QDialogButtonBox(self)
self._buttonBox.addButton(self._exportButton, QtGui.QDialogButtonBox.AcceptRole)
self._buttonBox.addButton(self._cancelButton, QtGui.QDialogButtonBox.RejectRole)
self._buttonBox.accepted.connect(self.accepted)
self._buttonBox.rejected.connect(self.rejected)
layout.addWidget(self._buttonBox, 5, 2, 1, 2)
self._exportButton.setDefault(False)
self._filenameLineEdit.setFocus()
self._statusBar = QtGui.QStatusBar(self)
self._statusBar.setSizeGripEnabled(False)
layout.addWidget(self._statusBar, 4, 0, 1, 4)
self.setLayout(layout)
def setExportModel(self, model):
if not isinstance(model, DataFrameModel):
return False
self._model = model
return True
@Slot()
def _createFile(self):
ret = QtGui.QFileDialog.getSaveFileName(self, 'Save File', filter='Comma Separated Value (*.csv)')
self._filenameLineEdit.setText(ret)
def _saveModel(self):
delimiter = self._delimiterBox.currentSelected()
header = self._headerCheckBox.isChecked() # column labels
filename = self._filenameLineEdit.text()
index = False # row labels
encodingIndex = self._encodingComboBox.currentIndex()
encoding = self._encodingComboBox.itemText(encodingIndex)
encoding = _calculateEncodingKey(encoding.lower())
try:
dataFrame = self._model.dataFrame()
except AttributeError, err:
raise AttributeError('No data loaded to export.')
else:
try:
dataFrame.to_csv(filename, encoding=encoding, header=header, index=index, sep=delimiter)
except IOError, err:
raise IOError('No filename given')
except UnicodeError, err:
raise UnicodeError('Could not encode all data. Choose a different encoding')
except Exception:
raise
def _resetWidgets(self):
"""Resets all widgets of this dialog to its inital state.
"""
self._filenameLineEdit.setText('')
self._encodingComboBox.setCurrentIndex(self._idx)
self._delimiterBox.reset()
self._headerCheckBox.setChecked(False)
self._statusBar.showMessage('')
@Slot()
def accepted(self):
"""Successfully close the widget and emit an export signal.
This method is also a `SLOT`.
The dialog will be closed, when the `Export Data` button is
pressed. If errors occur during the export, the status bar
will show the error message and the dialog will not be closed.
"""
try:
self._saveModel()
except Exception, err:
self._statusBar.showMessage(str(err))
else:
self._resetWidgets()
self.exported.emit(True)
self.accept()
@Slot()
def rejected(self):
"""Close the widget and reset its inital state.
This method is also a `SLOT`.
The dialog will be closed and all changes reverted, when the
`cancel` button is pressed.
"""
self._resetWidgets()
self.exported.emit(False)
self.reject()
def _calculateEncodingKey(comparator):
"""Gets the first key of all available encodings where the corresponding
value matches the comparator.
Args:
comparator (string): A view name for an encoding.
Returns:
str: A key for a specific encoding used by python.
"""
encodingName = None
for k, v in _encodings.viewitems():
if v == comparator:
encodingName = k
break
return encodingName | mit | -5,449,141,558,697,622,000 | 35.001513 | 117 | 0.643512 | false | 4.383935 | false | false | false |
Diviyan-Kalainathan/causal-humans | Clustering/performance_evaluation.py | 1 | 3279 | '''
Computing the misclassification error distance between two k-means clusterings
according to Marina Meila, "The Uniqueness of a Good Optimum for K-Means", ICML 2006
Author : Diviyan Kalainathan
Date : 20/06/2016
'''
import csv,numpy,itertools
from sklearn import metrics
def Clustering_performance_evaluation(mode, folder_name, run1, run2, num_clusters, num_init):
"""
    :param mode: selects which metric is to be used (1: misclassification
        error distance, 2: adjusted Rand index, 3: V-measure)
    :param folder_name: folder of the runs (string)
    :param run1: number of the first run (int)
    :param run2: number of the second run (int)
    :param num_clusters: number of clusters used in the runs (int)
    :param num_init: number of k-means initialisations used in the runs (int)
    :return: distance/similarity score between the two runs (float); 0 if the data is invalid
"""
numpy.set_printoptions(threshold='nan')
print('-'+str(num_clusters)+'---performance evaluation between runs : ' + str(run1) + ' ,' + str(run2))
valid_data= True
#Checking if the data is valid by loading & testing the shape of it
try:
data_1=numpy.loadtxt('output/'+folder_name+'/cluster_predictions_c'+ str(num_clusters)
+ '_n'+ str(num_init) +'_r'+ str(run1)+'.csv',delimiter=';')
data_2=numpy.loadtxt('output/'+folder_name+'/cluster_predictions_c'+ str(num_clusters)
+ '_n'+ str(num_init) +'_r'+ str(run2)+'.csv',delimiter=';')
if data_1.shape != data_2.shape:
valid_data=False
except IOError as e:
print "I/O error({0}): {1}".format(e.errno, e.strerror)
valid_data=False
if valid_data:
n_samples=data_1.shape[0]
data_1 = numpy.asarray(sorted(data_1, key=lambda x: x[1]))
data_2 = numpy.asarray(sorted(data_2, key=lambda x: x[1]))
if mode==1:
#Distance defined by Marina Meila : k! complexity
clustering_1=numpy.zeros((n_samples,num_clusters))
clustering_2=numpy.zeros((n_samples,num_clusters))
for x in range(0,n_samples):
clustering_1[x,data_1[x,0]]+=1
clustering_2[x,data_2[x,0]]+=1
'''for y in range(0,num_clusters):
try:
clustering_1[:,y]*=1/numpy.sqrt(numpy.sum(clustering_1[:,y]))
except ZeroDivisionError:
clustering_1[:,y]=0
try:
clustering_2[:,y]*=1/numpy.sqrt(numpy.sum(clustering_2[:,y]))
except ZeroDivisionError:
clustering_2[:,y]=0
''' # No normalisation needed
confusion_matrix=numpy.dot(numpy.transpose(clustering_1),clustering_2)
max_confusion=0
result = []
for perm in itertools.permutations(range(num_clusters)):
confusion=0
for i in range(0, num_clusters):
confusion += confusion_matrix[i, perm[i]]
if max_confusion<confusion:
max_confusion=confusion
distance=(max_confusion/n_samples)
return distance
elif mode==2:
            # Adjusted Rand index
distance=metrics.adjusted_rand_score(data_1[:,0],data_2[:,0])
return distance
elif mode==3:
            # V-measure
distance=metrics.v_measure_score(data_1[:,0],data_2[:,0])
return distance
return 0 | mit | 552,402,320,679,528,060 | 33.526316 | 107 | 0.568771 | false | 3.747429 | false | false | false |
genialis/resolwe | resolwe/flow/executors/docker/run.py | 1 | 19322 | """.. Ignore pydocstyle D400.
.. autoclass:: resolwe.flow.executors.docker.run.FlowExecutor
:members:
"""
# pylint: disable=logging-format-interpolation
import asyncio
import copy
import functools
import json
import logging
import os
import random
import string
import tempfile
import time
from contextlib import suppress
from pathlib import Path
from typing import Any, Dict, Iterable, Tuple, Type
import docker
from .. import constants
from ..connectors import connectors
from ..connectors.baseconnector import BaseStorageConnector
from ..global_settings import LOCATION_SUBPATH, PROCESS_META, SETTINGS
from ..local.run import FlowExecutor as LocalFlowExecutor
from ..protocol import ExecutorFiles
from .seccomp import SECCOMP_POLICY
# Limits of containers' access to memory. We set the limit to ensure
# processes are stable and do not get killed by OOM signal.
DOCKER_MEMORY_HARD_LIMIT_BUFFER = 100
DOCKER_MEMORY_SWAP_RATIO = 2
DOCKER_MEMORY_SWAPPINESS = 1
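# Worked example (numbers are assumed, not taken from any Resolwe deployment): a
# process requesting 4096 MB of memory is run with a hard limit of
# 4096 + 100 = 4196 MB, a memory+swap limit of 4196 * 2 = 8392 MB and swappiness 1,
# so the kernel keeps the process in RAM for as long as possible.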
logger = logging.getLogger(__name__)
def _random_string(size: int = 5, chars=string.ascii_lowercase + string.digits):
"""Generate and return random string."""
return "".join(random.choice(chars) for x in range(size))
def retry(
max_retries: int = 3,
retry_exceptions: Tuple[Type[Exception], ...] = (
docker.errors.ImageNotFound,
docker.errors.APIError,
),
min_sleep: int = 1,
max_sleep: int = 10,
):
"""Try to call decorated method max_retries times before giving up.
The calls are retried when function raises exception in retry_exceptions.
:param max_retries: maximal number of calls before giving up.
:param retry_exceptions: retry call if one of these exceptions is raised.
:param min_sleep: minimal sleep between calls (in seconds).
:param max_sleep: maximal sleep between calls (in seconds).
:returns: return value of the called method.
    :raises: the last exception raised by the method call if none of the
        retries were successful.
"""
def decorator_retry(func):
@functools.wraps(func)
def wrapper_retry(*args, **kwargs):
last_error: Exception = Exception("Retry failed")
sleep: int = 0
for retry in range(max_retries):
try:
time.sleep(sleep)
return func(*args, **kwargs)
except retry_exceptions as err:
sleep = min(max_sleep, min_sleep * (2 ** retry))
last_error = err
raise last_error
return wrapper_retry
return decorator_retry
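# Illustrative sketch of how the decorator is applied (the real usage is the
# transfer_image helper defined inside FlowExecutor.start below):
#
#   @retry(max_retries=5)
#   def transfer_image(client, image_name):
#       client.images.pull(image_name)
#
# After a failed attempt the next call is delayed by
# min(max_sleep, min_sleep * 2 ** attempt) seconds, and the last exception is
# re-raised once all retries are exhausted.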
class FlowExecutor(LocalFlowExecutor):
"""Docker executor."""
name = "docker"
def __init__(self, *args, **kwargs):
"""Initialize attributes."""
super().__init__(*args, **kwargs)
container_name_prefix = SETTINGS.get("FLOW_EXECUTOR", {}).get(
"CONTAINER_NAME_PREFIX", "resolwe"
)
self.container_name = self._generate_container_name(container_name_prefix)
self.tools_volumes = []
self.command = SETTINGS.get("FLOW_DOCKER_COMMAND", "docker")
self.tmpdir = tempfile.TemporaryDirectory()
# Setup Docker volumes.
def _new_volume(
self, config: Dict[str, Any], mount_path: Path, read_only: bool = True
) -> Tuple[str, Dict[str, str]]:
"""Generate a new volume entry.
:param config: must include 'path' and may include 'selinux_label'.
        :param mount_path: mount point for the volume.
"""
options = set()
if "selinux_label" in config:
options.add(config["selinux_label"])
options.add("ro" if read_only else "rw")
return (
os.fspath(config["path"]),
{"bind": os.fspath(mount_path), "mode": ",".join(options)},
)
def _get_upload_dir(self) -> str:
"""Get upload path.
        :returns: the path of the first mountable connector for storage
'upload'.
:raises RuntimeError: if no applicable connector is found.
"""
for connector in connectors.for_storage("upload"):
if connector.mountable:
return f"/upload_{connector.name}"
raise RuntimeError("No mountable upload connector is defined.")
def _get_mountable_connectors(self) -> Iterable[Tuple[str, BaseStorageConnector]]:
"""Iterate through all the storages and find mountable connectors.
:returns: list of tuples (storage_name, connector).
"""
return (
(storage_name, connector)
for storage_name in SETTINGS["FLOW_STORAGE"]
for connector in connectors.for_storage(storage_name)
if connector.mountable
)
def _get_volumes(self, subpaths=False) -> Dict[str, Tuple[Dict, Path]]:
"""Get writeable volumes from settings.
        :attr subpaths: when True the location subpath is added to the volume
path.
:returns: mapping between volume name and tuple (config, mount_point).
"""
results = dict()
volume_mountpoint = {
constants.PROCESSING_VOLUME_NAME: constants.PROCESSING_VOLUME,
constants.INPUTS_VOLUME_NAME: constants.INPUTS_VOLUME,
constants.SECRETS_VOLUME_NAME: constants.SECRETS_VOLUME,
constants.SOCKETS_VOLUME_NAME: constants.SOCKETS_VOLUME,
}
for volume_name, volume in SETTINGS["FLOW_VOLUMES"].items():
if "read_only" not in volume["config"]:
if volume["type"] == "host_path":
config = copy.deepcopy(volume["config"])
if subpaths:
config["path"] = Path(config["path"]) / LOCATION_SUBPATH
results[volume_name] = (config, volume_mountpoint[volume_name])
elif volume["type"] == "temporary_directory":
config = copy.deepcopy(volume["config"])
volume_path = Path(self.tmpdir.name) / volume_name
mode = config.get("mode", 0o700)
volume_path.mkdir(exist_ok=True, mode=mode)
config["path"] = volume_path
results[volume_name] = (config, volume_mountpoint[volume_name])
else:
raise RuntimeError(
"Only 'host_type' and 'temporary_directory' volumes are "
" supported by Docker executor,"
f"requested '{volume['config']['type']}' for {volume_name}."
)
assert (
constants.PROCESSING_VOLUME_NAME in results
), "Processing volume must be defined."
return results
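    # Illustrative FLOW_VOLUMES entry (assumed shape, inferred from the handling
    # above rather than taken from the Resolwe settings documentation):
    #   "processing": {
    #       "type": "host_path",
    #       "config": {"path": "/data/processing", "selinux_label": "z"},
    #   }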
def _init_volumes(self) -> Dict:
"""Prepare volumes for init container."""
mount_points = [
(config, mount_point, False)
for config, mount_point in self._get_volumes().values()
]
mount_points += [
(connector.config, Path("/") / f"{storage_name}_{connector.name}", False)
for storage_name, connector in self._get_mountable_connectors()
]
return dict([self._new_volume(*mount_point) for mount_point in mount_points])
def _communicator_volumes(self) -> Dict[str, Dict]:
"""Prepare volumes for communicator container."""
mount_points = [
(connector.config, Path("/") / f"{storage_name}_{connector.name}", False)
for storage_name, connector in self._get_mountable_connectors()
]
volumes = self._get_volumes()
mount_points += [
(*volumes[constants.SECRETS_VOLUME_NAME], False),
(*volumes[constants.SOCKETS_VOLUME_NAME], False),
]
return dict([self._new_volume(*mount_point) for mount_point in mount_points])
def _processing_volumes(self) -> Dict:
"""Prepare volumes for processing container."""
# Expose processing and (possibly) input volume RW.
mount_points = [
(config, mount_point, False)
for config, mount_point in self._get_volumes(True).values()
]
        # Expose mountable connectors ('upload' RW, others RO).
mount_points += [
(
connector.config,
Path("/") / f"{storage_name}_{connector.name}",
storage_name != "upload",
)
for storage_name, connector in self._get_mountable_connectors()
]
mount_points += [
(
{"path": self.runtime_dir / "executors" / ExecutorFiles.SOCKET_UTILS},
Path("/socket_utils.py"),
False,
),
(
{
"path": self.runtime_dir
/ "executors"
/ ExecutorFiles.STARTUP_PROCESSING_SCRIPT
},
Path("/start.py"),
False,
),
(
{"path": self.runtime_dir / "executors" / ExecutorFiles.CONSTANTS},
Path("/constants.py"),
True,
),
]
# Generate dummy passwd and create mappings for it. This is required because some tools
        # inside the container may try to look up the given UID/GID and will crash if they don't
# exist. So we create minimal user/group files.
temporary_directory = Path(self.tmpdir.name)
passwd_path = temporary_directory / "passwd"
group_path = temporary_directory / "group"
with passwd_path.open("wt") as passwd_file:
passwd_file.write(
"root:x:0:0:root:/root:/bin/bash\n"
+ f"user:x:{os.getuid()}:{os.getgid()}:user:{os.fspath(constants.PROCESSING_VOLUME)}:/bin/bash\n"
)
with group_path.open("wt") as group_file:
group_file.write("root:x:0:\n" + f"user:x:{os.getgid()}:user\n")
mount_points += [
({"path": passwd_path}, Path("/etc/passwd"), True),
({"path": group_path}, Path("/etc/group"), True),
]
# Create mount points for tools.
mount_points += [
({"path": Path(tool)}, Path("/usr/local/bin/resolwe") / str(index), True)
for index, tool in enumerate(self.get_tools_paths())
]
# Create mount_points for runtime (all read-only).
mount_points += [
({"path": self.runtime_dir / src}, dst, True)
for src, dst in SETTINGS.get("RUNTIME_VOLUME_MAPS", {}).items()
]
return dict([self._new_volume(*mount_point) for mount_point in mount_points])
async def start(self):
"""Start process execution."""
memory = (
self.process["resource_limits"]["memory"] + DOCKER_MEMORY_HARD_LIMIT_BUFFER
)
memory_swap = int(memory * DOCKER_MEMORY_SWAP_RATIO)
network = "bridge"
if "network" in self.resources:
# Configure Docker network mode for the container (if specified).
# By default, current Docker versions use the 'bridge' mode which
# creates a network stack on the default Docker bridge.
network = SETTINGS.get("FLOW_EXECUTOR", {}).get("NETWORK", "")
security_options = []
if not SETTINGS.get("FLOW_DOCKER_DISABLE_SECCOMP", False):
security_options.append(f"seccomp={json.dumps(SECCOMP_POLICY)}")
processing_image = self.requirements.get(
"image",
SETTINGS.get(
"FLOW_DOCKER_DEFAULT_PROCESSING_CONTAINER_IMAGE",
"public.ecr.aws/s4q6j6e8/resolwe/base:ubuntu-20.04",
),
)
communicator_image = SETTINGS.get(
"FLOW_DOCKER_COMMUNICATOR_IMAGE",
"public.ecr.aws/s4q6j6e8/resolwe/com:latest",
)
ulimits = []
if (
self.process["scheduling_class"]
== PROCESS_META["SCHEDULING_CLASS_INTERACTIVE"]
):
# TODO: This is not very good as each child gets the same limit.
# Note: Ulimit does not work as expected on multithreaded processes
# Limit is increased by factor 1.2 for processes with 2-8 threads.
# TODO: This should be changed for processes with over 8 threads.
cpu_time_interactive = SETTINGS.get(
"FLOW_PROCESS_RESOURCE_DEFAULTS", {}
).get("cpu_time_interactive", 30)
cpu_limit = int(cpu_time_interactive * 1.2)
ulimits.append(
docker.types.Ulimit(name="cpu", soft=cpu_limit, hard=cpu_limit)
)
environment = {
"LISTENER_SERVICE_HOST": self.listener_connection[0],
"LISTENER_SERVICE_PORT": self.listener_connection[1],
"LISTENER_PROTOCOL": self.listener_connection[2],
"DATA_ID": self.data_id,
"RUNNING_IN_CONTAINER": 1,
"RUNNING_IN_DOCKER": 1,
"GENIALIS_UID": os.getuid(),
"GENIALIS_GID": os.getgid(),
"FLOW_MANAGER_KEEP_DATA": SETTINGS.get("FLOW_MANAGER_KEEP_DATA", False),
"DESCRIPTOR_CHUNK_SIZE": 100,
"MOUNTED_CONNECTORS": ",".join(
connector.name
for connector in connectors.values()
if connector.mountable
),
}
with suppress(RuntimeError):
environment["UPLOAD_DIR"] = self._get_upload_dir()
autoremove = SETTINGS.get("FLOW_DOCKER_AUTOREMOVE", False)
        # Add a random string between the container name and the 'init' suffix.
        # Since the check for an existing stdout file has been moved inside the
        # init container, we should use different container names in case one
        # init container is still running when another one is fired (or when
        # containers are not purged automatically): otherwise the executor will
        # fail to start the init container due to a name clash.
init_container_name = f"{self.container_name}-{_random_string()}-init"
init_arguments = {
"auto_remove": autoremove,
"volumes": self._init_volumes(),
"command": ["/usr/local/bin/python3", "-m", "executors.init_container"],
"image": communicator_image,
"name": init_container_name,
"detach": True,
"cpu_quota": 1000000,
"mem_limit": "4000m",
"mem_reservation": "200m",
"network_mode": network,
"user": f"{os.getuid()}:{os.getgid()}",
"environment": environment,
}
communication_arguments = {
"auto_remove": autoremove,
"volumes": self._communicator_volumes(),
"command": ["/usr/local/bin/python", "/startup.py"],
"image": communicator_image,
"name": f"{self.container_name}-communicator",
"detach": True,
"cpu_quota": 100000,
"mem_limit": "4000m",
"mem_reservation": "200m",
"network_mode": network,
"cap_drop": ["all"],
"security_opt": security_options,
"user": f"{os.getuid()}:{os.getgid()}",
"environment": environment,
}
processing_arguments = {
"auto_remove": autoremove,
"volumes": self._processing_volumes(),
"command": ["python3", "/start.py"],
"image": processing_image,
"network_mode": f"container:{self.container_name}-communicator",
"working_dir": os.fspath(constants.PROCESSING_VOLUME),
"detach": True,
"cpu_quota": self.process["resource_limits"]["cores"] * (10 ** 6),
"mem_limit": f"{memory}m",
"mem_reservation": f"{self.process['resource_limits']['memory']}m",
"mem_swappiness": DOCKER_MEMORY_SWAPPINESS,
"memswap_limit": f"{memory_swap}m",
"name": self.container_name,
"cap_drop": ["all"],
"security_opt": security_options,
"user": f"{os.getuid()}:{os.getgid()}",
"ulimits": ulimits,
"environment": environment,
}
@retry(max_retries=5)
def transfer_image(client, image_name):
"""Transfer missing image, retry 5 times."""
client.images.pull(image_name)
client = docker.from_env()
# Pull all the images.
try:
try:
logger.debug("Pulling processing image %s.", processing_image)
client.images.get(processing_image)
except docker.errors.ImageNotFound:
transfer_image(client, processing_image)
try:
logger.debug("Pulling communicator image %s.", communicator_image)
client.images.get(communicator_image)
except docker.errors.ImageNotFound:
transfer_image(client, communicator_image)
except docker.errors.APIError:
logger.exception("Docker API error")
raise RuntimeError("Docker API error")
loop = asyncio.get_event_loop()
start_time = time.time()
try:
init_container = client.containers.run(**init_arguments)
except docker.errors.APIError as error:
await self.communicator.finish(
{"error": f"Error starting init container: {error}"}
)
raise
init_container_status = await loop.run_in_executor(None, init_container.wait)
# Return code is as follows:
        # - 0: no error occurred, continue processing.
# - 1: error running init container, abort processing and log error.
# - 2: data exists in the processing volume, abort processing.
init_rc = init_container_status["StatusCode"]
if init_rc != 0:
logger.error("Init container returned %s instead of 0.", init_rc)
# Do not set error on data objects where previous data exists.
if init_rc == 1:
await self.communicator.finish(
{"error": f"Init container returned {init_rc} instead of 0."}
)
return
try:
communication_container = client.containers.run(**communication_arguments)
except docker.errors.APIError as error:
await self.communicator.finish(
{"error": f"Error starting communication container: {error}"}
)
raise
try:
processing_container = client.containers.run(**processing_arguments)
except docker.errors.APIError as e:
await self.communicator.finish(
{"error": f"Error starting processing container: {e}"}
)
with suppress(docker.errors.APIError):
communication_container.stop(timeout=1)
raise
end_time = time.time()
logger.info(
"It took {:.2f}s for Docker containers to start".format(
end_time - start_time
)
)
with suppress(docker.errors.NotFound):
await loop.run_in_executor(None, communication_container.wait)
with suppress(docker.errors.NotFound):
await loop.run_in_executor(None, processing_container.wait)
| apache-2.0 | 6,463,769,656,858,724,000 | 38.594262 | 113 | 0.572663 | false | 4.253137 | true | false | false |
Digital-Preservation-Finland/dpres-ipt | ipt/scripts/check_xml_schematron_features.py | 1 | 1714 | #!/usr/bin/python
# -*- encoding:utf-8 -*-
# vim:ft=python
"""Validate XML file using Schematron."""
from __future__ import print_function, unicode_literals
import os
import sys
import optparse
from file_scraper.schematron.schematron_scraper import SchematronScraper
from ipt.utils import concat
from ipt.six_utils import ensure_text
def main(arguments=None):
"""Main loop"""
usage = "usage: %prog [options] xml-file-path"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-s", "--schemapath", dest="schemapath",
help="Path to schematron schemas",
metavar="PATH")
(options, args) = parser.parse_args(arguments)
if len(args) != 1:
parser.error("Must give a path to an XML file as argument")
if options.schemapath is None:
parser.error("The -s switch is required")
filename = args[0]
if os.path.isdir(filename):
filename = os.path.join(filename, 'mets.xml')
scraper = SchematronScraper(
filename, mimetype="text/xml",
params={"schematron": options.schemapath})
scraper.scrape_file()
message_string = ensure_text(concat(scraper.messages()).strip())
error_string = ensure_text(concat(scraper.errors()).strip())
if message_string:
print(message_string)
if error_string:
print(error_string, file=sys.stderr)
if error_string or not scraper.well_formed:
return 117
return 0
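# Example invocation (illustrative; the schematron path and METS file are
# placeholders, not defaults shipped with dpres-ipt):
#   python check_xml_schematron_features.py -s /usr/share/dpres-xml-schemas/schematron sip/mets.xml
# An exit code of 0 means the checks passed; 117 signals validation errors.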
# pylint: disable=duplicate-code
# Main function can be similar in different scripts
if __name__ == '__main__':
# If run from the command line, take out the program name from sys.argv
RETVAL = main(sys.argv[1:])
sys.exit(RETVAL)
| lgpl-3.0 | 9,059,798,237,095,664,000 | 26.206349 | 75 | 0.65811 | false | 3.639066 | false | false | false |
steelcowboy/pyCourseManager | friday.py | 1 | 3765 | from datetime import datetime, timedelta
import course_manager
# from login_manager import LoginManager, login_manager, db
import coursedb_manager
from usage_resource import UsageResource
from secret import sqlalchemy_url
from login import (
PinResource,
SignUpResource,
AuthorizeResource,
LogoutResource,
UserManagementResource,
db,
)
from flask import Flask
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from pymongo import MongoClient
## Setup
app = Flask(__name__)
api = Api(app)
### UNCOMMENT TO ENABLE CORS ###
### IF NEEDED ###
from flask_cors import CORS
CORS(app, supports_credentials=True)
################################
# login_manager.init_app(app)
app.config.update(
SQLALCHEMY_DATABASE_URI = sqlalchemy_url,
SQLALCHEMY_TRACK_MODIFICATIONS = True,
SECRET_KEY = 'secret_xxx',
)
db.init_app(app)
mongo = MongoClient()
## API stuff
# CourseDB resources
api.add_resource(coursedb_manager.FullCatalogResource,
'/api/<string:school>/catalog',
resource_class_kwargs={'client': mongo}
)
api.add_resource(coursedb_manager.FullDeptResource,
'/api/<string:school>/catalog/<string:dept>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(coursedb_manager.DepartmentResource,
'/api/<string:school>/courses',
resource_class_kwargs={'client': mongo}
)
api.add_resource(coursedb_manager.DepartmentListingResource,
'/api/<string:school>/courses/<string:dept>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(coursedb_manager.CatalogCourseResource,
'/api/<string:school>/courses/<string:dept>/<int:num>',
resource_class_kwargs={'client': mongo}
)
# Login resources
api.add_resource(AuthorizeResource,
'/api/<string:school>/authorize',
resource_class_kwargs={'client': mongo}
)
api.add_resource(PinResource,
'/api/<string:school>/getpin',
resource_class_kwargs={'client': mongo}
)
api.add_resource(SignUpResource,
'/api/<string:school>/signup',
resource_class_kwargs={'client': mongo}
)
api.add_resource(UserManagementResource, '/api/<string:school>/users/<string:user>')
api.add_resource(LogoutResource, '/api/<string:school>/users/<string:user>/logout')
# How to use my lovely program
api.add_resource(UsageResource, '/api')
api.add_resource(course_manager.ListStockYears,
'/api/<string:school>/stock_charts',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.ListStockCharts,
'/api/<string:school>/stock_charts/<string:year>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.GetStockChart,
'/api/<string:school>/stock_charts/<string:year>/<string:major>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.UserConfig,
'/api/<string:school>/users/<string:user>/config',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.NewChartResource,
'/api/<string:school>/users/<string:user>/import',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.ListUserCharts,
'/api/<string:school>/users/<string:user>/charts',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.ChartResource,
'/api/<string:school>/users/<string:user>/charts/<string:chart>',
resource_class_kwargs={'client': mongo}
)
api.add_resource(course_manager.CourseResource,
'/api/<string:school>/users/<string:user>/charts/<string:chart>/<string:c_id>',
resource_class_kwargs={'client': mongo}
)
@app.before_first_request
def create_database():
db.create_all()
if __name__ == '__main__':
app.run(host='0.0.0.0', port=4500)
| apache-2.0 | 1,852,214,547,275,188,000 | 26.888889 | 87 | 0.694821 | false | 3.4573 | false | false | false |
tago-io/tago-python | tago/account/notifications.py | 1 | 2226 | import requests # Used to make HTTP requests
import json # Used to parse JSON
import os # Used to infer environment variables
API_TAGO = os.environ.get('TAGO_API') or 'https://api.tago.io'
REALTIME = os.environ.get('TAGO_REALTIME') or 'https://realtime.tago.io'
class Notifications:
def __init__(self, acc_token):
self.token = acc_token
self.default_headers = {
'content-type': 'application/json', 'Account-Token': acc_token}
return
def list(self, params):
return requests.get('{api_endpoint}/notification'.format(api_endpoint=API_TAGO), headers=self.default_headers, params=params).json()
def markAsRead(self, notifications):
if not isinstance(notifications, list):
try:
notifications = list(notifications)
except TypeError:
raise ValueError('Parameter should be iterable')
else:
data = {'notification_ids': notifications}
return requests.put('{api_endpoint}/notification/read'.format(api_endpoint=API_TAGO), headers=self.default_headers, json=data).json()
def accept(self, notification_id):
return requests.post('{api_endpoint}/notification/accept/{notification_id}'.format(api_endpoint=API_TAGO, notification_id=notification_id), headers=self.default_headers).json()
def refuse(self, notification_id):
return requests.post('{api_endpoint}/notification/refuse/{notification_id}'.format(api_endpoint=API_TAGO, notification_id=notification_id), headers=self.default_headers).json()
def remove(self, notification_id):
return requests.delete('{api_endpoint}/notification/{notification_id}'.format(api_endpoint=API_TAGO, notification_id=notification_id), headers=self.default_headers).json()
def registerDevice(self, device_token, platform):
data = {
'device_token': device_token,
'platform': platform,
}
return requests.post('{api_endpoint}/notification/push/register'.format(api_endpoint=API_TAGO), headers=self.default_headers, json=data).json()
def unRegisterDevice(self, device_token):
data = {
'device_token': device_token,
}
return requests.post('{api_endpoint}/notification/push/unregister'.format(api_endpoint=API_TAGO), headers=self.default_headers, json=data).json()
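# Minimal usage sketch (illustrative only; the token and notification id are
# placeholders and the response shape is assumed, not defined by this module):
#   notifications = Notifications('my-account-token')
#   unread = notifications.list({'read': 'false'})
#   notifications.markAsRead(['notification-id-1'])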
| mit | 1,116,200,540,419,885,400 | 44.428571 | 180 | 0.721923 | false | 3.734899 | false | false | false |
mtnsat/ics-release-dids | porta/account/account.py | 1 | 1435 | from ..with_request import WithRequest
class Account(WithRequest):
def __init__(self, url, session_id):
self.base_url = url
self.session_id=session_id
def account_get_by_id(self, i_account):
"""Get account by id"""
endpoint = "{0}".format('/rest/Account/get_account_info')
auth = '{{ "session_id": "{0}" }}'.format(self.session_id)
params = '{{ "i_account": {0} }}'.format(i_account)
payload = {
u'auth_info': auth,
u'params': params
}
return self.post_it(endpoint, payload, {})
def account_get_by_pin(self, pin_number):
"""Get account by pin number"""
endpoint = "{0}".format('/rest/Account/get_account_info')
auth = '{{ "session_id": "{0}" }}'.format(self.session_id)
params = '{{ "id": "{0}" }}'.format(pin_number)
payload = {
u'auth_info': auth,
u'params': params
}
return self.post_it(endpoint, payload, {})
def account_terminate_by_id(self, i_account):
"""Terminate account by id"""
endpoint = "{0}".format('/rest/Account/terminate_account')
auth = '{{ "session_id": "{0}" }}'.format(self.session_id)
params = '{{ "i_account": {0} }}'.format(i_account)
payload = {
u'auth_info': auth,
u'params': params
}
return self.post_it(endpoint, payload, {})
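# Minimal usage sketch (illustrative; the URL, session id and account id are
# placeholders):
#   account_api = Account('https://pb.example.com', session_id='abc123')
#   info = account_api.account_get_by_id(1001)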
| mit | -4,234,994,092,088,408,000 | 31.613636 | 66 | 0.52892 | false | 3.61461 | false | false | false |
moyaproject/moya | moya/db.py | 1 | 10394 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import absolute_import
from sqlalchemy import create_engine, MetaData
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session
from sqlalchemy.exc import (
DatabaseError,
IntegrityError,
OperationalError,
StatementError,
)
from . import namespaces
from .elements.utils import attr_bool
from .compat import text_type, implements_to_string, itervalues
from . import logic
from .console import Cell
import weakref
import logging
startup_log = logging.getLogger("moya.startup")
db_log = logging.getLogger("moya.db")
def dbobject(obj):
return getattr(obj, "__moyadbobject__", lambda: obj)()
@implements_to_string
class DBEngine(object):
def __init__(self, name, engine_name, engine, default=False):
self.name = name
self.engine_name = engine_name
self.engine = engine
self.default = default
# self.Session = sessionmaker(bind=engine) # expire_on_commit
self.session_factory = sessionmaker(bind=engine)
self.metadata = MetaData()
self.table_names = set()
def get_session(self):
return DBSession(self.session_factory, self.engine)
def __str__(self):
return "<dbengine %s>" % self.engine_name
def __repr__(self):
return '<dbengine "%s">' % self.name
def _get_db_error(e):
"""Extract information from sqlalchemy error"""
message = getattr(e, "message", text_type(e))
info = {"sql": e.statement, "params": e.params}
if hasattr(e, "orig"):
try:
code, message = e.orig.args
except:
pass
else:
info["code"] = code
message = message
return message, info
def wrap_db_errors(f):
"""Turn DB errors in to moya errors"""
def deco(self, *args, **kwargs):
try:
ret = f(self, *args, **kwargs)
except IntegrityError as e:
message, info = _get_db_error(e)
raise logic.MoyaException("db.integrity-error", message, info=info)
except OperationalError as e:
message, info = _get_db_error(e)
raise logic.MoyaException("db.operational-error", message, info=info)
except DatabaseError as e:
message, info = _get_db_error(e)
raise logic.MoyaException("db.error", message, info=info)
except StatementError as e:
message, info = _get_db_error(e)
raise logic.MoyaException(
"db.statement-error",
message,
info=info,
diagnosis="This error can occur if the models haven't been created in the database.\n\nDo you need to run **moya db sync**?",
)
except Exception as e:
raise
else:
return ret
return deco
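# Behaviour sketch (added for clarity): a method wrapped with wrap_db_errors, such
# as DBSession.exit_transaction below, re-raises SQLAlchemy failures as Moya
# exceptions, e.g. an IntegrityError surfaces as
# MoyaException("db.integrity-error", message, info={"sql": ..., "params": ...}).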
class _SessionContextManager(object):
def __init__(self, session, element):
self._session = session
self._element = element
def __enter__(self):
self._session.enter_transaction()
def __exit__(self, exc_type, exc_val, exc_tb):
self._session.exit_transaction(
element=self._element, exc_type=exc_type, exc_val=exc_val
)
class DBSession(object):
def __init__(self, session_factory, engine=None):
self.session_factory = session_factory
self._engine = weakref.ref(engine) if engine is not None else None
self._session = None
self._transaction_level = 0
@property
def engine(self):
return self._engine() if self._engine is not None else None
@property
def session(self):
if self._session is None:
self._session = self.session_factory()
return self._session
def close(self):
if self._session:
self.session.close()
self._session = None
def __moyacontext__(self, context):
return self._session
def manage(self, element):
self.session
return _SessionContextManager(self, element)
def rollback(self):
self.session.rollback()
def __repr__(self):
if self.session is not None:
return "<dbsession %s>" % self.engine
return "<dbsession>"
def enter_transaction(self):
self._transaction_level += 1
@wrap_db_errors
def exit_transaction(self, element=None, exc_type=None, exc_val=None):
self._transaction_level -= 1
if exc_type is None:
if self._transaction_level == 0:
try:
self.session.commit()
except:
self.session.rollback()
raise
else:
self.session.rollback()
self._transaction_level = 0
def __getattr__(self, key):
return getattr(self.session, key)
def add_engine(archive, name, section):
engine_name = section["engine"]
echo = attr_bool(section.get("echo", "n"))
default = attr_bool(section.get("default", "n"))
connect_args = {}
if engine_name.startswith("sqlite:"):
connect_args["check_same_thread"] = False
sqla_engine = create_engine(
engine_name, echo=echo, pool_recycle=3600, connect_args=connect_args
)
# if engine_name.startswith('sqlite:'):
# @event.listens_for(sqla_engine, "connect")
# def do_connect(dbapi_connection, connection_record):
# # disable pysqlite's emitting of the BEGIN statement entirely.
# # also stops it from emitting COMMIT before any DDL.
# dbapi_connection.isolation_level = None
# @event.listens_for(sqla_engine, "begin")
# def do_begin(conn):
# # emit our own BEGIN
# conn.execute("BEGIN EXCLUSIVE")
engine = DBEngine(name, engine_name, sqla_engine, default)
if default or not archive.database_engines:
archive.default_db_engine = name
archive.database_engines[name] = engine
startup_log.debug("%r created", engine)
def get_session_map(archive):
"""Get a dictionary that maps db names on to session objects"""
session_map = {
db: engine.get_session() for db, engine in archive.database_engines.items()
}
if archive.default_db_engine is not None:
session_map["_default"] = session_map[archive.default_db_engine]
return session_map
def commit_sessions(context, close=True):
count = 0
for dbsession in context["._dbsessions"].values():
if dbsession.session:
try:
# db_log.debug('committing %s', dbsession)
dbsession.session.commit()
except:
db_log.exception("error committing session")
raise
else:
count += 1
if close:
try:
dbsession.close()
except:
db_log.exception("error closing session")
return count
def rollback_sessions(context, close=True):
count = 0
for dbsession in context["._dbsessions"].values():
if dbsession.session:
try:
# db_log.debug('rolling back %s', dbsession)
dbsession.session.rollback()
except:
db_log.exception("error rolling back session")
else:
count += 1
if close:
try:
dbsession.close()
except:
db_log.exception("error closing session")
return count
def close_sessions(context):
"""Close db sessions."""
for dbsession in context["._dbsessions"].values():
if dbsession.session:
try:
dbsession.close()
except:
db_log.exception("error closing session")
def sync_all(archive, console, summary=True):
if validate_all(archive, console) != 0:
return -1
engines = archive.database_engines
if not engines:
return 0
for engine in itervalues(engines):
if engine.default:
default_engine = engine
break
else:
default_engine = None
apps = archive.apps.values()
synced = []
try:
with console.progress("syncing", num_steps=len(apps), width=24) as progress:
progress.update(None, "building models...")
for app in apps:
for model in app.lib.get_elements_by_type((namespaces.db, "model")):
model._build_model(app)
for app in apps:
progress.update(None, "syncing {!r}".format(app))
count = 0
for model in app.lib.get_elements_by_type((namespaces.db, "model")):
engine_name = model.dbname
if engine_name is None:
engine = default_engine
else:
engine = engines[engine_name]
model.create_all(archive, engine, app)
count += 1
progress.step()
synced.append((app, count))
progress.update(None, "db sync complete")
finally:
if summary:
table = []
for app, count in synced:
table.append(
(
Cell(text_type(app), fg="magenta", bold=True),
Cell("{}".format(count) if count else "", bold=True),
)
)
console.table(table, header_row=["app", "synced"], dividers=True, grid=True)
return 0
def validate_all(archive, console=None):
"""Validates models and returns the number of fails"""
if not archive.database_engines:
return 0
from .tags.db import DBModel
fails = DBModel.validate_all(archive)
if console is None:
return not len(fails)
for model, app, element, error in fails:
if element:
console.document_error(
text_type(error),
element._location,
element._code,
element.source_line,
None,
)
else:
console.error(text_type(error))
if hasattr(error, "diagnosis"):
console.table([(error.diagnosis,)])
return len(fails)
| mit | 8,660,788,423,971,836,000 | 29.040462 | 141 | 0.566577 | false | 4.218344 | false | false | false |
langurmonkey/gaiasky | assets/scripts/showcases/camera-constant-turn.py | 1 | 2066 | # This Gaia Sky script showcases a constant camera turn
# Created by Toni Sagrista
from py4j.clientserver import ClientServer, JavaParameters, PythonParameters
import time
class CameraUpdateRunnable(object):
def __init__(self, gs, rotation_rate):
self.gs = gs
self.rotation_rate = rotation_rate
        self.direction = [0.0, 0.0, 1.0]
        self.up = [0.0, 1.0, 0.0]
        # use the same clock as run() (time.time()) for frame timing
        self.prev_time = time.time()
# Set the direction and up
self.gs.setCameraDirection(self.direction)
self.gs.setCameraUp(self.up)
def run(self):
self.time = time.time()
# This is the number of seconds since the last frame
dt = self.time - self.prev_time
# Actual degrees to rotate this frame
rot_deg = dt * self.rotation_rate
# Rotate the direction angle around up by rot_deg degrees
self.direction = self.gs.rotate3([self.direction[0], self.direction[1], self.direction[2]], [0.0, 1.0, 0.0], rot_deg)
# Set it
self.gs.setCameraDirection(self.direction)
# We do not need to set the up vector, since it never changes
# Store prev_time for use in next frame
self.prev_time = self.time
    def toString(self):
return "camera-update-runnable"
class Java:
implements = ["java.lang.Runnable"]
gateway = ClientServer(java_parameters=JavaParameters(auto_convert=True),
python_parameters=PythonParameters())
gs = gateway.entry_point
gs.cameraStop()
gs.setCameraFree()
gs.stopSimulationTime()
gs.setVisibility("element.orbits", True)
gs.setCameraLock(True)
gs.setCameraOrientationLock(False)
gs.setFov(49)
# Rotation rate in deg/s
rotation_rate = 15.0
# park the camera updater
gs.parkRunnable("cam-updater", CameraUpdateRunnable(gs, rotation_rate))
gs.sleep(20)
# clean up and finish
print("Cleaning up and ending")
gs.unparkRunnable("cam-updater")
gs.cameraStop()
gs.maximizeInterfaceWindow()
gs.enableInput()
# close connection
gateway.shutdown()
| mpl-2.0 | -5,602,507,680,493,509,000 | 25.487179 | 125 | 0.673282 | false | 3.431894 | false | false | false |
spoqa/dodotable | dodotable/schema.py | 1 | 12038 | # -*- coding: utf-8 -*-
""":mod:`dodotable.schema` --- table schema
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import absolute_import
import collections
try:
from collections.abc import MutableSequence
except ImportError:
from collections import MutableSequence
import math
from sqlalchemy.orm import Query
from .environment.flask import FlaskEnvironment
from .util import render, string_literal, _get_data
__all__ = (
'Cell', 'Column', 'LinkedColumn', 'ObjectColumn', 'ENVIRONMENT',
'Queryable', 'Renderable', 'Row', 'Table', 'Pager', 'Schema',
)
ENVIRONMENT = FlaskEnvironment()
class Schema(object):
"""
:param environment:
:type environment: :class:`~.environment.Environment`
"""
environment = ENVIRONMENT
def render(self, template_name, **kwargs):
return render(template_name,
extra_environments=self.environment.__dict__(),
**kwargs)
class Renderable(object):
"""jinja에서 바로 렌더링되는 클래스의 상위 클래스
jinja에서는 ``__html__`` 를 호출하여 렌더링을 하므로
:class:`~Renderable` 을 상속받아 :meth:`~Renderable.__html__` 을
구현하는 경우 바로 렌더링 할 수 있습니다.
.. code-block:: python
class SomeElem(Renderable):
def __html__(self):
return "<h1>Hello World</h1>"
.. code-block:: jinja
{{ SomeElem() }} <!-- == <h1>Hello World</h1> -->
"""
def __html__(self):
""":mod:`jinja` 내부 호출용 함수
.. note::
요즘은 :func:`__html__` 을 구현하는게 HTML 뱉는 객체의 de facto 라고하더군요.
"""
raise NotImplementedError('__html__ not implemented yet.')
class Queryable(object):
""":class:`~sqlalchemy.orm.query.Query` 로 변환 가능한 객체
쿼리를 내뱉는 모든 필더들은 :class:`~Queryable` 을 상속받고
:meth:`~Queryable.__query__` 를 구현하여 sqlalchemy 쿼리로 사용할 수 있도록
변환해야합니다.
"""
def __query__(self):
"""모든 :class:`~dodotable.Queryable` 객체가 구현해야하는 메소드."""
raise NotImplementedError('__query__ not implemented yet.')
class Cell(Schema, Renderable):
"""테이블의 셀을 나타내는 클래스
:param int col: column 위치
:param int row: row 위치
:param data: 셀에 채워질 데이터
"""
def __init__(self, col, row, data, _repr=string_literal, classes=()):
self.col = col
self.row = row
self.data = data
self.repr = _repr
self.classes = classes
def __html__(self):
return self.render('cell.html', cell=self)
class LinkedCell(Cell):
"""컨텐츠에 링크가 걸린 Cell
:param int col: column 위치
:param int row: row 위치
:param data: 셀에 채워질 데이터
:param endpoint: 데이터를 누르면 이동할 url
"""
def __init__(self, col, row, data, endpoint):
self.col = col
self.row = row
self.data = data
self.url = endpoint
def __html__(self):
return self.render('linkedcell.html', cell=self)
class Column(Schema, Renderable):
"""테이블의 열을 나타내는 클래스
:param str label: 컬럼 레이블
:param str attr: 가져올 attribute 이름
:param list order_by: 정렬 기준
:param list filters: 정렬 기준
:param function _repr: 보여질 형식
:param bool sortable: 정렬 가능 여부
:param bool visible: 테이블에 해당 칼럼이 보일지 말지의 여부.
해당 값이 False여도
:class:`~dodotable.condition.IlikeSet`의 필터에는
보이므로 검색에는 사용할 수 있습니다.
"""
def __init__(self, label, attr, order_by=(), filters=None,
_repr=string_literal, sortable=True, visible=True,
classes=()):
from .condition import Order
if filters is None:
filters = []
self.label = label
self.attr = attr
self.filters = filters
self.order_by = Order.of_column(attr, order_by)
self._repr = _repr
self.sortable = sortable
self.visible = visible
self.classes = classes
def add_filter(self, filter):
self.filters.append(filter)
def __cell__(self, col, row, data, attribute_name, default=None):
"""해당 열의 데이터를 :class:`~dodotable.Cell`로 변환합니다.
:param col:
:param row:
:param data:
:param attribute_name:
:param default:
:return:
"""
return Cell(col=col, row=row,
data=_get_data(data, attribute_name, default),
_repr=self._repr,
classes=self.classes)
def __html__(self):
return self.render('column.html', column=self)
class LinkedColumn(Column):
"""링크가 걸려야 하는 열 나타내는 클래스
:param str label: 컬럼 레이블
:param str attr: 가져올 attribute 이름
:param str or function endpoint: 걸릴 링크 형식
:param list order_by: 정렬 기준
"""
def __init__(self, *args, **kwargs):
self.endpoint = kwargs.pop('endpoint')
super(LinkedColumn, self).__init__(*args, **kwargs)
def __cell__(self, col, row, data, attribute_name, default=None):
endpoint = self.endpoint(data) if callable(
self.endpoint) else self.endpoint
return LinkedCell(col=col, row=row,
data=_get_data(data, attribute_name, default),
endpoint=endpoint)
class ObjectColumn(Column):
"""Get __cell_.data as result instead of attribute."""
def __cell__(self, col, row, data, attribute_name, default=None):
return Cell(col=col, row=row,
data=data if data else default,
_repr=self._repr,
classes=self.classes)
class HiddenColumn(Column):
"""보이지 않는 열"""
def __init__(self, *args, **kwargs):
super(HiddenColumn, self).__init__(*args, **kwargs)
self.visible = False
class Row(Schema, MutableSequence, Renderable):
"""테이블에 행을 나타내는 클래스 """
def __init__(self):
self._row = []
def __delitem__(self, key):
del self._row[key]
def __getitem__(self, item):
return self._row[item]
def __setitem__(self, key, value):
self._row[key] = value
def __len__(self):
return len(self._row)
def insert(self, index, object_):
self._row.insert(index, object_)
def append(self, cell):
"""행에 cell을 붙입니다. """
assert isinstance(cell, Cell)
super(Row, self).append(cell)
def __html__(self):
return self.render('row.html', row=self)
class Pager(Schema, Renderable):
DEFAULT_LIMIT = 10
DEFAULT_OFFSET = 0
Page = collections.namedtuple('Page',
['selected', 'number', 'limit', 'offset'])
def __init__(self, limit, offset, count, padding=10):
try:
self.limit = int(limit)
self.offset = int(offset)
self.count = int(count)
self.padding = int(padding)
except ValueError:
self.limit = 10
self.offset = 0
self.count = 0
self.padding = 10
def from_page_number(self, number):
return self.Page(limit=self.limit, offset=(number - 1) * self.limit,
selected=False, number=number)
@property
def pages(self):
page_count = int(math.ceil(self.count / float(self.limit)))
current_page_count = (self.offset // self.limit) + 1
pages = []
s = (current_page_count - 1) // self.padding
start = s * 10 + 1
for page in self.range(start,
start + self.padding - 1,
max_=page_count):
selected = False
if page == current_page_count:
selected = True
p = self.Page(selected=selected, number=page, limit=self.limit,
offset=self.limit * (page - 1))
pages.append(p)
return pages
def range(self, start, end, max_, min_=1):
i = start
yield min_
while i <= end and i <= max_:
if i > min_:
yield i
i += 1
if i < max_:
yield max_
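    # Illustrative sketch (not part of the original code), assuming Pager is
    # used directly:
    #
    #     pager = Pager(limit=10, offset=40, count=95)
    #     [p.number for p in pager.pages]                # -> [1, 2, ..., 10]
    #     [p.number for p in pager.pages if p.selected]  # -> [5]
    #
    # page_count = ceil(95 / 10.0) = 10 and the current page is 40 // 10 + 1 = 5;
    # range() always yields page 1 and appends the last page whenever the
    # padded window stops short of it.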
def __html__(self):
return self.render('pager.html', pager=self)
class Table(Schema, Queryable, Renderable):
"""데이터를 나타내는 테이블의 틀
:param cls:
:param label:
:param columns:
:param sqlalchemy_session:
"""
def __init__(self, cls, label, unit_label="row",
columns=None,
sqlalchemy_session=None):
self.cls = cls
self.label = label
self.unit_label = unit_label
self._filters = []
self.rows = []
if columns is None:
self._columns = []
else:
self._columns = columns
self._count = None
self.session = sqlalchemy_session
try:
if sqlalchemy_session is None:
self.session = self.environment.get_session()
finally:
if not self.session:
raise ValueError("{0.__class__.__name__}.session "
"can't be None".format(self))
self.pager = Pager(limit=1, offset=0, count=0)
self.pager.environment = self.environment
def select(self, offset=Pager.DEFAULT_OFFSET, limit=Pager.DEFAULT_LIMIT):
self.rows = []
q = self.query.offset(offset).limit(limit)
for i, row in enumerate(q):
_row = Row()
for j, col in enumerate(self.columns):
_row.append(
col.__cell__(col=j, row=i, data=row,
attribute_name=col.attr)
)
self.rows.append(_row)
self.pager = Pager(limit=limit, offset=offset,
count=self.count)
self.pager.environment = self.environment
return self
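    # Illustrative sketch (not part of the original code): defining and
    # filling a table; `User` and `session` are assumed to exist in the
    # calling application.
    #
    #     table = Table(cls=User, label=u'Users', columns=[
    #         Column(label=u'Name', attr='name'),
    #         LinkedColumn(label=u'Email', attr='email',
    #                      endpoint=lambda u: '/users/%d' % u.id),
    #     ], sqlalchemy_session=session)
    #     table.select(offset=0, limit=20)   # fills table.rows and table.pager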
def add_filter(self, filter):
self._filters.append(filter)
@property
def _order_queries(self):
"""쿼리의 정렬 조건을 가져옵니다."""
from .condition import Order
order = []
for column in self.columns:
if column.order_by:
o = Order(self.cls, column.attr, column.order_by)
order.append(o.__query__())
if not order:
k = self.columns[0].attr
o = Order(self.cls, k)
self.columns[0].order_by = o.order
order.append(o.__query__())
return order
@property
def _filter_queries(self):
for filter in self._filters:
if filter:
yield filter.__query__()
@property
def count(self):
return self.build_base_query().count()
def build_base_query(self):
if isinstance(self.cls, Query):
query = self.cls
else:
query = self.session.query(self.cls)
for filter in self._filter_queries:
if filter is not None:
query = query.filter(filter)
return query
@property
def query(self):
"""쿼리를 만듭니다.
:return:
"""
query = self.build_base_query().order_by(*self._order_queries)
return query
@property
def columns(self):
return [column for column in self._columns if column.visible]
def __html__(self):
return self.render('table.html', table=self)
def __query__(self):
return self.query
| mit | 7,330,063,789,316,485,000 | 26.038462 | 77 | 0.540185 | false | 3.075745 | false | false | false |
ccxt/ccxt | examples/py/bypass-cloudflare.py | 1 | 1389 | # -*- coding: utf-8 -*-
import cfscrape
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
def print_supported_exchanges():
print('Supported exchanges:')
print(', '.join(ccxt.exchanges))
try:
id = sys.argv[1] # get exchange id from command line arguments
# check if the exchange is supported by ccxt
exchange_found = id in ccxt.exchanges
if exchange_found:
print('Instantiating ' + id + ' exchange')
# instantiate the exchange by id
exchange = getattr(ccxt, id)({
'timeout': 20000,
'session': cfscrape.create_scraper(),
})
try:
# load all markets from the exchange
markets = exchange.load_markets()
# output a list of all market symbols
print(id + ' has ' + str(len(exchange.symbols)) + ' symbols: ' + ', '.join(exchange.symbols))
print('Succeeded.')
except ccxt.BaseError as e:
print(type(e).__name__, str(e))
print('Failed.')
else:
print('Exchange ' + id + ' not found')
print_supported_exchanges()
except Exception as e:
print('[' + type(e).__name__ + ']', str(e))
print('Usage: python ' + sys.argv[0] + ' id')
print_supported_exchanges()
| mit | -4,424,267,704,340,370,000 | 23.368421 | 105 | 0.577394 | false | 3.674603 | false | false | false |
ojengwa/django-multitenants | tenant_schemas/utils.py | 1 | 2758 | from contextlib import contextmanager
from django.conf import settings
from django.db import connection
try:
from django.apps import apps
get_model = apps.get_model
except ImportError:
from django.db.models.loading import get_model
from django.core import mail
@contextmanager
def schema_context(schema_name):
previous_tenant = connection.tenant
try:
connection.set_schema(schema_name)
yield
finally:
if previous_tenant is None:
connection.set_schema_to_public()
else:
connection.set_tenant(previous_tenant)
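# Illustrative sketch (not part of the original module): running a query
# inside a specific tenant schema; `Invoice` is an assumed tenant model.
#
#     from tenant_schemas.utils import schema_context
#
#     with schema_context('customer1'):
#         Invoice.objects.count()   # executed against the customer1 schema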
@contextmanager
def tenant_context(tenant):
previous_tenant = connection.tenant
try:
connection.set_tenant(tenant)
yield
finally:
if previous_tenant is None:
connection.set_schema_to_public()
else:
connection.set_tenant(previous_tenant)
def get_tenant_model():
return get_model(*settings.TENANT_MODEL.split("."))
def get_public_schema_name():
return getattr(settings, 'PUBLIC_SCHEMA_NAME', 'public')
def get_limit_set_calls():
return getattr(settings, 'TENANT_LIMIT_SET_CALLS', False)
def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and url_string
.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string
def remove_www_and_dev(hostname):
"""
Legacy function - just in case someone is still using the old name
"""
return remove_www(hostname)
def remove_www(hostname):
"""
Removes www. from the beginning of the address. Only for
routing purposes. www.test.com/login/ and test.com/login/ should
find the same tenant.
"""
if hostname.startswith("www."):
return hostname[4:]
return hostname
def django_is_in_test_mode():
"""
I know this is very ugly! I'm looking for more elegant solutions.
See: http://stackoverflow.com/questions/6957016/detect-django-testing-mode
"""
return hasattr(mail, 'outbox')
def schema_exists(schema_name):
cursor = connection.cursor()
# check if this schema already exists in the db
sql = 'SELECT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace WHERE LOWER(nspname) = LOWER(%s))'
cursor.execute(sql, (schema_name, ))
row = cursor.fetchone()
if row:
exists = row[0]
else:
exists = False
cursor.close()
return exists
def app_labels(apps_list):
"""
Returns a list of app labels of the given apps_list
"""
return [app.split('.')[-1] for app in apps_list]
| mit | -1,970,988,568,842,079,000 | 24.072727 | 97 | 0.660986 | false | 3.923186 | false | false | false |
chukysoria/pyspotify-connect | tests/test_error.py | 1 | 4528 | from __future__ import unicode_literals
import unittest
import spotifyconnect
from spotifyconnect import utils
class ErrorTest(unittest.TestCase):
def test_error_is_an_exception(self):
error = spotifyconnect.Error(0)
self.assertIsInstance(error, Exception)
def test_maybe_raise(self):
with self.assertRaises(spotifyconnect.LibError):
spotifyconnect.Error.maybe_raise(
spotifyconnect.ErrorType.WrongAPIVersion)
def test_maybe_raise_does_not_raise_if_ok(self):
spotifyconnect.Error.maybe_raise(spotifyconnect.ErrorType.Ok)
def test_maybe_raise_does_not_raise_if_error_is_ignored(self):
spotifyconnect.Error.maybe_raise(
spotifyconnect.ErrorType.WrongAPIVersion,
ignores=[spotifyconnect.ErrorType.WrongAPIVersion])
def test_maybe_raise_works_with_any_iterable(self):
spotifyconnect.Error.maybe_raise(
spotifyconnect.ErrorType.WrongAPIVersion,
ignores=(spotifyconnect.ErrorType.WrongAPIVersion,))
class LibErrorTest(unittest.TestCase):
def test_is_an_error(self):
error = spotifyconnect.LibError(0)
self.assertIsInstance(error, spotifyconnect.Error)
def test_has_error_type(self):
error = spotifyconnect.LibError(0)
self.assertEqual(error.error_type, 0)
error = spotifyconnect.LibError(1)
self.assertEqual(error.error_type, 1)
def test_is_equal_if_same_error_type(self):
self.assertEqual(
spotifyconnect.LibError(0),
spotifyconnect.LibError(0))
def test_is_not_equal_if_different_error_type(self):
self.assertNotEqual(
spotifyconnect.LibError(0),
spotifyconnect.LibError(1))
def test_error_has_useful_repr(self):
error = spotifyconnect.LibError(0)
self.assertIn('Ok', repr(error))
def test_error_has_useful_string_representation(self):
error = spotifyconnect.LibError(0)
self.assertEqual('%s' % error, 'Ok')
self.assertIsInstance('%s' % error, utils.text_type)
error = spotifyconnect.LibError(3)
self.assertEqual('%s' % error, 'WrongAPIVersion')
def test_has_error_constants(self):
self.assertEqual(
spotifyconnect.LibError.Ok,
spotifyconnect.LibError(
spotifyconnect.ErrorType.Ok))
self.assertEqual(
spotifyconnect.LibError.WrongAPIVersion,
spotifyconnect.LibError(spotifyconnect.ErrorType.WrongAPIVersion))
class ErrorTypeTest(unittest.TestCase):
def test_has_error_type_constants(self):
self.assertEqual(spotifyconnect.ErrorType.Ok, 0)
self.assertEqual(spotifyconnect.ErrorType.Failed, 1)
self.assertEqual(spotifyconnect.ErrorType.InitFailed, 2)
self.assertEqual(spotifyconnect.ErrorType.WrongAPIVersion, 3)
self.assertEqual(spotifyconnect.ErrorType.NullArgument, 4)
self.assertEqual(spotifyconnect.ErrorType.InvalidArgument, 5)
self.assertEqual(spotifyconnect.ErrorType.Uninitialized, 6)
self.assertEqual(spotifyconnect.ErrorType.AlreadyInitialized, 7)
self.assertEqual(spotifyconnect.ErrorType.LoginBadCredentials, 8)
self.assertEqual(spotifyconnect.ErrorType.NeedsPremium, 9)
self.assertEqual(spotifyconnect.ErrorType.TravelRestriction, 10)
self.assertEqual(spotifyconnect.ErrorType.ApplicationBanned, 11)
self.assertEqual(spotifyconnect.ErrorType.GeneralLoginError, 12)
self.assertEqual(spotifyconnect.ErrorType.Unsupported, 13)
self.assertEqual(spotifyconnect.ErrorType.NotActiveDevice, 14)
self.assertEqual(spotifyconnect.ErrorType.PlaybackErrorStart, 1000)
self.assertEqual(spotifyconnect.ErrorType.GeneralPlaybackError, 1001)
self.assertEqual(spotifyconnect.ErrorType.PlaybackRateLimited, 1002)
self.assertEqual(spotifyconnect.ErrorType.Unknown, 1003)
class TimeoutTest(unittest.TestCase):
def test_is_an_error(self):
error = spotifyconnect.Timeout(0.5)
self.assertIsInstance(error, spotifyconnect.Error)
def test_has_useful_repr(self):
error = spotifyconnect.Timeout(0.5)
self.assertIn('Operation did not complete in 0.500s', repr(error))
def test_has_useful_string_representation(self):
error = spotifyconnect.Timeout(0.5)
self.assertEqual('%s' % error, 'Operation did not complete in 0.500s')
self.assertIsInstance('%s' % error, utils.text_type)
| apache-2.0 | 884,721,389,033,791,600 | 38.034483 | 78 | 0.703843 | false | 3.795474 | true | false | false |
vfuse/nixstatsagent | nixstatsagent/nixstatsagent.py | 2 | 24810 | #!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs: nil; -*-
# by Al Nikolov <[email protected]>
from __future__ import print_function
import bz2
import sys
if sys.version_info >= (3,):
try:
from past.builtins import basestring
except ImportError:
basestring = str
import configparser
import http.client
from queue import Queue, Empty
import io
else:
import ConfigParser
import httplib
import StringIO
from Queue import Queue, Empty
import glob
import imp
try:
import json
except ImportError:
import simplejson as json
import logging
import os
import pickle
import signal
import socket
import subprocess
import threading
import time
import types
import urllib
try:
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError
__version__ = '1.2.12'
__FILEABSDIRNAME__ = os.path.dirname(os.path.abspath(__file__))
ini_files = (
os.path.join('/etc', 'nixstats.ini'),
os.path.join('/etc', 'nixstats-token.ini'),
os.path.join(os.path.dirname(__FILEABSDIRNAME__), 'nixstats.ini'),
os.path.join(os.path.dirname(__FILEABSDIRNAME__), 'nixstats-token.ini'),
os.path.abspath('nixstats.ini'),
os.path.abspath('nixstats-token.ini'),
)
if sys.platform == 'win32':
ini_files = (
os.path.join(__FILEABSDIRNAME__, 'nixstats.ini'),
os.path.join(__FILEABSDIRNAME__, 'nixstats-token.ini'),
)
def info():
'''
Return string with info about nixstatsagent:
- version
- plugins enabled
- absolute path to plugin directory
- server id from configuration file
'''
agent = Agent(dry_instance=True)
plugins_path = agent.config.get('agent', 'plugins')
plugins_enabled = agent._get_plugins(state='enabled')
return '\n'.join((
'Version: %s' % __version__,
'Plugins enabled: %s' % ', '.join(plugins_enabled),
'Plugins directory: %s' % plugins_path,
'Server: %s' % agent.config.get('agent', 'server'),
))
def hello(proto='https'):
user_id = sys.argv[1]
if len(sys.argv) > 2:
token_filename = sys.argv[2]
else:
token_filename = os.path.join(__FILEABSDIRNAME__, 'nixstats-token.ini')
if len(sys.argv) > 3:
unique_id = sys.argv[3]
else:
unique_id = ''
if '_' in user_id:
server_id = user_id.split('_')[1]
user_id = user_id.split('_')[0]
elif os.path.isfile('/etc/nixstats/token'):
oldconfigfile = open('/etc/nixstats/token','r')
server_id = oldconfigfile.readline()
print('Upgrading from old monitoring agent')
print('Remove the old agent from the crontab (crontab -e -u nixstats)')
elif os.path.isfile('/opt/nixstats/nixstats.cfg'):
oldconfigfile = open('/opt/nixstats/nixstats.cfg')
lines=oldconfigfile.readlines()
server_id = lines[1].replace('server=', '').strip()
print('Upgrading from old python client.')
print('Run :\nchkconfig --del nixstats \nor \nupdate-rc.d -f nixstats remove \nto remove the old service.')
else:
try:
hostname = os.uname()[1]
except AttributeError:
hostname = socket.getfqdn()
server_id = urlopen(
proto + '://api.nixstats.com/hello.php',
data=urlencode({
'user': user_id,
'hostname': hostname,
'unique_id': unique_id
}).encode("utf-8")
).read().decode()
print('Got server_id: %s' % server_id)
open(token_filename, 'w').\
write('[DEFAULT]\nuser=%s\nserver=%s\n' % (user_id, server_id))
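# Illustrative usage sketch (not part of the original module): hello() is
# normally reached through main(), e.g.
#
#     nixstatsagent hello <USER_ID> [TOKEN_FILE] [UNIQUE_ID]
#
# which registers the host with the API and writes the returned server id
# into the token ini file shown above.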
# def run_agent():
# Agent().run()
def _plugin_name(plugin):
if isinstance(plugin, basestring):
basename = os.path.basename(plugin)
return os.path.splitext(basename)[0]
else:
return plugin.__name__
def test_plugins(plugins=[]):
'''
Test specified plugins and print their data output after single check.
If plugins list is empty test all enabled plugins.
'''
agent = Agent(dry_instance=True)
plugins_path = agent.config.get('agent', 'plugins')
if plugins_path not in sys.path:
sys.path.insert(0, plugins_path)
if not plugins:
plugins = agent._get_plugins(state='enabled')
print('Check all enabled plugins: %s' % ', '.join(plugins))
for plugin_name in plugins:
print('%s:' % plugin_name)
try:
fp, pathname, description = imp.find_module(plugin_name)
except Exception as e:
print('Find error:', e)
continue
try:
module = imp.load_module(plugin_name, fp, pathname, description)
except Exception as e:
print('Load error:', e)
continue
finally:
# Since we may exit via an exception, close fp explicitly.
if fp:
fp.close()
try:
payload = module.Plugin().run(agent.config)
print(json.dumps(payload, indent=4, sort_keys=True))
except Exception as e:
print('Execution error:', e)
class Agent:
execute = Queue()
metrics = Queue()
data = Queue()
cemetery = Queue()
shutdown = False
def __init__(self, dry_instance=False):
'''
        Initialize internal structures
'''
self._config_init()
# Cache for plugins so they can store values related to previous checks
self.plugins_cache = {}
if dry_instance:
return
self._logging_init()
self._plugins_init()
self._data_worker_init()
self._dump_config()
def _config_init(self):
'''
Initialize configuration object
'''
defaults = {
'max_data_span': 60,
'max_data_age': 60 * 10,
'logging_level': logging.INFO,
'threads': 100,
'ttl': 60,
'interval': 60,
'plugins': os.path.join(__FILEABSDIRNAME__, 'plugins'),
'enabled': 'no',
'subprocess': 'no',
'user': '',
'server': '',
'api_host': 'api.nixstats.com',
'api_path': '/v2/server/poll',
'log_file': '/var/log/nixstatsagent.log',
'log_file_mode': 'a',
'max_cached_collections': 10,
}
sections = [
'agent',
'execution',
'data',
]
if sys.version_info >= (3,):
config = configparser.RawConfigParser(defaults)
else:
config = ConfigParser.RawConfigParser(defaults)
config.read(ini_files)
self.config = config
for section in sections:
self._config_section_create(section)
            if section == 'data':
self.config.set(section, 'interval', 1)
            if section == 'agent':
self.config.set(section, 'interval', .5)
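    # Illustrative sketch (not part of the original code): a minimal
    # nixstats.ini matching the defaults above; the user/server values are
    # placeholders (the token file written by hello() uses a [DEFAULT]
    # section with the same keys).
    #
    #     [agent]
    #     user = <USER_ID>
    #     server = <SERVER_ID>
    #     interval = 60
    #
    #     [execution]
    #     threads = 100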
def _config_section_create(self, section):
'''
Create an addition section in the configuration object
if it's not exists
'''
if not self.config.has_section(section):
self.config.add_section(section)
def _logging_init(self):
'''
Initialize logging faculty
'''
level = self.config.getint('agent', 'logging_level')
log_file = self.config.get('agent', 'log_file')
log_file_mode = self.config.get('agent', 'log_file_mode')
if log_file_mode in ('w', 'a'):
pass
elif log_file_mode == 'truncate':
log_file_mode = 'w'
elif log_file_mode == 'append':
log_file_mode = 'a'
else:
log_file_mode = 'a'
if log_file == '-':
logging.basicConfig(level=level) # Log to sys.stderr by default
else:
try:
logging.basicConfig(filename=log_file, filemode=log_file_mode, level=level, format="%(asctime)-15s %(levelname)s %(message)s")
except IOError as e:
logging.basicConfig(level=level)
logging.info('IOError: %s', e)
logging.info('Drop logging to stderr')
logging.info('Agent logging_level %i', level)
def _plugins_init(self):
'''
Discover the plugins
'''
logging.info('_plugins_init')
plugins_path = self.config.get('agent', 'plugins')
filenames = glob.glob(os.path.join(plugins_path, '*.py'))
if plugins_path not in sys.path:
sys.path.insert(0, plugins_path)
self.schedule = {}
for filename in filenames:
name = _plugin_name(filename)
if name == 'plugins':
continue
self._config_section_create(name)
if self.config.getboolean(name, 'enabled'):
if self.config.getboolean(name, 'subprocess'):
self.schedule[filename] = 0
else:
fp, pathname, description = imp.find_module(name)
try:
module = imp.load_module(name, fp, pathname, description)
except Exception:
module = None
logging.error('import_plugin_exception:%s', str(sys.exc_info()[0]))
finally:
# Since we may exit via an exception, close fp explicitly.
if fp:
fp.close()
if module:
self.schedule[module] = 0
else:
logging.error('import_plugin:%s', name)
def _subprocess_execution(self, task):
'''
Execute /task/ in a subprocess
'''
process = subprocess.Popen((sys.executable, task),
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=True)
logging.debug('%s:process:%i', threading.currentThread(), process.pid)
interval = self.config.getint('execution', 'interval')
name = _plugin_name(task)
ttl = self.config.getint(name, 'ttl')
ticks = ttl / interval or 1
process.poll()
while process.returncode is None and ticks > 0:
logging.debug('%s:tick:%i', threading.currentThread(), ticks)
time.sleep(interval)
ticks -= 1
process.poll()
if process.returncode is None:
logging.error('%s:kill:%i', threading.currentThread(), process.pid)
os.kill(process.pid, signal.SIGTERM)
stdout, stderr = process.communicate()
if process.returncode != 0 or stderr:
logging.error('%s:%s:%s:%s', threading.currentThread(),
task, process.returncode, stderr)
if stdout:
ret = pickle.loads(stdout)
else:
ret = None
return ret
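    # Illustrative sketch (not part of the original code): a subprocess-mode
    # plugin is expected to write a pickled payload to stdout, which the
    # method above unpickles. A minimal hypothetical plugin script:
    #
    #     import pickle
    #     import sys
    #
    #     if __name__ == '__main__':
    #         sys.stdout.write(pickle.dumps({'metric': 42}))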
def _execution(self):
'''
Take queued execution requests, execute plugins and queue the results
'''
while True:
if self.shutdown:
logging.info('%s:shutdown', threading.currentThread())
break
logging.debug('%s:exec_queue:%i', threading.currentThread(), self.execute.qsize())
try:
task = self.execute.get_nowait()
except Empty:
break
logging.debug('%s:task:%s', threading.currentThread(), task)
name = _plugin_name(task)
try:
interval = self.config.get(name, 'interval')
except:
interval = 60
ts = time.time()
if isinstance(task, basestring):
payload = self._subprocess_execution(task)
else:
try:
# Setup cache for plugin instance
# if name not in self.plugins_cache.iterkeys():
# self.plugins_cache[name] = []
self.plugins_cache.update({
name: self.plugins_cache.get(name, [])
})
plugin = task.Plugin(agent_cache=self.plugins_cache[name])
payload = plugin.run(self.config)
except Exception:
logging.exception('plugin_exception')
payload = {'exception': str(sys.exc_info()[0])}
self.metrics.put({
'ts': ts,
'task': task,
'name': name,
'interval': interval,
'payload': payload,
})
self.cemetery.put(threading.currentThread())
self.hire.release()
def _data(self):
'''
Take and collect data, send and clean if needed
'''
logging.info('%s', threading.currentThread())
api_host = self.config.get('data', 'api_host')
api_path = self.config.get('data', 'api_path')
max_age = self.config.getint('agent', 'max_data_age')
max_span = self.config.getint('agent', 'max_data_span')
server = self.config.get('agent', 'server')
user = self.config.get('agent', 'user')
interval = self.config.getint('data', 'interval')
max_cached_collections = self.config.get('agent', 'max_cached_collections')
cached_collections = []
collection = []
while True:
loop_ts = time.time()
if self.shutdown:
logging.info('%s:shutdown', threading.currentThread())
break
logging.debug('%s:data_queue:%i:collection:%i',
threading.currentThread(), self.data.qsize(), len(collection))
while self.data.qsize():
try:
collection.append(self.data.get_nowait())
except Exception as e:
logging.error('Data queue error: %s' % e)
if collection:
first_ts = min((e['ts'] for e in collection))
last_ts = max((e['ts'] for e in collection))
now = time.time()
send = False
if last_ts - first_ts >= max_span:
logging.debug('Max data span')
send = True
clean = False
elif now - first_ts >= max_age:
logging.warning('Max data age')
send = True
clean = True
if send:
headers = {
"Content-type": "application/json",
"Authorization": "ApiKey %s:%s" % (user, server),
}
logging.debug('collection: %s',
json.dumps(collection, indent=2, sort_keys=True))
if not (server and user):
logging.warning('Empty server or user, nowhere to send.')
clean = True
else:
try:
if sys.version_info >= (3,):
connection = http.client.HTTPSConnection(api_host, timeout=15)
else:
connection = httplib.HTTPSConnection(api_host, timeout=15)
# Trying to send cached collections if any
if cached_collections:
logging.info('Sending cached collections: %i', len(cached_collections))
while cached_collections:
connection.request('PUT', '%s?version=%s' % (api_path, __version__),
cached_collections[0],
headers=headers)
response = connection.getresponse()
response.read()
if response.status == 200:
del cached_collections[0] # Remove just sent collection
logging.debug('Successful response: %s', response.status)
else:
raise ValueError('Unsuccessful response: %s' % response.status)
logging.info('All cached collections sent')
# Send recent collection (reuse existing connection)
connection.request('PUT', '%s?version=%s' % (api_path, __version__),
bz2.compress(str(json.dumps(collection)+"\n").encode()),
headers=headers)
response = connection.getresponse()
response.read()
if response.status == 200:
logging.debug('Successful response: %s', response.status)
clean = True
else:
raise ValueError('Unsuccessful response: %s' % response.status)
except Exception as e:
logging.error('Failed to submit collection: %s' % e)
# Store recent collection in cached_collections if send failed
if max_cached_collections > 0:
if len(cached_collections) >= max_cached_collections:
del cached_collections[0] # Remove oldest collection
logging.info('Reach max_cached_collections (%s): oldest cached collection dropped',
max_cached_collections)
logging.info('Cache current collection to resend next time')
cached_collections.append(bz2.compress(str(json.dumps(collection)+"\n").encode()))
collection = []
finally:
connection.close()
if clean:
collection = []
sleep_interval = interval - (time.time() - loop_ts)
if sleep_interval > 0:
time.sleep(sleep_interval)
def _data_worker_init(self):
'''
Initialize data worker thread
'''
logging.info('_data_worker_init')
threading.Thread(target=self._data).start()
def _dump_config(self):
'''
Dumps configuration object
'''
if sys.version_info >= (3,):
buf = io.StringIO()
else:
buf = StringIO.StringIO()
self.config.write(buf)
logging.info('Config: %s', buf.getvalue())
def _get_plugins(self, state='enabled'):
'''
Return list with plugins names
'''
plugins_path = self.config.get('agent', 'plugins')
plugins = []
for filename in glob.glob(os.path.join(plugins_path, '*.py')):
plugin_name = _plugin_name(filename)
if plugin_name == 'plugins':
continue
self._config_section_create(plugin_name)
if state == 'enabled':
if self.config.getboolean(plugin_name, 'enabled'):
plugins.append(plugin_name)
elif state == 'disabled':
if not self.config.getboolean(plugin_name, 'enabled'):
plugins.append(plugin_name)
return plugins
def _rip(self):
'''
Join with dead workers
Workaround for https://bugs.python.org/issue37788
'''
logging.debug('cemetery:%i', self.cemetery.qsize())
while True:
try:
thread = self.cemetery.get_nowait()
except Empty:
break
logging.debug('joining:%s', thread)
thread.join()
def run(self):
'''
Start all the worker threads
'''
logging.info('Agent main loop')
interval = self.config.getfloat('agent', 'interval')
self.hire = threading.Semaphore(
self.config.getint('execution', 'threads'))
try:
while True:
self._rip()
now = time.time()
logging.debug('%i threads', threading.activeCount())
while self.metrics.qsize():
metrics = self.metrics.get_nowait()
name = metrics['name']
logging.debug('metrics:%s', name)
plugin = metrics.get('task')
if plugin:
self.schedule[plugin] = \
int(now) + self.config.getint(name, 'interval')
if isinstance(plugin, types.ModuleType):
metrics['task'] = plugin.__file__
self.data.put(metrics)
execute = [
what
for what, when in self.schedule.items()
if when <= now
]
for name in execute:
logging.debug('scheduling:%s', name)
del self.schedule[name]
self.execute.put(name)
if self.hire.acquire(False):
try:
thread = threading.Thread(target=self._execution)
thread.start()
logging.debug('new_execution_worker_thread:%s', thread)
except Exception as e:
logging.warning('Can not start new thread: %s', e)
else:
logging.warning('threads_capped')
self.metrics.put({
'ts': now,
'name': 'agent_internal',
'payload': {
'threads_capping':
self.config.getint('execution', 'threads')}
})
sleep_interval = .5-(time.time()-now)
if sleep_interval > 0:
time.sleep(sleep_interval)
else:
logging.warning('not enough time to start worker threads')
time.sleep(.1)
except KeyboardInterrupt:
logging.warning(sys.exc_info()[0])
logging.info('Shutting down')
self._rip()
wait_for = True
while wait_for:
all_threads = threading.enumerate()
logging.info('Remaining threads: %s', all_threads)
wait_for = [
thread for thread in all_threads
if not thread.isDaemon() and
not isinstance(thread, threading._MainThread)
]
if not wait_for:
logging.info('Bye!')
sys.exit(0)
self.shutdown = True
logging.info('Waiting for %i threads to exit', len(wait_for))
for thread in wait_for:
logging.info('Joining with %s/%f', thread, interval)
thread.join(interval)
except Exception as e:
logging.error('Worker error: %s' % e)
def main():
if len(sys.argv) > 1:
if sys.argv[1].startswith('--'):
sys.argv[1] = sys.argv[1][2:]
if sys.argv[1] == 'help':
print('\n'.join((
'Run without options to run agent.',
'Acceptable options (leading -- is optional):',
' help, info, version, hello, insecure-hello, test',
)))
sys.exit()
elif sys.argv[1] == 'info':
print(info())
sys.exit()
elif sys.argv[1] == 'version':
print(__version__)
sys.exit()
elif sys.argv[1] == 'hello':
del sys.argv[1]
sys.exit(hello())
elif sys.argv[1] == 'insecure-hello':
del sys.argv[1]
sys.exit(hello(proto='http'))
elif sys.argv[1] == 'test':
sys.exit(test_plugins(sys.argv[2:]))
else:
print('Invalid option:', sys.argv[1], file=sys.stderr)
sys.exit(1)
else:
Agent().run()
if __name__ == '__main__':
main()
| bsd-3-clause | 4,973,914,409,906,243,000 | 35.864785 | 146 | 0.49742 | false | 4.570744 | true | false | false |
mpaf/pywinauto-64bit | pywinauto/handleprops.py | 1 | 12146 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"""Functions to retrieve properties from a window handle
These are implemented in a procedural way so as to to be
useful to other modules with the least conceptual overhead
"""
__revision__ = "$Revision: 727 $"
import ctypes
import win32functions
import win32defines
import win32structures
import findwindows # for children
#=========================================================================
def text(handle):
"Return the text of the window"
length = ctypes.c_long()
win32functions.SendMessageTimeout(
handle,
win32defines.WM_GETTEXTLENGTH,
0,
0,
win32defines.SMTO_ABORTIFHUNG,
100, # .1 of a second
ctypes.byref(length))
length = length.value
textval = ''
if length:
length += 1
buffer_ = ctypes.create_unicode_buffer(length)
ret = win32functions.SendMessage(
handle, win32defines.WM_GETTEXT, length, ctypes.byref(buffer_))
if ret:
textval = buffer_.value
return textval
#=========================================================================
def classname(handle):
"Return the class name of the window"
class_name = (ctypes.c_wchar * 257)()
win32functions.GetClassName (handle, ctypes.byref(class_name), 256)
return class_name.value
#=========================================================================
def parent(handle):
"Return the handle of the parent of the window"
return win32functions.GetParent(handle)
#=========================================================================
def style(handle):
"Return the style of the window"
return win32functions.GetWindowLong (handle, win32defines.GWL_STYLE)
#=========================================================================
def exstyle(handle):
"Return the extended style of the window"
return win32functions.GetWindowLong (handle, win32defines.GWL_EXSTYLE)
#=========================================================================
def controlid(handle):
"Return the ID of the control"
return win32functions.GetWindowLong (handle, win32defines.GWL_ID)
#=========================================================================
def userdata(handle):
"Return the value of any userdata associated with the window"
return win32functions.GetWindowLong (handle, win32defines.GWL_USERDATA)
#=========================================================================
def contexthelpid(handle):
"Return the context help id of the window"
return win32functions.GetWindowContextHelpId (handle)
#=========================================================================
def iswindow(handle):
"Return True if the handle is a window"
return bool(win32functions.IsWindow(handle))
#=========================================================================
def isvisible(handle):
"Return True if the window is visible"
return bool(win32functions.IsWindowVisible(handle))
#=========================================================================
def isunicode(handle):
"Teturn True if the window is a unicode window"
return bool(win32functions.IsWindowUnicode(handle))
#=========================================================================
def isenabled(handle):
"Return True if the window is enabled"
return bool(win32functions.IsWindowEnabled(handle))
#=========================================================================
def clientrect(handle):
"Return the client rectangle of the control"
client_rect = win32structures.RECT()
win32functions.GetClientRect(handle, ctypes.byref(client_rect))
return client_rect
#=========================================================================
def rectangle(handle):
"Return the rectangle of the window"
rect = win32structures.RECT()
win32functions.GetWindowRect(handle, ctypes.byref(rect))
return rect
#=========================================================================
def font(handle):
"Return the font as a LOGFONTW of the window"
# get the font handle
font_handle = win32functions.SendMessage(
handle, win32defines.WM_GETFONT, 0, 0)
    # if the font handle is 0 then the control is using the
# system font (well probably not - even though that is what the docs say)
# instead we switch to the default GUI font - which is more likely correct.
if not font_handle:
# So just get the default system font
font_handle = win32functions.GetStockObject(win32defines.DEFAULT_GUI_FONT)
# if we still don't have a font!
# ----- ie, we're on an antiquated OS, like NT 3.51
if not font_handle:
# ----- On Asian platforms, ANSI font won't show.
if win32functions.GetSystemMetrics(win32defines.SM_DBCSENABLED):
# ----- was...(SYSTEM_FONT)
font_handle = win32functions.GetStockObject(
win32defines.SYSTEM_FONT)
else:
# ----- was...(SYSTEM_FONT)
font_handle = win32functions.GetStockObject(
win32defines.ANSI_VAR_FONT)
else:
fontval = win32structures.LOGFONTW()
ret = win32functions.GetObject(
font_handle, ctypes.sizeof(fontval), ctypes.byref(fontval))
# Get the Logfont structure of the font of the control
fontval = win32structures.LOGFONTW()
ret = win32functions.GetObject(
font_handle, ctypes.sizeof(fontval), ctypes.byref(fontval))
# The function could not get the font - this is probably
# because the control does not have associated Font/Text
# So we should make sure the elements of the font are zeroed.
if not ret:
fontval = win32structures.LOGFONTW()
# if it is a main window
if is_toplevel_window(handle):
if "MS Shell Dlg" in fontval.lfFaceName or \
fontval.lfFaceName == "System":
            # these are not usually the fonts actually used for
# title bars so we need to get the default title bar font
# get the title font based on the system metrics rather
# than the font of the control itself
ncms = win32structures.NONCLIENTMETRICSW()
ncms.cbSize = ctypes.sizeof(ncms)
win32functions.SystemParametersInfo(
win32defines.SPI_GETNONCLIENTMETRICS,
ctypes.sizeof(ncms),
ctypes.byref(ncms),
0)
# with either of the following 2 flags set the font of the
            # dialog is the small one (but there is normally no difference!)
if has_style(handle, win32defines.WS_EX_TOOLWINDOW) or \
has_style(handle, win32defines.WS_EX_PALETTEWINDOW):
fontval = ncms.lfSmCaptionFont
else:
fontval = ncms.lfCaptionFont
return fontval
#=========================================================================
def processid(handle):
"Retrun the ID of process that controls this window"
process_id = ctypes.c_int()
win32functions.GetWindowThreadProcessId(handle, ctypes.byref(process_id))
return process_id.value
#=========================================================================
def children(handle):
"Return a list of handles to the children of this window"
return findwindows.enum_child_windows(handle)
#=========================================================================
def has_style(handle, tocheck):
"Return True if the control has style tocheck"
hwnd_style = style(handle)
return tocheck & hwnd_style == tocheck
#=========================================================================
def has_exstyle(handle, tocheck):
"Return True if the control has extended style tocheck"
hwnd_exstyle = exstyle(handle)
return tocheck & hwnd_exstyle == tocheck
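# Illustrative sketch (not part of the original module): combining the two
# checks above on an assumed window handle `hwnd`:
#
#     if has_style(hwnd, win32defines.WS_CHILD) or \
#        has_exstyle(hwnd, win32defines.WS_EX_TOOLWINDOW):
#         pass  # child or tool window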
#=========================================================================
def is_toplevel_window(handle):
"Return whether the window is a top level window or not"
# only request the style once - this is an optimization over calling
    # (handle, style) for each style I want to check!
style_ = style(handle)
if (style_ & win32defines.WS_OVERLAPPED == win32defines.WS_OVERLAPPED or \
style_ & win32defines.WS_CAPTION == win32defines.WS_CAPTION) and \
not (style_ & win32defines.WS_CHILD == win32defines.WS_CHILD):
return True
else:
return False
#=========================================================================
#def get_button_friendlyclassname(handle):
# "Return the friendly class name of a button control"
#
# # get the least significant bit
# style_lsb = style(handle) & 0xF
#
# # default to "Button"
# f_classname = "Button"
#
# if style_lsb == win32defines.BS_3STATE or \
# style_lsb == win32defines.BS_AUTO3STATE or \
# style_lsb == win32defines.BS_AUTOCHECKBOX or \
# style_lsb == win32defines.BS_CHECKBOX:
# f_classname = "CheckBox"
#
# elif style_lsb == win32defines.BS_RADIOBUTTON or \
# style_lsb == win32defines.BS_AUTORADIOBUTTON:
# f_classname = "RadioButton"
#
# elif style_lsb == win32defines.BS_GROUPBOX:
# f_classname = "GroupBox"
#
# if style(handle) & win32defines.BS_PUSHLIKE:
# f_classname = "Button"
#
# return f_classname
#def friendlyclassname(handle):
# """Return the friendly class name of the window
#
# The friendly class name might be subjective, but it
# tries to be what a normal user would call a window
# rather then the windows class name for the window.
# """
#
# import warnings
# warnings.warn("handleprops.friendlyclassname() is deprecated. Please use"
# "FriendlyClassMethod() of HwndWrapper",
# DeprecationWarning)
#
# # if it's a dialog then return that
# if is_toplevel_window(handle) and classname(handle) == "#32770":
# return "Dialog"
#
# # otherwise ask the wrapper class for the friendly class name
# class_name = classname(handle)
#
# from controls import wraphandle
# info = wraphandle._find_wrapper(class_name)
#
# if info:
# return info.friendlyclassname
#
# else:
# return class_name
#
#
# # Check if the class name is in the known classes
# for cls_name, f_cls_name in _class_names.items():
#
# # OK we found it
# if re.match(cls_name, classname(handle)):
# # If it is a string then just return it
# if isinstance(f_cls_name, basestring):
# return f_cls_name
# # otherwise it is a function so call it
# else:
# return f_cls_name(handle)
#
# # unknown class - just return it's classname
# return classname(handle)
#=========================================================================
def dumpwindow(handle):
"Dump a window to a set of properties"
props = {}
for func in (
text,
classname,
rectangle,
clientrect,
style,
exstyle,
contexthelpid,
controlid,
userdata,
font,
parent,
processid,
isenabled,
isunicode,
isvisible,
children,
):
props[func.__name__] = func(handle)
return props
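# Illustrative sketch (not part of the original module): dumping the
# properties of the desktop window; GetDesktopWindow is the standard user32
# call, reached here through ctypes.
#
#     import ctypes
#     desktop = ctypes.windll.user32.GetDesktopWindow()
#     props = dumpwindow(desktop)
#     props['classname']   # -> u'#32769' on most Windows versions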
| lgpl-2.1 | -6,504,744,661,773,950,000 | 32.185792 | 82 | 0.571464 | false | 4.279774 | false | false | false |
puiterwijk/product-definition-center | pdc/apps/compose/tests.py | 1 | 99430 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
import json
import mock
from StringIO import StringIO
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from rest_framework.test import APITestCase
from rest_framework import status
from pdc.apps.bindings import models as binding_models
from pdc.apps.common.test_utils import create_user, TestCaseWithChangeSetMixin
from pdc.apps.release.models import Release, ProductVersion
from pdc.apps.component.models import (ReleaseComponent,
BugzillaComponent)
import pdc.apps.release.models as release_models
import pdc.apps.common.models as common_models
from . import models
class ComposeModelTestCase(TestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.compose = models.Compose.objects.get(id=1)
def test_get_rpms_existing(self):
self.assertEqual(unicode(self.compose.get_rpms('bash')),
'[<RPM: bash-0:1.2.3-4.b1.x86_64.rpm>]')
def test_get_rpms_nonexisting(self):
self.assertEqual(list(self.compose.get_rpms('foo')), [])
def test_get_arch_testing_status(self):
self.assertDictEqual(self.compose.get_arch_testing_status(),
{'Server': {'x86_64': 'untested'}, 'Server2': {'x86_64': 'untested'}})
class VersionFinderTestCase(APITestCase):
# TODO: This test case could be removed after removing endpoint 'compose/package'
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/product.json",
"pdc/apps/release/fixtures/tests/product_version.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def setUp(self):
self.url = reverse('findcomposewitholderpackage-list')
def test_bad_args_missing_rpm_name(self):
response = self.client.get(self.url, {'compose': 'compose-1'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('rpm_name', response.data.get('detail'))
def test_bad_args_missing_release_and_compose(self):
response = self.client.get(self.url, {'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('release', response.data.get('detail'))
self.assertIn('compose', response.data.get('detail'))
def test_missing_previous_compose(self):
response = self.client.get(self.url, {'compose': 'compose-1', 'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_previous_compose_has_same_version(self):
response = self.client.get(self.url, {'compose': 'compose-2', 'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_previous_compose_has_older_rpm(self):
response = self.client.get(self.url, {'compose': 'compose-3', 'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose'), "compose-2")
self.assertEqual(response.data.get('packages'), ["bash-0:1.2.3-4.b1.x86_64.rpm"])
def test_same_version_different_arch(self):
"""There is a previous compose with same version of package, but with different RPM.arch."""
models.ComposeRPM.objects.filter(pk=1).update(rpm=3)
response = self.client.get(self.url, {'compose': 'compose-2', 'rpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_for_release(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_release_with_latest(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0', 'latest': 'True'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_release_to_dict(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0', 'to_dict': True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected = [
{'compose': u'compose-1', 'packages': [
{'name': u'bash', 'version': u'1.2.3', 'epoch': 0, 'release': u'4.b1',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': u'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm', 'id': 1,
'linked_composes': [u'compose-1', u'compose-2'], 'linked_releases': []}]},
{'compose': u'compose-2', 'packages': [
{'name': u'bash', 'version': u'1.2.3', 'epoch': 0, 'release': u'4.b1',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': u'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm', 'id': 1,
'linked_composes': [u'compose-1', u'compose-2'], 'linked_releases': []}]},
{'compose': u'compose-3', 'packages': [
{'name': u'bash', 'version': u'5.6.7', 'epoch': 0, 'release': u'8',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': None,
'filename': 'bash-5.6.7-8.x86_64.rpm', 'id': 2,
'linked_composes': [u'compose-3'], 'linked_releases': []}]}
]
self.assertEqual(response.data, expected)
def test_get_for_product_version(self):
product_version = ProductVersion.objects.get(short='product', version='1')
release = Release.objects.get(release_id='release-1.0')
release.product_version = product_version
release.save()
response = self.client.get(self.url, {'rpm_name': 'bash', 'product_version': 'product-1'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_product_version_with_latest(self):
product_version = ProductVersion.objects.get(short='product', version='1')
release = Release.objects.get(release_id='release-1.0')
release.product_version = product_version
release.save()
response = self.client.get(self.url, {'rpm_name': 'bash', 'product_version': 'product-1',
'latest': 'True'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_included_compose_type(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0',
'included_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']}])
def test_get_for_excluded_compose_type(self):
response = self.client.get(self.url, {'rpm_name': 'bash', 'release': 'release-1.0',
'excluded_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
class FindComposeByReleaseRPMTestCase(APITestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def test_get_for_release(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_release_with_latest(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url, {'latest': 'True'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_release_to_dict(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url, {'to_dict': True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
expected = [
{'compose': u'compose-1', 'packages': [
{'name': u'bash', 'version': u'1.2.3', 'epoch': 0, 'release': u'4.b1',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': u'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm', 'id': 1,
'linked_composes': ['compose-1', 'compose-2'], 'linked_releases': []}]},
{'compose': u'compose-2', 'packages': [
{'name': u'bash', 'version': u'1.2.3', 'epoch': 0, 'release': u'4.b1',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': u'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm', 'id': 1,
'linked_composes': ['compose-1', 'compose-2'], 'linked_releases': []}]},
{'compose': u'compose-3', 'packages': [
{'name': u'bash', 'version': u'5.6.7', 'epoch': 0, 'release': u'8',
'arch': u'x86_64', 'srpm_name': u'bash', 'srpm_nevra': None,
'filename': 'bash-5.6.7-8.x86_64.rpm', 'id': 2,
'linked_composes': ['compose-3'], 'linked_releases': []}]}
]
self.assertEqual(response.data, expected)
def test_get_for_excluded_compose_type(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url, {'excluded_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_included_compose_type(self):
url = reverse('findcomposebyrr-list', kwargs={'rpm_name': 'bash', 'release_id': 'release-1.0'})
response = self.client.get(url, {'included_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']}])
class FindOlderComposeByComposeRPMTestCase(APITestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def test_missing_previous_compose(self):
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-1', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_previous_compose_has_same_version(self):
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-2', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_previous_compose_has_older_rpm(self):
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-3', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose'), "compose-2")
self.assertEqual(response.data.get('packages'), ["bash-0:1.2.3-4.b1.x86_64.rpm"])
def test_previous_compose_has_older_rpm_with_to_dict(self):
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-3', 'rpm_name': 'bash'})
response = self.client.get(url, {'to_dict': True})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose'), "compose-2")
packages = response.data.get('packages')
self.assertEqual(len(packages), 1)
self.assertItemsEqual(packages[0].pop('linked_composes'), ['compose-1', 'compose-2'])
self.assertEqual(packages[0].pop('linked_releases'), [])
packages[0].pop('id')
self.assertDictEqual(
dict(packages[0]),
{'name': 'bash', 'version': '1.2.3', 'epoch': 0, 'release': '4.b1',
'arch': 'x86_64', 'srpm_name': 'bash', 'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
'filename': 'bash-1.2.3-4.b1.x86_64.rpm'})
def test_same_version_different_arch(self):
"""There is a previous compose with same version of package, but with different RPM.arch."""
models.ComposeRPM.objects.filter(pk=1).update(rpm=3)
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-2', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_get_compose_from_previous_release(self):
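        # Move the two older composes to a previous release with the same short name
        # and a lower version; the lookup for compose-3 should still find compose-2 there.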
r = release_models.Release.objects.create(release_type_id=1, short='release',
name='Test Release', version='0.5')
for cid in ('compose-1', 'compose-2'):
c = models.Compose.objects.get(compose_id=cid)
c.release = r
c.save()
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-3', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose'), 'compose-2')
def test_can_not_get_compose_from_previous_updates_release(self):
r = release_models.Release.objects.create(release_type_id=2, short='release',
name='Test Release', version='0.5')
for cid in ('compose-1', 'compose-2'):
c = models.Compose.objects.get(compose_id=cid)
c.release = r
c.save()
url = reverse('findoldercomposebycr-list', kwargs={'compose_id': 'compose-3', 'rpm_name': 'bash'})
response = self.client.get(url)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
class FindComposeByProductVersionRPMTestCase(APITestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/product.json",
"pdc/apps/release/fixtures/tests/product_version.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def setUp(self):
product_version = ProductVersion.objects.get(short='product', version='1')
release = Release.objects.get(release_id='release-1.0')
release.product_version = product_version
release.save()
self.url = reverse('findcomposesbypvr-list', kwargs={'rpm_name': 'bash', 'product_version': 'product-1'})
def test_get_for_product_version(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_product_version_with_latest(self):
product_version = ProductVersion.objects.get(short='product', version='1')
release = Release.objects.get(release_id='release-1.0')
release.product_version = product_version
release.save()
response = self.client.get(self.url, {'latest': 'True'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
def test_get_for_included_compose_type(self):
response = self.client.get(self.url, {'included_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-1', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']},
{'compose': 'compose-2', 'packages': ['bash-0:1.2.3-4.b1.x86_64.rpm']}])
def test_get_for_excluded_compose_type(self):
response = self.client.get(self.url, {'excluded_compose_type': 'production'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data,
[{'compose': 'compose-3', 'packages': ['bash-0:5.6.7-8.x86_64.rpm']}])
class ComposeAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def test_get_existing(self):
response = self.client.get(reverse('compose-detail', args=["compose-1"]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['sigkeys'], ['ABCDEF'])
self.assertEqual(response.data['rpm_mapping_template'],
'http://testserver/rest_api/v1/composes/compose-1/rpm-mapping/{{package}}/')
def test_compose_with_unsigned_package(self):
crpm = models.ComposeRPM.objects.all()[0]
crpm.sigkey = None
crpm.save()
response = self.client.get(reverse('compose-detail', args=["compose-1"]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertItemsEqual(response.data['sigkeys'], ['ABCDEF', None])
def test_get_nonexisting(self):
response = self.client.get(reverse('compose-detail', args=["does-not-exist"]))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_list(self):
response = self.client.get(reverse('compose-list'), {})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_composeid(self):
response = self.client.get(reverse('compose-list'), {"compose_id": "compose-1"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_composeid_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"compose_id": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmname(self):
response = self.client.get(reverse('compose-list'), {"rpm_name": "bash"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmname_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_name": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_srpmname(self):
response = self.client.get(reverse('compose-list'), {"srpm_name": "bash"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_srpmname_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"srpm_name": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmversion(self):
response = self.client.get(reverse('compose-list'), {"rpm_version": "1.2.3"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmversion_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_version": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmrelease(self):
response = self.client.get(reverse('compose-list'), {"rpm_release": "4.b1"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmrelease_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_release": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmarch(self):
response = self.client.get(reverse('compose-list'), {"rpm_arch": "x86_64"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmarch_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_arch": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmnvr(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvr": "bash-1.2.3-4.b1"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmnvr_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvr": "does-not-exist"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmnvr_invalid(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvr": "invalid"})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_query_compose_rpmnvra(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvra": "bash-1.2.3-4.b1.x86_64"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_rpmnvra_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvra": "does-not-exist.arch"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_query_compose_rpmnvra_invalid(self):
response = self.client.get(reverse('compose-list'), {"rpm_nvra": "invalid"})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_query_compose_acceptance_testing(self):
response = self.client.get(reverse('compose-list'), {"acceptance_testing": "untested"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
def test_query_compose_acceptance_testing_nonexisting(self):
response = self.client.get(reverse('compose-list'), {"acceptance_testing": "broken"})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
class ComposeApiOrderingTestCase(APITestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/product.json",
"pdc/apps/release/fixtures/tests/product_version.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/more_composes.json",
]
def test_compose_list_is_ordered(self):
response = self.client.get(reverse('compose-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(
[x['compose_id'] for x in response.data.get('results', [])],
['compose-1', 'compose-2', 'compose-3']
)
def test_compose_in_release_are_ordered(self):
response = self.client.get(reverse('release-detail', args=['release-1.0']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('compose_set', []),
['compose-1', 'compose-2', 'compose-3'])
class ComposeUpdateTestCase(TestCaseWithChangeSetMixin, APITestCase):
fixtures = [
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/more_releases.json",
]
def test_can_not_perform_full_update(self):
response = self.client.put(reverse('compose-detail', args=['compose-1']), {})
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_can_update_acceptance_testing_state(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'acceptance_testing': 'passed'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('acceptance_testing'), 'passed')
self.assertNumChanges([1])
def test_can_not_update_compose_label(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'compose_label': 'i am a label'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_linked_releases(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0-updates']},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('linked_releases'),
['release-1.0-updates'])
self.assertNumChanges([1])
def test_update_both_linked_release_and_acceptance(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0-updates'],
'acceptance_testing': 'passed'},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('linked_releases'),
['release-1.0-updates'])
self.assertEqual(response.data.get('acceptance_testing'), 'passed')
self.assertNumChanges([2])
def test_update_acceptance_preserves_links(self):
self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0-updates']},
format='json')
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'acceptance_testing': 'passed'},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('linked_releases'),
['release-1.0-updates'])
self.assertNumChanges([1, 1])
def test_update_can_not_link_to_same_release(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0']},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('detail', response.data)
def test_update_can_not_link_to_same_release_twice(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': ['release-1.0-updates', 'release-1.0-updates']},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('linked_releases'), ['release-1.0-updates'])
def test_partial_update_empty(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_patch_linked_releases_not_a_list(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': 'release-1.0-updates'},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'linked_releases': ['Expected a list.']})
self.assertNumChanges([])
def test_patch_linked_releases_null(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': None},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data, {'linked_releases': ['This field may not be null.']})
self.assertNumChanges([])
def test_patch_linked_releases_list_with_null(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'linked_releases': [None]},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data,
{'linked_releases': ['Expected a string instead of <None>.']})
self.assertNumChanges([])
def test_bulk_update_put(self):
response = self.client.put(reverse('compose-list'),
{'compose-1': {'linked_releases': []}},
format='json')
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
self.assertNumChanges([])
def test_bulk_update_patch(self):
response = self.client.patch(reverse('compose-list'),
{'compose-1': {'linked_releases': ['release-1.0-updates']}},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertNumChanges([1])
self.assertEqual(response.data.keys(), ['compose-1'])
self.assertEqual(response.data['compose-1'].get('linked_releases'),
['release-1.0-updates'])
def test_partial_update_extra_field(self):
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'foo': 'bar'}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_update_testing_status_on_arch(self):
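        # rtt_tested_architectures is a nested {variant_uid: {arch: status}} mapping.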
data = {'Server': {'x86_64': 'passed'}, 'Server2': {'x86_64': 'untested'}}
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'rtt_tested_architectures': data},
format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('rtt_tested_architectures', {}), data)
vararch = models.VariantArch.objects.get(arch__name='x86_64',
variant__variant_uid='Server',
variant__compose__compose_id='compose-1')
self.assertEqual(vararch.rtt_testing_status.name, 'passed')
self.assertNumChanges([1])
def test_update_testing_status_on_non_existing_tree(self):
inputs = [
({'Foo': {'x86_64': 'passed'}}, 'Foo.x86_64 not in compose compose-1.'),
({'Server': {'foo': 'passed'}}, 'Server.foo not in compose compose-1.'),
]
for data, err in inputs:
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'rtt_tested_architectures': data},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('rtt_tested_architectures', ''), err)
self.assertNumChanges([])
def test_update_testing_status_to_non_existing_status(self):
data = {'Server': {'x86_64': 'awesome'}}
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'rtt_tested_architectures': data},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('rtt_tested_architectures', ''),
'"awesome" is not a known testing status for Server.x86_64.')
def test_update_testing_status_with_malformed_data(self):
inputs = [
({'Server': 'passed'}, 'Server: "passed" is not a dict'),
('passed', 'rtt_tested_architectures: "passed" is not a dict'),
]
for data, err in inputs:
response = self.client.patch(reverse('compose-detail', args=['compose-1']),
{'rtt_tested_architectures': data},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('detail', []), [err])
self.assertNumChanges([])
class OverridesRPMAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
fixtures = [
'pdc/apps/release/fixtures/tests/release.json',
'pdc/apps/compose/fixtures/tests/compose_overriderpm.json',
]
def setUp(self):
self.release = release_models.Release.objects.get(release_id='release-1.0')
self.override_rpm = {'id': 1, 'release': 'release-1.0', 'variant': 'Server', 'arch': 'x86_64',
'srpm_name': 'bash', 'rpm_name': 'bash-doc', 'rpm_arch': 'x86_64',
'include': False, 'comment': '', 'do_not_delete': False}
self.do_not_delete_orpm = {'release': 'release-1.0', 'variant': 'Server', 'arch': 'x86_64',
'srpm_name': 'bash', 'rpm_name': 'bash-doc', 'rpm_arch': 'src',
'include': True, 'comment': '', 'do_not_delete': True}
def test_query_existing(self):
response = self.client.get(reverse('overridesrpm-list'), {'release': 'release-1.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 1)
self.assertEqual(response.data['results'][0], self.override_rpm)
def test_query_nonexisting(self):
response = self.client.get(reverse('overridesrpm-list'), {'release': 'release-1.1'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['count'], 0)
def test_delete_existing(self):
response = self.client.delete(reverse('overridesrpm-detail', args=[1]))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEqual(models.OverrideRPM.objects.count(), 0)
self.assertNumChanges([1])
def test_delete_non_existing(self):
        response = self.client.delete(reverse('overridesrpm-detail', args=[42]))
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertEqual(models.OverrideRPM.objects.count(), 1)
self.assertNumChanges([])
def test_create_duplicit(self):
response = self.client.post(reverse('overridesrpm-list'), self.override_rpm)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(models.OverrideRPM.objects.count(), 1)
def test_create_correct(self):
self.override_rpm["rpm_name"] = "bash-debuginfo"
del self.override_rpm["id"]
response = self.client.post(reverse('overridesrpm-list'), self.override_rpm)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(models.OverrideRPM.objects.count(), 2)
def test_create_extra_field(self):
self.override_rpm["rpm_name"] = "bash-debuginfo"
self.override_rpm["foo"] = "bar"
response = self.client.post(reverse('overridesrpm-list'), self.override_rpm)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_clear(self):
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0'})
self.assertEqual(models.OverrideRPM.objects.count(), 0)
self.assertItemsEqual(response.data, [self.override_rpm])
def test_clear_with_no_matched_record(self):
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'no_such_release'})
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_clear_preserve_do_not_delete(self):
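        # Overrides flagged do_not_delete=True must survive a plain clear; only the
        # ordinary override from the fixture is deleted and reported back.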
models.OverrideRPM.objects.create(release=self.release, variant="Server", arch="x86_64",
rpm_name="bash-doc", rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0'})
self.assertEqual(models.OverrideRPM.objects.count(), 1)
self.assertItemsEqual(response.data, [self.override_rpm])
def test_delete_with_extra_param(self):
models.OverrideRPM.objects.create(release=self.release, variant="Server", arch="x86_64",
rpm_name="bash-doc", rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0', 'variant': "Server",
'arch': 'x86_64', 'rpm_name': 'bash-doc',
'rpm_arch': 'src', 'srpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_clear_with_extra_param(self):
models.OverrideRPM.objects.create(release=self.release, variant="Server", arch="x86_64",
rpm_name="bash-doc", rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0', 'srpm_name': 'bash'})
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_clear_force(self):
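        # With force=True even do_not_delete records are removed, and both deleted
        # overrides are reported in the response.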
override = models.OverrideRPM.objects.create(release=self.release, variant="Server",
arch="x86_64", rpm_name="bash-doc",
rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
self.do_not_delete_orpm['id'] = override.pk
response = self.client.delete(reverse('overridesrpm-list'), {'release': 'release-1.0', 'force': True})
self.assertEqual(models.OverrideRPM.objects.count(), 0)
self.assertItemsEqual(response.data, [self.override_rpm, self.do_not_delete_orpm])
def test_delete_two_by_id(self):
override = models.OverrideRPM.objects.create(release=self.release, variant="Server",
arch="x86_64", rpm_name="bash-doc",
rpm_arch="src", include=True,
do_not_delete=True, srpm_name="bash")
response = self.client.delete(reverse('overridesrpm-list'),
[1, override.pk],
format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertNumChanges([2])
self.assertEqual(models.OverrideRPM.objects.count(), 0)
class ComposeRPMViewAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
]
def setUp(self):
with open('pdc/apps/release/fixtures/tests/composeinfo.json', 'r') as f:
self.compose_info = json.loads(f.read())
with open('pdc/apps/compose/fixtures/tests/rpm-manifest.json', 'r') as f:
self.manifest = json.loads(f.read())
self.client.post(reverse('releaseimportcomposeinfo-list'),
self.compose_info, format='json')
# Caching ids makes it faster, but the cache needs to be cleared for each test.
models.Path.CACHE = {}
common_models.SigKey.CACHE = {}
def test_import_inconsistent_data(self):
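        # The compose id inside the manifest no longer matches the imported
        # composeinfo, so the import must be rejected.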
self.manifest['payload']['compose']['id'] = 'TP-1.0-20150315.0'
response = self.client.post(reverse('composerpm-list'),
{'rpm_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_import_and_retrieve_manifest(self):
response = self.client.post(reverse('composerpm-list'),
{'rpm_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNumChanges([11, 5])
self.assertEqual(models.ComposeRPM.objects.count(), 6)
response = self.client.get(reverse('composerpm-detail', args=['TP-1.0-20150310.0']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertDictEqual(dict(response.data),
self.manifest)
class ComposeImageAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
def setUp(self):
with open('pdc/apps/release/fixtures/tests/composeinfo.json', 'r') as f:
self.compose_info = json.loads(f.read())
with open('pdc/apps/compose/fixtures/tests/image-manifest.json', 'r') as f:
self.manifest = json.loads(f.read())
self.client.post(reverse('releaseimportcomposeinfo-list'),
self.compose_info, format='json')
# Caching ids makes it faster, but the cache needs to be cleared for each test.
models.Path.CACHE = {}
def test_import_images_by_deprecated_api(self):
# TODO: remove this test after next release
response = self.client.post(reverse('composeimportimages-list'),
{'image_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNumChanges([11, 5])
self.assertEqual(models.ComposeImage.objects.count(), 4)
response = self.client.get(reverse('image-list'), {'compose': 'TP-1.0-20150310.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 4)
def test_import_images(self):
response = self.client.post(reverse('composeimage-list'),
{'image_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNumChanges([11, 5])
self.assertEqual(models.ComposeImage.objects.count(), 4)
response = self.client.get(reverse('image-list'), {'compose': 'TP-1.0-20150310.0'})
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data.get('count'), 4)
def test_import_inconsistent_data(self):
self.manifest['payload']['compose']['id'] = 'TP-1.0-20150315.0'
response = self.client.post(reverse('composeimage-list'),
{'image_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_import_and_retrieve_images(self):
response = self.client.post(reverse('composeimage-list'),
{'image_manifest': self.manifest,
'release_id': 'tp-1.0',
'composeinfo': self.compose_info},
format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
response = self.client.get(reverse('composeimage-detail', args=['TP-1.0-20150310.0']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertDictEqual(dict(response.data), self.manifest)
class RPMMappingAPITestCase(APITestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.release = release_models.Release.objects.latest('id')
self.compose = models.Compose.objects.get(compose_id='compose-1')
self.url = reverse('composerpmmapping-detail', args=[self.compose.compose_id, 'bash'])
def test_get_rpm_mapping(self):
response = self.client.get(self.url, {}, format='json')
expected_data = {
'Server': {
'x86_64': {
'bash': ['x86_64'],
}
}
}
self.assertEqual(response.data, expected_data)
def test_get_rpm_mapping_for_nonexisting_compose(self):
url = reverse('composerpmmapping-detail', args=['foo-bar', 'bash'])
response = self.client.get(url, {}, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_rpm_mapping_includes_overrides(self):
models.OverrideRPM.objects.create(variant='Server', arch='x86_64', srpm_name='bash', rpm_name='bash',
rpm_arch='src', include=True, release=self.release)
response = self.client.get(self.url, {}, format='json')
expected_data = {
'Server': {
'x86_64': {
'bash': ['src', 'x86_64'],
}
}
}
self.assertEqual(response.data, expected_data)
def test_rpm_mapping_can_exclude_overrides(self):
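        # With ?disable_overrides=1 the mapping reflects the raw compose content:
        # the bash-doc package excluded by the fixture override shows up again and
        # the include-override created above is ignored.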
models.OverrideRPM.objects.create(variant='Server', arch='x86_64', srpm_name='bash', rpm_name='bash',
rpm_arch='src', include=True, release=self.release)
self.url += '?disable_overrides=1'
response = self.client.get(self.url, {}, format='json')
expected_data = {
'Server': {
'x86_64': {
'bash': ['x86_64'],
'bash-doc': ['x86_64'],
}
}
}
self.assertEqual(response.data, expected_data)
def test_does_not_return_empty_container(self):
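        # Excluding the only remaining RPM empties the variant/arch containers,
        # and empty containers are dropped from the response entirely.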
models.OverrideRPM.objects.create(variant='Server', arch='x86_64', srpm_name='bash', rpm_name='bash',
rpm_arch='x86_64', include=False, release=self.release)
response = self.client.get(self.url, {}, format='json')
self.assertEqual(response.data, {})
def test_partial_update(self):
self.client.force_authenticate(create_user("user", perms=[]))
self.client.patch(self.url, [{"action": "create", "srpm_name": "bash", "rpm_name": "bash-magic",
"rpm_arch": "src", "variant": "Client", "arch": "x86_64",
"do_not_delete": False, "comment": "", "include": True}],
format='json')
orpm = models.OverrideRPM.objects.get(srpm_name="bash", rpm_name="bash-magic", rpm_arch="src",
variant="Client", arch="x86_64", include=True,
do_not_delete=False, comment="")
self.assertIsNotNone(orpm)
def test_update(self):
self.client.force_authenticate(create_user("user", perms=[]))
new_mapping = {'Server': {'x86_64': {'bash': ['x86_64', 'i386']}}}
response = self.client.put(self.url, new_mapping, format='json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [{'action': 'create', 'srpm_name': 'bash', 'rpm_name': 'bash',
'rpm_arch': 'i386', 'variant': 'Server', 'arch': 'x86_64',
'include': True, 'release_id': 'release-1.0'}])
self.assertEqual(0, models.OverrideRPM.objects.filter(rpm_arch='i386').count())
def test_update_with_perform(self):
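        # Same request as test_update, but ?perform=1 actually applies the computed
        # changes, so the i386 override is persisted.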
self.client.force_authenticate(create_user("user", perms=[]))
new_mapping = {'Server': {'x86_64': {'bash': ['x86_64', 'i386']}}}
response = self.client.put(self.url + '?perform=1', new_mapping, format='json')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data, [{'action': 'create', 'srpm_name': 'bash', 'rpm_name': 'bash',
'rpm_arch': 'i386', 'variant': 'Server', 'arch': 'x86_64',
'include': True, 'release_id': 'release-1.0'}])
self.assertEqual(1, models.OverrideRPM.objects.filter(rpm_arch='i386').count())
class FilterBugzillaProductsAndComponentsTestCase(APITestCase):
fixtures = [
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/component/fixtures/tests/release_component.json",
"pdc/apps/component/fixtures/tests/upstream.json",
"pdc/apps/component/fixtures/tests/global_component.json"
]
def setUp(self):
# Construct a new release and release component
self.release = Release.objects.create(
release_id='release-2.0',
short='release',
version='2.0',
name='Awesome Release',
release_type_id=1,
)
self.bugzilla_component = BugzillaComponent.objects.create(name='kernel')
filesystems = BugzillaComponent.objects.create(name='filesystems', parent_component=self.bugzilla_component)
BugzillaComponent.objects.create(name='ext4', parent_component=filesystems)
pyth = BugzillaComponent.objects.create(name='python', parent_component=self.bugzilla_component)
BugzillaComponent.objects.create(name='bin', parent_component=pyth)
ReleaseComponent.objects.create(
release=self.release,
global_component_id=1,
name='kernel',
bugzilla_component=self.bugzilla_component
)
def test_filter_bugzilla_products_components_with_rpm_nvr(self):
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=bash-1.2.3-4.b1', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_filter_with_invalid_nvr(self):
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=xxx', format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_filter_with_nvr_without_rpms(self):
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=GConf2-3.2.6-8.el71', format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_filter_without_nvr(self):
url = reverse('bugzilla-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@mock.patch('pdc.apps.compose.models.Compose.objects.filter')
def test_filter_without_srpm_component_name_mapping(self, mock_filter):
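        # Compose.objects.filter is mocked so the view sees a single compose tied to
        # self.release without needing compose fixtures; the bugzilla component is
        # then resolved from the release component named after the SRPM.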
release_component, _ = ReleaseComponent.objects.get_or_create(
global_component_id=1,
release=self.release,
bugzilla_component=self.bugzilla_component,
name='bash')
mock_filter.return_value = mock.Mock()
mock_filter.return_value.distinct.return_value = [mock.Mock()]
mock_filter.return_value.distinct.return_value[0].release = self.release.release_id
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=bash-1.2.3-4.b1', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('kernel', response.content)
@mock.patch('pdc.apps.compose.models.Compose.objects.filter')
def test_filter_with_srpm_component_name_mapping(self, mock_filter):
release_component, _ = ReleaseComponent.objects.get_or_create(
global_component_id=1,
release=self.release,
name='kernel')
binding_models.ReleaseComponentSRPMNameMapping.objects.create(
srpm_name='bash',
release_component=release_component)
mock_filter.return_value = mock.Mock()
mock_filter.return_value.distinct.return_value = [mock.Mock()]
mock_filter.return_value.distinct.return_value[0].release = self.release.release_id
url = reverse('bugzilla-list')
response = self.client.get(url + '?nvr=bash-1.2.3-4.b1', format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIn('kernel', response.content)
class RPMMappingTestCase(TestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.compose = models.Compose.objects.get(compose_id='compose-1')
self.mapping, _ = self.compose.get_rpm_mapping('bash')
def test_compute_diff_add_new(self):
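        # compute_changes() diffs the stored compose mapping against the proposed
        # one and returns the override actions (create/delete) needed to reach it.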
new_mapping = models.ComposeRPMMapping(data={'Server': {'x86_64': {'bash': ['src', 'x86_64']}}})
changes = self.mapping.compute_changes(new_mapping)
self.assertEqual(len(changes), 1)
self.assertEqual(changes[0], {'action': 'create', 'variant': 'Server', 'arch': 'x86_64',
'include': True, 'release_id': 'release-1.0', 'rpm_name': 'bash',
'srpm_name': 'bash', 'rpm_arch': 'src'})
def test_compute_diff_add_excluded(self):
new_mapping = models.ComposeRPMMapping(data={'Server': {'x86_64': {'bash': ['x86_64'],
'bash-doc': ['x86_64']}}})
changes = self.mapping.compute_changes(new_mapping)
self.assertEqual(len(changes), 1)
self.assertEqual(changes[0], {'action': 'delete', 'variant': 'Server', 'arch': 'x86_64',
'include': False, 'release_id': 'release-1.0', 'rpm_name': 'bash-doc',
'srpm_name': 'bash', 'rpm_arch': 'x86_64'})
def test_compute_diff_remove_existing(self):
new_mapping = models.ComposeRPMMapping(data={})
changes = self.mapping.compute_changes(new_mapping)
self.assertEqual(len(changes), 1)
self.assertEqual(changes[0], {'action': 'create', 'variant': 'Server', 'arch': 'x86_64',
'include': False, 'release_id': 'release-1.0', 'rpm_name': 'bash',
'srpm_name': 'bash', 'rpm_arch': 'x86_64'})
class OverrideManagementTestCase(TestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
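        # Form data for the override management view: four formsets (prefixes
        # 'checks', 'news', 'vararch' and 'for_new_vararch'), each with the standard
        # Django management-form fields (*-TOTAL_FORMS, *-INITIAL_FORMS,
        # *-MAX_NUM_FORMS).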
self.initial_form_data = {
'checks-0-included': 'on',
'checks-0-variant': 'Server',
'checks-0-arch': 'x86_64',
'checks-0-rpm_name': 'bash',
'checks-0-rpm_arch': 'x86_64',
'checks-1-variant': 'Server',
'checks-1-arch': 'x86_64',
'checks-1-rpm_name': 'bash-doc',
'checks-1-rpm_arch': 'x86_64',
'checks-MAX_NUM_FORMS': '1000',
'checks-INITIAL_FORMS': 2,
'checks-TOTAL_FORMS': 2,
'news-MAX_NUM_FORMS': '1000',
'news-INITIAL_FORMS': 1,
'news-TOTAL_FORMS': 0,
'vararch-MAX_NUM_FORMS': '1000',
'vararch-INITIAL_FORMS': 1,
'vararch-TOTAL_FORMS': 0,
'for_new_vararch-MAX_NUM_FORMS': '1000',
'for_new_vararch-INITIAL_FORMS': 0,
'for_new_vararch-TOTAL_FORMS': 0,
}
def test_can_access_management_form(self):
client = Client()
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.status_code, 200)
# There is one package in fixtures
self.assertEqual(len(response.context['forms']), 1)
def test_submit_no_changes(self):
client = Client()
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 0)
def test_submit_disable(self):
client = Client()
del self.initial_form_data['checks-0-included']
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 1)
self.assertEqual({'variant': 'Server', 'arch': 'x86_64', 'rpm_name': 'bash', 'rpm_arch': 'x86_64',
'include': False, 'action': 'create', 'srpm_name': 'bash', 'release_id': 'release-1.0'},
data[0])
def test_submit_enable(self):
client = Client()
self.initial_form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 1)
self.assertEqual({'variant': 'Server', 'arch': 'x86_64', 'rpm_name': 'bash-doc', 'rpm_arch': 'x86_64',
'include': False, 'action': 'delete', 'srpm_name': 'bash', 'release_id': 'release-1.0',
'comment': '', 'do_not_delete': False},
data[0])
def test_submit_new_override(self):
client = Client()
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': 'bash-completion',
'news-0-rpm_arch': 'x86_64',
'news-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 1)
self.assertEqual({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data[0])
def test_submit_new_override_on_new_variant(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-new_variant': 0,
'for_new_vararch-0-rpm_name': 'bash-completion',
'for_new_vararch-0-rpm_arch': 'x86_64',
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 1)
self.assertEqual({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server-optional',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data[0])
def test_submit_more_different_changes(self):
client = Client()
del self.initial_form_data['checks-0-included']
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': 'bash-completion',
'news-0-rpm_arch': 'x86_64',
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-new_variant': 0,
'for_new_vararch-0-rpm_name': 'bash-completion',
'for_new_vararch-0-rpm_arch': 'x86_64',
'news-TOTAL_FORMS': 1,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 3)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server-optional',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash', 'rpm_arch': 'x86_64', 'include': False},
data)
def test_submit_more_same_changes(self):
client = Client()
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': 'bash-completion',
'news-0-rpm_arch': 'x86_64',
'news-1-variant': 'Server',
'news-1-arch': 'x86_64',
'news-1-rpm_name': 'bash-magic',
'news-1-rpm_arch': 'src',
'news-TOTAL_FORMS': 2,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 2)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash-completion', 'rpm_arch': 'x86_64', 'include': True},
data)
self.assertIn({'action': 'create', 'release_id': 'release-1.0', 'srpm_name': 'bash', 'variant': 'Server',
'arch': 'x86_64', 'rpm_name': 'bash-magic', 'rpm_arch': 'src', 'include': True},
data)
def test_submit_enable_and_disable(self):
client = Client()
del self.initial_form_data['checks-0-included']
self.initial_form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertIn('compressed', response.context)
data = json.loads(response.context['compressed'])
self.assertEqual(len(data), 2)
self.assertIn({'variant': 'Server', 'arch': 'x86_64', 'rpm_name': 'bash-doc', 'rpm_arch': 'x86_64',
'include': False, 'action': 'delete', 'srpm_name': 'bash', 'release_id': 'release-1.0',
'comment': '', 'do_not_delete': False},
data)
self.assertIn({'variant': 'Server', 'arch': 'x86_64', 'rpm_name': 'bash', 'rpm_arch': 'x86_64',
'include': False, 'action': 'create', 'srpm_name': 'bash', 'release_id': 'release-1.0'},
data)
def test_submit_incorrect_new_override_missing_rpm_arch(self):
client = Client()
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': 'bash-completion',
'news-0-rpm_arch': '',
'news-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_forms', 0, None, 'Both RPM name and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_missing_rpm_name(self):
client = Client()
self.initial_form_data.update({
'news-0-variant': 'Server',
'news-0-arch': 'x86_64',
'news-0-rpm_name': '',
'news-0-rpm_arch': 'src',
'news-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_forms', 0, None, 'Both RPM name and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_missing_rpm_arch(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-rpm_name': 'bash-completion',
'for_new_vararch-0-rpm_arch': '',
'for_new_vararch-0-new_variant': 0,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_v_forms', 0, None, 'Both RPM name and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_missing_rpm_name(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-rpm_name': '',
'for_new_vararch-0-rpm_arch': 'src',
'for_new_vararch-0-new_variant': 0,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_v_forms', 0, None, 'Both RPM name and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_missing_variant_name(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': '',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-rpm_name': 'bash-magic',
'for_new_vararch-0-rpm_arch': 'src',
'for_new_vararch-0-new_variant': 0,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'variant_forms', 0, None, 'Both variant and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_missing_variant_arch(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': '',
'for_new_vararch-0-rpm_name': 'bash-magic',
'for_new_vararch-0-rpm_arch': 'src',
'for_new_vararch-0-new_variant': 0,
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'variant_forms', 0, None, 'Both variant and arch must be filled in.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_incorrect_new_override_for_new_variant_and_old_variant(self):
client = Client()
self.initial_form_data.update({
'vararch-0-variant': 'Server-optional',
'vararch-0-arch': 'x86_64',
'for_new_vararch-0-rpm_name': 'bash-magic',
'for_new_vararch-0-rpm_arch': 'src',
'for_new_vararch-0-new_variant': 0,
'for_new_vararch-0-variant': 'Server',
'for_new_vararch-0-arch': 'i686',
'vararch-TOTAL_FORMS': 1,
'for_new_vararch-TOTAL_FORMS': 1,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertFormsetError(response, 'override_v_forms', 0, None, 'Can not reference both old and new variant.arch.')
self.assertContains(response, 'There are errors in the form.')
def test_submit_preview_no_change(self):
client = Client()
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'No changes')
class OverridePreviewTestCase(TestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.form_data = {
'checks-0-included': 'on',
'checks-0-variant': 'Server',
'checks-0-arch': 'x86_64',
'checks-0-rpm_name': 'bash',
'checks-0-rpm_arch': 'x86_64',
'checks-1-variant': 'Server',
'checks-1-arch': 'x86_64',
'checks-1-rpm_name': 'bash-doc',
'checks-1-rpm_arch': 'x86_64',
'checks-MAX_NUM_FORMS': '1000',
'checks-INITIAL_FORMS': 2,
'checks-TOTAL_FORMS': 2,
'news-MAX_NUM_FORMS': '1000',
'news-INITIAL_FORMS': 1,
'news-TOTAL_FORMS': 0,
'vararch-MAX_NUM_FORMS': '1000',
'vararch-INITIAL_FORMS': 1,
'vararch-TOTAL_FORMS': 0,
'for_new_vararch-MAX_NUM_FORMS': '1000',
'for_new_vararch-INITIAL_FORMS': 0,
'for_new_vararch-TOTAL_FORMS': 0,
}
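        # Second step of the two-step workflow: the preview/confirmation formset,
        # filled in from the 'compressed' changes returned by the first POST (see
        # _populate_preview_form below).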
self.preview_form_data = {
'preview_submit': True,
'form-TOTAL_FORMS': 0,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
}
def _populate_preview_form(self, response):
"""Parse response and prepare form data for preview submission."""
def set_val(dict, key, val):
if isinstance(val, bool):
if val:
dict[key] = 'on'
dict[key] = val
for (i, action) in enumerate(json.loads(response.context['compressed'])):
for k in action:
set_val(self.preview_form_data, 'form-%d-%s' % (i, k), action[k])
self.preview_form_data['form-TOTAL_FORMS'] += 1
self.preview_form_data['initial_data'] = response.context['compressed']
def test_submit_with_comment_and_missing_do_not_delete(self):
client = Client()
del self.form_data['checks-0-included']
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
self.preview_form_data['form-0-comment'] = 'do not delete me'
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'There are errors in the form.')
self.assertFormsetError(response, 'forms', 0, None, 'Comment needs do_not_delete checked.')
def test_submit_ok_no_comment(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
del self.form_data['checks-0-included']
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 2)
orpm = models.OverrideRPM.objects.latest('id')
self.assertEqual(orpm.include, False)
self.assertEqual(orpm.do_not_delete, False)
self.assertEqual(orpm.comment, '')
def test_submit_ok_with_comment(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
del self.form_data['checks-0-included']
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
self.preview_form_data.update({
'form-0-do_not_delete': 'on',
'form-0-comment': 'do not delete me',
})
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 2)
orpm = models.OverrideRPM.objects.latest('id')
self.assertEqual(orpm.include, False)
self.assertEqual(orpm.do_not_delete, True)
self.assertEqual(orpm.comment, 'do not delete me')
def test_submit_ok_should_delete(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
self.form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
del self.preview_form_data['form-0-do_not_delete']
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 0)
def test_submit_ok_should_set_do_not_delete(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
self.form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
self.preview_form_data.update({
'form-0-comment': 'comment',
'form-0-do_not_delete': 'on',
})
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 1)
orpm = models.OverrideRPM.objects.latest('id')
self.assertEqual(orpm.do_not_delete, True)
self.assertEqual(orpm.comment, 'comment')
self.assertEqual(orpm.include, True)
def test_submit_ok_should_remove_do_not_delete_and_delete(self):
orpm = models.OverrideRPM.objects.latest('id')
orpm.do_not_delete = True
orpm.save()
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
self.form_data['checks-1-included'] = 'on'
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self._populate_preview_form(response)
del self.preview_form_data['form-0-do_not_delete']
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 0)
def test_submit_ok_disable_override_without_compose_rpm__should_delete(self):
orpm = models.OverrideRPM.objects.latest('id')
orpm.rpm_name = 'bash-magic'
orpm.include = True
orpm.save()
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
self.form_data.update({
'checks-1-included': 'on',
'checks-2-variant': 'Server',
'checks-2-arch': 'x86_64',
'checks-2-rpm_name': 'bash-magic',
'checks-2-rpm_arch': 'x86_64',
'checks-TOTAL_FORMS': 3,
})
response = client.post('/override/manage/release-1.0/?package=bash', self.form_data)
self.assertEqual(len(response.context['forms']), 1)
self._populate_preview_form(response)
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.OverrideRPM.objects.count(), 0)
class OverridePreviewBulkTestCase(TestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm_more.json",
]
def setUp(self):
self.initial_form_data = {
'checks-0-variant': 'Server',
'checks-0-arch': 'x86_64',
'checks-0-rpm_name': 'bash',
'checks-0-rpm_arch': 'x86_64',
'checks-1-variant': 'Server',
'checks-1-arch': 'x86_64',
'checks-1-rpm_name': 'bash-completion',
'checks-1-rpm_arch': 'x86_64',
'checks-2-included': 'on',
'checks-2-variant': 'Server',
'checks-2-arch': 'x86_64',
'checks-2-rpm_name': 'bash-debuginfo',
'checks-2-rpm_arch': 'x86_64',
'checks-3-included': 'on',
'checks-3-variant': 'Server',
'checks-3-arch': 'x86_64',
'checks-3-rpm_name': 'bash-doc',
'checks-3-rpm_arch': 'x86_64',
'checks-4-variant': 'Server',
'checks-4-arch': 'x86_64',
'checks-4-rpm_name': 'bash-magic',
'checks-4-rpm_arch': 'x86_64',
'checks-MAX_NUM_FORMS': '1000',
'checks-INITIAL_FORMS': 5,
'checks-TOTAL_FORMS': 5,
'news-MAX_NUM_FORMS': '1000',
'news-INITIAL_FORMS': 1,
'news-TOTAL_FORMS': 0,
'vararch-MAX_NUM_FORMS': '1000',
'vararch-INITIAL_FORMS': 1,
'vararch-TOTAL_FORMS': 0,
'for_new_vararch-MAX_NUM_FORMS': '1000',
'for_new_vararch-INITIAL_FORMS': 0,
'for_new_vararch-TOTAL_FORMS': 0,
}
self.preview_form_data = {
'preview_submit': True,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
}
def test_more_changes_at_the_same_time(self):
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
response = client.post('/override/manage/release-1.0/?package=bash', self.initial_form_data)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['forms']), 5)
self.preview_form_data.update({
'initial_data': response.context['compressed'],
'form-TOTAL_FORMS': 5,
'form-0-action': 'create',
'form-0-variant': 'Server',
'form-0-arch': 'x86_64',
'form-0-rpm_name': 'bash',
'form-0-rpm_arch': 'x86_64',
'form-0-include': 'False',
'form-1-action': 'create',
'form-1-variant': 'Server',
'form-1-arch': 'x86_64',
'form-1-rpm_name': 'bash-competion',
'form-1-rpm_arch': 'x86_64',
'form-1-include': 'False',
'form-2-action': 'delete',
'form-2-variant': 'Server',
'form-2-arch': 'x86_64',
'form-2-rpm_name': 'bash-debuginfo',
'form-2-rpm_arch': 'x86_64',
'form-2-include': 'False',
'form-3-action': 'delete',
'form-3-variant': 'Server',
'form-3-arch': 'x86_64',
'form-3-rpm_name': 'bash-doc',
'form-3-rpm_arch': 'x86_64',
'form-3-include': 'False',
'form-4-action': 'delete',
'form-4-variant': 'Server',
'form-4-arch': 'x86_64',
'form-4-rpm_name': 'bash-magic',
'form-4-rpm_arch': 'x86_64',
'form-4-include': 'False',
})
response = client.post('/override/manage/release-1.0/?package=bash', self.preview_form_data)
self.assertEqual(response.status_code, 302)
self.assertItemsEqual(
[o.export() for o in models.OverrideRPM.objects.all()],
[{"release_id": 'release-1.0', "variant": 'Server', "arch": 'x86_64',
"srpm_name": 'bash', "rpm_name": 'bash', "rpm_arch": 'x86_64',
"include": False, "comment": '', "do_not_delete": False},
{"release_id": 'release-1.0', "variant": 'Server', "arch": 'x86_64',
"srpm_name": 'bash', "rpm_name": 'bash-completion', "rpm_arch": 'x86_64',
"include": False, "comment": '', "do_not_delete": False}]
)
class UselessOverrideTestCase(TestCase):
fixtures = [
"pdc/apps/common/fixtures/test/sigkey.json",
"pdc/apps/package/fixtures/test/rpm.json",
"pdc/apps/release/fixtures/tests/release.json",
"pdc/apps/release/fixtures/tests/variant.json",
"pdc/apps/release/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/variant.json",
"pdc/apps/compose/fixtures/tests/variant_arch.json",
"pdc/apps/compose/fixtures/tests/compose_overriderpm.json",
"pdc/apps/compose/fixtures/tests/compose.json",
"pdc/apps/compose/fixtures/tests/compose_composerpm.json",
]
def setUp(self):
self.release = release_models.Release.objects.latest('id')
def test_delete_unused_include_override(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='x86_64',
include=True)
client = Client()
with mock.patch('sys.stdout', new_callable=StringIO) as out:
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [])
self.assertIn('NOTICE', out.getvalue())
self.assertIn(str(orpm), out.getvalue())
self.assertEqual(models.OverrideRPM.objects.count(), 1)
def test_delete_unused_exclude_override(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash-missing',
rpm_arch='x86_64',
include=False)
client = Client()
with mock.patch('sys.stdout', new_callable=StringIO) as out:
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [])
self.assertIn('NOTICE', out.getvalue())
self.assertIn(str(orpm), out.getvalue())
self.assertEqual(models.OverrideRPM.objects.count(), 1)
def test_delete_unused_exclude_override_on_new_variant_arch(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='rpm_arch',
include=False)
client = Client()
with mock.patch('sys.stdout', new_callable=StringIO) as out:
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [])
self.assertIn('NOTICE', out.getvalue())
self.assertIn(str(orpm), out.getvalue())
self.assertEqual(models.OverrideRPM.objects.count(), 1)
def test_do_not_delete_unused_include_override(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='x86_64',
include=True,
do_not_delete=True)
client = Client()
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [orpm])
self.assertEqual(models.OverrideRPM.objects.count(), 2)
def test_do_not_delete_unused_exclude_override(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash-missing',
rpm_arch='x86_64',
include=False,
do_not_delete=True)
client = Client()
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [orpm])
self.assertEqual(models.OverrideRPM.objects.count(), 2)
def test_do_not_delete_unused_exclude_override_on_new_variant_arch(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='rpm_arch',
include=False,
do_not_delete=True)
client = Client()
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [orpm])
self.assertEqual(models.OverrideRPM.objects.count(), 2)
def test_update_unused_override_when_creating_conflict(self):
orpm = models.OverrideRPM.objects.create(release=self.release,
variant='Server',
arch='x86_64',
srpm_name='bash',
rpm_name='bash',
rpm_arch='x86_64',
include=True,
do_not_delete=True)
client = Client()
create_user("user", perms=["pdc.overrides"])
client.login(username="user", password="user")
response = client.get('/override/manage/release-1.0/', {'package': 'bash'})
self.assertEqual(response.context['useless_overrides'], [orpm])
form_data = {
'checks-0-variant': 'Server',
'checks-0-arch': 'x86_64',
'checks-0-rpm_name': 'bash',
'checks-0-rpm_arch': 'x86_64',
'checks-MAX_NUM_FORMS': '1000',
'checks-INITIAL_FORMS': 1,
'checks-TOTAL_FORMS': 1,
'news-MAX_NUM_FORMS': '1000',
'news-INITIAL_FORMS': 1,
'news-TOTAL_FORMS': 0,
'vararch-MAX_NUM_FORMS': '1000',
'vararch-INITIAL_FORMS': 1,
'vararch-TOTAL_FORMS': 0,
'for_new_vararch-MAX_NUM_FORMS': '1000',
'for_new_vararch-INITIAL_FORMS': 0,
'for_new_vararch-TOTAL_FORMS': 0,
}
response = client.post('/override/manage/release-1.0/?package=bash', form_data)
self.assertContains(response, 'warning')
self.assertContains(response, 'Will modify override with do_not_delete set.')
preview_data = {
'preview_submit': True,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': 1000,
'form-TOTAL_FORMS': 1,
'initial_data': response.context['compressed'],
'form-0-action': 'create',
'form-0-variant': 'Server',
'form-0-arch': 'x86_64',
'form-0-rpm_name': 'bash',
'form-0-rpm_arch': 'x86_64',
'form-0-include': 'False',
}
response = client.post('/override/manage/release-1.0/?package=bash', preview_data)
self.assertEqual(response.status_code, 302)
orpm = models.OverrideRPM.objects.latest('id')
self.assertFalse(orpm.include)
| mit | 1,441,402,328,042,708,700 | 49.9375 | 125 | 0.578668 | false | 3.662517 | true | false | false |
Daniel15/PathPicker | src/test.py | 1 | 7076 | # Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
#
# @nolint
import unittest
import os
import format
import parse
fileTestCases = [{
'input': 'html/js/hotness.js',
'match': True,
'file': 'html/js/hotness.js',
'num': 0
}, {
'input': '/absolute/path/to/something.txt',
'match': True,
'file': '/absolute/path/to/something.txt',
'num': 0
}, {
'input': '/html/js/hotness.js42',
'match': True,
'file': '/html/js/hotness.js42',
'num': 0
}, {
'input': '/html/js/hotness.js',
'match': True,
'file': '/html/js/hotness.js',
'num': 0
}, {
'input': './asd.txt:83',
'match': True,
'file': './asd.txt',
'num': 83
}, {
'input': 'flib/asd/ent/berkeley/two.py-22',
'match': True,
'file': 'flib/asd/ent/berkeley/two.py',
'num': 22
}, {
'input': 'flib/foo/bar',
'match': True,
'file': 'flib/foo/bar',
'num': 0
}, {
'input': 'flib/foo/bar ', # note space
'match': True,
'file': 'flib/foo/bar',
'num': 0
}, {
'input': 'foo/b ',
'match': True,
'file': 'foo/b',
'num': 0
}, {
'input': 'foo/bar/baz/',
'match': False
}, {
'input': 'flib/ads/ads.thrift',
'match': True,
'file': 'flib/ads/ads.thrift',
'num': 0
}, {
'input': 'banana hanana Wilde/ads/story.m',
'match': True,
'file': 'Wilde/ads/story.m',
'num': 0
}, {
'input': 'flib/asd/asd.py two/three/four.py',
'match': True,
'file': 'flib/asd/asd.py',
'num': 0
}, {
'input': 'asd/asd/asd/ 23',
'match': False
}, {
'input': 'foo/bar/TARGETS:23',
'match': True,
'num': 23,
'file': 'foo/bar/TARGETS'
}, {
'input': 'foo/bar/TARGETS-24',
'match': True,
'num': 24,
'file': 'foo/bar/TARGETS'
}, {
'input':
'fbcode/search/places/scorer/PageScorer.cpp:27:46:#include "search/places/scorer/linear_scores/MinutiaeVerbScorer.h',
'match': True,
'num': 27,
'file': 'fbcode/search/places/scorer/PageScorer.cpp'
}, {
# Pretty intense case
'input':
'fbcode/search/places/scorer/TARGETS:590:28: srcs = ["linear_scores/MinutiaeVerbScorer.cpp"]',
'match': True,
'num': 590,
'file': 'fbcode/search/places/scorer/TARGETS'
}, {
'input':
'fbcode/search/places/scorer/TARGETS:1083:27: "linear_scores/test/MinutiaeVerbScorerTest.cpp"',
'match': True,
'num': 1083,
'file': 'fbcode/search/places/scorer/TARGETS'
}, {
'input': '~/foo/bar/something.py',
'match': True,
'num': 0,
'file': '~/foo/bar/something.py'
}, {
'input': '~/foo/bar/inHomeDir.py:22',
'match': True,
'num': 22,
'file': '~/foo/bar/inHomeDir.py'
}, {
'input': 'blarge assets/retina/[email protected]',
'match': True,
'num': 0,
'file': 'assets/retina/[email protected]'
}, {
'input': '~/assets/retina/[email protected]',
'match': True,
'num': 0,
'file': '~/assets/retina/[email protected]'
}, {
'input': 'So.many.periods.txt',
'match': True,
'num': 0,
'file': 'So.many.periods.txt'
}, {
'input': 'SO.MANY.PERIODS.TXT',
'match': True,
'num': 0,
'file': 'SO.MANY.PERIODS.TXT'
}, {
'input': 'blarg blah So.MANY.PERIODS.TXT:22',
'match': True,
'num': 0, # we ignore the number here
'file': 'So.MANY.PERIODS.TXT'
}, {
'input': 'SO.MANY&&PERIODSTXT',
'match': False
}]
prependDirTestCases = [
{
'in': 'home/absolute/path.py',
'out': '/home/absolute/path.py'
}, {
'in': '~/www/asd.py',
'out': '~/www/asd.py'
}, {
'in': 'www/asd.py',
'out': '~/www/asd.py'
}, {
'in': 'foo/bar/baz/asd.py',
'out': parse.PREPEND_PATH + 'foo/bar/baz/asd.py'
}, {
'in': 'a/foo/bar/baz/asd.py',
'out': parse.PREPEND_PATH + 'foo/bar/baz/asd.py'
}, {
'in': 'b/foo/bar/baz/asd.py',
'out': parse.PREPEND_PATH + 'foo/bar/baz/asd.py'
}, {
'in': '',
'out': ''
}]
class TestParseFunction(unittest.TestCase):
def testPrependDir(self):
for testCase in prependDirTestCases:
inFile = testCase['in']
result = parse.prependDir(inFile)
expected = testCase['out']
if inFile[0:2] == '~/':
expected = os.path.expanduser(expected)
self.assertEqual(expected, result)
print 'Tested %d dir cases.' % len(prependDirTestCases)
def testFileFuzz(self):
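        # Wrap each known-good input with noise commonly seen around file names
        # (VCS status prefixes, grep/compiler-style suffixes) and verify the
        # parser still produces the expected match.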
befores = ['M ', 'Modified: ', 'Changed: ', '+++ ',
'Banana asdasdoj pjo ']
afters = [' * Adapts AdsErrorCodestore to something',
':0:7: var AdsErrorCodeStore', ' jkk asdad']
for testCase in fileTestCases:
for before in befores:
for after in afters:
testInput = '%s%s%s' % (before, testCase['input'], after)
thisCase = testCase.copy()
thisCase['input'] = testInput
self.checkFileResult(thisCase)
print 'Tested %d cases for file fuzz.' % len(fileTestCases)
def testUnresolvable(self):
fileLine = ".../something/foo.py"
result = parse.matchLine(fileLine)
lineObj = format.LineMatch(fileLine, result, 0)
self.assertTrue(
not lineObj.isResolvable(),
'"%s" should not be resolvable' % fileLine
)
print 'Tested unresolvable case.'
def testResolvable(self):
toCheck = [case for case in fileTestCases if case['match']]
for testCase in toCheck:
result = parse.matchLine(testCase['input'])
lineObj = format.LineMatch(testCase['input'], result, 0)
self.assertTrue(
lineObj.isResolvable(),
'Line "%s" was not resolvable' % testCase['input']
)
print 'Tested %d resolvable cases.' % len(toCheck)
def testFileMatch(self):
for testCase in fileTestCases:
self.checkFileResult(testCase)
print 'Tested %d cases.' % len(fileTestCases)
def checkFileResult(self, testCase):
result = parse.matchLine(testCase['input'])
if not result:
self.assertFalse(testCase['match'],
'Line "%s" did not match any regex' %
testCase['input'])
return
file, num, match = result
self.assertTrue(testCase['match'], 'Line "%s" did match' %
testCase['input'])
self.assertEqual(testCase['file'], file, 'files not equal |%s| |%s|' %
(testCase['file'], file))
self.assertEqual(testCase['num'], num, 'num matches not equal %d %d for %s'
% (testCase['num'], num, testCase.get('input')))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 9,048,890,270,372,936,000 | 27.304 | 121 | 0.539994 | false | 3.091306 | true | false | false |
darencard/ContigAnnotator | ensembl_orthologs.py | 1 | 2122 | #!/usr/bin/env python
import optparse
usage_line = """
A script to extract orthologous Ensembl IDs from a genome-of-interest using a list \
of Ensembl IDs already in hand. Input is a list of Ensembl IDs and a "database" file \
downloaded from Ensembl that has the query Ensembl IDs in one column and the target/subject \
Ensembl IDs in another column. The user can also specify which column contains the \
query and the target Ensembl IDs and an output file name (tab-delimited text file). \
python ensembl_orthologs.py --query <query_list> --database <ensembl_database> \
-q <query_column> -s <subject_column> --output <output.txt>
"""
usage = usage_line
parser = optparse.OptionParser(usage=usage)
parser.add_option("--query", action= "store", type= "string", dest="query", help="""The query list of Ensembl IDs to find orthologs for""")
parser.add_option("--database", action="store", type= "string", dest="database", help="""A tab-delimited file with query IDs and subject IDs obtained from BioMart""")
parser.add_option("-q", action = "store", type = "string", dest = "q", help = """Column number where query IDs are located in "database" file (1, 2, ..., N)""")
parser.add_option("-s", action = "store", type = "string", dest = "s", help = """Column number where subject IDs are located in "database" file (1, 2, ..., N)""")
parser.add_option("--output", action = "store", type = "string", dest = "output" , help = """Output file to write results""", default = "output.txt")
options, args = parser.parse_args()
if __name__ == '__main__':
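    # First pass: map query Ensembl IDs to their ortholog IDs using the
    # user-specified columns of the BioMart "database" file.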
db_dict = {}
for line in open(options.database, "r"):
if not line.strip().startswith("#"):
record = line.rstrip().split("\t")
q = str(options.q)
s = str(options.s)
query = int(q)-1
subject = int(s)-1
if len(record) == 2:
db_dict[record[query]] = record[subject]
else:
db_dict[record[query]] = "NA"
out = open(options.output, "w")
for line in open(options.query, "r"):
if not line.strip().startswith("#"):
record = line.rstrip()
value = db_dict[record]
outline = record+"\t"+value+"\n"
out.write(outline)
out.close()
| gpl-2.0 | 6,088,053,830,567,926,000 | 43.229167 | 166 | 0.669651 | false | 3.254601 | false | false | false |
martindurant/starclassifier | ui/pysynphot/wavetable.py | 1 | 2684 | from __future__ import division
""" This module handles the wavecat.dat table presently used by the
synphot countrate task (and thus the ETC) to select an appropriate wavelength
set for a given obsmode. """
import re
import os
import numpy as N
import locations
class Wavetable(object):
""" Class to handle wavecat.dat initialization and access. (This class
may need a better name; wavetable and waveset are awfully close.)
Also, put the default waveset into this object with a key of NONE."""
def __init__(self, fname):
""" Instantiate a Wavetable from a file """
self.file=fname
self.lookup={}
self.setlookup={}
        fs = open(fname, mode='r')
lines = fs.readlines()
fs.close()
regx = re.compile(r'\S+', re.IGNORECASE)
for line in lines:
if not line.startswith("#"):
try:
[obm,coeff] = regx.findall(line)
self.lookup[obm] = coeff
self.setlookup[frozenset(obm.split(','))] = coeff
except ValueError:
raise ValueError("Error processing line: %s"%line)
def __getitem__(self, key):
"""Fairly smart lookup: if no exact match, find the most complete
match.
"""
ans=None
try:
#Try an exact match
ans = self.lookup[key]
except KeyError:
ans=None
#Try a setwise match.
#The correct key will be a subset of the input key.
setkey=set(key.split(','))
candidates=[]
for k in self.setlookup:
if k.issubset(setkey):
candidates.append(k)
#We may have 1, 0, or >1 candidates.
if len(candidates) == 1:
ans = self.setlookup[candidates[0]]
elif len(candidates) == 0:
raise KeyError("%s not found in %s; candidates:%s"%(setkey,self.file,str(candidates)))
elif len(candidates) > 1:
setlens=N.array([len(k) for k in candidates])
srtlen=setlens.argsort()
k,j=srtlen[-2:]
if setlens[k] == setlens[j]:
#It's really ambiguous
raise ValueError("Ambiguous key %s; candidates %s"%(setkey, candidates))
else:
#We have a winner
k=candidates[srtlen[-1]]
ans=self.setlookup[k]
return ans
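# Module-level singleton: the default wavecat table is parsed once at import
# time and shared by all users of this module.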
wavecat_file=locations.wavecat
wavetable=Wavetable(wavecat_file)
| mit | -1,998,349,719,848,117,800 | 31.974684 | 102 | 0.525335 | false | 4.260317 | false | false | false |
JJMC89/delsort | scripts/upload.py | 1 | 3001 | """
Usage:
python scripts/upload.py SITE TARGET USERNAME
SITE: enwiki or testwiki
TARGET: the page on SITE where the script will be uploaded
USERNAME: the account to make the edit under
"""
import datetime
import getpass
import os.path
import re
import sys
from clint.textui import colored
from clint.textui import prompt
import git
from wikitools import page
from wikitools import wiki
API_PAGES = {"enwiki": "https://en.wikipedia.org/w/api.php",
"testwiki": "https://test.wikipedia.org/w/api.php"}
HEADER = "/* Uploaded from the Git repo @ {} (branch {}) */\n"
SUMMARY = "Updating delsort: {} @ {}"
if len(sys.argv) < 4:
print(colored.yellow("Incorrect number of arguments supplied."))
print(__doc__)
sys.exit(1)
if "--help" in sys.argv:
print(__doc__)
sys.exit(0)
site_name = sys.argv[1]
if not site_name in API_PAGES:
print(colored.yellow("Unrecognized wiki '%s'. Must be 'enwiki' or" +
" 'testwiki'" % site_name))
sys.exit(1)
site = wiki.Wiki(API_PAGES[site_name])
root = sys.argv[2]
username = sys.argv[3]
if len(sys.argv) > 4:
password = sys.argv[4]
else:
password = getpass.getpass("Password for {} on {}: "
.format(username, site_name))
login_result = site.login(username, password)
if not login_result:
print(colored.yellow("Error logging in."))
sys.exit(1)
else:
print("Successfully logged in.")
target = page.Page(site, title=root)
if not os.path.isfile("delsort.js"):
print(colored.yellow("Couldn't find a file called 'delsort.js' in the project home."))
sys.exit(1)
repo = git.Repo(os.getcwd())
branch = repo.active_branch
sha1 = branch.commit.hexsha
header = HEADER.format(sha1, branch)
print("Made a header.")
if site_name == "enwiki" and root == "User:Enterprisey/delsort.js" and str(branch) == "master":
print("Updating script documentation page.")
docs = page.Page(site, title="User:Enterprisey/delsort")
docs_wikitext = docs.getWikiText()
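    # Locate the {{start date and age|YYYY|MM|DD}} parameters on the documentation page.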
date = re.search("start date and age\|\d+\|\d+\|\d+", docs_wikitext).group(0)
now = datetime.datetime.now()
revised_date = "start date and age|%d|%d|%d" % (now.year, now.month, now.day)
new_wikitext = docs_wikitext.replace(date, revised_date)
result = docs.edit(text=new_wikitext, summary="Updating delsort \"updated\" time")
if result["edit"]["result"] == "Success":
print(colored.green("Success!") + " Updated the \"updated\" time on the documentation.")
else:
print(colored.red("Error updating the \"updated\" time: ") + result)
with open("delsort.js", "r") as delsort:
new_text = header + delsort.read()
edit_summary = SUMMARY.format(branch, sha1[:7])
print("Uploading delsort...")
result = target.edit(text=new_text, summary=edit_summary)
if result["edit"]["result"] == "Success":
print(colored.green("Success!") + " Uploaded delsort to " + root)
else:
print(colored.red("Error uploading delsort: ") + result)
| mit | -6,782,603,428,927,825,000 | 31.978022 | 96 | 0.658447 | false | 3.202775 | false | false | false |
Yubico/yubioath-desktop-dpkg | yubioath/cli/controller.py | 1 | 3176 | # Copyright (c) 2014 Yubico AB
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Additional permission under GNU GPL version 3 section 7
#
# If you modify this program, or any covered work, by linking or
# combining it with the OpenSSL project's OpenSSL library (or a
# modified version of that library), containing parts covered by the
# terms of the OpenSSL or SSLeay licenses, We grant you additional
# permission to convey the resulting work. Corresponding Source for a
# non-source form of such a combination shall include the source code
# for the parts of OpenSSL used as well as that of the covered work.
from ..core.controller import Controller
from ..core.standard import YubiOathCcid
from ..core.exc import CardError
from getpass import getpass
import sys
class CliController(Controller):
def __init__(self, keystore, save=False):
self.keystore = keystore
self._save = save
def _prompt_touch(self):
sys.stderr.write('Touch your YubiKey...\n')
def unlock(self, device):
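        # Try a previously saved key first; if it fails or none is stored,
        # prompt for the password until the device unlocks.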
key = self.keystore.get(device.id)
if key:
try:
device.unlock(key)
except CardError:
sys.stderr.write('Incorrect password from file.\n')
self.keystore.delete(device.id)
while device.locked:
pw = getpass('Password: ')
key = device.calculate_key(pw)
try:
device.unlock(key)
if self._save:
self.keystore.put(device.id, key)
sys.stderr.write('Password saved to %s\n' %
self.keystore.fname)
except CardError:
sys.stderr.write('Incorrect password!\n')
def set_password(self, ccid_dev, password, remember=False):
dev = YubiOathCcid(ccid_dev)
key = super(CliController, self).set_password(dev, password)
if remember:
self.keystore.put(dev.id, key)
sys.stderr.write('Password saved to %s\n' % self.keystore.fname)
else:
self.keystore.delete(dev.id)
def add_cred(self, ccid_dev, *args, **kwargs):
dev = YubiOathCcid(ccid_dev)
super(CliController, self).add_cred(dev, *args, **kwargs)
def delete_cred(self, ccid_dev, name):
dev = YubiOathCcid(ccid_dev)
super(CliController, self).delete_cred(dev, name)
def reset_device(self, ccid_dev):
dev = YubiOathCcid(ccid_dev)
self.keystore.delete(dev.id)
super(CliController, self).reset_device(dev)
| gpl-3.0 | 2,109,234,010,021,809,700 | 36.809524 | 76 | 0.653652 | false | 3.873171 | false | false | false |
c3cashdesk/c6sh | src/postix/backoffice/checks.py | 1 | 2378 | from decimal import Decimal
from django.utils.translation import ugettext as _
from postix.core.models import (
ListConstraintProduct, Product, WarningConstraintProduct,
)
_check_registry = set()
def register_check(fn):
_check_registry.add(fn)
return fn
class CheckError(Exception):
pass
@register_check
def check_quotas():
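    """Report products that are visible but have no quota assigned."""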
prods = []
for p in Product.objects.filter(is_visible=True).prefetch_related('quota_set'):
quotas = bool(p.quota_set.all())
if not quotas:
prods.append(p)
if prods:
raise CheckError(
_(
'The following products are visible but have no quota: {products}'.format(
products=', '.join(str(r) for r in prods)
)
)
)
@register_check
def check_tax_rates():
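    """Warn when constraint or zero tax rates look inconsistent with the product tax rates."""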
product_rates = set(
Product.objects.exclude(price=0).values_list('tax_rate', flat=True).distinct()
)
constraint_rates = set(
ListConstraintProduct.objects.exclude(price=0)
.values_list('tax_rate', flat=True)
.distinct()
) | set(
WarningConstraintProduct.objects.exclude(price=0)
.values_list('tax_rate', flat=True)
.distinct()
)
if len(constraint_rates - product_rates):
raise CheckError(
_(
'You have list or warning constraints with tax rates of {constraint_rates} '
'but your products only use the tax rates {product_rates}. Are you sure this is '
'correct?'
).format(
constraint_rates=', '.join(str(r) + '%' for r in constraint_rates),
product_rates=', '.join(str(r) + '%' for r in product_rates),
)
)
if Decimal('0.00') in product_rates and len(product_rates) > 1:
raise CheckError(
_(
'You have some products that use a non-zero tax rate but the following products are set to 0%: '
'{products}'
).format(
products=', '.join(
str(p) for p in Product.objects.filter(tax_rate=0).exclude(price=0)
)
)
)
def all_errors():
errors = []
for check in _check_registry:
try:
check()
except CheckError as e:
errors.append(str(e))
return errors
| agpl-3.0 | 86,373,377,420,104,510 | 26.976471 | 112 | 0.55635 | false | 4.092943 | false | false | false |
tylertian/Openstack | openstack F/glance/glance/store/scrubber.py | 1 | 7036 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import calendar
import eventlet
import os
import time
from glance import context
from glance.common import utils
from glance.openstack.common import cfg
import glance.openstack.common.log as logging
from glance import registry
from glance import store
import glance.store.filesystem
import glance.store.http
import glance.store.s3
import glance.store.swift
LOG = logging.getLogger(__name__)
scrubber_opts = [
cfg.BoolOpt('cleanup_scrubber', default=False),
cfg.IntOpt('cleanup_scrubber_time', default=86400)
]
CONF = cfg.CONF
CONF.register_opts(scrubber_opts)
class Daemon(object):
def __init__(self, wakeup_time=300, threads=1000):
LOG.info(_("Starting Daemon: wakeup_time=%(wakeup_time)s "
"threads=%(threads)s") % locals())
self.wakeup_time = wakeup_time
self.event = eventlet.event.Event()
self.pool = eventlet.greenpool.GreenPool(threads)
def start(self, application):
self._run(application)
def wait(self):
try:
self.event.wait()
except KeyboardInterrupt:
msg = _("Daemon Shutdown on KeyboardInterrupt")
LOG.info(msg)
def _run(self, application):
LOG.debug(_("Running application"))
self.pool.spawn_n(application.run, self.pool, self.event)
eventlet.spawn_after(self.wakeup_time, self._run, application)
LOG.debug(_("Next run scheduled in %s seconds") % self.wakeup_time)
class Scrubber(object):
CLEANUP_FILE = ".cleanup"
def __init__(self):
self.datadir = CONF.scrubber_datadir
self.cleanup = CONF.cleanup_scrubber
self.cleanup_time = CONF.cleanup_scrubber_time
# configs for registry API store auth
self.admin_user = CONF.admin_user
self.admin_tenant = CONF.admin_tenant_name
host, port = CONF.registry_host, CONF.registry_port
LOG.info(_("Initializing scrubber with conf: %s") %
{'datadir': self.datadir, 'cleanup': self.cleanup,
'cleanup_time': self.cleanup_time,
'registry_host': host, 'registry_port': port})
registry.configure_registry_client()
registry.configure_registry_admin_creds()
ctx = context.RequestContext()
self.registry = registry.get_registry_client(ctx)
utils.safe_mkdirs(self.datadir)
store.create_stores()
def run(self, pool, event=None):
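        """Scan the queue directory and delete images whose pending-delete delay has expired."""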
now = time.time()
if not os.path.exists(self.datadir):
LOG.info(_("%s does not exist") % self.datadir)
return
delete_work = []
for root, dirs, files in os.walk(self.datadir):
for id in files:
if id == self.CLEANUP_FILE:
continue
file_name = os.path.join(root, id)
delete_time = os.stat(file_name).st_mtime
if delete_time > now:
continue
uri, delete_time = read_queue_file(file_name)
if delete_time > now:
continue
delete_work.append((id, uri, now))
LOG.info(_("Deleting %s images") % len(delete_work))
pool.starmap(self._delete, delete_work)
# NOTE(bourke): When not running as a daemon, a slight pause is needed
# to allow the starmap to begin it's work.
eventlet.sleep(0.1)
if self.cleanup:
self._cleanup(pool)
def _delete(self, id, uri, now):
file_path = os.path.join(self.datadir, str(id))
try:
LOG.debug(_("Deleting %(uri)s") % {'uri': uri})
# Here we create a request context with credentials to support
# delayed delete when using multi-tenant backend storage
ctx = context.RequestContext(auth_tok=self.registry.auth_tok,
user=self.admin_user,
tenant=self.admin_tenant)
store.delete_from_backend(ctx, uri)
except store.UnsupportedBackend:
msg = _("Failed to delete image from store (%(uri)s).")
LOG.error(msg % {'uri': uri})
write_queue_file(file_path, uri, now)
self.registry.update_image(id, {'status': 'deleted'})
utils.safe_remove(file_path)
def _cleanup(self, pool):
now = time.time()
cleanup_file = os.path.join(self.datadir, self.CLEANUP_FILE)
if not os.path.exists(cleanup_file):
write_queue_file(cleanup_file, 'cleanup', now)
return
_uri, last_run_time = read_queue_file(cleanup_file)
cleanup_time = last_run_time + self.cleanup_time
if cleanup_time > now:
return
LOG.info(_("Getting images deleted before %s") % self.cleanup_time)
write_queue_file(cleanup_file, 'cleanup', now)
filters = {'deleted': True, 'is_public': 'none',
'status': 'pending_delete'}
pending_deletes = self.registry.get_images_detailed(filters=filters)
delete_work = []
for pending_delete in pending_deletes:
deleted_at = pending_delete.get('deleted_at')
if not deleted_at:
continue
time_fmt = "%Y-%m-%dT%H:%M:%S"
# NOTE: Strip off microseconds which may occur after the last '.,'
# Example: 2012-07-07T19:14:34.974216
date_str = deleted_at.rsplit('.', 1)[0].rsplit(',', 1)[0]
delete_time = calendar.timegm(time.strptime(date_str,
time_fmt))
if delete_time + self.cleanup_time > now:
continue
delete_work.append((pending_delete['id'],
pending_delete['location'],
now))
LOG.info(_("Deleting %s images") % len(delete_work))
pool.starmap(self._delete, delete_work)
def read_queue_file(file_path):
with open(file_path) as f:
uri = f.readline().strip()
delete_time = int(f.readline().strip())
return uri, delete_time
def write_queue_file(file_path, uri, delete_time):
with open(file_path, 'w') as f:
f.write('\n'.join([uri, str(int(delete_time))]))
os.chmod(file_path, 0600)
os.utime(file_path, (delete_time, delete_time))
| apache-2.0 | -6,856,816,283,492,491,000 | 33.490196 | 78 | 0.592951 | false | 3.946158 | false | false | false |
tboyce1/home-assistant | homeassistant/components/light/tplink.py | 2 | 7236 | """
Support for TPLink lights.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light.tplink/
"""
import logging
import colorsys
import time
import voluptuous as vol
from homeassistant.const import (CONF_HOST, CONF_NAME)
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_KELVIN, ATTR_RGB_COLOR,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_RGB_COLOR, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from homeassistant.util.color import \
color_temperature_mired_to_kelvin as mired_to_kelvin
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired)
from typing import Tuple
REQUIREMENTS = ['pyHS100==0.3.0']
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_POWER_W = 'current_power_w'
ATTR_DAILY_ENERGY_KWH = 'daily_energy_kwh'
ATTR_MONTHLY_ENERGY_KWH = 'monthly_energy_kwh'
DEFAULT_NAME = 'TP-Link Light'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Initialise pyLB100 SmartBulb."""
from pyHS100 import SmartBulb
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
add_devices([TPLinkSmartBulb(SmartBulb(host), name)], True)
def brightness_to_percentage(byt):
"""Convert brightness from absolute 0..255 to percentage."""
return int((byt*100.0)/255.0)
def brightness_from_percentage(percent):
"""Convert percentage to absolute value 0..255."""
return (percent*255.0)/100.0
# Travis-CI runs too old astroid https://github.com/PyCQA/pylint/issues/1212
# pylint: disable=invalid-sequence-index
def rgb_to_hsv(rgb: Tuple[float, float, float]) -> Tuple[int, int, int]:
"""Convert RGB tuple (values 0-255) to HSV (degrees, %, %)."""
hue, sat, value = colorsys.rgb_to_hsv(rgb[0]/255, rgb[1]/255, rgb[2]/255)
return int(hue * 360), int(sat * 100), int(value * 100)
# Travis-CI runs too old astroid https://github.com/PyCQA/pylint/issues/1212
# pylint: disable=invalid-sequence-index
def hsv_to_rgb(hsv: Tuple[float, float, float]) -> Tuple[int, int, int]:
"""Convert HSV tuple (degrees, %, %) to RGB (values 0-255)."""
red, green, blue = colorsys.hsv_to_rgb(hsv[0]/360, hsv[1]/100, hsv[2]/100)
return int(red * 255), int(green * 255), int(blue * 255)
class TPLinkSmartBulb(Light):
"""Representation of a TPLink Smart Bulb."""
def __init__(self, smartbulb: 'SmartBulb', name) -> None:
"""Initialize the bulb."""
self.smartbulb = smartbulb
self._name = name
self._state = None
self._available = True
self._color_temp = None
self._brightness = None
self._rgb = None
self._supported_features = 0
self._emeter_params = {}
@property
def name(self):
"""Return the name of the Smart Bulb, if any."""
return self._name
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._emeter_params
def turn_on(self, **kwargs):
"""Turn the light on."""
self.smartbulb.state = self.smartbulb.BULB_STATE_ON
if ATTR_COLOR_TEMP in kwargs:
self.smartbulb.color_temp = \
mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
if ATTR_KELVIN in kwargs:
self.smartbulb.color_temp = kwargs[ATTR_KELVIN]
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
self.smartbulb.brightness = brightness_to_percentage(brightness)
if ATTR_RGB_COLOR in kwargs:
rgb = kwargs.get(ATTR_RGB_COLOR)
self.smartbulb.hsv = rgb_to_hsv(rgb)
def turn_off(self):
"""Turn the light off."""
self.smartbulb.state = self.smartbulb.BULB_STATE_OFF
@property
def color_temp(self):
"""Return the color temperature of this light in mireds for HA."""
return self._color_temp
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def rgb_color(self):
"""Return the color in RGB."""
return self._rgb
@property
def is_on(self):
"""Return True if device is on."""
return self._state
def update(self):
"""Update the TP-Link Bulb's state."""
from pyHS100 import SmartDeviceException
try:
self._available = True
if self._supported_features == 0:
self.get_features()
self._state = (
self.smartbulb.state == self.smartbulb.BULB_STATE_ON)
# Pull the name from the device if a name was not specified
if self._name == DEFAULT_NAME:
self._name = self.smartbulb.alias
if self._supported_features & SUPPORT_BRIGHTNESS:
self._brightness = brightness_from_percentage(
self.smartbulb.brightness)
if self._supported_features & SUPPORT_COLOR_TEMP:
if (self.smartbulb.color_temp is not None and
self.smartbulb.color_temp != 0):
self._color_temp = kelvin_to_mired(
self.smartbulb.color_temp)
if self._supported_features & SUPPORT_RGB_COLOR:
self._rgb = hsv_to_rgb(self.smartbulb.hsv)
if self.smartbulb.has_emeter:
self._emeter_params[ATTR_CURRENT_POWER_W] = '{:.1f}'.format(
self.smartbulb.current_consumption())
daily_statistics = self.smartbulb.get_emeter_daily()
monthly_statistics = self.smartbulb.get_emeter_monthly()
try:
self._emeter_params[ATTR_DAILY_ENERGY_KWH] \
= "{:.3f}".format(
daily_statistics[int(time.strftime("%d"))])
self._emeter_params[ATTR_MONTHLY_ENERGY_KWH] \
= "{:.3f}".format(
monthly_statistics[int(time.strftime("%m"))])
except KeyError:
# device returned no daily/monthly history
pass
except (SmartDeviceException, OSError) as ex:
_LOGGER.warning("Could not read state for %s: %s", self._name, ex)
self._available = False
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
def get_features(self):
"""Determine all supported features in one go."""
if self.smartbulb.is_dimmable:
self._supported_features += SUPPORT_BRIGHTNESS
if self.smartbulb.is_variable_color_temp:
self._supported_features += SUPPORT_COLOR_TEMP
if self.smartbulb.is_color:
self._supported_features += SUPPORT_RGB_COLOR
| apache-2.0 | 1,386,101,221,474,034,700 | 34.126214 | 79 | 0.614704 | false | 3.689954 | false | false | false |
isandlaTech/cohorte-3rdparty | herald/src/main/python/herald/transports/xmpp/directory.py | 1 | 3659 | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Herald XMPP transport directory
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.1
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 2)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Herald XMPP
from . import SERVICE_XMPP_DIRECTORY, ACCESS_ID
from .beans import XMPPAccess
# Herald
import herald
# Standard library
import logging
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Property, Validate, Invalidate, Instantiate
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory('herald-xmpp-directory-factory')
@Requires('_directory', herald.SERVICE_DIRECTORY)
@Property('_access_id', herald.PROP_ACCESS_ID, ACCESS_ID)
@Provides((herald.SERVICE_TRANSPORT_DIRECTORY, SERVICE_XMPP_DIRECTORY))
@Instantiate('herald-xmpp-directory')
class XMPPDirectory(object):
"""
XMPP Directory for Herald
"""
def __init__(self):
"""
Sets up the transport directory
"""
# Herald Core Directory
self._directory = None
self._access_id = ACCESS_ID
# JID -> Peer UID
self._jid_uid = {}
# Group name -> XMPP room JID
self._groups = {}
@Validate
def _validate(self, context):
"""
Component validated
"""
self._jid_uid.clear()
self._groups.clear()
@Invalidate
def _invalidate(self, context):
"""
Component invalidated
"""
self._jid_uid.clear()
self._groups.clear()
def load_access(self, data):
"""
Loads a dumped access
:param data: Result of a call to XmppAccess.dump()
:return: An XMPPAccess bean
"""
return XMPPAccess(data)
def peer_access_set(self, peer, data):
"""
The access to the given peer matching our access ID has been set
:param peer: The Peer bean
:param data: The peer access data, previously loaded with load_access()
"""
if peer.uid != self._directory.local_uid:
self._jid_uid[data.jid] = peer
def peer_access_unset(self, peer, data):
"""
The access to the given peer matching our access ID has been removed
:param peer: The Peer bean
:param data: The peer access data
"""
try:
del self._jid_uid[data.jid]
except KeyError:
pass
def from_jid(self, jid):
"""
Returns the peer UID associated to the given JID
:param jid: A peer (full) JID
:return: A peer UID
:raise KeyError: Unknown JID
"""
return self._jid_uid[jid]
| apache-2.0 | -196,636,214,492,240,220 | 26.103704 | 80 | 0.586499 | false | 4.13914 | false | false | false |
jantman/tuxtruck | networkmanager/TuxTruck_NetworkManager.py | 1 | 2942 | # TuxTruck_NetworkManager.py
#
# Time-stamp: "2009-08-26 08:59:30 jantman"
#
# +----------------------------------------------------------------------+
# | TuxTruck Project http://tuxtruck.jasonantman.com |
# +----------------------------------------------------------------------+
# | Copyright (c) 2009 Jason Antman. |
# | |
# | This program is free software; you can redistribute it and/or modify |
# | it under the terms of the GNU General Public License as published by |
# | the Free Software Foundation; either version 3 of the License, or |
# | (at your option) any later version. |
# | |
# | This program is distributed in the hope that it will be useful, |
# | but WITHOUT ANY WARRANTY; without even the implied warranty of |
# | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
# | GNU General Public License for more details. |
# | |
# | You should have received a copy of the GNU General Public License |
# | along with this program; if not, write to: |
# | |
# | Free Software Foundation, Inc. |
# | 59 Temple Place - Suite 330 |
# | Boston, MA 02111-1307, USA. |
# +----------------------------------------------------------------------+
# |Please use the above URL for bug reports and feature/support requests.|
# +----------------------------------------------------------------------+
# | Authors: Jason Antman <[email protected]> |
# +----------------------------------------------------------------------+
# | $LastChangedRevision:: $ |
# | $HeadURL:: $ |
# +----------------------------------------------------------------------+
import dbus
import time
class TuxTruck_NetworkManager():
"""
    Class to perform DBUS-based control of NetworkManager.
"""
PARENT = None
BUS = None
NM = None
def __init__(self, parent):
"""
Get the DBUS object and initialize things.
"""
self.PARENT = parent
self.BUS = dbus.SystemBus()
self.NM = self.BUS.get_object('org.freedesktop.NetworkManager', '/org/freedesktop/NetworkManager')
def run(self):
"""
DO the shit.
"""
print "run()"
print "STATE:"
print self.NM.state()
print "ACTIVE CONNECTIONS:"
print self.NM.GetActiveConnections()
| gpl-3.0 | 4,532,818,929,343,650,300 | 45.698413 | 106 | 0.395309 | false | 5.161404 | false | false | false |
ls-cwi/heinz | script/makeHeinzRpcstMcDimacs.py | 1 | 1116 | #!/usr/bin/python
import sys
if len(sys.argv) != 5:
sys.stderr.write("Usage: " + sys.argv[0] + " <instances> <full_output_dir> <timelimit_pbs> <timelimit_heinz>\n")
sys.exit(1)
lines = open(sys.argv[1]).readlines()
n = len(lines)
bins = n / 15
if n % 15 != 0:
bins += 1
full_output_dir = sys.argv[2]
timelimit_pbs = sys.argv[3]
timelimit_heinz = sys.argv[4]
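# Instances are grouped 15 per PBS job script; each job launches its heinz runs
# in the background and waits for all of them to finish.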
for i in range(bins):
with open(str(i) + ".pbs", "w") as f:
f.write("#PBS -N " + str(i)+"\n")
f.write("#PBS -o " + str(i) + ".out\n")
f.write("#PBS -e " + str(i) + ".err\n")
f.write("#PBS -lwalltime=" + timelimit_pbs + "\n")
f.write("#PBS -lnodes=1:cpu3\n")
f.write("cd ~/DIMACS2014/\n")
nn = 15
if i == bins - 1:
nn = n % 15
for j in range(nn):
idx = 15 * i + j
s = lines[idx].rstrip("\n").split(" ")
filename1 = full_output_dir + "/" + "4-" + s[0]
f.write("( /usr/bin/time -o " + filename1 + ".time bin/heinz_rpcst_mc " + s[2] + " " + timelimit_heinz + " 2 " + filename1 + ".dimacs" + " > " + filename1 + ".out 2> " + filename1 + ".err ) &\n")
f.write("wait\n")
| mit | -4,746,288,819,771,932,000 | 30 | 201 | 0.525986 | false | 2.442013 | false | false | false |
0ED/Toy | auto_sender/gmail.py | 1 | 1234 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import smtplib
from email import Encoders
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email.MIMEMultipart import MIMEMultipart
from email.Header import Header
from email.Utils import formatdate
class Gmail(object):
def create_message(self, from_address, to_address, a_title, file_info, a_body):
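        # Build a multipart message: an ISO-2022-JP text body plus a single
        # base64-encoded file attachment described by file_info.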
a_message = MIMEMultipart()
a_body = MIMEText(a_body, _charset='iso-2022-jp')
a_message['Subject'] = a_title
a_message['From'] = from_address
a_message['To'] = to_address
a_message['Date'] = formatdate()
a_message.attach(a_body)
attachment = MIMEBase(file_info['type'], file_info['subtype'])
with open(file_info['path']) as a_file:
attachment.set_payload(a_file.read())
Encoders.encode_base64(attachment)
a_message.attach(attachment)
attachment.add_header("Content-Disposition", "attachment", filename=file_info['name'])
return a_message
def send(self, from_address, to_address, a_message):
a_smtp = smtplib.SMTP('smtp.gmail.com', 587)
a_smtp.ehlo()
a_smtp.starttls()
a_smtp.ehlo(0)
a_smtp.login(from_address,'4?SiFLV=tY')
a_smtp.sendmail(from_address, to_address, a_message.as_string())
a_smtp.close()
| mit | 1,113,345,427,749,023,500 | 31.473684 | 88 | 0.718801 | false | 2.88993 | false | false | false |
kawu/tagger | utils/tei2linc/tei2linc.py | 1 | 7737 | import sys
import os
import shutil
import argparse
import tarfile
import re
from collections import defaultdict
class Lexem:
def __init__(self, ctag=None, base=None, msds=None):
self.ctag = ctag
self.base = base
self.msds = msds if msds is not None else set()
def add(self, msd):
self.msds.add(msd)
class Disamb:
def __init__(self, ctag, base, msd):
self.ctag = ctag
self.base = base if base.strip() else None
self.msd = msd
def label(self):
return (self.ctag + ":" + self.msd).strip(":")
class Segment:
def __init__(self, orth=None, nps=False, disamb=None, lexs=None):
self.orth = orth
self.nps = nps
self.disamb = disamb
self.lexs = lexs if lexs is not None else []
def append(self, lex):
self.lexs.append(lex)
def label(self):
return self.disamb.label()
def labels(self):
return set([
(lex.ctag + ":" + msd).strip(":")
for lex in self.lexs
for msd in lex.msds
])
SYMBOL = re.compile('<symbol .*? ?value="(.*?)"')
STRING = re.compile('<string>(.*?)</string>')
BINARY = re.compile('<binary .*? ?value="(.*?)"')
CTAGS = set(["adja", "adjp", "adjc", "conj", "comp", "interp", "pred",
"xxx", "adv", "imps", "inf", "pant", "pcon", "qub", "prep",
"siebie", "subst", "depr", "ger", "ppron12", "ppron3", "num",
"numcol", "adj", "pact", "ppas", "winien", "praet", "bedzie",
"fin", "impt", "aglt", "ign", "brev", "burk", "interj"])
def parse_disamb(disamb):
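    # Scan the colon-separated disamb string from the end until a known ctag is
    # found; parts before it form the base, parts after it form the msd.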
k = 0
disamb = list(reversed(disamb.split(":")))
for x in disamb:
if x.strip() in CTAGS:
break
k += 1
ctag = disamb[k].strip()
base = ":".join(reversed(disamb[k+1:]))
msd = ":".join(reversed([x.strip() for x in disamb[:k]]))
# return ":".join(reversed(result)).strip(": ")
return Disamb(ctag, base, msd)
def value(line, regex):
match = regex.search(line)
return match.group(1) if match else None
def check_dir(path, overwrite):
"""Is string representing a valid, non-existing directory path ?"""
if not os.path.isdir(os.path.dirname(path)):
msg = "%s is not a valid path" % path
raise argparse.ArgumentTypeError(msg)
elif os.path.exists(path) and overwrite == False:
msg = "%s already exists" % path
raise argparse.ArgumentTypeError(msg)
return path
def make_args_parser():
parser = argparse.ArgumentParser(
description="Convert TEI corpus to LINC corpus.")
parser.add_argument("tei",
help="TEI corpus compressed as a tar archive file")
parser.add_argument("-d", "--out-dir",
help="Save results in separate files in the output directory.")
parser.add_argument("-w", "--overwrite",
default=False, action="store_true",
help="Overwrite files in output directory when using -d option.")
parser.add_argument("-b", "--preserve-bases",
default=False, action="store_true",
help="Preserve base forms of individual words.")
return parser
def parse_args(parser):
args = parser.parse_args()
if args.out_dir is not None:
check_dir(args.out_dir, args.overwrite)
return args
def morph_files_in_tar(tar):
for member in tar:
if "ann_morphosyntax.xml" in member.name:
yield member
def _parse_morph(f):
felem = re.compile('<f name="(.*?)"')
seg = []
sent = []
for line in f:
line = line.strip()
if "</s>" in line:
yield sent
sent = []
if "</seg>" in line:
sent.append(seg)
seg = []
match = felem.search(line)
if match != None:
inside = match.group(1)
if line.startswith("<string"):
seg.append((inside, value(line, STRING)))
elif line.startswith("<symbol"):
seg.append((inside, value(line, SYMBOL)))
elif line.startswith("<binary"):
seg.append((inside, value(line, BINARY)))
def parse_morph(f):
for sent_info in _parse_morph(f):
sent = []
for k, seg_info in enumerate(sent_info):
for (tp, val) in seg_info:
if tp == "orth":
seg = Segment(orth=val)
elif tp == "nps" and val == "true":
seg.nps = True
elif tp == "base":
lex = Lexem(base=val)
seg.append(lex)
elif tp == "ctag":
lex.ctag = val
elif tp == "msd":
lex.add(val.strip(": "))
# interp = (ctag + ":" + val).strip(": ")
# interps.append(interp)
elif tp == "interpretation":
seg.disamb = parse_disamb(val)
# interps = list(set(interps))
# sent.append((orth, disamb, interps))
assert seg.label() in seg.labels()
# print [msd for lex in seg.lexs for msd in lex.msds]
sent.append(seg)
yield sent
def esc(s):
if s is None:
s = ""
return '"' + s.replace('"', '""') + '"'
def print_full_sent(output, sent, src):
print >> output, "Sent"
print >> output, " Source"
print >> output, " Path", esc(src[0])
print >> output, " Id", esc(src[1])
for seg in sent:
if seg.nps:
print >> output, " Word", "Nps"
else:
print >> output, " Word"
print >> output, " Orth", esc(seg.orth)
for lex in seg.lexs:
print >> output, " Lex", esc(lex.base)
for msd in sorted(lex.msds):
interp = (lex.ctag + ":" + msd).strip(":")
if (
msd == seg.disamb.msd and
lex.ctag == seg.disamb.ctag and
(seg.disamb.base is None or lex.base == seg.disamb.base)):
print >> output, " * " + interp
else:
print >> output, " - " + interp
# print >> output, " WordEnd"
# print >> output, "SentEnd"
def print_sent(output, sent, src):
print >> output, "Sent"
print >> output, " Source"
print >> output, " Path", esc(src[0])
print >> output, " Id", src[1]
for seg in sent:
interps = seg.labels()
disamb = seg.label()
if seg.nps:
print >> output, " Word", "Nps"
else:
print >> output, " Word"
print >> output, " Orth", seg.orth
print >> output, " Interps"
for interp in sorted(interps):
if interp == disamb:
print >> output, " * " + interp
else:
print >> output, " - " + interp
print >> output, " InterpsEnd"
# print >> output, " WordEnd"
# print >> output, "SentEnd"
def output_for(path, k):
if path is None:
return sys.stdout
else:
return open(os.path.join(path, str(k) + ".linc"), "w")
if __name__ == "__main__":
parser = make_args_parser()
args = parse_args(parser)
if args.out_dir is not None:
if os.path.exists(args.out_dir):
shutil.rmtree(args.out_dir)
os.mkdir(args.out_dir)
tar = tarfile.open(args.tei)
n = 0
for morph in morph_files_in_tar(tar):
for i, sent in enumerate(parse_morph(tar.extractfile(morph))):
src = (morph.name, str(i))
out = output_for(args.out_dir, n)
if not args.preserve_bases:
print_sent(out, sent, src)
else:
print_full_sent(out, sent, src)
n += 1
tar.close()
| bsd-3-clause | 2,646,679,915,105,169,000 | 30.579592 | 78 | 0.516996 | false | 3.553973 | false | false | false |
erikulven/flapmagick | camera.py | 1 | 1925 | """ Uses camera and takes images for documentation of motion """
import time
from PIL import Image
import urllib
import StringIO
import settings
user = settings.cam_user
pwd = settings.cam_pwd
cam_url = settings.cam_url
def fetch_snapshot_image():
im = StringIO.StringIO(urllib.urlopen(settings.cam_url).read())
return im
def dummy():
img = Image.open(im)
r = requests.get(settings.cam_url, auth=(user, pwd), stream=True)
if r.status_code == 200:
imageData = StringIO.StringIO()
imageData.write(r.raw)
imageData.seek(0)
return imageData
return None
def compare(buffer1, buffer2, threshold=0):
"""
    Diffs two images pixel by pixel and returns the number of pixels whose
    green channel differs by more than the given threshold.
    Core loop adapted from a script by brainflakes posted on the Raspberry Pi
    forum: http://www.raspberrypi.org/phpBB3/viewtopic.php?t=45235
"""
# Count changed pixels
changedPixels = 0
print "In compare buf1: %s buf2: %s" % (buffer1, buffer2)
for x in xrange(0, 100):
# Scan one line of image then check sensitivity for movement
for y in xrange(0, 75):
# Just check green channel as it's the highest quality channel
pixdiff = abs(buffer1[x, y][1] - buffer2[x, y][1])
            if pixdiff > threshold:
                changedPixels += 1
    return changedPixels
if __name__ == "__main__":
print "Starting camera surv"
counter = 0
prev_img = None
while counter < 50:
img = fetch_snapshot_image()
print "found img: %s" % img
if img is not None and prev_img is not None:
print "Doing comparison"
im = Image.open(img)
buf = im.load()
prev_im = Image.open(prev_img)
prev_buf = prev_im.load()
print "Diff in images is: %s" % compare(prev_buf, buf)
im.close()
prev_im.close()
prev_img = img
time.sleep(1)
| bsd-2-clause | -8,973,370,309,740,577,000 | 29.078125 | 74 | 0.604156 | false | 3.701923 | false | false | false |
patrickm/chromium.src | tools/perf/measurements/thread_times.py | 1 | 2083 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import timeline_controller
from metrics import timeline
from telemetry.page import page_measurement
class ThreadTimes(page_measurement.PageMeasurement):
def __init__(self):
super(ThreadTimes, self).__init__('RunSmoothness')
self._timeline_controller = None
@classmethod
def AddCommandLineArgs(cls, parser):
parser.add_option('--report-silk-results', action='store_true',
help='Report results relevant to silk.')
parser.add_option('--report-silk-details', action='store_true',
help='Report details relevant to silk.')
@property
def results_are_the_same_on_every_page(self):
return False
def WillRunActions(self, page, tab):
self._timeline_controller = timeline_controller.TimelineController()
if self.options.report_silk_details:
# We need the other traces in order to have any details to report.
      self._timeline_controller.trace_categories = \
timeline_controller.DEFAULT_TRACE_CATEGORIES
else:
self._timeline_controller.trace_categories = \
timeline_controller.MINIMAL_TRACE_CATEGORIES
self._timeline_controller.Start(page, tab)
def DidRunAction(self, page, tab, action):
self._timeline_controller.AddActionToIncludeInMetric(action)
def DidRunActions(self, page, tab):
self._timeline_controller.Stop(tab)
def MeasurePage(self, page, tab, results):
metric = timeline.ThreadTimesTimelineMetric(
self._timeline_controller.model,
self._timeline_controller.renderer_process,
self._timeline_controller.action_ranges)
if self.options.report_silk_results:
metric.results_to_report = timeline.ReportSilkResults
if self.options.report_silk_details:
metric.details_to_report = timeline.ReportSilkDetails
metric.AddResults(tab, results)
def CleanUpAfterPage(self, _, tab):
self._timeline_controller.CleanUp(tab)
| bsd-3-clause | -1,464,746,998,573,629,700 | 38.301887 | 72 | 0.722516 | false | 4.013487 | false | false | false |
MTG/dunya | jingju/models.py | 1 | 5375 | from django.db import models
import data.models
from jingju import managers
class JingjuStyle(object):
def get_style(self):
return "jingju"
def get_object_map(self, key):
return {
"performance": RecordingInstrumentalist,
"release": Release,
"artist": Artist,
"recording": Recording,
"work": Work,
"instrument": Instrument
}[key]
class Recording(JingjuStyle, data.models.BaseModel):
class Meta:
ordering = ['id']
title = models.CharField(max_length=200, blank=True, null=True)
mbid = models.UUIDField(blank=True, null=True)
work = models.ForeignKey('Work', null=True, on_delete=models.CASCADE)
performers = models.ManyToManyField('Artist')
instrumentalists = models.ManyToManyField('Artist', through='RecordingInstrumentalist',
related_name='instrumentalist')
shengqiangbanshi = models.ManyToManyField('ShengqiangBanshi')
objects = managers.CollectionRecordingManager()
def __str__(self):
return u"%s" % self.title
class RecordingInstrumentalist(JingjuStyle, models.Model):
recording = models.ForeignKey('Recording', on_delete=models.CASCADE)
artist = models.ForeignKey('Artist', on_delete=models.CASCADE)
instrument = models.ForeignKey('Instrument', on_delete=models.CASCADE)
class Artist(data.models.Artist):
romanisation = models.CharField(max_length=200, blank=True, null=True)
role_type = models.ForeignKey('RoleType', blank=True, null=True, on_delete=models.CASCADE)
instrument = models.ForeignKey('Instrument', blank=True, null=True, related_name='jingju', on_delete=models.CASCADE)
objects = managers.ArtistManager()
class Meta:
ordering = ['id']
class Composer(data.models.Composer):
alias = models.CharField(max_length=200, blank=True, null=True)
objects = managers.ArtistManager()
class Meta:
ordering = ['id']
class Instrument(data.models.Instrument):
class Meta:
ordering = ['id']
class RecordingRelease(models.Model):
recording = models.ForeignKey('Recording', on_delete=models.CASCADE)
release = models.ForeignKey('Release', on_delete=models.CASCADE)
sequence = models.IntegerField(blank=True, null=True)
# The number that the track comes in the concert. Numerical 1-n
track = models.IntegerField(blank=True, null=True)
# The disc number. 1-n
disc = models.IntegerField(blank=True, null=True)
# The track number within this disc. 1-n
disctrack = models.IntegerField(blank=True, null=True)
class Meta:
ordering = ("track",)
def __str__(self):
return u"%s: %s from %s" % (self.track, self.recording, self.release)
class Work(JingjuStyle, data.models.Work):
class Meta:
ordering = ['id']
title = models.CharField(max_length=200, blank=True, null=True)
mbid = models.UUIDField(blank=True, null=True)
score = models.ForeignKey('Score', blank=True, null=True, on_delete=models.CASCADE)
play = models.ForeignKey('Play', blank=True, null=True, on_delete=models.CASCADE)
def __str__(self):
return u"%s" % self.title
class Release(JingjuStyle, data.models.Release):
class Meta:
ordering = ['id']
recordings = models.ManyToManyField('Recording', through='RecordingRelease')
collection = models.ForeignKey('data.Collection', blank=True, null=True, on_delete=models.CASCADE)
objects = managers.CollectionReleaseManager()
class RoleType(data.models.BaseModel):
class Meta:
ordering = ['code']
# The code used in tags in musicbrainz to identify this roletype (hd00)
# the first digit specifies a "parent" roletype, and the second digit a subtype.
code = models.CharField(max_length=10, db_index=True)
name = models.CharField(max_length=100)
romanisation = models.CharField(max_length=100)
uuid = models.UUIDField()
# The "main" roletype for a more specific one. An artist who performs in a specific roletype
# by definition performs in the parent roletype
parent = models.ForeignKey('RoleType', blank=True, null=True, on_delete=models.CASCADE)
def __str__(self):
return u"{}: {}/{}".format(self.code, self.name, self.romanisation)
class Play(data.models.BaseModel):
title = models.CharField(max_length=100)
uuid = models.UUIDField(blank=True, null=True)
class Score(data.models.BaseModel):
# the name of the work
name = models.CharField(max_length=100)
# The name of the series
source = models.CharField(max_length=100)
# read this from the annotation of the series (we need to make it machine readable)
citation = models.CharField(max_length=100, blank=True, null=True)
citation_romanisation = models.CharField(max_length=100, blank=True, null=True)
# This shouldn't be a uuidfield (
uuid = models.UUIDField(blank=True, null=True)
class ShengqiangBanshi(data.models.BaseModel):
# The code used in tags in musicbrainz to identify this shengqiangbanshi (sqbs000)
code = models.CharField(max_length=10, db_index=True, unique=True)
name = models.CharField(max_length=100)
romanisation = models.CharField(max_length=100)
def __str__(self):
return u"{}: {}/{}".format(self.code, self.name, self.romanisation)
| agpl-3.0 | -2,666,498,335,469,265,000 | 34.361842 | 120 | 0.685209 | false | 3.73783 | false | false | false |
edsu/lochief | kochief/discovery/management/commands/index.py | 1 | 4772 | #! /usr/bin/python
# -*- coding: utf8 -*-
# Copyright 2009 Gabriel Sean Farrell
# Copyright 2008 Mark A. Matienzo
#
# This file is part of Kochief.
#
# Kochief is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Kochief is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kochief. If not, see <http://www.gnu.org/licenses/>.
"""Indexes documents in a Solr instance."""
import os
import optparse
import sys
import time
import urllib
from optparse import make_option
try:
from xml.etree import ElementTree as et # builtin in Python 2.5
except ImportError:
import elementtree.ElementTree as et
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
CSV_FILE = 'tmp.csv'
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-n', '--new',
action='store_true',
dest='new',
help='Create a new index. If the index already exists, it will be replaced.'),
make_option('-p', '--parser',
dest='parser',
metavar='PARSER',
help='Use PARSER (in kochief/parsers/) to parse files or urls for indexing'),
)
help = 'Indexes documents in a Solr instance.'
args = '[file_or_url ...]'
def handle(self, *file_or_urls, **options):
new = options.get('new')
if new:
# create/replace index
pass
if file_or_urls:
parser = options.get('parser')
module = None
if parser:
if parser.endswith('.py'):
parser = parser[:-3]
module = __import__('kochief.parsers.' + parser, globals(),
locals(), [parser])
for file_or_url in file_or_urls:
if not module:
# guess parser based on file extension
if file_or_url.endswith('.mrc'):
from kochief.parsers import marc as module
if not module:
raise CommandError("Please specify a parser.")
print "Converting %s to CSV ..." % file_or_url
t1 = time.time()
data_handle = urllib.urlopen(file_or_url)
try:
csv_handle = open(CSV_FILE, 'w')
record_count = module.write_csv(data_handle, csv_handle)
finally:
csv_handle.close()
t2 = time.time()
self._load_solr(CSV_FILE)
t3 = time.time()
os.remove(CSV_FILE)
p_time = (t2 - t1) / 60
l_time = (t3 - t2) / 60
t_time = p_time + l_time
rate = record_count / (t3 - t1)
print """Processing took %0.3f minutes.
Loading took %0.3f minutes.
That's %0.3f minutes total for %d records,
at a rate of %0.3f records per second.
""" % (p_time, l_time, t_time, record_count, rate)
def _get_multi(self):
"""Inspect solr schema.xml for multivalue fields."""
multivalue_fieldnames = []
schema = et.parse(settings.SOLR_DIR + 'conf/schema.xml')
fields_element = schema.find('fields')
field_elements = fields_element.findall('field')
for field in field_elements:
if field.get('multiValued') == 'true':
multivalue_fieldnames.append(field.get('name'))
return multivalue_fieldnames
def _load_solr(self, csv_file):
"""
Load CSV file into Solr. solr_params are a dictionary of parameters
sent to solr on the index request.
"""
file_path = os.path.abspath(csv_file)
solr_params = {}
for fieldname in self._get_multi():
tag_split = "f.%s.split" % fieldname
solr_params[tag_split] = 'true'
tag_separator = "f.%s.separator" % fieldname
solr_params[tag_separator] = '|'
solr_params['stream.file'] = file_path
solr_params['commit'] = 'true'
params = urllib.urlencode(solr_params)
update_url = settings.SOLR_URL + 'update/csv?%s'
print "Loading records into Solr ..."
try:
output = urllib.urlopen(update_url % params)
except IOError:
raise IOError, 'Unable to connect to the Solr instance.'
print "Solr response:\n"
print output.read()
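# Usage sketch (run as a Django management command; the file path below is
# illustrative):
#
#   python manage.py index --parser marc /path/to/records.mrc
#   python manage.py index --new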
| gpl-3.0 | -521,989,871,261,624,260 | 35.427481 | 91 | 0.586756 | false | 3.914684 | false | false | false |
xtiankisutsa/MARA_Framework | tools/qark/qark/modules/createSploit.py | 1 | 2666 | from __future__ import absolute_import
'''Copyright 2015 LinkedIn Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.'''
import shutil
import errno
import fileinput
import os
from qark.modules import common
from qark.modules.common import logger
class exploitType:
"""
Enum type for exploitatin category
"""
MANIFEST, ACTIVITY, INTENT, PERMISSION, SERVICE, RECEIVER, BROADCAST_INTENT = range(7)
def copy_template(src,dest):
"""
Given a source and destination, copy all files/folders under source to destination\n
    Overwrites destination if any files/folders already exist\n
Used to copy the exploit template
"""
    status = None  # only set to 'ERROR' below; returned unchanged on success
    try:
shutil.copytree(src, dest)
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
            shutil.copy(src, dest)
status='ERROR'
else:
print('Directory not copied. Error: %s' % e)
#TODO - give an option to specify a different dir, if the specified one already exists
status='ERROR'
return status
def modify_template(path,filename,temp_text,repl_text):
"""
Deprecated code
"""
tmp=path+filename
tmp2=path+filename+'_tmp'
f1 = open(tmp, 'r')
f2 = open(tmp2, 'w')
for line in f1:
f2.write(line.replace(temp_text, repl_text))
#putting back template text, for re-use
f2.write('//REPLACEME-TT2')
f1.close()
f2.close()
os.remove(tmp)
os.rename(tmp2,tmp)
return
def modify_template_2(filename,placeholder,replacement):
"""
    Takes a filename, a placeholder value to be replaced and the actual replacement value\n
    Uncomments the commented-out code from the exploit template, replaces the placeholder with the actual value and adds this content on the next line to facilitate multiple substitutions
"""
flag = False
for line in fileinput.input(filename, inplace=1):
if placeholder in line:
if str(line).strip().startswith("//"):
line1 = str(line).split("//")[1]
flag=True
#print line1.replace(placeholder, replacement)
print line,
if flag:
print line1.replace(placeholder, replacement),
flag=False
| lgpl-3.0 | 3,166,646,589,954,059,300 | 34.078947 | 179 | 0.668792 | false | 4.033283 | false | false | false |
Alecardv/College-projects | Metodos Numericos 2012/trapecio.py | 1 | 1308 | import function
from matplotlib.pyplot import *
from pylab import *
import numpy as np
import math
class Trapecio:
def __init__(self, fun, xi, xf):
self.fun = function.Function(fun,'x')
self.a,self.b = xi,xf
self.fig, self.ax = subplots()
def relativeError(self):
f = self.fun.getDerivate()
Ea = ((self.b-self.a)**3/12)*((f.evalFunction(self.b) - f.evalFunction(self.a))/(self.b-self.a))
return Ea
def graph(self):
figure()
root = self.method()
print 'AreaAprox = ',root
print 'AreaReal = ',self.fun.getAndEvalIntegral([self.a,self.b])
print 'Error = ',self.relativeError()
Ox = np.arange(self.a-5,self.b+5, 0.02)
Oy = []
for i in Ox:
Oy.append( self.fun.evalFunction(i) )
self.ax.plot(Ox, Oy, color = "blue",lw = 1,label="f(x)")
self.ax.legend(loc=2)
show()
def px(self,x):
return (self.fun.evalFunction(self.b)-self.fun.evalFunction(self.a))/(self.b-self.a)*(x-self.a) + self.fun.evalFunction(self.a)
def method(self):
I = (self.b-self.a)*((self.fun.evalFunction(self.a) + self.fun.evalFunction(self.b))/2)
self.ax.vlines(self.a,0,self.fun.evalFunction(self.a))
self.ax.vlines(self.b,0,self.fun.evalFunction(self.b))
Ox = np.arange(self.a,self.b, 0.02)
Oy = []
for i in Ox:
Oy.append(self.px(i))
self.ax.plot(Ox, Oy,lw = 2)
return I
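# Usage sketch (assumes the local `function.Function` helper accepts an
# expression string in the variable 'x'):
#
#   t = Trapecio('x**2 + 1', 0.0, 2.0)
#   t.graph()   # prints the trapezoid estimate, the exact integral and the
#               # error, then plots f(x) together with the interpolating segment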
| gpl-3.0 | 1,179,837,174,657,071,400 | 28.727273 | 129 | 0.65367 | false | 2.36528 | false | false | false |
CurrencyCloud/currencycloud-python | src/currencycloud/clients/reference.py | 1 | 2780 | '''This module provides a class for Reference calls to the CC API'''
from currencycloud.http import Http
from currencycloud.resources import BeneficiaryRequiredDetails, ConversionDates, Currency, SettlementAccount, PayerRequiredDetails, PaymentPurposeCode, BankDetails, PaymentFeeRule
class Reference(Http):
'''This class provides an interface to the Reference endpoints of the CC API'''
def beneficiary_required_details(self, **kwargs):
'''Returns required beneficiary details and their basic validation formats.'''
response = self.get('/v2/reference/beneficiary_required_details', query=kwargs)['details']
return [BeneficiaryRequiredDetails(self, **c) for c in response]
def conversion_dates(self, **kwargs):
'''Returns dates for which dates this currency pair can not be traded.'''
return ConversionDates(self, **self.get('/v2/reference/conversion_dates', query=kwargs))
def currencies(self):
'''Returns a list of all the currencies that are tradeable.'''
response = self.get('/v2/reference/currencies')['currencies']
return [Currency(self, **c) for c in response]
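    # Usage sketch (assumes an authenticated configuration as required by the
    # Http base class; attribute names on the returned resources follow the
    # Currency Cloud API and are illustrative here):
    #
    #   ref = Reference(config)
    #   for currency in ref.currencies():
    #       print(currency.code)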
def payment_dates(self, **kwargs):
'''
This call returns a list of dates that are invalid when making payments of a specific
currency.
'''
return self.get('/v2/reference/payment_dates', query=kwargs)
def settlement_accounts(self, **kwargs):
'''Returns settlement account information, detailing where funds need to be sent to.'''
response = self.get('/v2/reference/settlement_accounts', query=kwargs)['settlement_accounts']
return [SettlementAccount(self, **c) for c in response]
def payer_required_details(self, **kwargs):
'''Returns required payer details and their basic validation formats.'''
response = self.get('/v2/reference/payer_required_details', query=kwargs)['details']
return [PayerRequiredDetails(self, **c) for c in response]
def payment_purpose_codes(self, **kwargs):
'''Returns a list of valid purpose codes for the specified currency.'''
response = self.get('/v2/reference/payment_purpose_codes', query=kwargs)['purpose_codes']
return [PaymentPurposeCode(self, **c) for c in response]
def bank_details(self, **kwargs):
'''Returns the details of the bank related to the specified identifier.'''
response = self.get('/v2/reference/bank_details', query=kwargs)
return BankDetails(self, **response)
def payment_fee_rules(self, **kwargs):
'''Returns a list of payment fee rules.'''
response = self.get('/v2/reference/payment_fee_rules', query=kwargs)['payment_fee_rules']
return [PaymentFeeRule(self, **c) for c in response] | mit | 8,175,697,936,148,620,000 | 50.5 | 179 | 0.690647 | false | 4.149254 | false | false | false |
flake123p/ProjectH | Cpp_Platform2X/_toolchain/list_to_build_script.py | 1 | 3345 | #!/usr/bin/python
# Usage: list_to_make_var.py <in-file1:mod list> <in-file2:OFS> <out-file1:build script> <out-file2:clean script> <OS>
# argv: argv[0] argv[1] argv[2] argv[3] argv[4] argv[5]
#
# Include library
#
import os
import sys
def OpenFile(fileName, mode = 'r'): # mode : 'r', 'w', ...
try:
fp = open(fileName, mode)
except OSError as err:
print("OS error: {0}".format(err))
sys.exit(1)
except:
print("Unexpected error:", sys.exc_info()[0])
sys.exit(1)
return fp
mod_base_path = '../../mod/'
curr_os = str(sys.argv[5])
if curr_os == 'WIN':
mod_build_file = 'build_mod.bat'
mod_clean_file = 'clean_mod.bat'
else:
mod_build_file = 'build_mod.sh'
mod_clean_file = 'clean_mod.sh'
#
# main
#
if len(sys.argv) != 6:
print("Arguments Number Error. It should be 6.")
sys.exit(1)
finList = OpenFile(str(sys.argv[1]))
finOFS = OpenFile(str(sys.argv[2]))
foutBuildfile = OpenFile(str(sys.argv[3]), 'w')
foutCleanfile = OpenFile(str(sys.argv[4]), 'w')
#
# Extract OFS
#
state = 0
OFS_String = ""
OFS_Exist = 0
for each_line in finOFS:
each_word_list = each_line.split()
#print(each_word_list)
for each_word in each_word_list:
# Find OFS
if state == 0:
if each_word != "OFS":
print("Error. Miss \"OFS\" symbol in OFS file.")
sys.exit(1)
state = 1
# Find equal sign
elif state == 1:
if each_word != "=":
print("Error. Miss \"=\" symbol in OFS file.")
sys.exit(1)
state = 2
# Make OFS string
else:
OFS_Exist = 1
OFS_String = OFS_String + " " + each_word
if curr_os == 'WIN':
if OFS_Exist == 1:
OFS_String = "OFS=\"" + OFS_String + "\""
else:
if OFS_Exist == 1:
OFS_String = OFS_String ### "OFS=\"" + OFS_String + "\""
if curr_os == 'WIN':
foutBuildfile.write('@ECHO OFF\n')
foutCleanfile.write('@ECHO OFF\n')
foutBuildfile.write('SET CURR_CD=%CD%\n')
foutCleanfile.write('SET CURR_CD=%CD%\n')
for each_line in finList:
each_mod = each_line.strip()
# build files
str = 'CD ' + mod_base_path + each_mod + '\n' + 'CALL ' + mod_build_file + ' ' + OFS_String + '\n'
foutBuildfile.write(str)
foutBuildfile.write('set rc=%ERRORLEVEL%\n')
foutBuildfile.write('CD %CURR_CD%\n')
foutBuildfile.write('IF %rc% NEQ 0 ( exit /b %rc% )\n\n')
# clean files
str = 'CD ' + mod_base_path + each_mod + '\n' + 'CALL ' + mod_clean_file + ' --DisablePause\n'
foutCleanfile.write(str)
foutCleanfile.write('CD %CURR_CD%\n')
foutCleanfile.write('if "%1" NEQ "--DisablePause" (\n')
foutCleanfile.write(' pause\n')
foutCleanfile.write(')\n')
else: #LINUX
foutBuildfile.write('temp_local_path=$PWD\n')
foutCleanfile.write('temp_local_path=$PWD\n')
for each_line in finList:
each_mod = each_line.strip()
# build files
str = 'cd ' + mod_base_path + each_mod + '\n' + './' + mod_build_file + ' ' + OFS_String + '\n'
foutBuildfile.write(str)
foutBuildfile.write('rc=$?\n')
foutBuildfile.write('cd $temp_local_path\n')
foutBuildfile.write('if [ $rc != 0 ]; then\n')
foutBuildfile.write(' exit $rc\n')
foutBuildfile.write('fi\n\n')
# clean files
str = 'cd ' + mod_base_path + each_mod + '\n' + './' + mod_clean_file + '\n'
foutCleanfile.write(str)
foutCleanfile.write('cd $temp_local_path\n')
finList.close()
foutBuildfile.close()
foutCleanfile.close()
| gpl-3.0 | 1,168,732,192,291,529,700 | 26.875 | 126 | 0.611061 | false | 2.5 | false | false | false |
priestc/moneywagon | moneywagon/tx.py | 1 | 10878 | from moneywagon import (
    get_unspent_outputs, CurrentPrice, get_current_price, get_optimal_fee, PushTx,
get_onchain_exchange_rates
)
from moneywagon.core import get_optimal_services, get_magic_bytes
from bitcoin import mktx, sign, pubtoaddr, privtopub
from .crypto_data import crypto_data
from .currency_support import CurrencySupport
class Transaction(object):
def __init__(self, crypto, hex=None, verbose=False):
c = CurrencySupport()
if crypto not in c.supported_currencies('moneywagon', 'transaction'):
form = crypto_data[crypto]['transaction_form']
raise NotImplementedError("%s not yet supported (tx form: %s)" % (
crypto.upper(), form
))
self.change_address = None
self.crypto = crypto
self.fee_satoshi = None
self.outs = []
self.ins = []
self.onchain_rate = None
self.verbose = verbose
if hex:
self.hex = hex
def from_unit_to_satoshi(self, value, unit='satoshi'):
"""
Convert a value to satoshis. units can be any fiat currency.
By default the unit is satoshi.
"""
if not unit or unit == 'satoshi':
return value
if unit == 'bitcoin' or unit == 'btc':
return value * 1e8
# assume fiat currency that we can convert
convert = get_current_price(self.crypto, unit)
return int(value / convert * 1e8)
def add_raw_inputs(self, inputs, private_key=None):
"""
Add a set of utxo's to this transaction. This method is better to use if you
want more fine control of which inputs get added to a transaction.
`inputs` is a list of "unspent outputs" (they were 'outputs' to previous transactions,
and 'inputs' to subsiquent transactions).
`private_key` - All inputs will be signed by the passed in private key.
"""
for i in inputs:
self.ins.append(dict(input=i, private_key=private_key))
self.change_address = i['address']
def _get_utxos(self, address, services, **modes):
"""
Using the service fallback engine, get utxos from remote service.
"""
return get_unspent_outputs(
self.crypto, address, services=services,
**modes
)
def private_key_to_address(self, pk):
"""
Convert a private key (in hex format) into an address.
"""
pub = privtopub(pk)
pub_byte, priv_byte = get_magic_bytes(self.crypto)
if priv_byte >= 128:
priv_byte -= 128 #pybitcointools bug
return pubtoaddr(pub, pub_byte)
def add_inputs(self, private_key=None, address=None, amount='all', max_ins=None, password=None, services=None, **modes):
"""
Make call to external service to get inputs from an address and/or private_key.
`amount` is the amount of [currency] worth of inputs (in satoshis) to add from
this address. Pass in 'all' (the default) to use *all* inputs found for this address.
Returned is the number of units (in satoshis) that were added as inputs to this tx.
"""
if private_key:
if private_key.startswith('6P'):
if not password:
raise Exception("Password required for BIP38 encoded private keys")
from .bip38 import Bip38EncryptedPrivateKey
private_key = Bip38EncryptedPrivateKey(self.crypto, private_key).decrypt(password)
address_from_priv = self.private_key_to_address(private_key)
if address and address != address_from_priv:
raise Exception("Invalid Private key")
address = address_from_priv
self.change_address = address
if not services:
services = get_optimal_services(self.crypto, 'unspent_outputs')
total_added_satoshi = 0
ins = 0
for utxo in self._get_utxos(address, services, **modes):
if max_ins and ins >= max_ins:
break
if (amount == 'all' or total_added_satoshi < amount):
ins += 1
self.ins.append(
dict(input=utxo, private_key=private_key)
)
total_added_satoshi += utxo['amount']
return total_added_satoshi, ins
def total_input_satoshis(self):
"""
Add up all the satoshis coming from all input tx's.
"""
just_inputs = [x['input'] for x in self.ins]
return sum([x['amount'] for x in just_inputs])
def select_inputs(self, amount):
'''Maximize transaction priority. Select the oldest inputs,
that are sufficient to cover the spent amount. Then,
remove any unneeded inputs, starting with
the smallest in value.
Returns sum of amounts of inputs selected'''
sorted_txin = sorted(self.ins, key=lambda x:-x['input']['confirmations'])
total_amount = 0
for (idx, tx_in) in enumerate(sorted_txin):
total_amount += tx_in['input']['amount']
if (total_amount >= amount):
break
sorted_txin = sorted(sorted_txin[:idx+1], key=lambda x:x['input']['amount'])
for (idx, tx_in) in enumerate(sorted_txin):
value = tx_in['input']['amount']
if (total_amount - value < amount):
break
else:
total_amount -= value
self.ins = sorted_txin[idx:]
return total_amount
def add_output(self, address, value, unit='satoshi'):
"""
Add an output (a person who will receive funds via this tx).
If no unit is specified, satoshi is implied.
"""
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f)" % (
value_satoshi, (value_satoshi / 1e8)
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def onchain_exchange(self, withdraw_crypto, withdraw_address, value, unit='satoshi'):
"""
This method is like `add_output` but it sends to another
"""
self.onchain_rate = get_onchain_exchange_rates(
self.crypto, withdraw_crypto, best=True, verbose=self.verbose
)
exchange_rate = float(self.onchain_rate['rate'])
result = self.onchain_rate['service'].get_onchain_exchange_address(
self.crypto, withdraw_crypto, withdraw_address
)
address = result['deposit']
value_satoshi = self.from_unit_to_satoshi(value, unit)
if self.verbose:
print("Adding output of: %s satoshi (%.8f) via onchain exchange, converting to %s %s" % (
value_satoshi, (value_satoshi / 1e8),
exchange_rate * value_satoshi / 1e8, withdraw_crypto.upper()
))
self.outs.append({
'address': address,
'value': value_satoshi
})
def fee(self, value=None, unit='satoshi'):
"""
Set the miner fee, if unit is not set, assumes value is satoshi.
If using 'optimal', make sure you have already added all outputs.
"""
convert = None
if not value:
# no fee was specified, use $0.02 as default.
convert = get_current_price(self.crypto, "usd")
self.fee_satoshi = int(0.02 / convert * 1e8)
verbose = "Using default fee of:"
elif value == 'optimal':
self.fee_satoshi = get_optimal_fee(
self.crypto, self.estimate_size(), verbose=self.verbose
)
verbose = "Using optimal fee of:"
else:
self.fee_satoshi = self.from_unit_to_satoshi(value, unit)
verbose = "Using manually set fee of:"
if self.verbose:
if not convert:
convert = get_current_price(self.crypto, "usd")
fee_dollar = convert * self.fee_satoshi / 1e8
print(verbose + " %s satoshis ($%.2f)" % (self.fee_satoshi, fee_dollar))
def estimate_size(self):
"""
Estimate how many bytes this transaction will be by countng inputs
and outputs.
Formula taken from: http://bitcoin.stackexchange.com/a/3011/18150
"""
# if there are no outs use 1 (because the change will be an out)
outs = len(self.outs) or 1
return outs * 34 + 148 * len(self.ins) + 10
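        # e.g. one input and two outputs: 2*34 + 148*1 + 10 = 226 bytes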
def get_hex(self, signed=True):
"""
Given all the data the user has given so far, make the hex using pybitcointools
"""
total_ins_satoshi = self.total_input_satoshis()
if total_ins_satoshi == 0:
raise ValueError("Can't make transaction, there are zero inputs")
# Note: there can be zero outs (sweep or coalesc transactions)
total_outs_satoshi = sum([x['value'] for x in self.outs])
if not self.fee_satoshi:
self.fee() # use default of $0.02
change_satoshi = total_ins_satoshi - (total_outs_satoshi + self.fee_satoshi)
if change_satoshi < 0:
raise ValueError(
"Input amount (%s) must be more than all output amounts (%s) plus fees (%s). You need more %s."
% (total_ins_satoshi, total_outs_satoshi, self.fee_satoshi, self.crypto.upper())
)
ins = [x['input'] for x in self.ins]
if change_satoshi > 0:
if self.verbose:
print("Adding change address of %s satoshis to %s" % (change_satoshi, self.change_address))
change = [{'value': change_satoshi, 'address': self.change_address}]
else:
change = [] # no change ?!
if self.verbose: print("Inputs == Outputs, no change address needed.")
tx = mktx(ins, self.outs + change)
if signed:
for i, input_data in enumerate(self.ins):
if not input_data['private_key']:
raise Exception("Can't sign transaction, missing private key for input %s" % i)
tx = sign(tx, i, input_data['private_key'])
return tx
def push(self, services=None, redundancy=1):
if not services:
services = get_optimal_services(self.crypto, "push_tx")
self.pushers = []
pusher = PushTx(services=services, verbose=self.verbose)
results = [pusher.action(self.crypto, self.get_hex())]
try:
for service in services[1:redundancy-1]:
pusher = PushTx(services=[service], verbose=self.verbose)
                results.append(pusher.action(self.crypto, self.get_hex()))
self.pushers.append(pusher)
except:
raise Exception("Partial push. Some services returned success, some failed.")
return results
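# Minimal usage sketch (assumes a funded address, network access and the
# default remote services; the key and address below are placeholders):
#
#   tx = Transaction('btc', verbose=True)
#   tx.add_inputs(private_key='<WIF private key>')
#   tx.add_output('<destination address>', 10000)   # amount in satoshis
#   tx.fee('optimal')
#   raw_hex = tx.get_hex()
#   tx.push()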
| mit | 7,023,996,752,570,535,000 | 37.168421 | 124 | 0.581633 | false | 3.897528 | false | false | false |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/tools/checkperms/checkperms.py | 1 | 13388 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure files have the right permissions.
Some developers have broken SCM configurations that flip the executable
permission on for no good reason. Unix developers who run ls --color will then
see .cc files in green and get confused.
- For file extensions that must be executable, add it to EXECUTABLE_EXTENSIONS.
- For file extensions that must not be executable, add it to
NOT_EXECUTABLE_EXTENSIONS.
- To ignore all the files inside a directory, add it to IGNORED_PATHS.
- For file base name with ambiguous state and that should not be checked for
shebang, add it to IGNORED_FILENAMES.
Any file not matching the above will be opened and looked if it has a shebang
or an ELF header. If this does not match the executable bit on the file, the
file will be flagged.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and all
file paths should be only lowercase.
"""
import json
import logging
import optparse
import os
import stat
import string
import subprocess
import sys
#### USER EDITABLE SECTION STARTS HERE ####
# Files with these extensions must have executable bit set.
#
# Case-sensitive.
EXECUTABLE_EXTENSIONS = (
'bat',
'dll',
'dylib',
'exe',
)
# These files must have executable bit set.
#
# Case-insensitive, lower-case only.
EXECUTABLE_PATHS = (
'chrome/test/data/app_shim/app_shim_32_bit.app/contents/'
'macos/app_mode_loader',
'chrome/test/data/extensions/uitest/plugins/plugin.plugin/contents/'
'macos/testnetscapeplugin',
'chrome/test/data/extensions/uitest/plugins_private/plugin.plugin/contents/'
'macos/testnetscapeplugin',
)
# These files must not have the executable bit set. This is mainly a performance
# optimization as these files are not checked for shebang. The list was
# partially generated from:
# git ls-files | grep "\\." | sed 's/.*\.//' | sort | uniq -c | sort -b -g
#
# Case-sensitive.
NON_EXECUTABLE_EXTENSIONS = (
'1',
'3ds',
'S',
'am',
'applescript',
'asm',
'c',
'cc',
'cfg',
'chromium',
'cpp',
'crx',
'cs',
'css',
'cur',
'def',
'der',
'expected',
'gif',
'grd',
'gyp',
'gypi',
'h',
'hh',
'htm',
'html',
'hyph',
'ico',
'idl',
'java',
'jpg',
'js',
'json',
'm',
'm4',
'mm',
'mms',
'mock-http-headers',
'nexe',
'nmf',
'onc',
'pat',
'patch',
'pdf',
'pem',
'plist',
'png',
'proto',
'rc',
'rfx',
'rgs',
'rules',
'spec',
'sql',
'srpc',
'svg',
'tcl',
'test',
'tga',
'txt',
'vcproj',
'vsprops',
'webm',
'word',
'xib',
'xml',
'xtb',
'zip',
)
# These files must not have executable bit set.
#
# Case-insensitive, lower-case only.
NON_EXECUTABLE_PATHS = (
'build/android/tests/symbolize/liba.so',
'build/android/tests/symbolize/libb.so',
'chrome/installer/mac/sign_app.sh.in',
'chrome/installer/mac/sign_versioned_dir.sh.in',
'chrome/test/data/extensions/uitest/plugins/plugin32.so',
'chrome/test/data/extensions/uitest/plugins/plugin64.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin32.so',
'chrome/test/data/extensions/uitest/plugins_private/plugin64.so',
'courgette/testdata/elf-32-1',
'courgette/testdata/elf-32-2',
'courgette/testdata/elf-64',
)
# File names that are always whitelisted. (These are mostly autoconf spew.)
#
# Case-sensitive.
IGNORED_FILENAMES = (
'config.guess',
'config.sub',
'configure',
'depcomp',
'install-sh',
'missing',
'mkinstalldirs',
'naclsdk',
'scons',
)
# File paths starting with one of these will be ignored as well.
# Please consider fixing your file permissions, rather than adding to this list.
#
# Case-insensitive, lower-case only.
IGNORED_PATHS = (
'native_client_sdk/src/build_tools/sdk_tools/third_party/fancy_urllib/'
'__init__.py',
'out/',
# TODO(maruel): Fix these.
'third_party/bintrees/',
'third_party/closure_linter/',
'third_party/devscripts/licensecheck.pl.vanilla',
'third_party/hyphen/',
'third_party/lcov-1.9/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov-1.9/contrib/galaxy/gen_makefile.sh',
'third_party/lcov/contrib/galaxy/conglomerate_functions.pl',
'third_party/lcov/contrib/galaxy/gen_makefile.sh',
'third_party/libevent/autogen.sh',
'third_party/libevent/test/test.sh',
'third_party/libxml/linux/xml2-config',
'third_party/libxml/src/ltmain.sh',
'third_party/mesa/',
'third_party/protobuf/',
'third_party/python_gflags/gflags.py',
'third_party/sqlite/',
'third_party/talloc/script/mksyms.sh',
'third_party/tcmalloc/',
'third_party/tlslite/setup.py',
# TODO(nednguyen): Remove this when telemetry is moved to catapult
'tools/telemetry/third_party/',
)
#### USER EDITABLE SECTION ENDS HERE ####
assert set(EXECUTABLE_EXTENSIONS) & set(NON_EXECUTABLE_EXTENSIONS) == set()
assert set(EXECUTABLE_PATHS) & set(NON_EXECUTABLE_PATHS) == set()
VALID_CHARS = set(string.ascii_lowercase + string.digits + '/-_.')
for paths in (EXECUTABLE_PATHS, NON_EXECUTABLE_PATHS, IGNORED_PATHS):
assert all([set(path).issubset(VALID_CHARS) for path in paths])
def capture(cmd, cwd):
"""Returns the output of a command.
Ignores the error code or stderr.
"""
logging.debug('%s; cwd=%s' % (' '.join(cmd), cwd))
env = os.environ.copy()
env['LANGUAGE'] = 'en_US.UTF-8'
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, env=env)
return p.communicate()[0]
def get_git_root(dir_path):
"""Returns the git checkout root or None."""
root = capture(['git', 'rev-parse', '--show-toplevel'], dir_path).strip()
if root:
return root
def is_ignored(rel_path):
"""Returns True if rel_path is in our whitelist of files to ignore."""
rel_path = rel_path.lower()
return (
os.path.basename(rel_path) in IGNORED_FILENAMES or
rel_path.lower().startswith(IGNORED_PATHS))
def must_be_executable(rel_path):
"""The file name represents a file type that must have the executable bit
set.
"""
return (os.path.splitext(rel_path)[1][1:] in EXECUTABLE_EXTENSIONS or
rel_path.lower() in EXECUTABLE_PATHS)
def must_not_be_executable(rel_path):
"""The file name represents a file type that must not have the executable
bit set.
"""
return (os.path.splitext(rel_path)[1][1:] in NON_EXECUTABLE_EXTENSIONS or
rel_path.lower() in NON_EXECUTABLE_PATHS)
def has_executable_bit(full_path):
"""Returns if any executable bit is set."""
permission = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
return bool(permission & os.stat(full_path).st_mode)
def has_shebang_or_is_elf(full_path):
"""Returns if the file starts with #!/ or is an ELF binary.
full_path is the absolute path to the file.
"""
with open(full_path, 'rb') as f:
data = f.read(4)
return (data[:3] == '#!/' or data == '#! /', data == '\x7fELF')
def check_file(root_path, rel_path):
"""Checks the permissions of the file whose path is root_path + rel_path and
returns an error if it is inconsistent. Returns None on success.
It is assumed that the file is not ignored by is_ignored().
If the file name is matched with must_be_executable() or
must_not_be_executable(), only its executable bit is checked.
Otherwise, the first few bytes of the file are read to verify if it has a
shebang or ELF header and compares this with the executable bit on the file.
"""
full_path = os.path.join(root_path, rel_path)
def result_dict(error):
return {
'error': error,
'full_path': full_path,
'rel_path': rel_path,
}
try:
bit = has_executable_bit(full_path)
except OSError:
# It's faster to catch exception than call os.path.islink(). Chromium
# tree happens to have invalid symlinks under
# third_party/openssl/openssl/test/.
return None
if must_be_executable(rel_path):
if not bit:
return result_dict('Must have executable bit set')
return
if must_not_be_executable(rel_path):
if bit:
return result_dict('Must not have executable bit set')
return
# For the others, it depends on the file header.
(shebang, elf) = has_shebang_or_is_elf(full_path)
if bit != (shebang or elf):
if bit:
return result_dict('Has executable bit but not shebang or ELF header')
if shebang:
return result_dict('Has shebang but not executable bit')
return result_dict('Has ELF header but not executable bit')
def check_files(root, files):
gen = (check_file(root, f) for f in files if not is_ignored(f))
return filter(None, gen)
class ApiBase(object):
def __init__(self, root_dir, bare_output):
self.root_dir = root_dir
self.bare_output = bare_output
self.count = 0
self.count_read_header = 0
def check_file(self, rel_path):
logging.debug('check_file(%s)' % rel_path)
self.count += 1
if (not must_be_executable(rel_path) and
not must_not_be_executable(rel_path)):
self.count_read_header += 1
return check_file(self.root_dir, rel_path)
def check_dir(self, rel_path):
return self.check(rel_path)
def check(self, start_dir):
"""Check the files in start_dir, recursively check its subdirectories."""
errors = []
items = self.list_dir(start_dir)
logging.info('check(%s) -> %d' % (start_dir, len(items)))
for item in items:
full_path = os.path.join(self.root_dir, start_dir, item)
rel_path = full_path[len(self.root_dir) + 1:]
if is_ignored(rel_path):
continue
if os.path.isdir(full_path):
# Depth first.
errors.extend(self.check_dir(rel_path))
else:
error = self.check_file(rel_path)
if error:
errors.append(error)
return errors
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
return sorted(
x for x in os.listdir(os.path.join(self.root_dir, start_dir))
if not x.startswith('.')
)
class ApiAllFilesAtOnceBase(ApiBase):
_files = None
def list_dir(self, start_dir):
"""Lists all the files and directory inside start_dir."""
if self._files is None:
self._files = sorted(self._get_all_files())
if not self.bare_output:
print 'Found %s files' % len(self._files)
start_dir = start_dir[len(self.root_dir) + 1:]
return [
x[len(start_dir):] for x in self._files if x.startswith(start_dir)
]
def _get_all_files(self):
"""Lists all the files and directory inside self._root_dir."""
raise NotImplementedError()
class ApiGit(ApiAllFilesAtOnceBase):
def _get_all_files(self):
return capture(['git', 'ls-files'], cwd=self.root_dir).splitlines()
def get_scm(dir_path, bare):
"""Returns a properly configured ApiBase instance."""
cwd = os.getcwd()
root = get_git_root(dir_path or cwd)
if root:
if not bare:
print('Found git repository at %s' % root)
return ApiGit(dir_path or root, bare)
# Returns a non-scm aware checker.
if not bare:
print('Failed to determine the SCM for %s' % dir_path)
return ApiBase(dir_path or cwd, bare)
def main():
usage = """Usage: python %prog [--root <root>] [tocheck]
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python %prog
python %prog --root /path/to/source chrome"""
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'--root',
help='Specifies the repository root. This defaults '
'to the checkout repository root')
parser.add_option(
'-v', '--verbose', action='count', default=0, help='Print debug logging')
parser.add_option(
'--bare',
action='store_true',
default=False,
help='Prints the bare filename triggering the checks')
parser.add_option(
'--file', action='append', dest='files',
      help='Specifies a list of files to check the permissions of. Only these '
'files will be checked')
parser.add_option('--json', help='Path to JSON output file')
options, args = parser.parse_args()
levels = [logging.ERROR, logging.INFO, logging.DEBUG]
logging.basicConfig(level=levels[min(len(levels) - 1, options.verbose)])
if len(args) > 1:
parser.error('Too many arguments used')
if options.root:
options.root = os.path.abspath(options.root)
if options.files:
# --file implies --bare (for PRESUBMIT.py).
options.bare = True
errors = check_files(options.root, options.files)
else:
api = get_scm(options.root, options.bare)
start_dir = args[0] if args else api.root_dir
errors = api.check(start_dir)
if not options.bare:
    print('Processed %s files, %d files were tested for shebang/ELF '
'header' % (api.count, api.count_read_header))
if options.json:
with open(options.json, 'w') as f:
json.dump(errors, f)
if errors:
if options.bare:
print '\n'.join(e['full_path'] for e in errors)
else:
print '\nFAILED\n'
print '\n'.join('%s: %s' % (e['full_path'], e['error']) for e in errors)
return 1
if not options.bare:
print '\nSUCCESS\n'
return 0
if '__main__' == __name__:
sys.exit(main())
| mit | 8,642,970,473,588,044,000 | 27.12605 | 80 | 0.664924 | false | 3.284593 | true | false | false |
bdfoster/blumate | blumate/components/mongo.py | 1 | 5577 | """
A component which allows you to connect BluMate to a MongoDB server.
The underlying connection is created with pymongo's MongoClient; see the
configuration keys defined below.
"""
import logging
from blumate.helpers.entity import Entity
import blumate.util as util
from blumate import bootstrap
from blumate.const import (EVENT_BLUMATE_STOP,
EVENT_BLUMATE_START,
EVENT_STATE_CHANGED,
EVENT_PLATFORM_DISCOVERED,
STATE_ACTIVE,
STATE_IDLE,
STATE_UNKNOWN,
ATTR_DISCOVERED,
ATTR_FRIENDLY_NAME,
ATTR_SERVICE)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mongo"
DEPENDENCIES = []
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 27017
DEFAULT_TZ_AWARE = True
DEFAULT_SOCKET_TIMEOUT_MS = None
DEFAULT_SSL = False
DEFAULT_MAX_POOL_SIZE = 100
DEFAULT_SOCKET_KEEP_ALIVE = False
REQUIREMENTS = ['pymongo==3.2.2']
CONF_HOST = 'host'
CONF_PORT = 'port'
CONF_TZ_AWARE = 'tz_aware'
CONF_SOCKET_TIMEOUT_MS = 'socket_timeout_ms'
CONF_SSL = 'ssl'
CONF_MAX_POOL_SIZE = 'max_pool_size'
CONF_SOCKET_KEEP_ALIVE = 'socket_keep_alive'
SERVICE_UNLOCK = 'unlock'
SERVICE_DISCOVER_DATABASES = 'discover_databases'
SERVICE_DISCONNECT = 'disconnect'
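# Example configuration sketch (keys correspond to the CONF_* options above;
# the values shown are the defaults and are given only for illustration):
#
#   mongo:
#     host: localhost
#     port: 27017
#     tz_aware: true
#     ssl: false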
__client = None
class Mongo(Entity):
def __init__(self, bmss, config):
"""Setup the MongoDB component."""
self.__state = STATE_UNKNOWN
self.bmss = bmss
self.__config = config[DOMAIN]
self.__host = util.convert(self.__config.get(CONF_HOST), str, DEFAULT_HOST)
self.__port = util.convert(self.__config.get(CONF_PORT), int, DEFAULT_PORT)
self.__tz_aware = util.convert(self.__config.get(CONF_TZ_AWARE), bool, DEFAULT_TZ_AWARE)
self.__socket_timeout_ms = util.convert(self.__config.get(CONF_SOCKET_TIMEOUT_MS), int, DEFAULT_SOCKET_TIMEOUT_MS)
self.__ssl = util.convert(self.__config.get(CONF_SSL), bool, DEFAULT_SSL)
self.__max_pool_size = util.convert(self.__config.get(CONF_MAX_POOL_SIZE), int, DEFAULT_MAX_POOL_SIZE)
self.__socket_keep_alive = util.convert(self.__config.get(CONF_SOCKET_KEEP_ALIVE),
int,
DEFAULT_SOCKET_KEEP_ALIVE)
from pymongo import MongoClient
from pymongo.monitoring import CommandListener
class MongoCommandEvent(CommandListener):
"""
https://api.mongodb.com/python/current/api/pymongo/monitoring.html#module-pymongo.monitoring
"""
def started(self, event):
_LOGGER.debug("Command {0.command_name} with request id "
"{0.request_id} started on server "
"{0.connection_id}".format(event))
def succeeded(self, event):
_LOGGER.info("Command {0.command_name} with request id "
"{0.request_id} on server {0.connection_id} "
"succeeded in {0.duration_micros} "
"microseconds".format(event))
def failed(self, event):
_LOGGER.warn("Command {0.command_name} with request id "
"{0.request_id} on server {0.connection_id} "
"failed in {0.duration_micros} "
"microseconds".format(event))
self.__client = MongoClient(host = self.__host,
port = self.__port,
tz_aware=self.__tz_aware,
maxPoolSize=self.__max_pool_size,
socketTimeoutMS =self.__socket_timeout_ms,
ssl = self.__ssl,
socketKeepAlive = self.__socket_keep_alive,
document_class = dict,
connect = True,
event_listeners = [MongoCommandEvent()])
# Will fail here if connection is not able to be established
assert(self.__client is not None)
self.__state = STATE_IDLE
bmss.bus.listen_once(EVENT_BLUMATE_STOP, self.disconnect)
bmss.bus.listen_once(EVENT_BLUMATE_START, self.discover_databases)
bmss.services.register(DOMAIN, SERVICE_DISCOVER_DATABASES, self.discover_databases)
bmss.services.register(DOMAIN, SERVICE_UNLOCK, self.unlock)
bmss.services.register(DOMAIN, SERVICE_DISCONNECT, self.disconnect)
def discover_databases(self, event):
"""Discover available databases."""
self.__state = STATE_ACTIVE
database_list = self.__client.database_names()
self.__state = STATE_IDLE
_LOGGER.info("Available Databases: %s", database_list)
def unlock(self, event):
"""Enables writes to the server."""
_LOGGER.debug("Unlocking server...")
self.__client.unlock()
if self.__client.is_locked:
_LOGGER.warn("Server is still locked. Maybe a permissions issue?")
else:
_LOGGER.info("Server is unlocked.")
def disconnect(self, event):
"""Disconnect from the MongoDB Server."""
_LOGGER.debug("Disconnecting from MongoDB Server...")
self.__client.close()
_LOGGER.info("Disconnected from MongoDB Server.")
setup = Mongo
| mit | -8,416,987,383,555,925,000 | 38.553191 | 122 | 0.564461 | false | 4.228203 | true | false | false |
kapilgarg1996/gmc | gmc/conf/__init__.py | 1 | 1489 | import os
import importlib
from gmc.conf import global_settings
ENVIRONMENT_VARIABLE = "GMC_SETTINGS_MODULE"
class Settings:
"""
Module to load settings to configure gmc
"""
def __init__(self, *args, **kwargs):
self.settings = None
self.settings_module = None
def __getattr__(self, name):
"""
Make settings available as the attributes.
Like settings.DATASET_DIR
"""
self.load_settings()
return self.settings[name]
def __iter__(self):
self.load_settings()
return iter(self.settings)
def load_settings(self):
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if self.settings is not None and settings_module == self.settings_module:
return
self.settings = {}
for setting in dir(global_settings):
if setting.isupper():
self.settings[setting] = getattr(global_settings, setting)
self.settings_module = os.environ.get(ENVIRONMENT_VARIABLE, None)
if self.settings_module is not None:
mod = importlib.import_module(self.settings_module)
for setting in dir(mod):
if setting.isupper():
self.settings[setting] = getattr(mod, setting)
def modify(self, new_settings):
for name in new_settings:
if name in self.settings:
self.settings[name] = new_settings[name]
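# Usage sketch (assumes a settings module importable as 'myproject.settings'):
#
#   os.environ['GMC_SETTINGS_MODULE'] = 'myproject.settings'
#   from gmc.conf import settings
#   print(settings.DATASET_DIR)   # module values fall back to global_settings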
settings = Settings() | mit | 8,042,181,820,768,355,000 | 29.408163 | 81 | 0.601746 | false | 4.266476 | false | false | false |
ihrwein/yarg | yarg/gui/listmodel.py | 1 | 8053 | from PyQt5 import QtCore
class QObjectListModel(QtCore.QAbstractListModel):
"""
QObjectListModel provides a more powerful, but still easy to use, alternative to using
QObjectList lists as models for QML views. As a QAbstractListModel, it has the ability to
automatically notify the view of specific changes to the list, such as adding or removing
items. At the same time it provides QList-like convenience functions such as append, at,
and removeAt for easily working with the model from Python.
This class is the Python port of the C++ QObjectListModel class.
"""
def __init__(self, parent=None):
""" Constructs an object list model with the given parent. """
super(QObjectListModel, self).__init__(parent)
self._objects = list() # Internal list of objects
self.roles = QtCore.QAbstractListModel.roleNames(self)
self.ObjectRole = QtCore.Qt.UserRole + 1
self.roles[self.ObjectRole] = "object"
def roleNames(self):
return self.roles
def __iter__(self):
""" Enables iteration over the list of objects. """
return iter(self._objects)
def __len__(self):
return self.size()
def __bool__(self):
return self.size() > 0
def __getitem__(self, index):
""" Enables the [] operator """
return self._objects[index]
def data(self, index, role):
""" Returns data for the specified role, from the item with the
given index. The only valid role is ObjectRole.
If the view requests an invalid index or role, an invalid variant
is returned.
"""
if index.row() < 0 or index.row() >= len(self._objects):
return None
if role == self.ObjectRole:
return self._objects[index.row()]
return None
def rowCount(self, parent):
""" Returns the number of rows in the model. This value corresponds to the
number of items in the model's internal object list.
"""
return self.size()
def objectList(self):
""" Returns the object list used by the model to store data. """
return self._objects
def setObjectList(self, objects):
""" Sets the model's internal objects list to objects. The model will
notify any attached views that its underlying data has changed.
"""
oldSize = self.size()
self.beginResetModel()
self._objects = objects
self.endResetModel()
self.dataChanged.emit(self.index(0), self.index(self.size() - 1), [])
if self.size() != oldSize:
self.countChanged.emit()
############
# List API #
############
def append(self, toAppend):
""" Inserts object(s) at the end of the model and notifies any views.
Accepts both QObject and list of QObjects.
"""
if not isinstance(toAppend, list):
toAppend = [toAppend]
self.beginInsertRows(QtCore.QModelIndex(), self.size(), self.size() + len(toAppend) - 1)
self._objects.extend(toAppend)
self.endInsertRows()
self.countChanged.emit()
def insert(self, i, toInsert):
""" Inserts object(s) at index position i in the model and notifies
any views. If i is 0, the object is prepended to the model. If i
is size(), the object is appended to the list.
Accepts both QObject and list of QObjects.
"""
if not isinstance(toInsert, list):
toInsert = [toInsert]
self.beginInsertRows(QtCore.QModelIndex(), i, i + len(toInsert) - 1)
for obj in reversed(toInsert):
self._objects.insert(i, obj)
self.endInsertRows()
self.countChanged.emit()
def at(self, i):
""" Use [] instead - Return the object at index i. """
return self._objects[i]
def replace(self, i, obj):
""" Replaces the item at index position i with object and
notifies any views. i must be a valid index position in the list
(i.e., 0 <= i < size()).
"""
self._objects[i] = obj
self.dataChanged.emit(self.index(i), self.index(i), [])
def move(self, fromIndex, toIndex):
""" Moves the item at index position from to index position to
and notifies any views.
This function assumes that both from and to are at least 0 but less than
size(). To avoid failure, test that both from and to are at
least 0 and less than size().
"""
value = toIndex
if toIndex > fromIndex:
value += 1
if not self.beginMoveRows(QtCore.QModelIndex(), fromIndex, fromIndex, QtCore.QModelIndex(), value):
return
self._objects.insert(toIndex, self._objects.pop(fromIndex))
self.endMoveRows()
def removeAt(self, i, count=1):
""" Removes count number of items from index position i and notifies any views.
i must be a valid index position in the model (i.e., 0 <= i < size()), as
must as i + count - 1.
"""
self.beginRemoveRows(QtCore.QModelIndex(), i, i + count - 1)
for cpt in range(count):
self._objects.pop(i)
self.endRemoveRows()
self.countChanged.emit()
def remove(self, obj):
""" Removes the first occurrence of the given object. Raises a ValueError if not in list. """
if not self.contains(obj):
raise ValueError("QObjectListModel.remove(obj) : obj not in list")
self.removeAt(self.indexOf(obj))
def takeAt(self, i):
""" Removes the item at index position i (notifying any views) and returns it.
i must be a valid index position in the model (i.e., 0 <= i < size()).
"""
self.beginRemoveRows(QtCore.QModelIndex(), i, i)
obj = self._objects.pop(i)
self.endRemoveRows()
self.countChanged.emit()
return obj
def clear(self):
""" Removes all items from the model and notifies any views. """
if not self._objects:
return
self.beginRemoveRows(QtCore.QModelIndex(), 0, self.size() - 1)
self._objects = []
self.endRemoveRows()
self.countChanged.emit()
def contains(self, obj):
""" Returns true if the list contains an occurrence of object;
otherwise returns false.
"""
return obj in self._objects
def indexOf(self, matchObj, fromIndex=0, positive=True):
""" Returns the index position of the first occurrence of object in
        the model, searching forward from index position fromIndex.
If positive is True, will always return a positive index.
"""
index = self._objects[fromIndex:].index(matchObj) + fromIndex
if positive and index < 0:
index += self.size()
return index
def lastIndexOf(self, matchObj, fromIndex=-1, positive=True):
""" Returns the index position of the last occurrence of object in
        the list, searching backward from index position fromIndex. If
        fromIndex is -1 (the default), the search starts at the last item.
If positive is True, will always return a positive index.
"""
r = list(self._objects)
r.reverse()
index = - r[-fromIndex - 1:].index(matchObj) + fromIndex
if positive and index < 0:
index += self.size()
return index
def size(self):
""" Returns the number of items in the model. """
return len(self._objects)
@QtCore.pyqtSlot(result=bool)
def isEmpty(self):
""" Returns true if the model contains no items; otherwise returns false. """
return len(self._objects) == 0
@QtCore.pyqtSlot(int, result="QVariant")
def get(self, i):
""" For usage from QML.
Note: return param is mandatory to mimic Q_INVOKABLE C++ method behavior
"""
return self._objects[i]
countChanged = QtCore.pyqtSignal()
count = QtCore.pyqtProperty(int, size, notify=countChanged) | gpl-2.0 | -3,779,364,264,348,425,700 | 36.635514 | 107 | 0.609711 | false | 4.179035 | false | false | false |
purism/pdak | dak/dakdb/update70.py | 1 | 3429 | #!/usr/bin/env python
# coding=utf8
"""
Add suite options for overrides and control-suite to DB
@contact: Debian FTP Master <[email protected]>
@copyright: 2011 Mark Hymers <[email protected]>
@license: GNU General Public License version 2 or later
"""
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
import psycopg2
from daklib.dak_exceptions import DBUpdateError
from daklib.config import Config
################################################################################
def do_update(self):
"""
Add suite options for overrides and control-suite to DB
"""
print __doc__
try:
cnf = Config()
c = self.db.cursor()
c.execute("ALTER TABLE suite ADD COLUMN overrideprocess BOOLEAN NOT NULL DEFAULT FALSE")
c.execute("COMMENT ON COLUMN suite.overrideprocess IS %s", ['If true, check-overrides will process the suite by default'])
c.execute("ALTER TABLE suite ADD COLUMN overrideorigin TEXT DEFAULT NULL")
c.execute("COMMENT ON COLUMN suite.overrideprocess IS %s", ['If NOT NULL, check-overrides will take missing overrides from the named suite'])
# Migrate config file values into database
if "Check-Overrides::OverrideSuites" in cnf:
for suitename in cnf.subtree("Check-Overrides::OverrideSuites").list():
if cnf.get("Check-Overrides::OverrideSuites::%s::Process" % suitename, "0") == "1":
print "Marking %s to have overrides processed automatically" % suitename.lower()
c.execute("UPDATE suite SET overrideprocess = TRUE WHERE suite_name = %s", [suitename.lower()])
originsuite = cnf.get("Check-Overrides::OverrideSuites::%s::OriginSuite" % suitename, '')
if originsuite != '':
print "Setting %s to use %s as origin for overrides" % (suitename.lower(), originsuite.lower())
c.execute("UPDATE suite SET overrideorigin = %s WHERE suite_name = %s", [originsuite.lower(), suitename.lower()])
c.execute("ALTER TABLE suite ADD COLUMN allowcsset BOOLEAN NOT NULL DEFAULT FALSE")
c.execute("COMMENT ON COLUMN suite.allowcsset IS %s", ['Allow control-suite to be used with the --set option without forcing'])
# Import historical hard-coded values
c.execute("UPDATE suite SET allowcsset = TRUE WHERE suite_name IN ('testing', 'squeeze-updates')")
c.execute("UPDATE config SET value = '70' WHERE name = 'db_revision'")
self.db.commit()
except psycopg2.ProgrammingError as msg:
self.db.rollback()
raise DBUpdateError('Unable to apply sick update 70, rollback issued. Error message : %s' % (str(msg)))
| gpl-2.0 | -1,284,880,315,698,919,400 | 47.295775 | 149 | 0.654418 | false | 4.351523 | false | false | false |
spyder-ide/spyder.line_profiler | spyder_line_profiler/widgets/lineprofiler.py | 1 | 22093 | # -*- coding: utf-8 -*-
#
# Copyright © 2011 Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Line Profiler widget
See the official documentation of line_profiler:
http://pythonhosted.org/line_profiler/
"""
# Standard library imports
from __future__ import with_statement
import hashlib
import inspect
import linecache
import os
import os.path as osp
import time
import sys
# Third party imports
from qtpy.compat import getopenfilename
from qtpy.QtCore import (QByteArray, QProcess, Qt, QTextCodec,
QProcessEnvironment, Signal)
from qtpy.QtGui import QBrush, QColor, QFont
from qtpy.QtWidgets import (QHBoxLayout, QWidget, QMessageBox, QVBoxLayout,
QLabel, QTreeWidget, QTreeWidgetItem, QApplication)
# Local imports
from spyder.config.base import get_conf_path, get_translation
from spyder.utils import programs
from spyder.utils.qthelpers import create_toolbutton, get_icon
from spyder.widgets.comboboxes import PythonModulesComboBox
from spyder.utils.misc import add_pathlist_to_PYTHONPATH
from spyder.widgets.variableexplorer.texteditor import TextEditor
try:
from spyder.py3compat import to_text_string, getcwd, pickle
except ImportError:
# python2
to_text_string = unicode
getcwd = os.getcwdu
import cPickle as pickle
# This is needed for testing this module as a stand alone script
try:
_ = get_translation("line_profiler", dirname="spyder_line_profiler")
except KeyError as error:
import gettext
_ = gettext.gettext
locale_codec = QTextCodec.codecForLocale()
COL_NO = 0
COL_HITS = 1
COL_TIME = 2
COL_PERHIT = 3
COL_PERCENT = 4
COL_LINE = 5
COL_POS = 0 # Position is not displayed but set as Qt.UserRole
CODE_NOT_RUN_COLOR = QBrush(QColor.fromRgb(128, 128, 128, 200))
WEBSITE_URL = 'http://pythonhosted.org/line_profiler/'
def is_lineprofiler_installed():
"""
Checks if the program and the library for line_profiler is installed.
"""
return (programs.is_module_installed('line_profiler')
and programs.find_program('kernprof') is not None)
class LineProfilerWidget(QWidget):
"""
Line profiler widget.
"""
DATAPATH = get_conf_path('lineprofiler.results')
VERSION = '0.0.1'
redirect_stdio = Signal(bool)
sig_finished = Signal()
def __init__(self, parent):
QWidget.__init__(self, parent)
# Need running QApplication before importing runconfig
from spyder.plugins import runconfig
self.runconfig = runconfig
self.spyder_pythonpath = None
self.setWindowTitle("Line profiler")
self.output = None
self.error_output = None
self.use_colors = True
self._last_wdir = None
self._last_args = None
self._last_pythonpath = None
self.filecombo = PythonModulesComboBox(self)
self.start_button = create_toolbutton(
self, icon=get_icon('run.png'),
text=_("Profile by line"),
tip=_("Run line profiler"),
triggered=(lambda checked=False: self.analyze()), text_beside_icon=True)
self.stop_button = create_toolbutton(
self,
icon=get_icon('terminate.png'),
text=_("Stop"),
tip=_("Stop current profiling"),
text_beside_icon=True)
self.filecombo.valid.connect(self.start_button.setEnabled)
#self.filecombo.valid.connect(self.show_data)
# FIXME: The combobox emits this signal on almost any event
# triggering show_data() too early, too often.
browse_button = create_toolbutton(
self, icon=get_icon('fileopen.png'),
tip=_('Select Python script'),
triggered=self.select_file)
self.datelabel = QLabel()
self.log_button = create_toolbutton(
self, icon=get_icon('log.png'),
text=_("Output"),
text_beside_icon=True,
tip=_("Show program's output"),
triggered=self.show_log)
self.datatree = LineProfilerDataTree(self)
self.collapse_button = create_toolbutton(
self,
icon=get_icon('collapse.png'),
triggered=lambda dD=-1: self.datatree.collapseAll(),
tip=_('Collapse all'))
self.expand_button = create_toolbutton(
self,
icon=get_icon('expand.png'),
triggered=lambda dD=1: self.datatree.expandAll(),
tip=_('Expand all'))
hlayout1 = QHBoxLayout()
hlayout1.addWidget(self.filecombo)
hlayout1.addWidget(browse_button)
hlayout1.addWidget(self.start_button)
hlayout1.addWidget(self.stop_button)
hlayout2 = QHBoxLayout()
hlayout2.addWidget(self.collapse_button)
hlayout2.addWidget(self.expand_button)
hlayout2.addStretch()
hlayout2.addWidget(self.datelabel)
hlayout2.addStretch()
hlayout2.addWidget(self.log_button)
layout = QVBoxLayout()
layout.addLayout(hlayout1)
layout.addLayout(hlayout2)
layout.addWidget(self.datatree)
self.setLayout(layout)
self.process = None
self.set_running_state(False)
self.start_button.setEnabled(False)
if not is_lineprofiler_installed():
for widget in (self.datatree, self.filecombo, self.log_button,
self.start_button, self.stop_button, browse_button,
self.collapse_button, self.expand_button):
widget.setDisabled(True)
text = _(
'<b>Please install the <a href="%s">line_profiler module</a></b>'
) % WEBSITE_URL
self.datelabel.setText(text)
self.datelabel.setOpenExternalLinks(True)
else:
pass # self.show_data()
def analyze(self, filename=None, wdir=None, args=None, pythonpath=None,
use_colors=True):
self.use_colors = use_colors
if not is_lineprofiler_installed():
return
self.kill_if_running()
#index, _data = self.get_data(filename) # FIXME: storing data is not implemented yet
if filename is not None:
filename = osp.abspath(to_text_string(filename))
index = self.filecombo.findText(filename)
if index == -1:
self.filecombo.addItem(filename)
self.filecombo.setCurrentIndex(self.filecombo.count()-1)
else:
self.filecombo.setCurrentIndex(index)
self.filecombo.selected()
if self.filecombo.is_valid():
filename = to_text_string(self.filecombo.currentText())
runconf = self.runconfig.get_run_configuration(filename)
if runconf is not None:
if wdir is None:
if runconf.wdir_enabled:
wdir = runconf.wdir
elif runconf.cw_dir:
wdir = os.getcwd()
elif runconf.file_dir:
wdir = osp.dirname(filename)
elif runconf.fixed_dir:
wdir = runconf.dir
if args is None:
if runconf.args_enabled:
args = runconf.args
if wdir is None:
wdir = osp.dirname(filename)
if pythonpath is None:
pythonpath = self.spyder_pythonpath
self.start(wdir, args, pythonpath)
def select_file(self):
self.redirect_stdio.emit(False)
filename, _selfilter = getopenfilename(
self, _("Select Python script"), getcwd(),
_("Python scripts")+" (*.py ; *.pyw)")
        self.redirect_stdio.emit(True)
if filename:
self.analyze(filename)
def show_log(self):
if self.output:
TextEditor(self.output, title=_("Line profiler output"),
readonly=True, size=(700, 500)).exec_()
def show_errorlog(self):
if self.error_output:
TextEditor(self.error_output, title=_("Line profiler output"),
readonly=True, size=(700, 500)).exec_()
def start(self, wdir=None, args=None, pythonpath=None):
filename = to_text_string(self.filecombo.currentText())
if wdir is None:
wdir = self._last_wdir
if wdir is None:
                wdir = osp.dirname(filename)
if args is None:
args = self._last_args
if args is None:
args = []
if pythonpath is None:
pythonpath = self._last_pythonpath
self._last_wdir = wdir
self._last_args = args
self._last_pythonpath = pythonpath
self.datelabel.setText(_('Profiling, please wait...'))
self.process = QProcess(self)
self.process.setProcessChannelMode(QProcess.SeparateChannels)
self.process.setWorkingDirectory(wdir)
self.process.readyReadStandardOutput.connect(self.read_output)
self.process.readyReadStandardError.connect(
lambda: self.read_output(error=True))
self.process.finished.connect(self.finished)
self.stop_button.clicked.connect(self.process.kill)
if pythonpath is not None:
env = [to_text_string(_pth)
for _pth in self.process.systemEnvironment()]
add_pathlist_to_PYTHONPATH(env, pythonpath)
processEnvironment = QProcessEnvironment()
for envItem in env:
envName, separator, envValue = envItem.partition('=')
processEnvironment.insert(envName, envValue)
self.process.setProcessEnvironment(processEnvironment)
self.output = ''
self.error_output = ''
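        # The process launched below is roughly equivalent to running
        #   kernprof -lvb -o <DATAPATH> <script> [args...]
        # from a shell; on Windows the paths are additionally quoted and
        # normalized to forward slashes so backslashes are not mistaken for
        # escape sequences.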
if os.name == 'nt':
# On Windows, one has to replace backslashes by slashes to avoid
# confusion with escape characters (otherwise, for example, '\t'
# will be interpreted as a tabulation):
filename = osp.normpath(filename).replace(os.sep, '/')
p_args = ['-lvb', '-o', '"' + self.DATAPATH + '"',
'"' + filename + '"']
if args:
p_args.extend(programs.shell_split(args))
executable = '"' + programs.find_program('kernprof') + '"'
executable += ' ' + ' '.join(p_args)
executable = executable.replace(os.sep, '/')
self.process.start(executable)
else:
p_args = ['-lvb', '-o', self.DATAPATH, filename]
if args:
p_args.extend(programs.shell_split(args))
executable = 'kernprof'
self.process.start(executable, p_args)
running = self.process.waitForStarted()
self.set_running_state(running)
if not running:
QMessageBox.critical(self, _("Error"),
_("Process failed to start"))
def set_running_state(self, state=True):
self.start_button.setEnabled(not state)
self.stop_button.setEnabled(state)
def read_output(self, error=False):
if error:
self.process.setReadChannel(QProcess.StandardError)
else:
self.process.setReadChannel(QProcess.StandardOutput)
qba = QByteArray()
while self.process.bytesAvailable():
if error:
qba += self.process.readAllStandardError()
else:
qba += self.process.readAllStandardOutput()
text = to_text_string(locale_codec.toUnicode(qba.data()))
if error:
self.error_output += text
else:
self.output += text
def finished(self):
self.set_running_state(False)
self.show_errorlog() # If errors occurred, show them.
self.output = self.error_output + self.output
# FIXME: figure out if show_data should be called here or
# as a signal from the combobox
self.show_data(justanalyzed=True)
self.sig_finished.emit()
def kill_if_running(self):
if self.process is not None:
if self.process.state() == QProcess.Running:
self.process.kill()
self.process.waitForFinished()
def show_data(self, justanalyzed=False):
if not justanalyzed:
self.output = None
self.log_button.setEnabled(
self.output is not None and len(self.output) > 0)
self.kill_if_running()
filename = to_text_string(self.filecombo.currentText())
if not filename:
return
self.datatree.load_data(self.DATAPATH)
self.datelabel.setText(_('Sorting data, please wait...'))
QApplication.processEvents()
self.datatree.show_tree()
text_style = "<span style=\'color: #444444\'><b>%s </b></span>"
date_text = text_style % time.strftime("%d %b %Y %H:%M",
time.localtime())
self.datelabel.setText(date_text)
class LineProfilerDataTree(QTreeWidget):
"""
Convenience tree widget (with built-in model)
to store and view line profiler data.
"""
def __init__(self, parent=None):
QTreeWidget.__init__(self, parent)
self.header_list = [
_('Line #'), _('Hits'), _('Time (ms)'), _('Per hit (ms)'),
_('% Time'), _('Line contents')]
self.stats = None # To be filled by self.load_data()
self.max_time = 0 # To be filled by self.load_data()
self.header().setDefaultAlignment(Qt.AlignCenter)
self.setColumnCount(len(self.header_list))
self.setHeaderLabels(self.header_list)
self.clear()
self.itemActivated.connect(self.item_activated)
def show_tree(self):
"""Populate the tree with line profiler data and display it."""
self.clear() # Clear before re-populating
self.setItemsExpandable(True)
self.setSortingEnabled(False)
self.populate_tree()
self.expandAll()
for col in range(self.columnCount()-1):
self.resizeColumnToContents(col)
if self.topLevelItemCount() > 1:
self.collapseAll()
self.setSortingEnabled(True)
self.sortItems(COL_POS, Qt.AscendingOrder)
def load_data(self, profdatafile):
"""Load line profiler data saved by kernprof module"""
# lstats has the following layout :
# lstats.timings =
# {(filename1, line_no1, function_name1):
# [(line_no1, hits1, total_time1),
# (line_no2, hits2, total_time2)],
# (filename2, line_no2, function_name2):
# [(line_no1, hits1, total_time1),
# (line_no2, hits2, total_time2),
# (line_no3, hits3, total_time3)]}
# lstats.unit = time_factor
with open(profdatafile, 'rb') as fid:
lstats = pickle.load(fid)
# First pass to group by filename
self.stats = dict()
linecache.checkcache()
for func_info, stats in lstats.timings.items():
            # func_info is a tuple containing (filename, line, function name)
filename, start_line_no = func_info[:2]
# Read code
start_line_no -= 1 # include the @profile decorator
all_lines = linecache.getlines(filename)
block_lines = inspect.getblock(all_lines[start_line_no:])
# Loop on each line of code
func_stats = []
func_total_time = 0.0
next_stat_line = 0
for line_no, code_line in enumerate(block_lines):
line_no += start_line_no + 1 # Lines start at 1
code_line = code_line.rstrip('\n')
if (next_stat_line >= len(stats)
or line_no != stats[next_stat_line][0]):
# Line didn't run
hits, line_total_time, time_per_hit = None, None, None
else:
# Compute line times
hits, line_total_time = stats[next_stat_line][1:]
line_total_time *= lstats.unit
time_per_hit = line_total_time / hits
func_total_time += line_total_time
next_stat_line += 1
func_stats.append(
[line_no, code_line, line_total_time, time_per_hit,
hits])
# Compute percent time
for line in func_stats:
line_total_time = line[2]
if line_total_time is None:
line.append(None)
else:
line.append(line_total_time / func_total_time)
# Fill dict
self.stats[func_info] = [func_stats, func_total_time]
def fill_item(self, item, filename, line_no, code, time, percent, perhit,
hits):
item.setData(COL_POS, Qt.UserRole, (osp.normpath(filename), line_no))
item.setData(COL_NO, Qt.DisplayRole, line_no)
item.setData(COL_LINE, Qt.DisplayRole, code)
if percent is None:
percent = ''
else:
percent = '%.1f' % (100 * percent)
item.setData(COL_PERCENT, Qt.DisplayRole, percent)
item.setTextAlignment(COL_PERCENT, Qt.AlignCenter)
if time is None:
time = ''
else:
time = '%.3f' % (time * 1e3)
item.setData(COL_TIME, Qt.DisplayRole, time)
item.setTextAlignment(COL_TIME, Qt.AlignCenter)
if perhit is None:
perhit = ''
else:
perhit = '%.3f' % (perhit * 1e3)
item.setData(COL_PERHIT, Qt.DisplayRole, perhit)
item.setTextAlignment(COL_PERHIT, Qt.AlignCenter)
if hits is None:
hits = ''
else:
hits = '%d' % hits
item.setData(COL_HITS, Qt.DisplayRole, hits)
item.setTextAlignment(COL_HITS, Qt.AlignCenter)
def populate_tree(self):
"""Create each item (and associated data) in the tree"""
if not self.stats:
warn_item = QTreeWidgetItem(self)
warn_item.setData(
0, Qt.DisplayRole,
_('No timings to display. '
'Did you forget to add @profile decorators ?')
.format(url=WEBSITE_URL))
warn_item.setFirstColumnSpanned(True)
warn_item.setTextAlignment(0, Qt.AlignCenter)
font = warn_item.font(0)
font.setStyle(QFont.StyleItalic)
warn_item.setFont(0, font)
return
try:
monospace_font = self.window().editor.get_plugin_font()
except AttributeError: # If run standalone for testing
monospace_font = QFont("Courier New")
monospace_font.setPointSize(10)
for func_info, func_data in self.stats.items():
# Function name and position
filename, start_line_no, func_name = func_info
func_stats, func_total_time = func_data
func_item = QTreeWidgetItem(self)
func_item.setData(
0, Qt.DisplayRole,
_('{func_name} ({time_ms:.3f}ms) in file "{filename}", '
'line {line_no}').format(
filename=filename,
line_no=start_line_no,
func_name=func_name,
time_ms=func_total_time * 1e3))
func_item.setFirstColumnSpanned(True)
func_item.setData(COL_POS, Qt.UserRole,
(osp.normpath(filename), start_line_no))
# For sorting by time
func_item.setData(COL_TIME, Qt.DisplayRole, func_total_time * 1e3)
func_item.setData(COL_PERCENT, Qt.DisplayRole,
func_total_time * 1e3)
if self.parent().use_colors:
                # Choose deterministic unique color for the function
md5 = hashlib.md5((filename + func_name).encode("utf8")).hexdigest()
hue = (int(md5[:2], 16) - 68) % 360 # avoid blue (unreadable)
func_color = QColor.fromHsv(hue, 200, 255)
else:
# Red color only
func_color = QColor.fromRgb(255, 0, 0)
# Lines of code
for line_info in func_stats:
line_item = QTreeWidgetItem(func_item)
(line_no, code_line, line_total_time, time_per_hit,
hits, percent) = line_info
self.fill_item(
line_item, filename, line_no, code_line,
line_total_time, percent, time_per_hit, hits)
# Color background
if line_total_time is not None:
alpha = percent
color = QColor(func_color)
color.setAlphaF(alpha) # Returns None
color = QBrush(color)
for col in range(self.columnCount()):
line_item.setBackground(col, color)
else:
for col in range(self.columnCount()):
line_item.setForeground(col, CODE_NOT_RUN_COLOR)
# Monospace font for code
line_item.setFont(COL_LINE, monospace_font)
def item_activated(self, item):
filename, line_no = item.data(COL_POS, Qt.UserRole)
self.parent().edit_goto.emit(filename, line_no, '')
def test():
"""Run widget test"""
from spyder.utils.qthelpers import qapplication
app = qapplication()
widget = LineProfilerWidget(None)
widget.resize(800, 600)
widget.show()
widget.analyze(osp.normpath(osp.join(osp.dirname(__file__), os.pardir,
'tests/profiling_test_script.py')),
use_colors=True)
sys.exit(app.exec_())
if __name__ == '__main__':
test()
| mit | 1,243,849,583,142,293,200 | 36.317568 | 92 | 0.567988 | false | 4.007983 | true | false | false |
ryfeus/lambda-packs | Spacy/source2.7/spacy/tokens/printers.py | 1 | 2687 | # coding: utf8
from __future__ import unicode_literals
from .doc import Doc
from ..symbols import HEAD, TAG, DEP, ENT_IOB, ENT_TYPE
def merge_ents(doc):
"""Helper: merge adjacent entities into single tokens; modifies the doc."""
for ent in doc.ents:
ent.merge(ent.root.tag_, ent.text, ent.label_)
return doc
def format_POS(token, light, flat):
"""Helper: form the POS output for a token."""
subtree = dict([
("word", token.text),
("lemma", token.lemma_), # trigger
("NE", token.ent_type_), # trigger
("POS_fine", token.tag_),
("POS_coarse", token.pos_),
("arc", token.dep_),
("modifiers", [])
])
if light:
subtree.pop("lemma")
subtree.pop("NE")
if flat:
subtree.pop("arc")
subtree.pop("modifiers")
return subtree
def POS_tree(root, light=False, flat=False):
"""Helper: generate a POS tree for a root token. The doc must have
`merge_ents(doc)` ran on it.
"""
subtree = format_POS(root, light=light, flat=flat)
for c in root.children:
subtree["modifiers"].append(POS_tree(c))
return subtree
def parse_tree(doc, light=False, flat=False):
"""Make a copy of the doc and construct a syntactic parse tree similar to
displaCy. Generates the POS tree for all sentences in a doc.
doc (Doc): The doc for parsing.
RETURNS (dict): The parse tree.
EXAMPLE:
>>> doc = nlp('Bob brought Alice the pizza. Alice ate the pizza.')
>>> trees = doc.print_tree()
>>> trees[1]
{'modifiers': [
{'modifiers': [], 'NE': 'PERSON', 'word': 'Alice', 'arc': 'nsubj',
'POS_coarse': 'PROPN', 'POS_fine': 'NNP', 'lemma': 'Alice'},
{'modifiers': [
{'modifiers': [], 'NE': '', 'word': 'the', 'arc': 'det',
'POS_coarse': 'DET', 'POS_fine': 'DT', 'lemma': 'the'}],
'NE': '', 'word': 'pizza', 'arc': 'dobj', 'POS_coarse': 'NOUN',
'POS_fine': 'NN', 'lemma': 'pizza'},
{'modifiers': [], 'NE': '', 'word': '.', 'arc': 'punct',
'POS_coarse': 'PUNCT', 'POS_fine': '.', 'lemma': '.'}],
'NE': '', 'word': 'ate', 'arc': 'ROOT', 'POS_coarse': 'VERB',
'POS_fine': 'VBD', 'lemma': 'eat'}
"""
doc_clone = Doc(doc.vocab, words=[w.text for w in doc])
doc_clone.from_array([HEAD, TAG, DEP, ENT_IOB, ENT_TYPE],
doc.to_array([HEAD, TAG, DEP, ENT_IOB, ENT_TYPE]))
merge_ents(doc_clone) # merge the entities into single tokens first
return [POS_tree(sent.root, light=light, flat=flat)
for sent in doc_clone.sents]
| mit | 3,038,236,058,106,726,400 | 35.310811 | 79 | 0.54224 | false | 3.317284 | false | false | false |
pombredanne/django-url-filter | url_filter/backends/sqlalchemy.py | 1 | 4251 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import itertools
from sqlalchemy import func
from sqlalchemy.orm import class_mapper
from sqlalchemy.sql.expression import not_
from .base import BaseFilterBackend
def lower(value):
try:
return value.lower()
except AttributeError:
return value
class SQLAlchemyFilterBackend(BaseFilterBackend):
supported_lookups = {
'contains',
'endswith',
'exact',
'gt',
'gte',
'icontains',
'iendswith',
'iexact',
'in',
'isnull',
'istartswith',
'lt',
'lte',
'range',
'startswith',
}
def __init__(self, *args, **kwargs):
super(SQLAlchemyFilterBackend, self).__init__(*args, **kwargs)
assert len(self.queryset._entities) == 1, (
'{} does not support filtering when multiple entities '
'are being queried (e.g. session.query(Foo, Bar)).'
''.format(self.__class__.__name__)
)
def get_model(self):
return self.queryset._primary_entity.entities[0]
def filter(self):
if not self.specs:
return self.queryset
clauses = [self.build_clause(spec) for spec in self.specs]
conditions, joins = zip(*clauses)
joins = list(itertools.chain(*joins))
qs = self.queryset
if joins:
qs = qs.join(*joins)
return qs.filter(*conditions)
def build_clause(self, spec):
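        # The spec describes one filter; e.g. a querystring filter roughly like
        # "author__name__contains=foo" arrives with components ['author', 'name']
        # and lookup 'contains'. Components that resolve to relationships are
        # collected in to_join so the caller can join them; the last component
        # yields the column the lookup clause is built against.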
to_join = []
model = self.model
for component in spec.components:
_field = getattr(model, component)
field = self._get_properties_for_model(model)[component]
try:
model = self._get_related_model_for_field(field)
except AttributeError:
break
else:
to_join.append(_field)
builder = getattr(self, '_build_clause_{}'.format(spec.lookup))
column = self._get_attribute_for_field(field)
clause = builder(spec, column)
if spec.is_negated:
clause = not_(clause)
return clause, to_join
def _build_clause_contains(self, spec, column):
return column.contains(spec.value)
def _build_clause_endswith(self, spec, column):
return column.endswith(spec.value)
def _build_clause_exact(self, spec, column):
return column == spec.value
def _build_clause_gt(self, spec, column):
return column > spec.value
def _build_clause_gte(self, spec, column):
return column >= spec.value
def _build_clause_icontains(self, spec, column):
return func.lower(column).contains(lower(spec.value))
def _build_clause_iendswith(self, spec, column):
return func.lower(column).endswith(lower(spec.value))
def _build_clause_iexact(self, spec, column):
return func.lower(column) == lower(spec.value)
def _build_clause_in(self, spec, column):
return column.in_(spec.value)
def _build_clause_isnull(self, spec, column):
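        # SQLAlchemy overloads == None / != None on column expressions to emit
        # IS NULL / IS NOT NULL, hence the noqa-annotated comparisons below.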
if spec.value:
return column == None # noqa
else:
return column != None # noqa
def _build_clause_istartswith(self, spec, column):
return func.lower(column).startswith(lower(spec.value))
def _build_clause_lt(self, spec, column):
return column < spec.value
def _build_clause_lte(self, spec, column):
return column <= spec.value
def _build_clause_range(self, spec, column):
return column.between(*spec.value)
def _build_clause_startswith(self, spec, column):
return column.startswith(spec.value)
@classmethod
def _get_properties_for_model(cls, model):
mapper = class_mapper(model)
return {
i.key: i
for i in mapper.iterate_properties
}
@classmethod
def _get_column_for_field(cls, field):
return field.columns[0]
@classmethod
def _get_attribute_for_field(cls, field):
return field.class_attribute
@classmethod
    def _get_related_model_for_field(cls, field):
return field._dependency_processor.mapper.class_
| mit | -6,336,739,438,157,124,000 | 26.784314 | 72 | 0.597271 | false | 4.044719 | false | false | false |
kreeger/etcetera | urls.py | 1 | 1576 | from django.conf.urls.defaults import *
from django.contrib.auth.views import login, logout
from django.contrib import admin
from etcetera.settings import SITE_ROOT, DEBUG
admin.autodiscover()
# For master/general use.
urlpatterns = patterns('',
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/jsi18n/$', 'django.views.i18n.javascript_catalog'),
url(r'^admin/', include(admin.site.urls)),
url(r'^login/$', login, name="etcetera-login"),
url(r'^logout/$', logout, name="etcetera-logout"),
)
# For only when in development.
if DEBUG:
urlpatterns += patterns('',
url(r'^_media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': (SITE_ROOT + '/_media')}),
)
# For equipment management.
urlpatterns += patterns('',
url(r'^equipment/', include('etcetera.equipment.urls')),
)
# For checkout/reservation management.
urlpatterns += patterns('',
url(r'^checkout/', include('etcetera.checkout.urls')),
)
# For service management.
urlpatterns += patterns('',
url(r'^service/', include('etcetera.service.urls')),
)
# For report generation.
urlpatterns += patterns('',
url(r'^reports/', include('etcetera.reports.urls')),
)
# For university structure management.
urlpatterns += patterns('',
url(r'^structure/', include('etcetera.structure.urls')),
)
# For extra things.
urlpatterns += patterns('',
url(r'^extras/', include('etcetera.extras.urls')),
url(r'^user/', include('etcetera.extras.urls-profile')),
url(r'^$', 'etcetera.extras.views.index', name="etcetera-home"),
) | bsd-3-clause | -469,688,098,523,634,000 | 28.203704 | 111 | 0.673223 | false | 3.448578 | false | false | false |
kaplun/ops | modules/bibupload/lib/bibupload.py | 1 | 146505 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibUpload: Receive MARC XML file and update the appropriate database
tables according to options.
"""
__revision__ = "$Id$"
import os
import re
import sys
import time
from datetime import datetime
from zlib import compress
import socket
import marshal
import copy
import tempfile
import urlparse
import urllib2
import urllib
from invenio.config import CFG_OAI_ID_FIELD, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG, \
CFG_BIBUPLOAD_STRONG_TAGS, \
CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_DELETE_FORMATS, \
CFG_SITE_URL, \
CFG_SITE_SECURE_URL, \
CFG_SITE_RECORD, \
CFG_OAI_PROVENANCE_ALTERED_SUBFIELD, \
CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS, \
CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE, \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE
from invenio.jsonutils import json, CFG_JSON_AVAILABLE
from invenio.bibupload_config import CFG_BIBUPLOAD_CONTROLFIELD_TAGS, \
CFG_BIBUPLOAD_SPECIAL_TAGS, \
CFG_BIBUPLOAD_DELETE_CODE, \
CFG_BIBUPLOAD_DELETE_VALUE, \
CFG_BIBUPLOAD_OPT_MODES
from invenio.dbquery import run_sql
from invenio.bibrecord import create_records, \
record_add_field, \
record_delete_field, \
record_xml_output, \
record_get_field_instances, \
record_get_field_value, \
record_get_field_values, \
field_get_subfield_values, \
field_get_subfield_instances, \
record_modify_subfield, \
record_delete_subfield_from, \
record_delete_fields, \
record_add_subfield_into, \
record_find_field, \
record_extract_oai_id, \
record_extract_dois, \
record_has_field, \
records_identical, \
record_drop_duplicate_fields
from invenio.search_engine import get_record, record_exists, search_pattern
from invenio.dateutils import convert_datestruct_to_datetext
from invenio.errorlib import register_exception
from invenio.bibcatalog import BIBCATALOG_SYSTEM
from invenio.intbitset import intbitset
from invenio.urlutils import make_user_agent_string
from invenio.textutils import wash_for_xml
from invenio.config import CFG_BIBDOCFILE_FILEDIR
from invenio.bibtask import task_init, write_message, \
task_set_option, task_get_option, task_get_task_param, \
task_update_progress, task_sleep_now_if_required, fix_argv_paths, \
RecoverableError
from invenio.bibdocfile import BibRecDocs, file_strip_ext, normalize_format, \
get_docname_from_url, check_valid_url, download_url, \
KEEP_OLD_VALUE, decompose_bibdocfile_url, InvenioBibDocFileError, \
bibdocfile_url_p, CFG_BIBDOCFILE_AVAILABLE_FLAGS, guess_format_from_url, \
BibRelation, MoreInfo
from invenio.search_engine import search_pattern
from invenio.bibupload_revisionverifier import RevisionVerifier, \
InvenioBibUploadConflictingRevisionsError, \
InvenioBibUploadInvalidRevisionError, \
InvenioBibUploadMissing005Error, \
InvenioBibUploadUnchangedRecordError
#Statistic variables
stat = {}
stat['nb_records_to_upload'] = 0
stat['nb_records_updated'] = 0
stat['nb_records_inserted'] = 0
stat['nb_errors'] = 0
stat['nb_holdingpen'] = 0
stat['exectime'] = time.localtime()
_WRITING_RIGHTS = None
CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS = ('oracle', )
CFG_HAS_BIBCATALOG = "UNKNOWN"
def check_bibcatalog():
"""
Return True if bibcatalog is available.
"""
global CFG_HAS_BIBCATALOG # pylint: disable=W0603
if CFG_HAS_BIBCATALOG != "UNKNOWN":
return CFG_HAS_BIBCATALOG
CFG_HAS_BIBCATALOG = True
if BIBCATALOG_SYSTEM is not None:
bibcatalog_response = BIBCATALOG_SYSTEM.check_system()
else:
bibcatalog_response = "No ticket system configured"
if bibcatalog_response != "":
write_message("BibCatalog error: %s\n" % (bibcatalog_response,))
CFG_HAS_BIBCATALOG = False
return CFG_HAS_BIBCATALOG
## Let's set a reasonable timeout for URL request (e.g. FFT)
socket.setdefaulttimeout(40)
def parse_identifier(identifier):
"""Parse the identifier and determine if it is temporary or fixed"""
id_str = str(identifier)
if not id_str.startswith("TMP:"):
return (False, identifier)
else:
return (True, id_str[4:])
def resolve_identifier(tmps, identifier):
"""Resolves an identifier. If the identifier is not temporary, this
function is an identity on the second argument. Otherwise, a resolved
value is returned or an exception raised"""
is_tmp, tmp_id = parse_identifier(identifier)
if is_tmp:
if not tmp_id in tmps:
raise StandardError("Temporary identifier %s not present in the dictionary" % (tmp_id, ))
if tmps[tmp_id] == -1:
            # the identifier has been declared but never assigned a value - probably an error during processing
            raise StandardError("Temporary identifier %s has been declared, but never assigned a value. Probably an error during processing of the corresponding FFT has happened. Please see the log" % (tmp_id, ))
return int(tmps[tmp_id])
else:
return int(identifier)
_re_find_001 = re.compile('<controlfield\\s+tag=("001"|\'001\')\\s*>\\s*(\\d*)\\s*</controlfield>', re.S)
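# The regexp above matches a MARCXML 001 controlfield such as
# '<controlfield tag="001">123</controlfield>' and captures the record id in
# its second group; bibupload_pending_recids() below relies on that group to
# spot records touched by queued bibupload tasks.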
def bibupload_pending_recids():
"""This function embed a bit of A.I. and is more a hack than an elegant
algorithm. It should be updated in case bibupload/bibsched are modified
in incompatible ways.
This function return the intbitset of all the records that are being
(or are scheduled to be) touched by other bibuploads.
"""
options = run_sql("""SELECT arguments FROM schTASK WHERE status<>'DONE' AND
proc='bibupload' AND (status='RUNNING' OR status='CONTINUING' OR
status='WAITING' OR status='SCHEDULED' OR status='ABOUT TO STOP' OR
status='ABOUT TO SLEEP')""")
ret = intbitset()
xmls = []
if options:
for arguments in options:
arguments = marshal.loads(arguments[0])
for argument in arguments[1:]:
if argument.startswith('/'):
# XMLs files are recognizable because they're absolute
# files...
xmls.append(argument)
for xmlfile in xmls:
# Let's grep for the 001
try:
xml = open(xmlfile).read()
ret += [int(group[1]) for group in _re_find_001.findall(xml)]
except:
continue
return ret
### bibupload engine functions:
def bibupload(record, opt_mode=None, opt_notimechange=0, oai_rec_id="", pretend=False,
tmp_ids=None, tmp_vers=None):
"""Main function: process a record and fit it in the tables
bibfmt, bibrec, bibrec_bibxxx, bibxxx with proper record
metadata.
    Return (error_code, recID, msg) of the processed record.
"""
if tmp_ids is None:
tmp_ids = {}
if tmp_vers is None:
tmp_vers = {}
if opt_mode == 'reference':
## NOTE: reference mode has been deprecated in favour of 'correct'
opt_mode = 'correct'
assert(opt_mode in CFG_BIBUPLOAD_OPT_MODES)
try:
record_xml_output(record).decode('utf-8')
except UnicodeDecodeError:
msg = " Failed: Invalid utf-8 characters."
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
error = None
affected_tags = {}
original_record = {}
rec_old = {}
now = datetime.now() # will hold record creation/modification date
record_had_altered_bit = False
is_opt_mode_delete = False
# Extraction of the Record Id from 001, SYSNO or OAIID or DOI tags:
rec_id = retrieve_rec_id(record, opt_mode, pretend=pretend)
if rec_id == -1:
msg = " Failed: either the record already exists and insert was " \
"requested or the record does not exists and " \
"replace/correct/append has been used"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
elif rec_id > 0:
write_message(" -Retrieve record ID (found %s): DONE." % rec_id, verbose=2)
(unique_p, msg) = check_record_doi_is_unique(rec_id, record)
if not unique_p:
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if not record.has_key('001'):
# Found record ID by means of SYSNO or OAIID or DOI, and the
# input MARCXML buffer does not have this 001 tag, so we
# should add it now:
error = record_add_field(record, '001', controlfield_value=rec_id)
if error is None:
msg = " Failed: Error during adding the 001 controlfield " \
"to the record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
write_message(" -Added tag 001: DONE.", verbose=2)
write_message(" -Check if the xml marc file is already in the database: DONE" , verbose=2)
record_deleted_p = False
if opt_mode == 'insert' or \
(opt_mode == 'replace_or_insert') and rec_id is None:
insert_mode_p = True
# Insert the record into the bibrec databases to have a recordId
rec_id = create_new_record(pretend=pretend)
write_message(" -Creation of a new record id (%d): DONE" % rec_id, verbose=2)
# we add the record Id control field to the record
error = record_add_field(record, '001', controlfield_value=rec_id)
if error is None:
msg = " Failed: Error during adding the 001 controlfield " \
"to the record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
if '005' not in record:
error = record_add_field(record, '005', controlfield_value=now.strftime("%Y%m%d%H%M%S.0"))
if error is None:
msg = " ERROR: during adding to 005 controlfield to record"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
error = None
else:
write_message(" Note: 005 already existing upon inserting of new record. Keeping it.", verbose=2)
elif opt_mode != 'insert':
insert_mode_p = False
# Update Mode
# Retrieve the old record to update
rec_old = get_record(rec_id)
record_had_altered_bit = record_get_field_values(rec_old, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4], CFG_OAI_PROVENANCE_ALTERED_SUBFIELD)
# Also save a copy to restore previous situation in case of errors
original_record = get_record(rec_id)
if rec_old is None:
msg = " Failed during the creation of the old record!"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
write_message(" -Retrieve the old record to update: DONE", verbose=2)
# flag to check whether the revisions have been verified and patch generated.
# If revision verification failed, then we need to manually identify the affected tags
# and process them
revision_verified = False
rev_verifier = RevisionVerifier()
#check for revision conflicts before updating record
if record_has_field(record, '005') and not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS:
write_message(" -Upload Record has 005. Verifying Revision", verbose=2)
try:
rev_res = rev_verifier.verify_revision(record, original_record, opt_mode)
if rev_res:
opt_mode = rev_res[0]
record = rev_res[1]
affected_tags = rev_res[2]
revision_verified = True
write_message(lambda: " -Patch record generated. Changing opt_mode to correct.\nPatch:\n%s " % record_xml_output(record), verbose=2)
else:
write_message(" -No Patch Record.", verbose=2)
except InvenioBibUploadUnchangedRecordError, err:
msg = " -ISSUE: %s" % err
write_message(msg, verbose=1, stream=sys.stderr)
write_message(msg, " Continuing anyway in case there are FFT or other tags")
except InvenioBibUploadConflictingRevisionsError, err:
msg = " -ERROR: Conflicting Revisions - %s" % err
write_message(msg, verbose=1, stream=sys.stderr)
submit_ticket_for_holding_pen(rec_id, err, "Conflicting Revisions. Inserting record into holding pen.", pretend=pretend)
insert_record_into_holding_pen(record, str(rec_id), pretend=pretend)
return (2, int(rec_id), msg)
except InvenioBibUploadInvalidRevisionError, err:
msg = " -ERROR: Invalid Revision - %s" % err
write_message(msg)
submit_ticket_for_holding_pen(rec_id, err, "Invalid Revisions. Inserting record into holding pen.", pretend=pretend)
insert_record_into_holding_pen(record, str(rec_id), pretend=pretend)
return (2, int(rec_id), msg)
except InvenioBibUploadMissing005Error, err:
msg = " -ERROR: Missing 005 - %s" % err
write_message(msg)
submit_ticket_for_holding_pen(rec_id, err, "Missing 005. Inserting record into holding pen.", pretend=pretend)
insert_record_into_holding_pen(record, str(rec_id), pretend=pretend)
return (2, int(rec_id), msg)
else:
write_message(" - No 005 Tag Present. Resuming normal flow.", verbose=2)
# dictionaries to temporarily hold original recs tag-fields
existing_tags = {}
retained_tags = {}
# in case of delete operation affected tags should be deleted in delete_bibrec_bibxxx
# but should not be updated again in STAGE 4
# utilising the below flag
is_opt_mode_delete = False
if not revision_verified:
# either 005 was not present or opt_mode was not correct/replace
# in this case we still need to find out affected tags to process
write_message(" - Missing 005 or opt_mode!=Replace/Correct.Revision Verifier not called.", verbose=2)
# Identify affected tags
if opt_mode == 'correct' or opt_mode == 'replace' or opt_mode == 'replace_or_insert':
rec_diff = rev_verifier.compare_records(record, original_record, opt_mode)
affected_tags = rev_verifier.retrieve_affected_tags_with_ind(rec_diff)
elif opt_mode == 'delete':
# populate an intermediate dictionary
# used in upcoming step related to 'delete' mode
is_opt_mode_delete = True
for tag, fields in original_record.iteritems():
existing_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields]
elif opt_mode == 'append':
for tag, fields in record.iteritems():
if tag not in CFG_BIBUPLOAD_CONTROLFIELD_TAGS:
affected_tags[tag] = [(field[1], field[2]) for field in fields]
# In Replace mode, take over old strong tags if applicable:
if opt_mode == 'replace' or \
opt_mode == 'replace_or_insert':
copy_strong_tags_from_old_record(record, rec_old)
# Delete tags to correct in the record
if opt_mode == 'correct':
delete_tags_to_correct(record, rec_old)
write_message(" -Delete the old tags to correct in the old record: DONE",
verbose=2)
# Delete tags specified if in delete mode
if opt_mode == 'delete':
record = delete_tags(record, rec_old)
for tag, fields in record.iteritems():
retained_tags[tag] = [tag + (field[1] != ' ' and field[1] or '_') + (field[2] != ' ' and field[2] or '_') for field in fields]
#identify the tags that have been deleted
for tag in existing_tags.keys():
if tag not in retained_tags:
for item in existing_tags[tag]:
tag_to_add = item[0:3]
ind1, ind2 = item[3], item[4]
if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]:
affected_tags[tag_to_add].append((ind1, ind2))
else:
affected_tags[tag_to_add] = [(ind1, ind2)]
else:
deleted = list(set(existing_tags[tag]) - set(retained_tags[tag]))
for item in deleted:
tag_to_add = item[0:3]
ind1, ind2 = item[3], item[4]
if tag_to_add in affected_tags and (ind1, ind2) not in affected_tags[tag_to_add]:
affected_tags[tag_to_add].append((ind1, ind2))
else:
affected_tags[tag_to_add] = [(ind1, ind2)]
write_message(" -Delete specified tags in the old record: DONE", verbose=2)
# Append new tag to the old record and update the new record with the old_record modified
if opt_mode == 'append' or opt_mode == 'correct':
record = append_new_tag_to_old_record(record, rec_old)
write_message(" -Append new tags to the old record: DONE", verbose=2)
write_message(" -Affected Tags found after comparing upload and original records: %s"%(str(affected_tags)), verbose=2)
        # A 005 tag should be added every time the record is modified.
        # If an existing record is modified, its 005 tag should be overwritten with a new revision value
if record.has_key('005'):
record_delete_field(record, '005')
write_message(" Deleted the existing 005 tag.", verbose=2)
last_revision = run_sql("SELECT MAX(job_date) FROM hstRECORD WHERE id_bibrec=%s", (rec_id, ))[0][0]
if last_revision and last_revision.strftime("%Y%m%d%H%M%S.0") == now.strftime("%Y%m%d%H%M%S.0"):
## We are updating the same record within the same seconds! It's less than
## the minimal granularity. Let's pause for 1 more second to take a breath :-)
time.sleep(1)
now = datetime.now()
error = record_add_field(record, '005', controlfield_value=now.strftime("%Y%m%d%H%M%S.0"))
if error is None:
write_message(" Failed: Error during adding to 005 controlfield to record", verbose=1, stream=sys.stderr)
return (1, int(rec_id))
else:
error=None
write_message(lambda: " -Added tag 005: DONE. " + str(record_get_field_value(record, '005', '', '')), verbose=2)
# adding 005 to affected tags will delete the existing 005 entry
# and update with the latest timestamp.
if '005' not in affected_tags:
affected_tags['005'] = [(' ', ' ')]
write_message(" -Stage COMPLETED", verbose=2)
record_deleted_p = False
try:
if not record_is_valid(record):
msg = "ERROR: record is not valid"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, -1, msg)
# Have a look if we have FFT tags
write_message("Stage 2: Start (Process FFT tags if exist).", verbose=2)
record_had_FFT = False
bibrecdocs = None
if extract_tag_from_record(record, 'FFT') is not None:
record_had_FFT = True
if not writing_rights_p():
msg = "ERROR: no rights to write fulltext files"
write_message(" Stage 2 failed: %s" % msg,
verbose=1, stream=sys.stderr)
raise StandardError(msg)
try:
bibrecdocs = BibRecDocs(rec_id)
record = elaborate_fft_tags(record, rec_id, opt_mode,
pretend=pretend, tmp_ids=tmp_ids,
tmp_vers=tmp_vers, bibrecdocs=bibrecdocs)
except Exception, e:
register_exception()
msg = " Stage 2 failed: ERROR: while elaborating FFT tags: %s" % e
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if record is None:
msg = " Stage 2 failed: ERROR: while elaborating FFT tags"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
# Have a look if we have FFT tags
write_message("Stage 2B: Start (Synchronize 8564 tags).", verbose=2)
if record_had_FFT or extract_tag_from_record(record, '856') is not None:
try:
if bibrecdocs is None:
bibrecdocs = BibRecDocs(rec_id)
record = synchronize_8564(rec_id, record, record_had_FFT, bibrecdocs, pretend=pretend)
# in case if FFT is in affected list make appropriate changes
if ('4', ' ') not in affected_tags.get('856', []):
if '856' not in affected_tags:
affected_tags['856'] = [('4', ' ')]
elif ('4', ' ') not in affected_tags['856']:
affected_tags['856'].append(('4', ' '))
write_message(" -Modified field list updated with FFT details: %s" % str(affected_tags), verbose=2)
except Exception, e:
register_exception(alert_admin=True)
msg = " Stage 2B failed: ERROR: while synchronizing 8564 tags: %s" % e
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if record is None:
msg = " Stage 2B failed: ERROR: while synchronizing 8564 tags"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
write_message("Stage 3: Start (Apply fields deletion requests).", verbose=2)
write_message(lambda: " Record before deletion:\n%s" % record_xml_output(record), verbose=9)
# remove fields with __DELETE_FIELDS__
        # NOTE: creating a temporary deep copy of the record for iteration to avoid a
        # RuntimeError due to change in dictionary size during iteration
tmp_rec = copy.deepcopy(record)
for tag in tmp_rec:
for data_tuple in record[tag]:
if (CFG_BIBUPLOAD_DELETE_CODE, CFG_BIBUPLOAD_DELETE_VALUE) in data_tuple[0]:
# delete the tag with particular indicator pairs from original record
record_delete_field(record, tag, data_tuple[1], data_tuple[2])
write_message(lambda: " Record after cleaning up fields to be deleted:\n%s" % record_xml_output(record), verbose=9)
if opt_mode == 'append':
write_message("Stage 3b: Drop duplicate fields in append mode.", verbose=2)
record = record_drop_duplicate_fields(record)
write_message(lambda: " Record after dropping duplicate fields:\n%s" % record_xml_output(record), verbose=9)
# Update of the BibFmt
write_message("Stage 4: Start (Update bibfmt).", verbose=2)
updates_exist = not records_identical(record, original_record)
if updates_exist:
# if record_had_altered_bit, this must be set to true, since the
# record has been altered.
if record_had_altered_bit:
oai_provenance_fields = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
for oai_provenance_field in oai_provenance_fields:
for i, (code, dummy_value) in enumerate(oai_provenance_field[0]):
if code == CFG_OAI_PROVENANCE_ALTERED_SUBFIELD:
oai_provenance_field[0][i] = (code, 'true')
tmp_indicators = (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
if tmp_indicators not in affected_tags.get(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], []):
if CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3] not in affected_tags:
affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]] = [tmp_indicators]
else:
affected_tags[CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3]].append(tmp_indicators)
write_message(lambda: " Updates exists:\n%s\n!=\n%s" % (record, original_record), verbose=9)
# format the single record as xml
rec_xml_new = record_xml_output(record)
# Update bibfmt with the format xm of this record
modification_date = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(record_get_field_value(record, '005'), '%Y%m%d%H%M%S.0'))
error = update_bibfmt_format(rec_id, rec_xml_new, 'xm', modification_date, pretend=pretend)
if error == 1:
msg = " Failed: ERROR: during update_bibfmt_format 'xm'"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
error = update_bibfmt_format(rec_id, marshal.dumps(record), 'recstruct', modification_date, pretend=pretend)
if error == 1:
msg = " Failed: ERROR: during update_bibfmt_format 'recstruct'"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
if not CFG_BIBUPLOAD_DISABLE_RECORD_REVISIONS:
# archive MARCXML format of this record for version history purposes:
error = archive_marcxml_for_history(rec_id, pretend=pretend)
if error == 1:
msg = " ERROR: Failed to archive MARCXML for history"
write_message(msg, verbose=1, stream=sys.stderr)
return (1, int(rec_id), msg)
else:
write_message(" -Archived MARCXML for history: DONE", verbose=2)
# delete some formats like HB upon record change:
if updates_exist or record_had_FFT:
for format_to_delete in CFG_BIBUPLOAD_DELETE_FORMATS:
try:
delete_bibfmt_format(rec_id, format_to_delete, pretend=pretend)
except:
# OK, some formats like HB could not have been deleted, no big deal
pass
write_message(" -Stage COMPLETED", verbose=2)
        ## Let's assert that one and only one 005 tag exists at this stage.
assert len(record['005']) == 1
# Update the database MetaData
write_message("Stage 5: Start (Update the database with the metadata).",
verbose=2)
if insert_mode_p:
update_database_with_metadata(record, rec_id, oai_rec_id, pretend=pretend)
write_message(" -Stage COMPLETED", verbose=2)
elif opt_mode in ('replace', 'replace_or_insert',
'append', 'correct', 'delete') and updates_exist:
# now we clear all the rows from bibrec_bibxxx from the old
record_deleted_p = True
delete_bibrec_bibxxx(rec_old, rec_id, affected_tags, pretend=pretend)
            # metadata update will insert tags that are available in affected_tags,
            # but for delete, once the tags have been deleted from bibrec_bibxxx, they don't have to be inserted
            # again, except for 005.
if is_opt_mode_delete:
tmp_affected_tags = copy.deepcopy(affected_tags)
for tag in tmp_affected_tags:
if tag != '005':
affected_tags.pop(tag)
write_message(" -Clean bibrec_bibxxx: DONE", verbose=2)
update_database_with_metadata(record, rec_id, oai_rec_id, affected_tags, pretend=pretend)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED in mode %s" % opt_mode,
verbose=2)
record_deleted_p = False
# Finally we update the bibrec table with the current date
write_message("Stage 6: Start (Update bibrec table with current date).",
verbose=2)
if opt_notimechange == 0 and (updates_exist or record_had_FFT):
bibrec_now = convert_datestruct_to_datetext(time.localtime())
write_message(" -Retrieved current localtime: DONE", verbose=2)
update_bibrec_date(bibrec_now, rec_id, insert_mode_p, pretend=pretend)
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
# Increase statistics
if insert_mode_p:
stat['nb_records_inserted'] += 1
else:
stat['nb_records_updated'] += 1
# Upload of this record finish
write_message("Record "+str(rec_id)+" DONE", verbose=1)
return (0, int(rec_id), "")
finally:
if record_deleted_p:
            ## BibUpload has failed, leaving the record deleted. We should
            ## restore the original record then.
update_database_with_metadata(original_record, rec_id, oai_rec_id, pretend=pretend)
write_message(" Restored original record", verbose=1, stream=sys.stderr)
def record_is_valid(record):
"""
Check if the record is valid. Currently this simply checks if the record
has exactly one rec_id.
@param record: the record
@type record: recstruct
@return: True if the record is valid
@rtype: bool
"""
rec_ids = record_get_field_values(record, tag="001")
if len(rec_ids) != 1:
write_message(" The record is not valid: it has not a single rec_id: %s" % (rec_ids), stream=sys.stderr)
return False
return True
def find_record_ids_by_oai_id(oaiId):
"""
    Find the record identifiers matching a given OAI identifier.
    Returns an intbitset of record ids matching the given OAI identifier.
"""
# Is this record already in invenio (matching by oaiid)
if oaiId:
recids = search_pattern(p=oaiId, f=CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, m='e')
# Is this record already in invenio (matching by reportnumber i.e.
# particularly 037. Idea: to avoid double insertions)
repnumber = oaiId.split(":")[-1]
if repnumber:
recids |= search_pattern(p = repnumber,
f = "reportnumber",
m = 'e' )
# Is this record already in invenio (matching by reportnumber i.e.
# particularly 037. Idea: to avoid double insertions)
repnumber = "arXiv:" + oaiId.split(":")[-1]
recids |= search_pattern(p = repnumber,
f = "reportnumber",
m = 'e' )
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if CFG_CERN_SITE:
return recids - (search_pattern(p='DELETED', f='980__%', m='e') | search_pattern(p='DUMMY', f='980__%', m='e'))
else:
return recids - search_pattern(p='DELETED', f='980__%', m='e')
else:
return recids
else:
return intbitset()
def bibupload_post_phase(record, mode=None, rec_id="", pretend=False,
tmp_ids=None, tmp_vers=None):
def _elaborate_tag(record, tag, fun):
if extract_tag_from_record(record, tag) is not None:
try:
record = fun()
except Exception, e:
register_exception()
write_message(" Stage failed: ERROR: while elaborating %s tags: %s" % (tag, e),
verbose=1, stream=sys.stderr)
return (1, int(rec_id)) # TODO: ?
if record is None:
write_message(" Stage failed: ERROR: while elaborating %s tags" % (tag, ),
verbose=1, stream=sys.stderr)
return (1, int(rec_id))
write_message(" -Stage COMPLETED", verbose=2)
else:
write_message(" -Stage NOT NEEDED", verbose=2)
if tmp_ids is None:
tmp_ids = {}
if tmp_vers is None:
tmp_vers = {}
_elaborate_tag(record, "BDR", lambda: elaborate_brt_tags(record, rec_id = rec_id,
mode = mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers))
_elaborate_tag(record, "BDM", lambda: elaborate_mit_tags(record, rec_id = rec_id,
mode = mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers))
def submit_ticket_for_holding_pen(rec_id, err, msg, pretend=False):
"""
Submit a ticket via BibCatalog to report about a record that has been put
into the Holding Pen.
    @param rec_id: the affected record
    @param err: the corresponding Exception
    @param msg: verbose message
"""
from invenio import bibtask
from invenio.webuser import get_email_from_username, get_uid_from_email
user = task_get_task_param("user")
uid = None
if user:
try:
uid = get_uid_from_email(get_email_from_username(user))
except Exception, err:
write_message("WARNING: can't reliably retrieve uid for user %s: %s" % (user, err), stream=sys.stderr)
if check_bibcatalog():
text = """
%(msg)s found for record %(rec_id)s: %(err)s
See: <%(siteurl)s/record/edit/#state=edit&recid=%(rec_id)s>
BibUpload task information:
task_id: %(task_id)s
task_specific_name: %(task_specific_name)s
user: %(user)s
task_params: %(task_params)s
task_options: %(task_options)s""" % {
"msg": msg,
"rec_id": rec_id,
"err": err,
"siteurl": CFG_SITE_SECURE_URL,
"task_id": task_get_task_param("task_id"),
"task_specific_name": task_get_task_param("task_specific_name"),
"user": user,
"task_params": bibtask._TASK_PARAMS,
"task_options": bibtask._OPTIONS}
if not pretend:
BIBCATALOG_SYSTEM.ticket_submit(subject="%s: %s by %s" % (msg, rec_id, user), recordid=rec_id, text=text, queue=CFG_BIBUPLOAD_CONFLICTING_REVISION_TICKET_QUEUE, owner=uid)
def insert_record_into_holding_pen(record, oai_id, pretend=False):
query = "INSERT INTO bibHOLDINGPEN (oai_id, changeset_date, changeset_xml, id_bibrec) VALUES (%s, NOW(), %s, %s)"
xml_record = record_xml_output(record)
bibrec_ids = find_record_ids_by_oai_id(oai_id) # here determining the identifier of the record
if len(bibrec_ids) > 0:
bibrec_id = bibrec_ids.pop()
else:
# id not found by using the oai_id, let's use a wider search based
# on any information we might have.
bibrec_id = retrieve_rec_id(record, 'holdingpen', pretend=pretend)
if bibrec_id is None:
bibrec_id = 0
if not pretend:
run_sql(query, (oai_id, compress(xml_record), bibrec_id))
# record_id is logged as 0! ( We are not inserting into the main database)
log_record_uploading(oai_id, task_get_task_param('task_id', 0), 0, 'H', pretend=pretend)
stat['nb_holdingpen'] += 1
def print_out_bibupload_statistics():
"""Print the statistics of the process"""
out = "Task stats: %(nb_input)d input records, %(nb_updated)d updated, " \
"%(nb_inserted)d inserted, %(nb_errors)d errors, %(nb_holdingpen)d inserted to holding pen. " \
"Time %(nb_sec).2f sec." % { \
'nb_input': stat['nb_records_to_upload'],
'nb_updated': stat['nb_records_updated'],
'nb_inserted': stat['nb_records_inserted'],
'nb_errors': stat['nb_errors'],
'nb_holdingpen': stat['nb_holdingpen'],
'nb_sec': time.time() - time.mktime(stat['exectime']) }
write_message(out)
def open_marc_file(path):
"""Open a file and return the data"""
try:
# open the file containing the marc document
marc_file = open(path, 'r')
marc = marc_file.read()
marc_file.close()
except IOError, erro:
write_message("ERROR: %s" % erro, verbose=1, stream=sys.stderr)
if erro.errno == 2:
# No such file or directory
# Not scary
e = RecoverableError('File does not exist: %s' % path)
else:
e = StandardError('File not accessible: %s' % path)
raise e
return marc
def xml_marc_to_records(xml_marc):
"""create the records"""
# Creation of the records from the xml Marc in argument
xml_marc = wash_for_xml(xml_marc)
recs = create_records(xml_marc, 1, 1)
if recs == []:
msg = "ERROR: Cannot parse MARCXML file."
write_message(msg, verbose=1, stream=sys.stderr)
raise StandardError(msg)
elif recs[0][0] is None:
msg = "ERROR: MARCXML file has wrong format: %s" % recs
write_message(msg, verbose=1, stream=sys.stderr)
raise RecoverableError(msg)
else:
recs = map((lambda x:x[0]), recs)
return recs
def find_record_format(rec_id, bibformat):
"""Look whether record REC_ID is formatted in FORMAT,
i.e. whether FORMAT exists in the bibfmt table for this record.
Return the number of times it is formatted: 0 if not, 1 if yes,
2 if found more than once (should never occur).
"""
out = 0
query = """SELECT COUNT(*) FROM bibfmt WHERE id_bibrec=%s AND format=%s"""
params = (rec_id, bibformat)
res = []
res = run_sql(query, params)
out = res[0][0]
return out
def find_record_from_recid(rec_id):
"""
Try to find record in the database from the REC_ID number.
Return record ID if found, None otherwise.
"""
res = run_sql("SELECT id FROM bibrec WHERE id=%s",
(rec_id,))
if res:
return res[0][0]
else:
return None
def find_record_from_sysno(sysno):
"""
Try to find record in the database from the external SYSNO number.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, sysno,))
for recid in res:
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if record_exists(recid[0]) > 0: ## Only non deleted records
return recid[0]
else:
return recid[0]
return None
def find_records_from_extoaiid(extoaiid, extoaisrc=None):
"""
Try to find records in the database from the external EXTOAIID number.
    Return an intbitset of matching record IDs (possibly empty).
"""
assert(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:5] == CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[:5])
bibxxx = 'bib'+CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
write_message(' Looking for extoaiid="%s" with extoaisrc="%s"' % (extoaiid, extoaisrc), verbose=9)
id_bibrecs = intbitset(run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, extoaiid,)))
write_message(' Partially found %s for extoaiid="%s"' % (id_bibrecs, extoaiid), verbose=9)
ret = intbitset()
for id_bibrec in id_bibrecs:
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if record_exists(id_bibrec) < 1:
## We don't match not existing records
continue
record = get_record(id_bibrec)
instances = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
write_message(' recid %s -> instances "%s"' % (id_bibrec, instances), verbose=9)
for instance in instances:
this_extoaisrc = field_get_subfield_values(instance, CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5])
this_extoaisrc = this_extoaisrc and this_extoaisrc[0] or None
this_extoaiid = field_get_subfield_values(instance, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5])
this_extoaiid = this_extoaiid and this_extoaiid[0] or None
write_message(" this_extoaisrc -> %s, this_extoaiid -> %s" % (this_extoaisrc, this_extoaiid), verbose=9)
if this_extoaiid == extoaiid:
write_message(' recid %s -> provenance "%s"' % (id_bibrec, this_extoaisrc), verbose=9)
if this_extoaisrc == extoaisrc:
write_message('Found recid %s for extoaiid="%s" with provenance="%s"' % (id_bibrec, extoaiid, extoaisrc), verbose=9)
ret.add(id_bibrec)
break
if this_extoaisrc is None:
write_message('WARNING: Found recid %s for extoaiid="%s" that doesn\'t specify any provenance, while input record does.' % (id_bibrec, extoaiid), stream=sys.stderr)
if extoaisrc is None:
write_message('WARNING: Found recid %s for extoaiid="%s" that specify a provenance (%s), while input record does not have a provenance.' % (id_bibrec, extoaiid, this_extoaisrc), stream=sys.stderr)
return ret
def find_record_from_oaiid(oaiid):
"""
    Try to find record in the database from the local OAI ID number.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib'+CFG_OAI_ID_FIELD[0:2]+'x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec FROM %(bibrec_bibxxx)s AS bb,
%(bibxxx)s AS b WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
(CFG_OAI_ID_FIELD, oaiid,))
for recid in res:
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if record_exists(recid[0]) > 0: ## Only non deleted records
return recid[0]
else:
return recid[0]
return None
def find_record_from_doi(doi):
"""
Try to find record in the database from the given DOI.
Return record ID if found, None otherwise.
"""
bibxxx = 'bib02x'
bibrec_bibxxx = 'bibrec_' + bibxxx
res = run_sql("""SELECT bb.id_bibrec, bb.field_number
FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
('0247_a', doi,))
# For each of the result, make sure that it is really tagged as doi
for (id_bibrec, field_number) in res:
if CFG_INSPIRE_SITE or CFG_CERN_SITE:
## FIXME: for the time being this functionality is available only on INSPIRE
## or CDS, and waiting to be replaced by proper pidstore integration
if record_exists(id_bibrec) < 1:
## We don't match not existing records
continue
res = run_sql("""SELECT bb.id_bibrec
FROM %(bibrec_bibxxx)s AS bb, %(bibxxx)s AS b
WHERE b.tag=%%s AND b.value=%%s
AND bb.id_bibxxx=b.id and bb.field_number=%%s and bb.id_bibrec=%%s""" %
{'bibxxx': bibxxx,
'bibrec_bibxxx': bibrec_bibxxx},
('0247_2', "doi", field_number, id_bibrec))
if res and res[0][0] == id_bibrec:
return res[0][0]
return None
def extract_tag_from_record(record, tag_number):
""" Extract the tag_number for record."""
# first step verify if the record is not already in the database
if record:
return record.get(tag_number, None)
return None
def retrieve_rec_id(record, opt_mode, pretend=False, post_phase = False):
"""Retrieve the record Id from a record by using tag 001 or SYSNO or OAI ID or DOI
    tag. opt_mode is the desired mode.
    @param post_phase: tells if we are calling this method in the post-processing
        phase. If True, we accept the presence of 001 fields even in insert mode.
    @type post_phase: boolean
"""
rec_id = None
# 1st step: we look for the tag 001
tag_001 = extract_tag_from_record(record, '001')
if tag_001 is not None:
# We extract the record ID from the tag
rec_id = tag_001[0][3]
# if we are in insert mode => error
if opt_mode == 'insert' and not post_phase:
write_message(" Failed: tag 001 found in the xml" \
" submitted, you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)",
verbose=1, stream=sys.stderr)
return -1
else:
# we found the rec id and we are not in insert mode => continue
# we try to match rec_id against the database:
if find_record_from_recid(rec_id) is not None:
# okay, 001 corresponds to some known record
return int(rec_id)
elif opt_mode in ('replace', 'replace_or_insert'):
if task_get_option('force'):
# we found the rec_id but it's not in the system and we are
# requested to replace records. Therefore we create on the fly
                    # an empty record allocating the recid.
write_message(" WARNING: tag 001 found in the xml with"
" value %(rec_id)s, but rec_id %(rec_id)s does"
" not exist. Since the mode replace was"
" requested the rec_id %(rec_id)s is allocated"
" on-the-fly." % {"rec_id": rec_id},
stream=sys.stderr)
return create_new_record(rec_id=rec_id, pretend=pretend)
else:
# Since --force was not used we are going to raise an error
write_message(" Failed: tag 001 found in the xml"
" submitted with value %(rec_id)s. The"
" corresponding record however does not"
" exists. If you want to really create"
" such record, please use the --force"
" parameter when calling bibupload." % {
"rec_id": rec_id}, stream=sys.stderr)
return -1
else:
                # The record doesn't exist yet. We shall try to check
                # the SYSNO, OAI or DOI id later.
write_message(" -Tag 001 value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag 001 not found in the xml marc file.", verbose=9)
if rec_id is None:
# 2nd step we look for the SYSNO
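        # Note on the slicing below: a tag configuration such as "970__a"
        # (illustrative value) is decomposed into tag "970", first and second
        # indicators (a "_" in the configuration stands for an empty indicator)
        # and the subfield code "a" before querying the record.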
sysnos = record_get_field_values(record,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[0:3],
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[3:4] or "",
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[4:5] or "",
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[5:6])
if sysnos:
sysno = sysnos[0] # there should be only one external SYSNO
write_message(" -Checking if SYSNO " + sysno + \
" exists in the database", verbose=9)
# try to find the corresponding rec id from the database
rec_id = find_record_from_sysno(sysno)
if rec_id is not None:
# rec_id found
pass
else:
# The record doesn't exist yet. We will try to check
# external and internal OAI ids later.
write_message(" -Tag SYSNO value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag SYSNO not found in the xml marc file.",
verbose=9)
if rec_id is None:
        # 3rd step we look for the external OAIID
extoai_fields = record_get_field_instances(record,
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[0:3],
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3:4] or "",
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] != "_" and \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4:5] or "")
if extoai_fields:
for field in extoai_fields:
extoaiid = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5:6])
extoaisrc = field_get_subfield_values(field, CFG_BIBUPLOAD_EXTERNAL_OAIID_PROVENANCE_TAG[5:6])
if extoaiid:
extoaiid = extoaiid[0]
if extoaisrc:
extoaisrc = extoaisrc[0]
else:
extoaisrc = None
write_message(" -Checking if EXTOAIID %s (%s) exists in the database" % (extoaiid, extoaisrc), verbose=9)
# try to find the corresponding rec id from the database
rec_ids = find_records_from_extoaiid(extoaiid, extoaisrc)
if rec_ids:
# rec_id found
rec_id = rec_ids.pop()
break
else:
# The record doesn't exist yet. We will try to check
# OAI id later.
write_message(" -Tag EXTOAIID value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag EXTOAIID not found in the xml marc file.", verbose=9)
if rec_id is None:
# 4th step we look for the OAI ID
oaiidvalues = record_get_field_values(record,
CFG_OAI_ID_FIELD[0:3],
CFG_OAI_ID_FIELD[3:4] != "_" and \
CFG_OAI_ID_FIELD[3:4] or "",
CFG_OAI_ID_FIELD[4:5] != "_" and \
CFG_OAI_ID_FIELD[4:5] or "",
CFG_OAI_ID_FIELD[5:6])
if oaiidvalues:
oaiid = oaiidvalues[0] # there should be only one OAI ID
write_message(" -Check if local OAI ID " + oaiid + \
" exist in the database", verbose=9)
# try to find the corresponding rec id from the database
rec_id = find_record_from_oaiid(oaiid)
if rec_id is not None:
# rec_id found
pass
else:
write_message(" -Tag OAI ID value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag SYSNO not found in the xml marc file.",
verbose=9)
if rec_id is None:
# 5th step we look for the DOI.
record_dois = record_extract_dois(record)
matching_recids = set()
if record_dois:
# try to find the corresponding rec id from the database
for record_doi in record_dois:
possible_recid = find_record_from_doi(record_doi)
if possible_recid:
matching_recids.add(possible_recid)
if len(matching_recids) > 1:
                # Oops, this record refers to a DOI existing in multiple records.
                # We don't know which one to choose.
write_message(" Failed: Multiple records found in the" \
" database %s that match the DOI(s) in the input" \
" MARCXML %s" % (repr(matching_recids), repr(record_dois)),
verbose=1, stream=sys.stderr)
return -1
elif len(matching_recids) == 1:
rec_id = matching_recids.pop()
if opt_mode == 'insert':
write_message(" Failed: DOI tag matching record #%s found in the xml" \
" submitted, you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)" % rec_id,
verbose=1, stream=sys.stderr)
return -1
else:
write_message(" - Tag DOI value not found in database.",
verbose=9)
rec_id = None
else:
write_message(" -Tag DOI not found in the xml marc file.",
verbose=9)
# Now we should have detected rec_id from SYSNO or OAIID
# tags. (None otherwise.)
if rec_id:
if opt_mode == 'insert':
write_message(" Failed: Record found in the database," \
" you should use the option replace," \
" correct or append to replace an existing" \
" record. (-h for help)",
verbose=1, stream=sys.stderr)
return -1
else:
if opt_mode != 'insert' and \
opt_mode != 'replace_or_insert':
write_message(" Failed: Record not found in the database."\
" Please insert the file before updating it."\
" (-h for help)", verbose=1, stream=sys.stderr)
return -1
return rec_id and int(rec_id) or None
def check_record_doi_is_unique(rec_id, record):
"""
Check that DOI found in 'record' does not exist in any other
record than 'recid'.
Return (boolean, msg) where 'boolean' would be True if the DOI is
unique.
"""
record_dois = record_extract_dois(record)
if record_dois:
matching_recids = set()
for record_doi in record_dois:
possible_recid = find_record_from_doi(record_doi)
if possible_recid:
matching_recids.add(possible_recid)
if len(matching_recids) > 1:
# Oops, this record refers to DOI existing in multiple records.
msg = " Failed: Multiple records found in the" \
" database %s that match the DOI(s) in the input" \
" MARCXML %s" % (repr(matching_recids), repr(record_dois))
return (False, msg)
elif len(matching_recids) == 1:
matching_recid = matching_recids.pop()
if str(matching_recid) != str(rec_id):
# Oops, this record refers to DOI existing in a different record.
msg = " Failed: DOI(s) %s found in this record (#%s)" \
" already exist(s) in another other record (#%s)" % \
(repr(record_dois), rec_id, matching_recid)
return (False, msg)
return (True, "")
### Insert functions
def create_new_record(rec_id=None, pretend=False):
"""
Create new record in the database
@param rec_id: if specified the new record will have this rec_id.
@type rec_id: int
@return: the allocated rec_id
@rtype: int
    @note: in case of errors None will be returned
"""
if rec_id is not None:
try:
rec_id = int(rec_id)
except (ValueError, TypeError), error:
write_message(" ERROR: during the creation_new_record function: %s "
% error, verbose=1, stream=sys.stderr)
return None
if run_sql("SELECT id FROM bibrec WHERE id=%s", (rec_id, )):
write_message(" ERROR: during the creation_new_record function: the requested rec_id %s already exists." % rec_id)
return None
if pretend:
if rec_id:
return rec_id
else:
return run_sql("SELECT max(id)+1 FROM bibrec")[0][0]
if rec_id is not None:
return run_sql("INSERT INTO bibrec (id, creation_date, modification_date) VALUES (%s, NOW(), NOW())", (rec_id, ))
else:
return run_sql("INSERT INTO bibrec (creation_date, modification_date) VALUES (NOW(), NOW())")
def insert_bibfmt(id_bibrec, marc, bibformat, modification_date='1970-01-01 00:00:00', pretend=False):
"""Insert the format in the table bibfmt"""
# compress the marc value
pickled_marc = compress(marc)
try:
time.strptime(modification_date, "%Y-%m-%d %H:%M:%S")
except ValueError:
modification_date = '1970-01-01 00:00:00'
query = """INSERT LOW_PRIORITY INTO bibfmt (id_bibrec, format, last_updated, value)
VALUES (%s, %s, %s, %s)"""
if not pretend:
row_id = run_sql(query, (id_bibrec, bibformat, modification_date, pickled_marc))
return row_id
else:
return 1
def insert_record_bibxxx(tag, value, pretend=False):
"""Insert the record into bibxxx"""
# determine into which table one should insert the record
table_name = 'bib'+tag[0:2]+'x'
# check if the tag, value combination exists in the table
query = """SELECT id,value FROM %s """ % table_name
query += """ WHERE tag=%s AND value=%s"""
params = (tag, value)
res = None
res = run_sql(query, params)
# Note: compare now the found values one by one and look for
# string binary equality (e.g. to respect lowercase/uppercase
# match), regardless of the charset etc settings. Ideally we
# could use a BINARY operator in the above SELECT statement, but
# we would have to check compatibility on various MySQLdb versions
# etc; this approach checks all matched values in Python, not in
# MySQL, which is less cool, but more conservative, so it should
# work better on most setups.
if res:
for row in res:
row_id = row[0]
row_value = row[1]
if row_value == value:
return (table_name, row_id)
# We got here only when the tag, value combination was not found,
# so it is now necessary to insert the tag, value combination into
# bibxxx table as new.
query = """INSERT INTO %s """ % table_name
query += """ (tag, value) values (%s , %s)"""
params = (tag, value)
if not pretend:
row_id = run_sql(query, params)
else:
return (table_name, 1)
return (table_name, row_id)
def insert_record_bibrec_bibxxx(table_name, id_bibxxx,
field_number, id_bibrec, pretend=False):
"""Insert the record into bibrec_bibxxx"""
# determine into which table one should insert the record
full_table_name = 'bibrec_'+ table_name
# insert the proper row into the table
query = """INSERT INTO %s """ % full_table_name
query += """(id_bibrec,id_bibxxx, field_number) values (%s , %s, %s)"""
params = (id_bibrec, id_bibxxx, field_number)
if not pretend:
res = run_sql(query, params)
else:
return 1
return res
def synchronize_8564(rec_id, record, record_had_FFT, bibrecdocs, pretend=False):
"""
Synchronize 8564_ tags and BibDocFile tables.
    This function directly manipulates the record parameter.
@type rec_id: positive integer
@param rec_id: the record identifier.
@param record: the record structure as created by bibrecord.create_record
@type record_had_FFT: boolean
    @param record_had_FFT: True if the incoming record being uploaded used FFT
@return: the manipulated record (which is also modified as a side effect)
"""
def merge_marc_into_bibdocfile(field, pretend=False):
"""
Internal function that reads a single field and stores its content
in BibDocFile tables.
@param field: the 8564_ field containing a BibDocFile URL.
"""
write_message('Merging field: %s' % (field, ), verbose=9)
url = field_get_subfield_values(field, 'u')[:1] or field_get_subfield_values(field, 'q')[:1]
description = field_get_subfield_values(field, 'y')[:1]
comment = field_get_subfield_values(field, 'z')[:1]
if url:
recid, docname, docformat = decompose_bibdocfile_url(url[0])
if recid != rec_id:
write_message("INFO: URL %s is not pointing to a fulltext owned by this record (%s)" % (url, recid), stream=sys.stderr)
else:
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
if description and not pretend:
bibdoc.set_description(description[0], docformat)
if comment and not pretend:
bibdoc.set_comment(comment[0], docformat)
except InvenioBibDocFileError:
## Apparently the referenced docname doesn't exist anymore.
## Too bad. Let's skip it.
write_message("WARNING: docname %s does not seem to exist for record %s. Has it been renamed outside FFT?" % (docname, recid), stream=sys.stderr)
def merge_bibdocfile_into_marc(field, subfields):
"""
Internal function that reads BibDocFile table entries referenced by
the URL in the given 8564_ field and integrate the given information
directly with the provided subfields.
@param field: the 8564_ field containing a BibDocFile URL.
@param subfields: the subfields corresponding to the BibDocFile URL
generated after BibDocFile tables.
"""
write_message('Merging subfields %s into field %s' % (subfields, field), verbose=9)
subfields = dict(subfields) ## We make a copy not to have side-effects
subfield_to_delete = []
for subfield_position, (code, value) in enumerate(field_get_subfield_instances(field)):
## For each subfield instance already existing...
if code in subfields:
## ...We substitute it with what is in BibDocFile tables
record_modify_subfield(record, '856', code, subfields[code],
subfield_position, field_position_global=field[4])
del subfields[code]
else:
## ...We delete it otherwise
subfield_to_delete.append(subfield_position)
subfield_to_delete.sort()
for counter, position in enumerate(subfield_to_delete):
## FIXME: Very hackish algorithm. Since deleting a subfield
            ## will alter the position of following subfields, we
            ## are taking note of this and adjusting further positions
## by using a counter.
record_delete_subfield_from(record, '856', position - counter,
field_position_global=field[4])
subfields = subfields.items()
subfields.sort()
for code, value in subfields:
## Let's add non-previously existing subfields
record_add_subfield_into(record, '856', code, value,
field_position_global=field[4])
def get_bibdocfile_managed_info():
"""
Internal function, returns a dictionary of
BibDocFile URL -> wanna-be subfields.
This information is retrieved from internal BibDoc
structures rather than from input MARC XML files
@rtype: mapping
@return: BibDocFile URL -> wanna-be subfields dictionary
"""
ret = {}
latest_files = bibrecdocs.list_latest_files(list_hidden=False)
for afile in latest_files:
url = afile.get_url()
ret[url] = {'u': url}
description = afile.get_description()
comment = afile.get_comment()
subformat = afile.get_subformat()
if description:
ret[url]['y'] = description
if comment:
ret[url]['z'] = comment
if subformat:
ret[url]['x'] = subformat
return ret
write_message("Synchronizing MARC of recid '%s' with:\n%s" % (rec_id, record), verbose=9)
tags856s = record_get_field_instances(record, '856', '%', '%')
write_message("Original 856%% instances: %s" % tags856s, verbose=9)
tags8564s_to_add = get_bibdocfile_managed_info()
write_message("BibDocFile instances: %s" % tags8564s_to_add, verbose=9)
positions_tags8564s_to_remove = []
for local_position, field in enumerate(tags856s):
if field[1] == '4' and field[2] == ' ':
write_message('Analysing %s' % (field, ), verbose=9)
for url in field_get_subfield_values(field, 'u') + field_get_subfield_values(field, 'q'):
if url in tags8564s_to_add:
# there exists a link in the MARC of the record and the connection exists in BibDoc tables
if record_had_FFT:
merge_bibdocfile_into_marc(field, tags8564s_to_add[url])
else:
merge_marc_into_bibdocfile(field, pretend=pretend)
del tags8564s_to_add[url]
break
elif bibdocfile_url_p(url) and decompose_bibdocfile_url(url)[0] == rec_id:
                    # The link exists and is potentially a correct-looking link to a document;
                    # moreover, it refers to the current record id ... but it does not exist in
                    # the internal BibDoc structures. This could have happened when renaming a
                    # document or removing it. In both cases we have to remove the link... a new one will be created
positions_tags8564s_to_remove.append(local_position)
write_message("%s to be deleted and re-synchronized" % (field, ), verbose=9)
break
record_delete_fields(record, '856', positions_tags8564s_to_remove)
tags8564s_to_add = tags8564s_to_add.values()
tags8564s_to_add.sort()
## FIXME: we are not yet able to preserve the sorting
## of 8564 tags WRT FFT in BibUpload.
## See ticket #1606.
for subfields in tags8564s_to_add:
subfields = subfields.items()
subfields.sort()
record_add_field(record, '856', '4', ' ', subfields=subfields)
write_message('Final record: %s' % record, verbose=9)
return record
def _get_subfield_value(field, subfield_code, default=None):
res = field_get_subfield_values(field, subfield_code)
if res != [] and res != None:
return res[0]
else:
return default
def elaborate_mit_tags(record, rec_id, mode, pretend = False, tmp_ids = {},
tmp_vers = {}):
"""
Uploading MoreInfo -> BDM tags
"""
tuple_list = extract_tag_from_record(record, 'BDM')
    # Now gathering information from BDM tags - to be processed later
write_message("Processing BDM entries of the record ")
recordDocs = BibRecDocs(rec_id)
if tuple_list:
for mit in record_get_field_instances(record, 'BDM', ' ', ' '):
relation_id = _get_subfield_value(mit, "r")
bibdoc_id = _get_subfield_value(mit, "i")
# checking for a possibly temporary ID
if not (bibdoc_id is None):
bibdoc_id = resolve_identifier(tmp_ids, bibdoc_id)
bibdoc_ver = _get_subfield_value(mit, "v")
if not (bibdoc_ver is None):
bibdoc_ver = resolve_identifier(tmp_vers, bibdoc_ver)
bibdoc_name = _get_subfield_value(mit, "n")
bibdoc_fmt = _get_subfield_value(mit, "f")
moreinfo_str = _get_subfield_value(mit, "m")
if bibdoc_id == None:
if bibdoc_name == None:
raise StandardError("Incorrect relation. Neither name nor identifier of the first obejct has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc_id = recordDocs.get_docid(bibdoc_name)
except:
raise StandardError("BibDoc of a name %s does not exist within a record" % (bibdoc_name, ))
else:
if bibdoc_name != None:
write_message("WARNING: both name and id of the first document of a relation have been specified. Ignoring the name", stream=sys.stderr)
if (moreinfo_str is None or mode in ("replace", "correct")) and (not pretend):
MoreInfo(docid=bibdoc_id , version = bibdoc_ver,
docformat = bibdoc_fmt, relation = relation_id).delete()
if (not moreinfo_str is None) and (not pretend):
MoreInfo.create_from_serialised(moreinfo_str,
docid=bibdoc_id,
version = bibdoc_ver,
docformat = bibdoc_fmt,
relation = relation_id)
return record
def elaborate_brt_tags(record, rec_id, mode, pretend=False, tmp_ids = {}, tmp_vers = {}):
"""
Process BDR tags describing relations between existing objects
"""
tuple_list = extract_tag_from_record(record, 'BDR')
# Now gathering information from BDR tags - to be processed later
relations_to_create = []
write_message("Processing BDR entries of the record ")
recordDocs = BibRecDocs(rec_id) #TODO: check what happens if there is no record yet ! Will the class represent an empty set?
if tuple_list:
for brt in record_get_field_instances(record, 'BDR', ' ', ' '):
relation_id = _get_subfield_value(brt, "r")
bibdoc1_id = None
bibdoc1_name = None
bibdoc1_ver = None
bibdoc1_fmt = None
bibdoc2_id = None
bibdoc2_name = None
bibdoc2_ver = None
bibdoc2_fmt = None
if not relation_id:
bibdoc1_id = _get_subfield_value(brt, "i")
bibdoc1_name = _get_subfield_value(brt, "n")
if bibdoc1_id == None:
if bibdoc1_name == None:
raise StandardError("Incorrect relation. Neither name nor identifier of the first obejct has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc1_id = recordDocs.get_docid(bibdoc1_name)
except:
raise StandardError("BibDoc of a name %s does not exist within a record" % \
(bibdoc1_name, ))
else:
# resolving temporary identifier
bibdoc1_id = resolve_identifier(tmp_ids, bibdoc1_id)
if bibdoc1_name != None:
write_message("WARNING: both name and id of the first document of a relation have been specified. Ignoring the name", stream=sys.stderr)
bibdoc1_ver = _get_subfield_value(brt, "v")
if not (bibdoc1_ver is None):
bibdoc1_ver = resolve_identifier(tmp_vers, bibdoc1_ver)
bibdoc1_fmt = _get_subfield_value(brt, "f")
bibdoc2_id = _get_subfield_value(brt, "j")
bibdoc2_name = _get_subfield_value(brt, "o")
if bibdoc2_id == None:
if bibdoc2_name == None:
raise StandardError("Incorrect relation. Neither name nor identifier of the second obejct has been specified")
else:
# retrieving the ID based on the document name (inside current record)
# The document is attached to current record.
try:
bibdoc2_id = recordDocs.get_docid(bibdoc2_name)
except:
raise StandardError("BibDoc of a name %s does not exist within a record" % (bibdoc2_name, ))
else:
bibdoc2_id = resolve_identifier(tmp_ids, bibdoc2_id)
if bibdoc2_name != None:
write_message("WARNING: both name and id of the first document of a relation have been specified. Ignoring the name", stream=sys.stderr)
bibdoc2_ver = _get_subfield_value(brt, "w")
if not (bibdoc2_ver is None):
bibdoc2_ver = resolve_identifier(tmp_vers, bibdoc2_ver)
bibdoc2_fmt = _get_subfield_value(brt, "g")
control_command = _get_subfield_value(brt, "d")
relation_type = _get_subfield_value(brt, "t")
if not relation_type and not relation_id:
raise StandardError("The relation type must be specified")
more_info = _get_subfield_value(brt, "m")
# the relation id might be specified in the case of updating
# MoreInfo table instead of other fields
rel_obj = None
if not relation_id:
rels = BibRelation.get_relations(rel_type = relation_type,
bibdoc1_id = bibdoc1_id,
bibdoc2_id = bibdoc2_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc2_ver = bibdoc2_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_fmt = bibdoc2_fmt)
if len(rels) > 0:
rel_obj = rels[0]
relation_id = rel_obj.id
else:
rel_obj = BibRelation(rel_id=relation_id)
relations_to_create.append((relation_id, bibdoc1_id, bibdoc1_ver,
bibdoc1_fmt, bibdoc2_id, bibdoc2_ver,
bibdoc2_fmt, relation_type, more_info,
rel_obj, control_command))
record_delete_field(record, 'BDR', ' ', ' ')
if mode in ("insert", "replace_or_insert", "append", "correct", "replace"):
# now creating relations between objects based on the data
if not pretend:
for (relation_id, bibdoc1_id, bibdoc1_ver, bibdoc1_fmt,
bibdoc2_id, bibdoc2_ver, bibdoc2_fmt, rel_type,
more_info, rel_obj, control_command) in relations_to_create:
if rel_obj == None:
rel_obj = BibRelation.create(bibdoc1_id = bibdoc1_id,
bibdoc1_ver = bibdoc1_ver,
bibdoc1_fmt = bibdoc1_fmt,
bibdoc2_id = bibdoc2_id,
bibdoc2_ver = bibdoc2_ver,
bibdoc2_fmt = bibdoc2_fmt,
rel_type = rel_type)
relation_id = rel_obj.id
if mode in ("replace"):
# Clearing existing MoreInfo content
rel_obj.get_more_info().delete()
if more_info:
MoreInfo.create_from_serialised(more_info, relation = relation_id)
if control_command == "DELETE":
rel_obj.delete()
else:
write_message("BDR tag is not processed in the %s mode" % (mode, ))
return record
def elaborate_fft_tags(record, rec_id, mode, pretend=False,
tmp_ids = {}, tmp_vers = {}, bibrecdocs=None):
"""
    Process FFT tags that should contain $a with file paths or URLs
to get the fulltext from. This function enriches record with
proper 8564 URL tags, downloads fulltext files and stores them
into var/data structure where appropriate.
CFG_BIBUPLOAD_WGET_SLEEP_TIME defines time to sleep in seconds in
between URL downloads.
Note: if an FFT tag contains multiple $a subfields, we upload them
into different 856 URL tags in the metadata. See regression test
case test_multiple_fft_insert_via_http().
"""
# Let's define some handy sub procedure.
def _add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False):
"""Adds a new format for a given bibdoc. Returns True when everything's fine."""
write_message('Add new format to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s, modification_date: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags, modification_date), verbose=9)
try:
if not url: # Not requesting a new url. Just updating comment & description
return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend)
try:
if not pretend:
bibdoc.add_file_new_format(url, description=description, comment=comment, flags=flags, modification_date=modification_date)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because format already exists (%s)." % (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr)
raise
except Exception, e:
write_message("ERROR: in adding '%s' as a new format because of: %s" % (url, e), stream=sys.stderr)
raise
return True
def _add_new_version(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, modification_date, pretend=False):
"""Adds a new version for a given bibdoc. Returns True when everything's fine."""
write_message('Add new version to %s url: %s, format: %s, docname: %s, doctype: %s, newname: %s, description: %s, comment: %s, flags: %s' % (repr(bibdoc), url, docformat, docname, doctype, newname, description, comment, flags), verbose=9)
try:
if not url:
return _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=pretend)
try:
if not pretend:
bibdoc.add_file_new_version(url, description=description, comment=comment, flags=flags, modification_date=modification_date)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s') not inserted because '%s'." % (url, docformat, docname, doctype, newname, description, comment, flags, modification_date, e), stream=sys.stderr)
raise
except Exception, e:
write_message("ERROR: in adding '%s' as a new version because of: %s" % (url, e), stream=sys.stderr)
raise
return True
def _update_description_and_comment(bibdoc, docname, docformat, description, comment, flags, pretend=False):
"""Directly update comments and descriptions."""
write_message('Just updating description and comment for %s with format %s with description %s, comment %s and flags %s' % (docname, docformat, description, comment, flags), verbose=9)
try:
if not pretend:
bibdoc.set_description(description, docformat)
bibdoc.set_comment(comment, docformat)
for flag in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
if flag in flags:
bibdoc.set_flag(flag, docformat)
else:
bibdoc.unset_flag(flag, docformat)
except StandardError, e:
write_message("('%s', '%s', '%s', '%s', '%s') description and comment not updated because '%s'." % (docname, docformat, description, comment, flags, e))
raise
return True
def _process_document_moreinfos(more_infos, docname, version, docformat, mode):
        if mode not in ('correct', 'append', 'replace_or_insert', 'replace', 'insert'):
print "exited because the mode is incorrect"
return
docid = None
try:
docid = bibrecdocs.get_docid(docname)
except:
raise StandardError("MoreInfo: No document of a given name associated with the record")
if not version:
# We have to retrieve the most recent version ...
version = bibrecdocs.get_bibdoc(docname).get_latest_version()
doc_moreinfo_s, version_moreinfo_s, version_format_moreinfo_s, format_moreinfo_s = more_infos
if mode in ("replace", "replace_or_insert"):
if doc_moreinfo_s: #only if specified, otherwise do not touch
MoreInfo(docid = docid).delete()
if format_moreinfo_s: #only if specified... otherwise do not touch
MoreInfo(docid = docid, docformat = docformat).delete()
if not doc_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = doc_moreinfo_s, docid = docid)
if not version_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = version_moreinfo_s,
docid = docid, version = version)
if not version_format_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = version_format_moreinfo_s,
docid = docid, version = version,
docformat = docformat)
if not format_moreinfo_s is None:
MoreInfo.create_from_serialised(ser_str = format_moreinfo_s,
docid = docid, docformat = docformat)
if mode == 'delete':
raise StandardError('FFT tag specified but bibupload executed in --delete mode')
tuple_list = extract_tag_from_record(record, 'FFT')
if tuple_list: # FFT Tags analysis
write_message("FFTs: "+str(tuple_list), verbose=9)
docs = {} # docnames and their data
for fft in record_get_field_instances(record, 'FFT', ' ', ' '):
            # First of all, we retrieve the potentially temporary identifiers...
            # even if the rest fails, we should include them in the dictionary
version = _get_subfield_value(fft, 'v', '')
            # checking if version is temporary... if so, filling a different variable
is_tmp_ver, bibdoc_tmpver = parse_identifier(version)
if is_tmp_ver:
version = None
else:
bibdoc_tmpver = None
if not version: #treating cases of empty string etc...
version = None
bibdoc_tmpid = field_get_subfield_values(fft, 'i')
if bibdoc_tmpid:
bibdoc_tmpid = bibdoc_tmpid[0]
else:
                bibdoc_tmpid = None
is_tmp_id, bibdoc_tmpid = parse_identifier(bibdoc_tmpid)
if not is_tmp_id:
bibdoc_tmpid = None
            # In the case of temporary ids, we don't resolve them yet but signal that they have been used
            # (the value -1 means that an identifier has been declared but not assigned a value yet)
if bibdoc_tmpid:
if bibdoc_tmpid in tmp_ids:
write_message("WARNING: the temporary identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpid, ), stream=sys.stderr)
else:
tmp_ids[bibdoc_tmpid] = -1
if bibdoc_tmpver:
if bibdoc_tmpver in tmp_vers:
write_message("WARNING: the temporary version identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpver, ), stream=sys.stderr)
else:
tmp_vers[bibdoc_tmpver] = -1
# Let's discover the type of the document
            # This is a legacy field and no particular check will be
            # enforced on it.
doctype = _get_subfield_value(fft, 't', 'Main') #Default is Main
# Let's discover the url.
url = field_get_subfield_values(fft, 'a')
if url:
url = url[0]
try:
check_valid_url(url)
except StandardError, e:
raise StandardError, "fft '%s' specifies in $a a location ('%s') with problems: %s" % (fft, url, e)
else:
url = ''
            #TODO: a lot of this code could be made more compact using similar syntax ... it should be more readable in the long run
            # maybe the right-hand-side expressions look a bit cryptic, but the elaborate_fft function would be much clearer
if mode == 'correct' and doctype != 'FIX-MARC':
arg2 = ""
else:
arg2 = KEEP_OLD_VALUE
description = _get_subfield_value(fft, 'd', arg2)
# Let's discover the description
# description = field_get_subfield_values(fft, 'd')
# if description != []:
# description = description[0]
# else:
# if mode == 'correct' and doctype != 'FIX-MARC':
## If the user require to correct, and do not specify
## a description this means she really want to
## modify the description.
# description = ''
# else:
# description = KEEP_OLD_VALUE
# Let's discover the desired docname to be created/altered
name = field_get_subfield_values(fft, 'n')
if name:
## Let's remove undesired extensions
name = file_strip_ext(name[0] + '.pdf')
else:
if url:
name = get_docname_from_url(url)
elif mode != 'correct' and doctype != 'FIX-MARC':
raise StandardError, "WARNING: fft '%s' doesn't specifies either a location in $a or a docname in $n" % str(fft)
else:
continue
# Let's discover the desired new docname in case we want to change it
newname = field_get_subfield_values(fft, 'm')
if newname:
newname = file_strip_ext(newname[0] + '.pdf')
else:
newname = name
# Let's discover the desired format
docformat = field_get_subfield_values(fft, 'f')
if docformat:
docformat = normalize_format(docformat[0])
else:
if url:
docformat = guess_format_from_url(url)
else:
docformat = ""
# Let's discover the icon
icon = field_get_subfield_values(fft, 'x')
if icon != []:
icon = icon[0]
if icon != KEEP_OLD_VALUE:
try:
check_valid_url(icon)
except StandardError, e:
raise StandardError, "fft '%s' specifies in $x an icon ('%s') with problems: %s" % (fft, icon, e)
else:
icon = ''
# Let's discover the comment
comment = field_get_subfield_values(fft, 'z')
if comment != []:
comment = comment[0]
else:
if mode == 'correct' and doctype != 'FIX-MARC':
## See comment on description
comment = ''
else:
comment = KEEP_OLD_VALUE
# Let's discover the restriction
restriction = field_get_subfield_values(fft, 'r')
if restriction != []:
restriction = restriction[0]
else:
if mode == 'correct' and doctype != 'FIX-MARC':
## See comment on description
restriction = ''
else:
restriction = KEEP_OLD_VALUE
document_moreinfo = _get_subfield_value(fft, 'w')
version_moreinfo = _get_subfield_value(fft, 'p')
version_format_moreinfo = _get_subfield_value(fft, 'b')
format_moreinfo = _get_subfield_value(fft, 'u')
# Let's discover the timestamp of the file (if any)
timestamp = field_get_subfield_values(fft, 's')
if timestamp:
try:
timestamp = datetime(*(time.strptime(timestamp[0], "%Y-%m-%d %H:%M:%S")[:6]))
except ValueError:
                    write_message('WARNING: The timestamp is not in a valid format and will thus be ignored. The format should be YYYY-MM-DD HH:MM:SS', stream=sys.stderr)
timestamp = ''
else:
timestamp = ''
flags = field_get_subfield_values(fft, 'o')
for flag in flags:
if flag not in CFG_BIBDOCFILE_AVAILABLE_FLAGS:
raise StandardError, "fft '%s' specifies a non available flag: %s" % (fft, flag)
if docs.has_key(name): # new format considered
(doctype2, newname2, restriction2, version2, urls, dummybibdoc_moreinfos2, dummybibdoc_tmpid2, dummybibdoc_tmpver2 ) = docs[name]
if doctype2 != doctype:
raise StandardError, "fft '%s' specifies a different doctype from previous fft with docname '%s'" % (str(fft), name)
if newname2 != newname:
raise StandardError, "fft '%s' specifies a different newname from previous fft with docname '%s'" % (str(fft), name)
if restriction2 != restriction:
raise StandardError, "fft '%s' specifies a different restriction from previous fft with docname '%s'" % (str(fft), name)
if version2 != version:
raise StandardError, "fft '%s' specifies a different version than the previous fft with docname '%s'" % (str(fft), name)
for (dummyurl2, format2, dummydescription2, dummycomment2, dummyflags2, dummytimestamp2) in urls:
if docformat == format2:
raise StandardError, "fft '%s' specifies a second file '%s' with the same format '%s' from previous fft with docname '%s'" % (str(fft), url, docformat, name)
if url or docformat:
urls.append((url, docformat, description, comment, flags, timestamp))
if icon:
urls.append((icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp))
else:
if url or docformat:
docs[name] = (doctype, newname, restriction, version, [(url, docformat, description, comment, flags, timestamp)], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver)
if icon:
docs[name][4].append((icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp))
elif icon:
docs[name] = (doctype, newname, restriction, version, [(icon, icon[len(file_strip_ext(icon)):] + ';icon', description, comment, flags, timestamp)], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver)
else:
docs[name] = (doctype, newname, restriction, version, [], [document_moreinfo, version_moreinfo, version_format_moreinfo, format_moreinfo], bibdoc_tmpid, bibdoc_tmpver)
write_message('Result of FFT analysis:\n\tDocs: %s' % (docs,), verbose=9)
# Let's remove all FFT tags
record_delete_field(record, 'FFT', ' ', ' ')
## Let's pre-download all the URLs to see if, in case of mode 'correct' or 'append'
## we can avoid creating a new revision.
for docname, (doctype, newname, restriction, version, urls, more_infos, bibdoc_tmpid, bibdoc_tmpver ) in docs.items():
downloaded_urls = []
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
except InvenioBibDocFileError:
                ## A bibdoc with the given docname does not exist.
## So there is no chance we are going to revise an existing
## format with an identical file :-)
bibdoc = None
new_revision_needed = False
for url, docformat, description, comment, flags, timestamp in urls:
if url:
try:
downloaded_url = download_url(url, docformat)
write_message("%s saved into %s" % (url, downloaded_url), verbose=9)
except Exception, err:
write_message("ERROR: in downloading '%s' because of: %s" % (url, err), stream=sys.stderr)
raise
if mode == 'correct' and bibdoc is not None and not new_revision_needed:
downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp))
if not bibrecdocs.check_file_exists(downloaded_url, docformat):
new_revision_needed = True
else:
write_message("WARNING: %s is already attached to bibdoc %s for recid %s" % (url, docname, rec_id), stream=sys.stderr)
elif mode == 'append' and bibdoc is not None:
if not bibrecdocs.check_file_exists(downloaded_url, docformat):
downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp))
else:
write_message("WARNING: %s is already attached to bibdoc %s for recid %s" % (url, docname, rec_id), stream=sys.stderr)
else:
downloaded_urls.append((downloaded_url, docformat, description, comment, flags, timestamp))
else:
downloaded_urls.append(('', docformat, description, comment, flags, timestamp))
if mode == 'correct' and bibdoc is not None and not new_revision_needed:
                ## Since we don't need a new revision (because none of the
                ## files being uploaded differ from the already attached ones)
                ## we can simply remove the urls but keep the other information
write_message("No need to add a new revision for docname %s for recid %s" % (docname, rec_id), verbose=2)
docs[docname] = (doctype, newname, restriction, version, [('', docformat, description, comment, flags, timestamp) for (dummy, docformat, description, comment, flags, timestamp) in downloaded_urls], more_infos, bibdoc_tmpid, bibdoc_tmpver)
for downloaded_url, dummy, dummy, dummy, dummy, dummy in downloaded_urls:
## Let's free up some space :-)
if downloaded_url and os.path.exists(downloaded_url):
os.remove(downloaded_url)
else:
if downloaded_urls or mode != 'append':
docs[docname] = (doctype, newname, restriction, version, downloaded_urls, more_infos, bibdoc_tmpid, bibdoc_tmpver)
else:
## In case we are in append mode and there are no urls to append
## we discard the whole FFT
del docs[docname]
if mode == 'replace': # First we erase previous bibdocs
if not pretend:
for bibdoc in bibrecdocs.list_bibdocs():
bibdoc.delete()
bibrecdocs.dirty = True
for docname, (doctype, newname, restriction, version, urls, more_infos, bibdoc_tmpid, bibdoc_tmpver) in docs.iteritems():
write_message("Elaborating olddocname: '%s', newdocname: '%s', doctype: '%s', restriction: '%s', urls: '%s', mode: '%s'" % (docname, newname, doctype, restriction, urls, mode), verbose=9)
if mode in ('insert', 'replace'): # new bibdocs, new docnames, new marc
if newname in bibrecdocs.get_bibdoc_names():
write_message("('%s', '%s') not inserted because docname already exists." % (newname, urls), stream=sys.stderr)
raise StandardError("('%s', '%s') not inserted because docname already exists." % (newname, urls), stream=sys.stderr)
try:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, newname)
bibdoc.set_status(restriction)
else:
bibdoc = None
except Exception, e:
write_message("('%s', '%s', '%s') not inserted because: '%s'." % (doctype, newname, urls, e), stream=sys.stderr)
raise e
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
elif mode == 'replace_or_insert': # to be thought as correct_or_insert
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
if doctype not in ('PURGE', 'DELETE', 'EXPUNGE', 'REVERT', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE'):
if newname != docname:
try:
if not pretend:
bibrecdocs.change_name(newname=newname, docid=bibdoc.id)
write_message(lambda: "After renaming: %s" % bibrecdocs, verbose=9)
except StandardError, e:
write_message('ERROR: in renaming %s to %s: %s' % (docname, newname, e), stream=sys.stderr)
raise
try:
bibdoc = bibrecdocs.get_bibdoc(newname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
if doctype == 'PURGE':
if not pretend:
bibdoc.purge()
bibrecdocs.dirty = True
elif doctype == 'DELETE':
if not pretend:
bibdoc.delete()
bibrecdocs.dirty = True
elif doctype == 'EXPUNGE':
if not pretend:
bibdoc.expunge()
bibrecdocs.dirty = True
elif doctype == 'FIX-ALL':
if not pretend:
bibrecdocs.fix(docname)
elif doctype == 'FIX-MARC':
pass
elif doctype == 'DELETE-FILE':
if urls:
for (url, docformat, description, comment, flags, timestamp) in urls:
if not pretend:
bibdoc.delete_file(docformat, version)
elif doctype == 'REVERT':
try:
if not pretend:
bibdoc.revert(version)
except Exception, e:
write_message('(%s, %s) not correctly reverted: %s' % (newname, version, e), stream=sys.stderr)
raise
else:
if restriction != KEEP_OLD_VALUE:
if not pretend:
bibdoc.set_status(restriction)
# Since the docname already existed we have to first
# bump the version by pushing the first new file
# then pushing the other files.
if urls:
(first_url, first_format, first_description, first_comment, first_flags, first_timestamp) = urls[0]
other_urls = urls[1:]
assert(_add_new_version(bibdoc, first_url, first_format, docname, doctype, newname, first_description, first_comment, first_flags, first_timestamp, pretend=pretend))
for (url, docformat, description, comment, flags, timestamp) in other_urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
## Let's refresh the list of bibdocs.
if not found_bibdoc:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, newname)
bibdoc.set_status(restriction)
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp))
elif mode == 'correct':
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
if doctype not in ('PURGE', 'DELETE', 'EXPUNGE', 'REVERT', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE'):
if newname != docname:
try:
if not pretend:
bibrecdocs.change_name(newname=newname, docid=bibdoc.id)
write_message(lambda: "After renaming: %s" % bibrecdocs, verbose=9)
except StandardError, e:
write_message('ERROR: in renaming %s to %s: %s' % (docname, newname, e), stream=sys.stderr)
raise
try:
bibdoc = bibrecdocs.get_bibdoc(newname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
if doctype == 'PURGE':
if not pretend:
bibdoc.purge()
bibrecdocs.dirty = True
elif doctype == 'DELETE':
if not pretend:
bibdoc.delete()
bibrecdocs.dirty = True
elif doctype == 'EXPUNGE':
if not pretend:
bibdoc.expunge()
bibrecdocs.dirty = True
elif doctype == 'FIX-ALL':
if not pretend:
bibrecdocs.fix(newname)
elif doctype == 'FIX-MARC':
pass
elif doctype == 'DELETE-FILE':
if urls:
for (url, docformat, description, comment, flags, timestamp) in urls:
if not pretend:
bibdoc.delete_file(docformat, version)
elif doctype == 'REVERT':
try:
if not pretend:
bibdoc.revert(version)
except Exception, e:
write_message('(%s, %s) not correctly reverted: %s' % (newname, version, e), stream=sys.stderr)
raise
else:
if restriction != KEEP_OLD_VALUE:
if not pretend:
bibdoc.set_status(restriction)
if doctype and doctype != KEEP_OLD_VALUE:
if not pretend:
bibdoc.change_doctype(doctype)
if urls:
(first_url, first_format, first_description, first_comment, first_flags, first_timestamp) = urls[0]
other_urls = urls[1:]
assert(_add_new_version(bibdoc, first_url, first_format, docname, doctype, newname, first_description, first_comment, first_flags, first_timestamp, pretend=pretend))
for (url, docformat, description, comment, flags, timestamp) in other_urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
if not found_bibdoc:
if doctype in ('PURGE', 'DELETE', 'EXPUNGE', 'FIX-ALL', 'FIX-MARC', 'DELETE-FILE', 'REVERT'):
write_message("('%s', '%s', '%s') not performed because '%s' docname didn't existed." % (doctype, newname, urls, docname), stream=sys.stderr)
raise StandardError
else:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, newname)
bibdoc.set_status(restriction)
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp))
elif mode == 'append':
found_bibdoc = False
try:
bibdoc = bibrecdocs.get_bibdoc(docname)
found_bibdoc = True
except InvenioBibDocFileError:
found_bibdoc = False
else:
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp, pretend=pretend))
if not found_bibdoc:
try:
if not pretend:
bibdoc = bibrecdocs.add_bibdoc(doctype, docname)
bibdoc.set_status(restriction)
for (url, docformat, description, comment, flags, timestamp) in urls:
assert(_add_new_format(bibdoc, url, docformat, docname, doctype, newname, description, comment, flags, timestamp))
except Exception, e:
register_exception()
write_message("('%s', '%s', '%s') not appended because: '%s'." % (doctype, newname, urls, e), stream=sys.stderr)
raise
if not pretend and doctype not in ('PURGE', 'DELETE', 'EXPUNGE'):
_process_document_moreinfos(more_infos, newname, version, urls and urls[0][1], mode)
# resolving temporary version and identifier
if bibdoc_tmpid:
if bibdoc_tmpid in tmp_ids and tmp_ids[bibdoc_tmpid] != -1:
write_message("WARNING: the temporary identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpid, ), stream=sys.stderr)
else:
tmp_ids[bibdoc_tmpid] = bibrecdocs.get_docid(docname)
if bibdoc_tmpver:
if bibdoc_tmpver in tmp_vers and tmp_vers[bibdoc_tmpver] != -1:
write_message("WARNING: the temporary version identifier %s has been declared more than once. Ignoring the second occurance" % (bibdoc_tmpver, ), stream=sys.stderr)
else:
if version == None:
if version:
tmp_vers[bibdoc_tmpver] = version
else:
tmp_vers[bibdoc_tmpver] = bibrecdocs.get_bibdoc(docname).get_latest_version()
else:
tmp_vers[bibdoc_tmpver] = version
return record
### Update functions
def update_bibrec_date(now, bibrec_id, insert_mode_p, pretend=False):
"""Update the date of the record in bibrec table """
if insert_mode_p:
query = """UPDATE bibrec SET creation_date=%s, modification_date=%s WHERE id=%s"""
params = (now, now, bibrec_id)
else:
query = """UPDATE bibrec SET modification_date=%s WHERE id=%s"""
params = (now, bibrec_id)
if not pretend:
run_sql(query, params)
write_message(" -Update record creation/modification date: DONE" , verbose=2)
def update_bibfmt_format(id_bibrec, format_value, format_name, modification_date=None, pretend=False):
"""Update the format in the table bibfmt"""
if modification_date is None:
modification_date = time.strftime('%Y-%m-%d %H:%M:%S')
else:
try:
time.strptime(modification_date, "%Y-%m-%d %H:%M:%S")
except ValueError:
modification_date = '1970-01-01 00:00:00'
# We check if the format is already in bibFmt
nb_found = find_record_format(id_bibrec, format_name)
if nb_found == 1:
# we are going to update the format
# compress the format_value value
pickled_format_value = compress(format_value)
# update the format:
query = """UPDATE LOW_PRIORITY bibfmt SET last_updated=%s, value=%s WHERE id_bibrec=%s AND format=%s"""
params = (modification_date, pickled_format_value, id_bibrec, format_name)
if not pretend:
row_id = run_sql(query, params)
if not pretend and row_id is None:
write_message(" ERROR: during update_bibfmt_format function", verbose=1, stream=sys.stderr)
return 1
else:
write_message(" -Update the format %s in bibfmt: DONE" % format_name , verbose=2)
return 0
elif nb_found > 1:
write_message(" Failed: Same format %s found several time in bibfmt for the same record." % format_name, verbose=1, stream=sys.stderr)
return 1
else:
# Insert the format information in BibFMT
res = insert_bibfmt(id_bibrec, format_value, format_name, modification_date, pretend=pretend)
if res is None:
write_message(" ERROR: during insert_bibfmt", verbose=1, stream=sys.stderr)
return 1
else:
write_message(" -Insert the format %s in bibfmt: DONE" % format_name , verbose=2)
return 0
def delete_bibfmt_format(id_bibrec, format_name, pretend=False):
"""
 Delete format FORMAT_NAME from bibfmt table of record ID_BIBREC.
"""
if not pretend:
run_sql("DELETE LOW_PRIORITY FROM bibfmt WHERE id_bibrec=%s and format=%s", (id_bibrec, format_name))
return 0
def archive_marcxml_for_history(recID, pretend=False):
"""
Archive current MARCXML format of record RECID from BIBFMT table
into hstRECORD table. Useful to keep MARCXML history of records.
Return 0 if everything went fine. Return 1 otherwise.
"""
res = run_sql("SELECT id_bibrec, value, last_updated FROM bibfmt WHERE format='xm' AND id_bibrec=%s",
(recID,))
if res and not pretend:
run_sql("""INSERT INTO hstRECORD (id_bibrec, marcxml, job_id, job_name, job_person, job_date, job_details)
VALUES (%s,%s,%s,%s,%s,%s,%s)""",
(res[0][0], res[0][1], task_get_task_param('task_id', 0), 'bibupload', task_get_task_param('user', 'UNKNOWN'), res[0][2],
'mode: ' + task_get_option('mode', 'UNKNOWN') + '; file: ' + task_get_option('file_path', 'UNKNOWN') + '.'))
return 0
def update_database_with_metadata(record, rec_id, oai_rec_id="oai", affected_tags=None, pretend=False):
"""Update the database tables with the record and the record id given in parameter"""
# extract only those tags that have been affected.
# check happens at subfield level. This is to prevent overhead
# associated with inserting already existing field with given ind pair
write_message("update_database_with_metadata: record=%s, rec_id=%s, oai_rec_id=%s, affected_tags=%s" % (record, rec_id, oai_rec_id, affected_tags), verbose=9)
tmp_record = {}
if affected_tags:
for tag in record.keys():
if tag in affected_tags.keys():
write_message(" -Tag %s found to be modified.Setting up for update" % tag, verbose=9)
# initialize new list to hold affected field
new_data_tuple_list = []
for data_tuple in record[tag]:
ind1 = data_tuple[1]
ind2 = data_tuple[2]
if (ind1, ind2) in affected_tags[tag]:
write_message(" -Indicator pair (%s, %s) added to update list" % (ind1, ind2), verbose=9)
new_data_tuple_list.append(data_tuple)
tmp_record[tag] = new_data_tuple_list
write_message(lambda: " -Modified fields: \n%s" % record_xml_output(tmp_record), verbose=2)
else:
tmp_record = record
for tag in tmp_record.keys():
# check if tag is not a special one:
if tag not in CFG_BIBUPLOAD_SPECIAL_TAGS:
# for each tag there is a list of tuples representing datafields
tuple_list = tmp_record[tag]
# this list should contain the elements of a full tag [tag, ind1, ind2, subfield_code]
tag_list = []
tag_list.append(tag)
for single_tuple in tuple_list:
# these are the contents of a single tuple
subfield_list = single_tuple[0]
ind1 = single_tuple[1]
ind2 = single_tuple[2]
# append the ind's to the full tag
if ind1 == '' or ind1 == ' ':
tag_list.append('_')
else:
tag_list.append(ind1)
if ind2 == '' or ind2 == ' ':
tag_list.append('_')
else:
tag_list.append(ind2)
datafield_number = single_tuple[4]
if tag in CFG_BIBUPLOAD_SPECIAL_TAGS:
# nothing to do for special tags (FFT, BDR, BDM)
pass
elif tag in CFG_BIBUPLOAD_CONTROLFIELD_TAGS and tag != "001":
value = single_tuple[3]
# get the full tag
full_tag = ''.join(tag_list)
# update the tables
write_message(" insertion of the tag "+full_tag+" with the value "+value, verbose=9)
 # insert the tag and value into bibxxx
(table_name, bibxxx_row_id) = insert_record_bibxxx(full_tag, value, pretend=pretend)
#print 'tname, bibrow', table_name, bibxxx_row_id;
if table_name is None or bibxxx_row_id is None:
write_message(" Failed: during insert_record_bibxxx", verbose=1, stream=sys.stderr)
# connect bibxxx and bibrec with the table bibrec_bibxxx
res = insert_record_bibrec_bibxxx(table_name, bibxxx_row_id, datafield_number, rec_id, pretend=pretend)
if res is None:
write_message(" Failed: during insert_record_bibrec_bibxxx", verbose=1, stream=sys.stderr)
else:
# get the tag and value from the content of each subfield
for subfield in subfield_list:
subtag = subfield[0]
value = subfield[1]
tag_list.append(subtag)
# get the full tag
full_tag = ''.join(tag_list)
# update the tables
write_message(" insertion of the tag "+full_tag+" with the value "+value, verbose=9)
 # insert the tag and value into bibxxx
(table_name, bibxxx_row_id) = insert_record_bibxxx(full_tag, value, pretend=pretend)
if table_name is None or bibxxx_row_id is None:
write_message(" Failed: during insert_record_bibxxx", verbose=1, stream=sys.stderr)
# connect bibxxx and bibrec with the table bibrec_bibxxx
res = insert_record_bibrec_bibxxx(table_name, bibxxx_row_id, datafield_number, rec_id, pretend=pretend)
if res is None:
write_message(" Failed: during insert_record_bibrec_bibxxx", verbose=1, stream=sys.stderr)
# remove the subtag from the list
tag_list.pop()
tag_list.pop()
tag_list.pop()
tag_list.pop()
write_message(" -Update the database with metadata: DONE", verbose=2)
log_record_uploading(oai_rec_id, task_get_task_param('task_id', 0), rec_id, 'P', pretend=pretend)
def append_new_tag_to_old_record(record, rec_old):
"""Append new tags to a old record"""
def _append_tag(tag):
if tag in CFG_BIBUPLOAD_CONTROLFIELD_TAGS:
if tag == '001':
pass
else:
# if it is a controlfield, just access the value
for single_tuple in record[tag]:
controlfield_value = single_tuple[3]
# add the field to the old record
newfield_number = record_add_field(rec_old, tag,
controlfield_value=controlfield_value)
if newfield_number is None:
write_message(" ERROR: when adding the field"+tag, verbose=1, stream=sys.stderr)
else:
# For each tag there is a list of tuples representing datafields
for single_tuple in record[tag]:
# We retrieve the information of the tag
subfield_list = single_tuple[0]
ind1 = single_tuple[1]
ind2 = single_tuple[2]
if '%s%s%s' % (tag, ind1 == ' ' and '_' or ind1, ind2 == ' ' and '_' or ind2) in (CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:5], CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG[:5]):
## We don't want to append the external identifier
## if it is already existing.
if record_find_field(rec_old, tag, single_tuple)[0] is not None:
write_message(" Not adding tag: %s ind1=%s ind2=%s subfields=%s: it's already there" % (tag, ind1, ind2, subfield_list), verbose=9)
continue
# We add the datafield to the old record
write_message(" Adding tag: %s ind1=%s ind2=%s subfields=%s" % (tag, ind1, ind2, subfield_list), verbose=9)
newfield_number = record_add_field(rec_old, tag, ind1,
ind2, subfields=subfield_list)
if newfield_number is None:
write_message(" ERROR: when adding the field"+tag, verbose=1, stream=sys.stderr)
# Go through each tag in the appended record
for tag in record:
_append_tag(tag)
return rec_old
def copy_strong_tags_from_old_record(record, rec_old):
"""
Look for strong tags in RECORD and REC_OLD. If no strong tags are
found in RECORD, then copy them over from REC_OLD. This function
modifies RECORD structure on the spot.
"""
for strong_tag in CFG_BIBUPLOAD_STRONG_TAGS:
if not record_get_field_instances(record, strong_tag, strong_tag[3:4] or '%', strong_tag[4:5] or '%'):
strong_tag_old_field_instances = record_get_field_instances(rec_old, strong_tag)
if strong_tag_old_field_instances:
for strong_tag_old_field_instance in strong_tag_old_field_instances:
sf_vals, fi_ind1, fi_ind2, controlfield, dummy = strong_tag_old_field_instance
record_add_field(record, strong_tag, fi_ind1, fi_ind2, controlfield, sf_vals)
return
### Delete functions
def delete_tags(record, rec_old):
"""
Returns a record structure with all the fields in rec_old minus the
fields in record.
@param record: The record containing tags to delete.
@type record: record structure
@param rec_old: The original record.
@type rec_old: record structure
@return: The modified record.
@rtype: record structure
"""
returned_record = copy.deepcopy(rec_old)
for tag, fields in record.iteritems():
if tag in ('001', ):
continue
for field in fields:
local_position = record_find_field(returned_record, tag, field)[1]
if local_position is not None:
record_delete_field(returned_record, tag, field_position_local=local_position)
return returned_record
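# Editor's illustrative sketch (not part of the original module): the record
# structures handled here follow the convention used throughout this file,
# i.e. {tag: [(subfields, ind1, ind2, controlfield_value, field_position)]}.
# A hypothetical call could look like:
#
#     rec_old = {'001': [([], ' ', ' ', '42', 1)],
#                '100': [([('a', 'Doe, John')], ' ', ' ', '', 2)]}
#     record = {'100': [([('a', 'Doe, John')], ' ', ' ', '', 1)]}
#     delete_tags(record, rec_old)  # -> copy of rec_old without the 100 field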
def delete_tags_to_correct(record, rec_old):
"""
Delete tags from REC_OLD which are also existing in RECORD. When
deleting, pay attention not only to tags, but also to indicators,
so that fields with the same tags but different indicators are not
deleted.
"""
## Some fields are controlled via provenance information.
## We should re-add saved fields at the end.
fields_to_readd = {}
for tag in CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS:
if tag[:3] in record:
tmp_field_instances = record_get_field_instances(record, tag[:3], tag[3], tag[4]) ## Let's discover the provenance that will be updated
provenances_to_update = []
for instance in tmp_field_instances:
for code, value in instance[0]:
if code == tag[5]:
if value not in provenances_to_update:
provenances_to_update.append(value)
break
else:
## The provenance is not specified.
## let's add the special empty provenance.
if '' not in provenances_to_update:
provenances_to_update.append('')
potential_fields_to_readd = record_get_field_instances(rec_old, tag[:3], tag[3], tag[4]) ## Let's take all the field corresponding to tag
## Let's save apart all the fields that should be updated, but
## since they have a different provenance not mentioned in record
## they should be preserved.
fields = []
for sf_vals, ind1, ind2, dummy_cf, dummy_line in potential_fields_to_readd:
for code, value in sf_vals:
if code == tag[5]:
if value not in provenances_to_update:
fields.append(sf_vals)
break
else:
if '' not in provenances_to_update:
## Empty provenance, let's protect in any case
fields.append(sf_vals)
fields_to_readd[tag] = fields
# browse through all the tags from the MARCXML file:
for tag in record:
# check if the tag exists in the old record too:
if tag in rec_old and tag != '001':
# the tag does exist, so delete all record's tag+ind1+ind2 combinations from rec_old
for dummy_sf_vals, ind1, ind2, dummy_cf, dummyfield_number in record[tag]:
write_message(" Delete tag: " + tag + " ind1=" + ind1 + " ind2=" + ind2, verbose=9)
record_delete_field(rec_old, tag, ind1, ind2)
## Ok, we readd necessary fields!
for tag, fields in fields_to_readd.iteritems():
for sf_vals in fields:
write_message(" Adding tag: " + tag[:3] + " ind1=" + tag[3] + " ind2=" + tag[4] + " code=" + str(sf_vals), verbose=9)
record_add_field(rec_old, tag[:3], tag[3], tag[4], subfields=sf_vals)
def delete_bibrec_bibxxx(record, id_bibrec, affected_tags={}, pretend=False):
"""Delete the database record from the table bibxxx given in parameters"""
# we clear all the rows from bibrec_bibxxx from the old record
# clearing only those tags that have been modified.
write_message(lambda: "delete_bibrec_bibxxx(record=%s, id_bibrec=%s, affected_tags=%s)" % (record, id_bibrec, affected_tags), verbose=9)
for tag in affected_tags:
# sanity check with record keys just to make sure its fine.
if tag not in CFG_BIBUPLOAD_SPECIAL_TAGS:
write_message("%s found in record"%tag, verbose=2)
# for each name construct the bibrec_bibxxx table name
table_name = 'bib'+tag[0:2]+'x'
bibrec_table = 'bibrec_'+table_name
# delete all the records with proper id_bibrec. Indicators matter for individual affected tags
tmp_ind_1 = ''
tmp_ind_2 = ''
# construct exact tag value using indicators
for ind_pair in affected_tags[tag]:
if ind_pair[0] == ' ':
tmp_ind_1 = '_'
else:
tmp_ind_1 = ind_pair[0]
if ind_pair[1] == ' ':
tmp_ind_2 = '_'
else:
tmp_ind_2 = ind_pair[1]
 # need to escape in case of underscore so that MySQL treats it as a char
tag_val = tag+"\\"+tmp_ind_1+"\\"+tmp_ind_2 + '%'
query = """DELETE br.* FROM `%s` br,`%s` b where br.id_bibrec=%%s and br.id_bibxxx=b.id and b.tag like %%s""" % (bibrec_table, table_name)
params = (id_bibrec, tag_val)
write_message(query % params, verbose=9)
if not pretend:
run_sql(query, params)
else:
write_message("%s not found"%tag, verbose=2)
def main():
"""Main that construct all the bibtask."""
task_init(authorization_action='runbibupload',
authorization_msg="BibUpload Task Submission",
description="""Receive MARC XML file and update appropriate database
tables according to options.
Examples:
$ bibupload -i input.xml
""",
help_specific_usage=""" -a, --append\t\tnew fields are appended to the existing record
-c, --correct\t\tfields are replaced by the new ones in the existing record, except
\t\t\twhen overridden by CFG_BIBUPLOAD_CONTROLLED_PROVENANCE_TAGS
-i, --insert\t\tinsert the new record in the database
-r, --replace\t\tthe existing record is entirely replaced by the new one,
\t\t\texcept for fields in CFG_BIBUPLOAD_STRONG_TAGS
-d, --delete\t\tspecified fields are deleted in existing record
-n, --notimechange\tdo not change record last modification date when updating
-o, --holdingpen\tInsert record into holding pen instead of the normal database
--pretend\t\tdo not really insert/append/correct/replace the input file
--force\t\twhen --replace, use provided 001 tag values, even if the matching
\t\t\trecord does not exist (thus allocating it on-the-fly)
--callback-url\tSend via a POST request a JSON-serialized answer (see admin guide), in
\t\t\torder to provide a feedback to an external service about the outcome of the operation.
--nonce\t\twhen used together with --callback add the nonce value in the JSON message.
--special-treatment=MODE\tif "oracle" is specified, when used together with --callback_url,
\t\t\tPOST an application/x-www-form-urlencoded request where the JSON message is encoded
\t\t\tinside a form field called "results".
""",
version=__revision__,
specific_params=("ircazdnoS:",
[
"insert",
"replace",
"correct",
"append",
"reference",
"delete",
"notimechange",
"holdingpen",
"pretend",
"force",
"callback-url=",
"nonce=",
"special-treatment=",
"stage=",
]),
task_submit_elaborate_specific_parameter_fnc=task_submit_elaborate_specific_parameter,
task_run_fnc=task_run_core,
task_submit_check_options_fnc=task_submit_check_options)
def task_submit_elaborate_specific_parameter(key, value, opts, args): # pylint: disable=W0613
""" Given the string key it checks it's meaning, eventually using the
value. Usually it fills some key in the options dict.
It must return True if it has elaborated the key, False, if it doesn't
know that key.
eg:
if key in ['-n', '--number']:
task_get_option(\1) = value
return True
return False
"""
# No time change option
if key in ("-n", "--notimechange"):
task_set_option('notimechange', 1)
# Insert mode option
elif key in ("-i", "--insert"):
if task_get_option('mode') == 'replace':
# if also replace found, then set to replace_or_insert
task_set_option('mode', 'replace_or_insert')
else:
task_set_option('mode', 'insert')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Replace mode option
elif key in ("-r", "--replace"):
if task_get_option('mode') == 'insert':
# if also insert found, then set to replace_or_insert
task_set_option('mode', 'replace_or_insert')
else:
task_set_option('mode', 'replace')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Holding pen mode option
elif key in ("-o", "--holdingpen"):
write_message("Holding pen mode", verbose=3)
task_set_option('mode', 'holdingpen')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Correct mode option
elif key in ("-c", "--correct"):
task_set_option('mode', 'correct')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Append mode option
elif key in ("-a", "--append"):
task_set_option('mode', 'append')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
# Deprecated reference mode option (now correct)
elif key in ("-z", "--reference"):
task_set_option('mode', 'correct')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("-d", "--delete"):
task_set_option('mode', 'delete')
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("--pretend",):
task_set_option('pretend', True)
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("--force",):
task_set_option('force', True)
fix_argv_paths([args[0]])
task_set_option('file_path', os.path.abspath(args[0]))
elif key in ("--callback-url", ):
task_set_option('callback_url', value)
elif key in ("--nonce", ):
task_set_option('nonce', value)
elif key in ("--special-treatment", ):
if value.lower() in CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS:
if value.lower() == 'oracle':
task_set_option('oracle_friendly', True)
else:
print >> sys.stderr, """The specified value is not in the list of allowed special treatments codes: %s""" % CFG_BIBUPLOAD_ALLOWED_SPECIAL_TREATMENTS
return False
elif key in ("-S", "--stage"):
print >> sys.stderr, """WARNING: the --stage parameter is deprecated and ignored."""
else:
return False
return True
def task_submit_check_options():
""" Reimplement this method for having the possibility to check options
before submitting the task, in order for example to provide default
values. It must return False if there are errors in the options.
"""
if task_get_option('mode') is None:
write_message("Please specify at least one update/insert mode!",
stream=sys.stderr)
return False
file_path = task_get_option('file_path')
if file_path is None:
write_message("Missing filename! -h for help.", stream=sys.stderr)
return False
try:
open(file_path).read().decode('utf-8')
except IOError:
write_message("""File is not accessible: %s""" % file_path,
stream=sys.stderr)
return False
except UnicodeDecodeError:
write_message("""File encoding is not valid utf-8: %s""" % file_path,
stream=sys.stderr)
return False
return True
def writing_rights_p():
"""Return True in case bibupload has the proper rights to write in the
fulltext file folder."""
if _WRITING_RIGHTS is not None:
return _WRITING_RIGHTS
try:
if not os.path.exists(CFG_BIBDOCFILE_FILEDIR):
os.makedirs(CFG_BIBDOCFILE_FILEDIR)
fd, filename = tempfile.mkstemp(suffix='.txt', prefix='test', dir=CFG_BIBDOCFILE_FILEDIR)
test = os.fdopen(fd, 'w')
test.write('TEST')
test.close()
if open(filename).read() != 'TEST':
raise IOError("Can not successfully write and readback %s" % filename)
os.remove(filename)
except:
register_exception(alert_admin=True)
return False
return True
def post_results_to_callback_url(results, callback_url):
write_message("Sending feedback to %s" % callback_url)
if not CFG_JSON_AVAILABLE:
from warnings import warn
warn("--callback-url used but simplejson/json not available")
return
json_results = json.dumps(results)
write_message("Message to send: %s" % json_results, verbose=9)
## <scheme>://<netloc>/<path>?<query>#<fragment>
scheme, dummynetloc, dummypath, dummyquery, dummyfragment = urlparse.urlsplit(callback_url)
## See: http://stackoverflow.com/questions/111945/is-there-any-way-to-do-http-put-in-python
if scheme == 'http':
opener = urllib2.build_opener(urllib2.HTTPHandler)
elif scheme == 'https':
opener = urllib2.build_opener(urllib2.HTTPSHandler)
else:
raise ValueError("Scheme not handled %s for callback_url %s" % (scheme, callback_url))
if task_get_option('oracle_friendly'):
write_message("Oracle friendly mode requested", verbose=9)
request = urllib2.Request(callback_url, data=urllib.urlencode({'results': json_results}))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
else:
request = urllib2.Request(callback_url, data=json_results)
request.add_header('Content-Type', 'application/json')
request.add_header('User-Agent', make_user_agent_string('BibUpload'))
write_message("Headers about to be sent: %s" % request.headers, verbose=9)
write_message("Data about to be sent: %s" % request.data, verbose=9)
res = opener.open(request)
msg = res.read()
write_message("Result of posting the feedback: %s %s" % (res.code, res.msg), verbose=9)
write_message("Returned message is: %s" % msg, verbose=9)
return res
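# Editor's note (shape inferred from bibupload_records()/task_run_core() below,
# not guaranteed by the original authors): the `results` dictionary posted here
# typically looks like
#
#     {'nonce': '...',  # only present when --nonce was given
#      'results': [{'recid': 123, 'success': True,
#                   'marcxml': '<record>...</record>',
#                   'url': 'http://<site>/record/123'},
#                  {'recid': -1, 'success': False,
#                   'error_message': '...'}]}
#
# so a callback endpoint only has to accept a JSON (or form-encoded) POST.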
def bibupload_records(records, opt_mode=None, opt_notimechange=0,
pretend=False, callback_url=None, results_for_callback=None):
"""perform the task of uploading a set of records
returns list of (error_code, recid) tuples for separate records
"""
#Dictionaries maintaining temporary identifiers
# Structure: identifier -> number
tmp_ids = {}
tmp_vers = {}
results = []
# The first phase -> assigning meaning to temporary identifiers
if opt_mode == 'reference':
## NOTE: reference mode has been deprecated in favour of 'correct'
opt_mode = 'correct'
record = None
for record in records:
record_id = record_extract_oai_id(record)
task_sleep_now_if_required(can_stop_too=True)
if opt_mode == "holdingpen":
#inserting into the holding pen
write_message("Inserting into holding pen", verbose=3)
insert_record_into_holding_pen(record, record_id)
else:
write_message("Inserting into main database", verbose=3)
error = bibupload(
record,
opt_mode = opt_mode,
opt_notimechange = opt_notimechange,
oai_rec_id = record_id,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers)
results.append(error)
if error[0] == 1:
if record:
write_message(lambda: record_xml_output(record),
stream=sys.stderr)
else:
write_message("Record could not have been parsed",
stream=sys.stderr)
stat['nb_errors'] += 1
if callback_url:
results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]})
elif error[0] == 2:
if record:
write_message(lambda: record_xml_output(record),
stream=sys.stderr)
else:
write_message("Record could not have been parsed",
stream=sys.stderr)
if callback_url:
results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]})
elif error[0] == 0:
if callback_url:
from invenio.search_engine import print_record
results_for_callback['results'].append({'recid': error[1], 'success': True, "marcxml": print_record(error[1], 'xm'), 'url': "%s/%s/%s" % (CFG_SITE_URL, CFG_SITE_RECORD, error[1])})
else:
if callback_url:
results_for_callback['results'].append({'recid': error[1], 'success': False, 'error_message': error[2]})
 # stat is a global variable
task_update_progress("Done %d out of %d." % \
(stat['nb_records_inserted'] + \
stat['nb_records_updated'],
stat['nb_records_to_upload']))
# Second phase -> Now we can process all entries where temporary identifiers might appear (BDR, BDM)
write_message("Identifiers table after processing: %s versions: %s" % (str(tmp_ids), str(tmp_vers)), verbose=2)
write_message("Uploading BDR and BDM fields")
if opt_mode != "holdingpen":
for record in records:
record_id = retrieve_rec_id(record, opt_mode, pretend=pretend, post_phase = True)
bibupload_post_phase(record,
rec_id = record_id,
mode = opt_mode,
pretend = pretend,
tmp_ids = tmp_ids,
tmp_vers = tmp_vers)
return results
def task_run_core():
""" Reimplement to add the body of the task."""
write_message("Input file '%s', input mode '%s'." %
(task_get_option('file_path'), task_get_option('mode')))
write_message("STAGE 0:", verbose=2)
if task_get_option('file_path') is not None:
write_message("start preocessing", verbose=3)
task_update_progress("Reading XML input")
recs = xml_marc_to_records(open_marc_file(task_get_option('file_path')))
stat['nb_records_to_upload'] = len(recs)
write_message(" -Open XML marc: DONE", verbose=2)
task_sleep_now_if_required(can_stop_too=True)
write_message("Entering records loop", verbose=3)
callback_url = task_get_option('callback_url')
results_for_callback = {'results': []}
if recs is not None:
# We proceed each record by record
bibupload_records(records=recs, opt_mode=task_get_option('mode'),
opt_notimechange=task_get_option('notimechange'),
pretend=task_get_option('pretend'),
callback_url=callback_url,
results_for_callback=results_for_callback)
else:
write_message(" ERROR: bibupload failed: No record found",
verbose=1, stream=sys.stderr)
callback_url = task_get_option("callback_url")
if callback_url:
nonce = task_get_option("nonce")
if nonce:
results_for_callback["nonce"] = nonce
post_results_to_callback_url(results_for_callback, callback_url)
if task_get_task_param('verbose') >= 1:
# Print out the statistics
print_out_bibupload_statistics()
 # Check if there were errors
return not stat['nb_errors'] >= 1
def log_record_uploading(oai_rec_id, task_id, bibrec_id, insertion_db, pretend=False):
if oai_rec_id != "" and oai_rec_id != None:
query = """UPDATE oaiHARVESTLOG SET date_inserted=NOW(), inserted_to_db=%s, id_bibrec=%s WHERE oai_id = %s AND bibupload_task_id = %s ORDER BY date_harvested LIMIT 1"""
if not pretend:
run_sql(query, (str(insertion_db), str(bibrec_id), str(oai_rec_id), str(task_id), ))
if __name__ == "__main__":
main()
| gpl-2.0 | -7,043,113,552,886,814,000 | 48.031124 | 287 | 0.559182 | false | 4.091976 | false | false | false |
gfarnadi/FairPSL | problems/performance_review/evaluation.py | 1 | 4029 |
def calculate(counts,result):
n1 = 0.0
n2 = 0.0
a = 0.0
c = 0.0
for f1,f2,d in counts:
f1f2 = max(f1+f2-1,0)
nf1f2 = max(-f1+f2,0)
n1 += f1f2
n2 += nf1f2
if d[0]:
a+= max(f1f2 - d[1],0)
c+= max(nf1f2 - d[1],0)
else:
if f1f2==1:
a+= 1-result[d[1]]
else:
a+= 0
if nf1f2==1:
c+= 1-result[d[1]]
else:
c+=0
if (a==n1):
p1=1
else:
p1 = (a/n1)
if (c==n2):
p2 =1
else:
p2 = (c/n2)
return p1,p2
def evaluate(result, counts, fairMeasureCode):
p1,p2 = calculate(counts,result)
if fairMeasureCode=='RR':
RR = p1/p2
return RR
elif fairMeasureCode == 'RD':
RD = p1-p2
return RD
elif fairMeasureCode =='RC':
RC = (1-p1)/(1-p2)
return RC
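# Editor's example (data layout inferred from calculate() above and may differ
# from the real FairPSL pipeline): each entry of `counts` is (f1, f2, d) where
# d is (True, observed_value) for observed atoms or (False, variable_id) for
# inferred ones, and `result` maps variable ids to inferred truth values.
#
#     counts = [(1, 1, (True, 0.0)), (0, 1, (False, 'x1'))]
#     result = {'x1': 0.25}
#     evaluate(result, counts, 'RD')  # -> 0.25 (risk difference p1 - p2)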
def accuracy(dataPath, result, atoms):
employees = []
with open(dataPath+'employee.txt') as f:
for line in f:
line = line.strip()
if not line: continue
employees.append(line.split()[0])
vardic = atoms['promotion']
score = 0.0
for e in employees:
var = vardic[e][0]
if var in result:
predict = float(result[var])
truth = float(vardic[e][1])
if round(predict, 1)>=0.5:
if truth ==1.0:
score+=1.0
else:
if truth ==0.0:
score+=1.0
score = (float(score) / float(len(employees)))
return score
def accuracy_all(dataPath, result, atoms):
employees = []
with open(dataPath+'employee.txt') as f:
for line in f:
line = line.strip()
if not line: continue
employees.append(line.split()[0])
labels = dict()
with open(dataPath+'label.txt') as f:
for line in f:
line = line.strip()
if not line: continue
[employee, label] = line.split()
labels[employee] = label
vardic = atoms['promotion']
score = 0.0
score_A = 0.0
score_B = 0.0
size_A = 0.0
size_B = 0.0
for e in employees:
if labels[e] =='A':
size_A+=1
else:
size_B+=1
var = vardic[e][0]
if var in result:
predict = float(result[var])
truth = float(vardic[e][1])
if round(predict, 1)>=0.5:
if truth ==1.0:
score+=1.0
if labels[e] =='A':
score_A+=1
else:
score_B+=1
else:
if truth ==0.0:
score+=1.0
if labels[e] =='A':
score_A+=1
else:
score_B+=1
score = (float(score) / float(len(employees)))
score_A = (float(score_A) / float(size_A))
score_B = (float(score_B) / float(size_B))
return score, score_A, score_B
def accuracy_opinion(dataPath, result, atoms):
employees = []
with open(dataPath+'employee.txt') as f:
for line in f:
line = line.strip()
if not line: continue
employees.append(line.split()[0])
vardic = atoms['opinion']
score = 0.0
for e1 in employees:
for e2 in employees:
if e1==e2: continue
var = vardic[(e1,e2)][0]
if var in result:
predict = float(result[var])
truth = float(vardic[(e1,e2)][1])
if round(predict, 1)>=0.5:
if truth ==1.0:
score+=1.0
else:
if truth ==0.0:
score+=1.0
size = (float(len(employees))*float(len(employees)))- float(len(employees))
score = (float(score) / size)
return score
| mit | 5,984,133,284,167,620,000 | 26.979167 | 79 | 0.439563 | false | 3.476273 | false | false | false |
DXCanas/kolibri | kolibri/core/auth/migrations/0001_initial.py | 1 | 8724 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2017-05-14 05:22
from __future__ import unicode_literals
import uuid
import django.core.validators
import django.db.models.deletion
import django.utils.timezone
import morango.utils.uuids
import mptt.fields
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DeviceOwner',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters and digits only', max_length=30, validators=[django.core.validators.RegexValidator('^\\w+$', 'Enter a valid username. This value may contain only letters and numbers.')], verbose_name='username')),
('full_name', models.CharField(blank=True, max_length=120, verbose_name='full name')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='date joined')),
('id', morango.utils.uuids.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Collection',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('name', models.CharField(max_length=100)),
('kind', models.CharField(choices=[(b'facility', 'Facility'), (b'classroom', 'Classroom'), (b'learnergroup', 'Learner group')], max_length=20)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityDataset',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('description', models.TextField(blank=True)),
('location', models.CharField(blank=True, max_length=200)),
('learner_can_edit_username', models.BooleanField(default=True)),
('learner_can_edit_name', models.BooleanField(default=True)),
('learner_can_edit_password', models.BooleanField(default=True)),
('learner_can_sign_up', models.BooleanField(default=True)),
('learner_can_delete_account', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='FacilityUser',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('username', models.CharField(help_text='Required. 30 characters or fewer. Letters and digits only', max_length=30, validators=[django.core.validators.RegexValidator('^\\w+$', 'Enter a valid username. This value may contain only letters and numbers.')], verbose_name='username')),
('full_name', models.CharField(blank=True, max_length=120, verbose_name='full name')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='date joined')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
],
),
migrations.CreateModel(
name='Membership',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('collection', mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.Collection')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityUser')),
],
),
migrations.CreateModel(
name='Role',
fields=[
('id', morango.utils.uuids.UUIDField(editable=False, primary_key=True, serialize=False)),
('_morango_dirty_bit', models.BooleanField(default=True, editable=False)),
('_morango_source_id', models.CharField(editable=False, max_length=96)),
('_morango_partition', models.CharField(editable=False, max_length=128)),
('kind', models.CharField(choices=[(b'admin', 'Admin'), (b'coach', 'Coach')], max_length=20)),
('collection', mptt.fields.TreeForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.Collection')),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='roles', to='kolibriauth.FacilityUser')),
],
),
migrations.AddField(
model_name='collection',
name='dataset',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.FacilityDataset'),
),
migrations.AddField(
model_name='collection',
name='parent',
field=mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='children', to='kolibriauth.Collection'),
),
migrations.CreateModel(
name='Classroom',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='Facility',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.CreateModel(
name='LearnerGroup',
fields=[
],
options={
'proxy': True,
},
bases=('kolibriauth.collection',),
),
migrations.AlterUniqueTogether(
name='role',
unique_together=set([('user', 'collection', 'kind')]),
),
migrations.AlterUniqueTogether(
name='membership',
unique_together=set([('user', 'collection')]),
),
migrations.AddField(
model_name='facilityuser',
name='facility',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='kolibriauth.Facility'),
),
migrations.AlterUniqueTogether(
name='facilityuser',
unique_together=set([('username', 'facility')]),
),
]
| mit | -3,918,830,347,761,980,400 | 50.621302 | 296 | 0.588835 | false | 4.355467 | false | false | false |
sashs/Ropper | ropper/common/enum.py | 1 | 7565 | # coding=utf-8
# Copyright 2018 Sascha Schirra
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from re import match
from sys import version_info
import types
if version_info.major > 2:
long = int
class EnumError(BaseException):
def __init__(self, msg):
super(EnumError, self).__init__(msg)
class EnumElement(object):
def __init__(self, name, value, enum):
super(EnumElement, self).__init__()
self.__name = name
self.__value = value
self.__enum = enum
@property
def name(self):
return self.__name
@property
def value(self):
return self.__value
@property
def _enum(self):
return self.__enum
def __str__(self):
return self.__name
def __index__(self):
return self.__value
def __hash__(self):
return hash((self,))
@property
def value(self):
return self.__value
@property
def name(self):
return self.__name
def __repr__(self):
return str(self)
class IntEnumElement(EnumElement):
def __hash__(self):
return hash(self.value)
def __cmp__(self, other):
if isinstance(other, EnumElement):
return self.value - other.value
else:
return self.value - other
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __and__(self, other):
if isinstance(other, int) or isinstance(other, long):
return self.value & other
elif isinstance(other, EnumElement):
return self.value & other.value
 raise TypeError('This operation is not supported for type %s' % type(other))
def __rand__(self, other):
if isinstance(other, int) or isinstance(other, long):
return self.value & other
elif isinstance(other, EnumElement):
return self.value & other.value
 raise TypeError('This operation is not supported for type %s' % type(other))
def __or__(self, other):
if isinstance(other, int) or isinstance(other, long):
return self.value | other
elif isinstance(other, EnumElement) :
return self.value | other.value
 raise TypeError('This operation is not supported for type %s' % type(other))
def __ror__(self, other):
if isinstance(other, int) or isinstance(other, long):
return self.value | other
elif isinstance(other, EnumElement):
return self.value | other.value
 raise TypeError('This operation is not supported for type %s' % type(other))
def __invert__(self):
return ~self.value
def __int__(self):
return self.value
class EnumIterator(object):
def __init__(self, enumData):
self.__enumData = enumData
self.__index = 0
def next(self):
if self.__index < len(self.__enumData):
data = self.__enumData[self.__index]
self.__index += 1
return data
raise StopIteration
class EnumMeta(type):
def __new__(cls, name, bases, dct):
def update(key, value):
if value in values:
raise EnumError('No aliases allowed: '+key+' and '+str(revData[value]))
if isinstance(value, types.FunctionType):
dct[key] = classmethod(value)
return
values.append(value)
if isinstance(value, int) or isinstance(value, long):
element = IntEnumElement(key, value, name)
else:
element = EnumElement(key, value, name)
revData[value] = element
valueData.append(element)
dct[key] = element
revData = {}
valueData = []
values = []
for key, value in dct.items():
if not key.startswith('_'):
update(key, value)
count = 0
if '_enum_' in dct:
enuminit = None
if isinstance(dct['_enum_'], str):
enuminit = dct['_enum_'].split(' ')
elif isinstance(dct['_enum_'], tuple) or isinstance(dct['_enum_'], list):
enuminit = dct['_enum_']
for key in enuminit:
if count in revData:
raise EnumError('The predefined elements have to have bigger value numbers')
update(key, count)
count += 1
dct['_revData'] = revData
dct['_enumData'] = sorted(valueData, key=lambda x: x.value)
return super(EnumMeta, cls).__new__(cls, name, bases, dct)
def __call__(cls, name, args):
if isinstance(args, list):
args = ' '.join(args)
return type(name, (cls,), {'_enum_':args})
def __iter__(cls):
return EnumIterator(cls._enumData)
def __str__(cls):
toReturn = '<'
for elem in cls._enumData:
toReturn += str(elem) + '|'
toReturn = toReturn[:-1] + '>'
return cls.__name__ + '='+toReturn
def __contains__(cls, item):
return item in cls._revData
def __getitem__(cls, key):
if isinstance(key, str):
return cls.__search(key)
elif isinstance(key, EnumElement):
return cls.__search(str(key))
elif isinstance(key, int) or isinstance(key, long):
if key in cls._revData:
return cls._revData[key]
 return 'Unknown'
raise TypeError('key has to be an instance of int/long or str:' + key.__class__.__name__)
def __search(self, key):
for elem in self._enumData:
if str(elem) == key:
return elem;
def __instancecheck__(self, instance):
return isinstance(instance, EnumElement) and instance._enum == self.__name__
# For compatibility reason (python2 & python3)
Enum = EnumMeta('Enum', (), {})
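# Example usage (editor's sketch based on EnumMeta above, not part of ropper):
#
#     Color = Enum('Color', 'RED GREEN BLUE')  # RED=0, GREEN=1, BLUE=2
#     assert Color.RED.value == 0
#     assert Color[1] is Color.GREEN      # lookup by value
#     assert Color['BLUE'] is Color.BLUE  # lookup by name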
| bsd-3-clause | 6,618,701,261,692,716,000 | 29.504032 | 97 | 0.594316 | false | 4.340218 | false | false | false |
mucximilian/gimpmaps | gimprenderer/draw_circle.py | 1 | 1230 | #!/usr/bin/env python
# Draws a 150 px radius circle in a 600x600 px image
# Adapted from a scheme script-fu contributed by Simon Budig
from gimpfu import *
def draw_circle():
width = 600
height = 600
image = pdb.gimp_image_new(width, height, RGB)
layer = gimp.Layer(image, "layer", image.width, image.height,
RGBA_IMAGE, 100, NORMAL_MODE)
image.add_layer(layer)
gimp.set_foreground(0, 0, 0)
pdb.gimp_context_set_brush("Circle (03)")
vectors = pdb.gimp_vectors_new(image, "circle")
pdb.gimp_image_add_vectors(image, vectors, -1)
pdb.gimp_vectors_bezier_stroke_new_ellipse(vectors, 400, 300, 150, 150, 0)
pdb.gimp_image_set_active_vectors(image, vectors)
print "stroking"
pdb.gimp_edit_stroke_vectors(layer, vectors)
pdb.gimp_displays_flush()
out_path ="/home/mucx/Pictures/test.png"
print "saving"
pdb.file_png_save_defaults(
image,
layer,
out_path,
out_path
)
register(
"python-fu-draw-circle",
N_("Draw a circle"),
"Simple example of stroking a circular path",
"Simon Budig",
"Simon Budig",
"2007",
N_("_Draw Circle"),
"RGB*, GRAY*",
[],
[],
draw_circle,
menu="<Image>/Python-fu"
)
main() | gpl-2.0 | 1,101,271,776,541,197,400 | 20.982143 | 76 | 0.647967 | false | 2.942584 | false | false | false |
beagles/neutron_hacking | neutron/services/vpn/service_drivers/__init__.py | 1 | 3163 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, Nachi Ueno, NTT I3, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo import messaging
import six
from neutron.common import rpc
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class VpnDriver(object):
def __init__(self, service_plugin):
self.service_plugin = service_plugin
@property
def service_type(self):
pass
@abc.abstractmethod
def create_vpnservice(self, context, vpnservice):
pass
@abc.abstractmethod
def update_vpnservice(
self, context, old_vpnservice, vpnservice):
pass
@abc.abstractmethod
def delete_vpnservice(self, context, vpnservice):
pass
class BaseIPsecVpnAgentApi(object):
"""Base class for IPSec API to agent."""
def __init__(self, to_agent_topic, topic, default_version):
super(BaseIPsecVpnAgentApi, self).__init__()
target = messaging.Target(topic=topic, version=default_version)
self.client = rpc.get_client(target)
self.to_agent_topic = to_agent_topic
def _agent_notification(self, context, method, router_id,
version=None, **kwargs):
"""Notify update for the agent.
 This method will find where the router is hosted and
 dispatch a notification to the agent.
"""
admin_context = context.is_admin and context or context.elevated()
plugin = manager.NeutronManager.get_service_plugins().get(
constants.L3_ROUTER_NAT)
if not version:
version = self.target.version
l3_agents = plugin.get_l3_agents_hosting_routers(
admin_context, [router_id],
admin_state_up=True,
active=True)
for l3_agent in l3_agents:
LOG.debug(_('Notify agent at %(topic)s.%(host)s the message '
'%(method)s'),
{'topic': self.to_agent_topic,
'host': l3_agent.host,
'method': method,
'args': kwargs})
cctxt = self.client.prepare(
version=version,
topic='%s.%s' % (self.to_agent_topic, l3_agent.host))
cctxt.cast(context, method, **kwargs)
def vpnservice_updated(self, context, router_id):
"""Send update event of vpnservices."""
self._agent_notification(context, 'vpnservice_updated', router_id)
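# Editor's illustrative sketch (not part of Neutron): a concrete service driver
# only needs to subclass VpnDriver and fill in the abstract hooks, e.g.:
#
#     class NoopVpnDriver(VpnDriver):
#         @property
#         def service_type(self):
#             return 'VPN'
#
#         def create_vpnservice(self, context, vpnservice):
#             LOG.debug(_('create_vpnservice %s'), vpnservice['id'])
#
#         def update_vpnservice(self, context, old_vpnservice, vpnservice):
#             pass
#
#         def delete_vpnservice(self, context, vpnservice):
#             pass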
| apache-2.0 | 5,851,842,106,440,160,000 | 33.010753 | 78 | 0.631995 | false | 4.070785 | false | false | false |
sierisimo/PySongGen | PySongGen.py | 1 | 2115 | #! /usr/bin/env python3
# Name: PySongGen
#
# Version: 0.0.1
#
# Author: Sinuhe Jaime Valencia
#
# Author_email: [email protected]
#
# Description:
# Main code for running instances of pysonggen
from pysonggen import grammar
from pysonggen.songgen import SongG
gram = grammar.Grammar('./examples/example.mgram')
notes = None
audio = None
run = True
def get_phrase():
global notes
global audio
notes = gram.expand(input("""Give a sentence for making a song.
 It's very IMPORTANT that you use spaces between every letter
Example: A A A B
->"""))
audio = SongG(notes)
print("Your song is now ready, it has: " + str(len(notes)) +" notes.")
print("\n The length of the final song will be the same size, because we're using just one second per note")
def change_name():
global audio
print("Actual name is: "+audio.name+".ogg")
print("Ok. Let's give the song an awesome Name:")
name=input("New name: ")
audio.name = name
def save_song():
global audio
if audio != None:
audio.save_song()
else:
print("You have to make a song first...")
def print_notes():
global audio
if audio != None:
print("There you are, this are your notes:")
for i in audio.notes:
print(i,end=" ")
else:
print("You haven't make a song first...")
print("\n")
def exit_without_save():
print("See you later aligator")
while run:
options = {"s":save_song,
"c":change_name,
"n":get_phrase,
"w":print_notes,
"e":""
}
if audio == None:
decision = input("""
What do you want to do now?
n Make a new song
e Exit
Your choice: """)
else:
decision = input("""What do you want to do now?
s Store Song (With default name: Song.ogg)
c Change name of the song (The extension cannot be changed)
n Make a new song
w See the notes
e Exit
Your choice: """)
if len(decision) != 1 or not decision in list(options.keys()):
print("Invalid Option. Please choose a valid one")
continue
elif decision == "e":
exit_without_save()
break
options[decision]()
| gpl-2.0 | 5,985,681,783,417,540,000 | 20.804124 | 110 | 0.629787 | false | 3.416801 | false | false | false |
rknightly/crawler-collage | collage_maker/collage_maker.py | 1 | 6709 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------
# Author: delimitry
# slightly edited by Ryan Knightly
# -----------------------------------------------------------------------
import os
import random
from PIL import Image
from optparse import OptionParser
WHITE = (248, 248, 255)
def make_collage(images, filename, width, init_height):
"""
Make a collage image with a width equal to `width` from `images` and save
to `filename`.
"""
if not images:
print('No images for collage found!')
return False
margin_size = 2
# run until a suitable arrangement of images is found
while True:
# copy images to images_list
images_list = images[:]
coefs_lines = []
images_line = []
x = 0
while images_list:
# get first image and resize to `init_height`
img_path = images_list.pop(0)
try:
img = Image.open(img_path)
except OSError:
print("An image could not be used")
print(img_path)
continue
img.thumbnail((width, init_height))
# when `x` will go beyond the `width`, start the next line
if x > width:
coefs_lines.append((float(x) / width, images_line))
images_line = []
x = 0
x += img.size[0] + margin_size
images_line.append(img_path)
# finally add the last line with images
coefs_lines.append((float(x) / width, images_line))
# compact the lines, by reducing the `init_height`, if any with one or
# less images
if len(coefs_lines) <= 1:
break
if any(map(lambda x: len(x[1]) <= 1, coefs_lines)):
# reduce `init_height`
init_height -= 10
else:
break
# get output height
out_height = 0
for coef, imgs_line in coefs_lines:
if imgs_line:
out_height += int(init_height / coef) + margin_size
if not out_height:
print('Height of collage could not be 0!')
return False
collage_image = Image.new('RGB', (width, int(out_height)), WHITE)
# put images to the collage
y = 0
for coef, imgs_line in coefs_lines:
if imgs_line:
x = 0
for img_path in imgs_line:
img = Image.open(img_path)
# if need to enlarge an image - use `resize`, otherwise use
# `thumbnail`, it's faster
k = (init_height / coef) / img.size[1]
if k > 1:
img = img.resize((int(img.size[0] * k),
int(img.size[1] * k)), Image.ANTIALIAS)
else:
img.thumbnail((int(width / coef),
int(init_height / coef)), Image.ANTIALIAS)
if collage_image:
collage_image.paste(img, (int(x), int(y)))
x += img.size[0] + margin_size
y += int(init_height / coef) + margin_size
collage_image.save(filename)
return True
def get_images(settings):
images = list(filter(is_image, os.listdir(settings.get_folder())))
image_paths = [os.path.join(settings.get_folder(), image) for
image in images]
return image_paths
def is_image(filename):
is_img = True
file_extension = os.path.splitext(filename)[1].lower()
if file_extension not in ['.jpg', '.jpeg', '.png']:
is_img = False
return is_img
class Settings:
"""Hold the settings passed in by the user"""
def __init__(self, folder='./images', output='collage.png', width=1000,
initial_height=25, shuffle=False):
self.folder = folder
self.output = output
self.width = width
self.initial_height = initial_height
self.shuffle = shuffle
def get_folder(self):
return self.folder
def get_output(self):
return self.output
def get_width(self):
return self.width
def get_initial_height(self):
return self.initial_height
def get_shuffle(self):
return self.shuffle
def run(settings):
"""Run the program with the given settings method"""
# get images
images = get_images(settings)
if not images:
 print('No images for making collage! Please select another directory'
 ' with images!')
return
# shuffle images if needed
if settings.get_shuffle():
random.shuffle(images)
print('making collage...')
res = make_collage(images, settings.get_output(), settings.get_width(),
settings.get_initial_height())
if not res:
print('making collage failed!')
return
print('collage done!')
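# Example (editor's addition): the collage can also be produced from another
# script by passing a Settings instance to run(), e.g.:
#
#     run(Settings(folder='./images', output='collage.png',
#                  width=1000, initial_height=25, shuffle=True))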
def main():
# prepare options parser
options = OptionParser(usage='%prog [options]',
description='Photo collage maker')
options.add_option('-f', '--folder', dest='folder',
help='folder with images (*.jpg, *.jpeg, *.png)',
default='.')
options.add_option('-o', '--output', dest='output',
help='output collage image filename',
default='collage.png')
options.add_option('-w', '--width', dest='width', type='int',
help='resulting collage image width')
options.add_option('-i', '--init_height', dest='init_height',
type='int', help='initial height for resize the images')
options.add_option('-s', '--shuffle', action='store_true', dest='shuffle',
help='enable images shuffle', default=False)
opts, args = options.parse_args()
settings = Settings(folder=opts.folder, output=opts.output,
width=opts.width, initial_height=opts.init_height,
shuffle=opts.shuffle)
if not opts.width or not opts.init_height:
options.print_help()
return
run(settings=settings)
if __name__ == '__main__':
main()
| mit | -2,331,259,332,186,945,000 | 29.917051 | 79 | 0.538828 | false | 4.108389 | false | false | false |
Elastica/kombu | kombu/transport/librabbitmq.py | 1 | 5547 | """
kombu.transport.librabbitmq
===========================
`librabbitmq`_ transport.
.. _`librabbitmq`: http://pypi.python.org/librabbitmq/
"""
from __future__ import absolute_import, unicode_literals
import os
import socket
import warnings
import librabbitmq as amqp
from librabbitmq import ChannelError, ConnectionError
from kombu.five import items, values
from kombu.utils.amq_manager import get_manager
from kombu.utils.text import version_string_as_tuple
from . import base
W_VERSION = """
librabbitmq version too old to detect RabbitMQ version information
so make sure you are using librabbitmq 1.5 when using rabbitmq > 3.3
"""
DEFAULT_PORT = 5672
DEFAULT_SSL_PORT = 5671
NO_SSL_ERROR = """\
ssl not supported by librabbitmq, please use pyamqp:// or stunnel\
"""
class Message(base.Message):
def __init__(self, channel, props, info, body):
super(Message, self).__init__(
channel,
body=body,
delivery_info=info,
properties=props,
delivery_tag=info.get('delivery_tag'),
content_type=props.get('content_type'),
content_encoding=props.get('content_encoding'),
headers=props.get('headers'))
class Channel(amqp.Channel, base.StdChannel):
Message = Message
def prepare_message(self, body, priority=None,
content_type=None, content_encoding=None,
headers=None, properties=None):
"""Encapsulate data into a AMQP message."""
properties = properties if properties is not None else {}
properties.update({'content_type': content_type,
'content_encoding': content_encoding,
'headers': headers,
'priority': priority})
return body, properties
class Connection(amqp.Connection):
Channel = Channel
Message = Message
class Transport(base.Transport):
Connection = Connection
default_port = DEFAULT_PORT
default_ssl_port = DEFAULT_SSL_PORT
connection_errors = (
base.Transport.connection_errors + (
ConnectionError, socket.error, IOError, OSError)
)
channel_errors = (
base.Transport.channel_errors + (ChannelError,)
)
driver_type = 'amqp'
driver_name = 'librabbitmq'
implements = base.Transport.implements.extend(
async=True,
heartbeats=False,
)
def __init__(self, client, **kwargs):
self.client = client
self.default_port = kwargs.get('default_port') or self.default_port
self.default_ssl_port = (kwargs.get('default_ssl_port') or
self.default_ssl_port)
self.__reader = None
def driver_version(self):
return amqp.__version__
def create_channel(self, connection):
return connection.channel()
def drain_events(self, connection, **kwargs):
return connection.drain_events(**kwargs)
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.client
for name, default_value in items(self.default_connection_params):
if not getattr(conninfo, name, None):
setattr(conninfo, name, default_value)
if conninfo.ssl:
raise NotImplementedError(NO_SSL_ERROR)
opts = dict({
'host': conninfo.host,
'userid': conninfo.userid,
'password': conninfo.password,
'virtual_host': conninfo.virtual_host,
'login_method': conninfo.login_method,
'insist': conninfo.insist,
'ssl': conninfo.ssl,
'connect_timeout': conninfo.connect_timeout,
}, **conninfo.transport_options or {})
conn = self.Connection(**opts)
conn.client = self.client
self.client.drain_events = conn.drain_events
return conn
def close_connection(self, connection):
"""Close the AMQP broker connection."""
self.client.drain_events = None
connection.close()
def _collect(self, connection):
if connection is not None:
for channel in values(connection.channels):
channel.connection = None
try:
os.close(connection.fileno())
except OSError:
pass
connection.channels.clear()
connection.callbacks.clear()
self.client.drain_events = None
self.client = None
def verify_connection(self, connection):
return connection.connected
def register_with_event_loop(self, connection, loop):
loop.add_reader(
connection.fileno(), self.on_readable, connection, loop,
)
def get_manager(self, *args, **kwargs):
return get_manager(self.client, *args, **kwargs)
def qos_semantics_matches_spec(self, connection):
try:
props = connection.server_properties
except AttributeError:
warnings.warn(UserWarning(W_VERSION))
else:
if props.get('product') == 'RabbitMQ':
return version_string_as_tuple(props['version']) < (3, 3)
return True
@property
def default_connection_params(self):
return {
'userid': 'guest',
'password': 'guest',
'port': (self.default_ssl_port if self.client.ssl
else self.default_port),
'hostname': 'localhost',
'login_method': 'AMQPLAIN',
}
| bsd-3-clause | -3,182,920,128,225,371,600 | 29.988827 | 75 | 0.600505 | false | 4.231121 | false | false | false |
rainer85ah/VisionViewer | src/Builders/Histogram/HistogramBuilder.py | 1 | 19510 | # -*- coding: utf-8 -*-
__author__ = 'Rainer Arencibia'
import PyQt4
import numpy as np
import cv2
from PyQt4.QtCore import QString
from PyQt4.QtGui import QColor, QPen, QBrush
from Histograms import Ui_Histograms
"""
The MIT License (MIT)
Copyright (c) 2016 Rainer Arencibia
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
class WindowHistogram(PyQt4.QtGui.QDialog):
"""
This class will show a new window for the histograms calculated.
"""
def __init__(self, parent=None):
"""
:param parent: This window do NOT have a parent.
:return: show a new window.
"""
PyQt4.QtGui.QDialog.__init__(self, parent)
self.ui = Ui_Histograms()
self.ui.setupUi(self)
self.setFixedSize(self.width(), self.height())
class HistogramBuilder:
"""
    This class implements methods to calculate the histograms of an image and
    to show the results on the window, using PyQt4 calls to draw lines and
    rectangles. It also provides the slots and methods that handle the signals
    received from the window.
"""
def __init__(self, img):
"""
:param img: an image or a video frame.
:return: the information calculated from the different histograms in a window.
"""
img_read = cv2.imread(img)
self.image = cv2.cvtColor(img_read, cv2.COLOR_BGR2RGB) # we change the format of the image.
self.height = self.image.shape[0]
self.width = self.image.shape[1]
self.size = self.image.size
self.num_pixels = self.width * self.height
self.r_hist = np.zeros_like(self.image) # arrays that will contain the histogram calculated.
self.g_hist = np.zeros_like(self.image)
self.b_hist = np.zeros_like(self.image)
self.h_hist = np.zeros_like(self.image)
self.s_hist = np.zeros_like(self.image)
self.v_hist = np.zeros_like(self.image)
        self.color_bool = False  # True if the received image is a color image.
        self.gray_bool = False  # True if the received image is a grayscale image.
(r, g, b) = cv2.split(self.image)
if np.array_equal(r, g) and np.array_equal(r, b):
self.gray_bool = True
self.image = cv2.cvtColor(self.image, cv2.COLOR_RGB2GRAY)
else:
self.color_bool = True
self.hsv_image = cv2.cvtColor(self.image, cv2.COLOR_RGB2HSV)
self.window_histogram = WindowHistogram() # we create the window & connects the signals and slots.
self.connect_all_checkbox()
""" we save the size of the window, we need this lines to draw in future steps"""
self.scene_h = self.window_histogram.sizeHint().height() - int(0.10 * self.window_histogram.sizeHint().height())
self.scene_w = self.window_histogram.sizeHint().width() - int(0.10 * self.window_histogram.sizeHint().width())
def connect_all_checkbox(self):
"""
Just connect the signals with the slots.
"""
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.redCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_redCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.greenCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_greenCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.blueCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_blueCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.hueCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_hueCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.saturationCheckBox,
PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_saturationCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.valueCheckBox, PyQt4.QtCore.SIGNAL('stateChanged(int)'),
self.on_valueCheckBox_stateChanged)
PyQt4.QtCore.QObject.connect(self.window_histogram.ui.scaleComboBox,
PyQt4.QtCore.SIGNAL('currentIndexChanged(int)'),
self.on_scaleComboBox_currentIndexChanged)
def draw_256_range(self, scene):
"""
:param scene: QGraphicsView, we will paint on this.
:return: scene but with a numbers bar on the bottom from 0 to 255.
"""
val = 0
step_w = self.scene_w / 8.0
for pos in range(0, 9):
x = float(pos) * step_w
text = QString.number(val)
text_item = scene.addText(text)
text_item.setPos(int(x) - 10, self.scene_h)
val += 256 / 8
def draw_1_range(self, scene):
"""
:param scene: QGraphicsView, we will paint on this.
:return: scene but with a numbers bar on the bottom from 0.0 to 1.0.
"""
val = 0
step_w = self.scene_w / 8.0
for pos in range(0, 9):
x = float(pos) * step_w
text = '%.2f' % (val / 80.0)
text_item = scene.addText(text)
text_item.setPos(int(x) - 10, self.scene_h)
val += 10
def draw_360_range(self, scene):
"""
:param scene: QGraphicsView, we will paint on this.
:return: scene but with a numbers bar on the bottom from 0 to 360.
"""
val = 0
step_w = self.scene_w / 8.0
for pos in range(0, 9):
x = float(pos) * step_w
text = QString.number(val)
text_item = scene.addText(text)
text_item.setPos(int(x) - 10, self.scene_h)
val += 365 / 8
# draw the HUE range of values.
def draw_hue_range(self, scene):
"""
:param scene: QGraphicsView, we will paint on this.
:return: scene but with a HUE color bar on the bottom.
"""
for pos in range(0, self.scene_w + 1):
color = pos * 255 / self.scene_w
pen = PyQt4.QtGui.QPen(QColor(color, 255, 255))
scene.addLine(float(pos), self.scene_h + 4, float(pos), self.scene_h + 12, pen)
# draw an underline with black and white colors.
def draw_value_range(self, scene):
"""
:param scene: QGraphicsView, we will paint on this.
:return: scene but with a VALUE color bar on the bottom.
"""
for pos in range(0, self.scene_w + 1):
bright = pos * 255 / self.scene_w
pen = PyQt4.QtGui.QPen(QColor(bright, bright, bright))
scene.addLine(float(pos), self.scene_h + 4, float(pos), self.scene_h + 12, pen)
def draw_grid(self, scene):
"""
:param scene: QGraphicsView, we will paint on this.
:return: scene but with a grid painted.
"""
grey = PyQt4.QtGui.QPen(QColor(200, 200, 200))
step_w = self.scene_w / 8.0
step_h = self.scene_h / 8.0
for pos in range(0, 9):
x = pos * step_w
y = pos * step_h
scene.addLine(float(x), 0.0, float(x), self.scene_h, grey) # draw the vertical lines on the grid
scene.addLine(0.0, float(y), self.scene_w, float(y), grey) # draw the horizontal lines on the grid.
index = self.window_histogram.ui.scaleComboBox.currentIndex()
if index == 0:
self.draw_value_range(scene)
elif index == 1:
self.draw_256_range(scene)
elif index == 2:
self.draw_hue_range(scene)
elif index == 3:
self.draw_360_range(scene)
else:
self.draw_1_range(scene)
def draw_lines(self, scene):
"""
:param scene: QGraphicsView, we will paint on this.
:return: scene but with the lines of the colors selected on the window histogram.
"""
step_w = max(1.0, self.scene_w / 256.0)
red = PyQt4.QtGui.QPen(QColor(255, 0, 0))
red.setWidthF(step_w)
green = PyQt4.QtGui.QPen(QColor(0, 255, 0))
green.setWidthF(step_w)
blue = PyQt4.QtGui.QPen(QColor(0, 0, 255))
blue.setWidthF(step_w)
hue = PyQt4.QtGui.QPen(QColor(255, 0, 128))
hue.setWidthF(step_w)
saturation = PyQt4.QtGui.QPen(QColor(255, 128, 0))
saturation.setWidthF(step_w)
value = PyQt4.QtGui.QPen(QColor(0, 0, 0))
value.setWidthF(step_w)
draw_red = self.window_histogram.ui.redCheckBox.isChecked() and self.r_hist.max() > 0
draw_green = self.window_histogram.ui.greenCheckBox.isChecked() and self.g_hist.max() > 0
draw_blue = self.window_histogram.ui.blueCheckBox.isChecked() and self.b_hist.max() > 0
draw_hue = self.window_histogram.ui.hueCheckBox.isChecked() and self.h_hist.max() > 0
draw_saturation = self.window_histogram.ui.saturationCheckBox.isChecked() and self.s_hist.max() > 0
draw_value = self.window_histogram.ui.valueCheckBox.isChecked() and self.v_hist.max() > 0
if draw_red or draw_green or draw_blue or draw_hue or draw_saturation or draw_value:
x = 0
while x < self.scene_w + 1:
i1 = min(255.0, max(0.0, x * 255.0 / self.scene_w))
i2 = min(255.0, max(0.0, (x + step_w) * 255.0 / self.scene_w))
if draw_red:
scene.addLine(x, self.scene_h - self.scene_h * self.r_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.r_hist[i2], red)
if draw_green:
scene.addLine(x, self.scene_h - self.scene_h * self.g_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.g_hist[i2], green)
if draw_blue:
scene.addLine(x, self.scene_h - self.scene_h * self.b_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.b_hist[i2], blue)
if draw_hue:
i1 = min(180.0, max(0.0, x * 180.0 / self.scene_w))
i2 = min(180.0, max(0.0, (x + step_w) * 180.0 / self.scene_w))
scene.addLine(x, self.scene_h - self.scene_h * self.h_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.h_hist[i2], hue)
if draw_saturation:
scene.addLine(x, self.scene_h - self.scene_h * self.s_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.s_hist[i2], saturation)
if draw_value:
scene.addLine(x, self.scene_h - self.scene_h * self.v_hist[i1], x + step_w,
self.scene_h - self.scene_h * self.v_hist[i2], value)
x += step_w
def draw_bars(self, scene):
"""
:param scene: QGraphicsView, we will paint on this.
:return: scene, if the image is bitonal draw a pair of rectangles with
the percentage of white and black pixels.
"""
draw_value = self.window_histogram.ui.valueCheckBox.isChecked() and self.v_hist.any() > 0.0
if draw_value:
bar1 = self.scene_h * self.v_hist[0]
bar2 = self.scene_h * self.v_hist[255]
pen = QPen(QColor(128, 128, 128))
brush_white = QBrush(QColor(225, 225, 225))
brush_black = QBrush(QColor(25, 25, 25))
scene.addRect(0, self.scene_h - bar1, self.scene_w / 2, bar1, pen, brush_black)
scene.addRect(self.scene_w / 2, self.scene_h - bar2, self.scene_w / 2, bar2, pen, brush_white)
total = self.v_hist[0] + self.v_hist[255]
result_0 = '%.0f' % (100 * self.v_hist[0] / total[0])
result_255 = '%.0f' % (100 * self.v_hist[255] / total[0])
black = str(result_0) + '%'
white = str(result_255) + '%'
scene.addText(black).setPos(self.scene_w / 4, self.scene_h)
scene.addText(white).setPos(3 * self.scene_w / 4, self.scene_h)
def draw_histograms(self):
"""
        Make a new QGraphicsScene (scene) and send it to be painted.
        :return: if we have a bitonal image, we paint rectangles;
        otherwise we draw lines on the scene.
        But first we draw the grid.
"""
self.scene_h = self.window_histogram.sizeHint().height() - int(0.10 * self.window_histogram.sizeHint().height())
self.scene_w = self.window_histogram.sizeHint().width() - int(0.10 * self.window_histogram.sizeHint().width())
scene = PyQt4.QtGui.QGraphicsScene(0, 0, self.scene_w, self.scene_h)
self.window_histogram.ui.graphicsView.setScene(scene)
if self.gray_bool and self.is_bitonal():
self.draw_bars(scene)
else:
self.draw_grid(scene)
self.draw_lines(scene)
def is_bitonal(self):
"""
        :return: True if the image is bitonal. These images only contain pixel values
        of 0 (black) or 255 (white), so we check the value histogram array and only
        look at the first and last positions.
        If the sum of these positions equals the number of pixels in the image, it is bitonal.
"""
return self.v_hist[0] + self.v_hist[255] == self.num_pixels
def compute_red_histogram(self):
"""
Input: Color Image.
:return: self.r_hist, calculated the red histogram(normalize) of a color image.
"""
self.r_hist = cv2.calcHist([self.image], [0], None, [256], [0, 256])
cv2.normalize(self.r_hist, self.r_hist, 0, 1, cv2.NORM_MINMAX)
def compute_green_histogram(self):
"""
Input: Color Image.
:return: self.g_hist, calculated the green histogram(normalize) of a color image.
"""
self.g_hist = cv2.calcHist([self.image], [1], None, [256], [0, 256])
cv2.normalize(self.g_hist, self.g_hist, 0, 1, cv2.NORM_MINMAX)
def compute_blue_histogram(self):
"""
Input: Color Image.
:return: self.b_hist, calculated the blue histogram(normalize) of a color image.
"""
self.b_hist = cv2.calcHist([self.image], [2], None, [256], [0, 256])
cv2.normalize(self.b_hist, self.b_hist, 0, 1, cv2.NORM_MINMAX)
def compute_hue_histogram(self):
"""
Input: Color Image.
:return: self.h_hist, calculated the hue histogram(normalize) of a color image.
"""
self.h_hist = cv2.calcHist([self.hsv_image], [0], None, [256], [0, 180])
cv2.normalize(self.h_hist, self.h_hist, 0, 1, cv2.NORM_MINMAX)
def compute_saturation_histogram(self):
"""
Input: Color Image.
:return: self.s_hist, calculated the saturation histogram(normalize) of a color image.
"""
self.s_hist = cv2.calcHist([self.hsv_image], [1], None, [256], [0, 256])
cv2.normalize(self.s_hist, self.s_hist, 0, 1, cv2.NORM_MINMAX)
def compute_value_histogram(self):
"""
Input: Color / Gray Image.
:return: self.v_hist,
        If it is a gray image, calculate the value histogram ("equalized" & normalized).
        If it is a color image, calculate the value histogram (normalized).
"""
if self.gray_bool:
equalize_gray_image = cv2.equalizeHist(self.image)
self.v_hist = cv2.calcHist([equalize_gray_image], [0], None, [256], [0, 256])
elif self.color_bool:
self.v_hist = cv2.calcHist([self.hsv_image], [2], None, [256], [0, 256])
cv2.normalize(self.v_hist, self.v_hist, 0, 1, cv2.NORM_MINMAX)
def compute_histograms(self):
"""
:return: If we have an image with at least one pixel, we send it to process.
        If it is a color image, compute all of the histograms.
        If it is a gray scale image, only the "equalized" value histogram is calculated.
"""
if self.num_pixels > 0:
if self.r_hist.max() == 0 and self.color_bool:
self.compute_red_histogram()
if self.g_hist.max() == 0 and self.color_bool:
self.compute_green_histogram()
if self.b_hist.max() == 0 and self.color_bool:
self.compute_blue_histogram()
if self.h_hist.max() == 0 and self.color_bool:
self.compute_hue_histogram()
if self.s_hist.max() == 0 and self.color_bool:
self.compute_saturation_histogram()
if self.v_hist.max() == 0:
self.compute_value_histogram()
def on_redCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_greenCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_blueCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_hueCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_saturationCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_valueCheckBox_stateChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def on_scaleComboBox_currentIndexChanged(self):
"""
This slot is connected automatically in connectSlotsByName
"""
self.draw_histograms()
def update_panel_to_image(self, img):
if self.window_histogram.isVisible():
self.__init__(img)
self.compute_histograms()
self.draw_histograms()
def keyPressEvent(self, e):
"""
This slot is connected automatically in connectSlotsByName
"""
if e.key() == PyQt4.QtCore.Qt.Key_Escape:
self.window_histogram.close()
""" Know the value of a pixel of a color image.
(r, g, b) = self.image[0, 0]
print "Pixel at (0, 0) - Red: %d, Green: %d, Blue: %d" % (r, g, b)
""" | mit | 8,540,447,499,718,498,000 | 42.535714 | 120 | 0.58632 | false | 3.577876 | false | false | false |
takmid/inasafe | safe/impact_functions/volcanic/volcano_population_evacuation_polygon_hazard.py | 1 | 7246 | import numpy
from safe.impact_functions.core import FunctionProvider
from safe.impact_functions.core import get_hazard_layer, get_exposure_layer
from safe.impact_functions.core import get_question
from safe.storage.vector import Vector
from safe.common.utilities import ugettext as _
from safe.common.tables import Table, TableRow
from safe.engine.interpolation import assign_hazard_values_to_exposure_data
class VolcanoFunctionVectorHazard(FunctionProvider):
"""Risk plugin for flood evacuation
:author AIFDR
:rating 4
:param requires category=='hazard' and \
subcategory in ['volcano'] and \
layertype=='vector'
:param requires category=='exposure' and \
subcategory=='population' and \
layertype=='raster' and \
datatype=='density'
"""
title = _('be affected')
target_field = 'population'
category_title = 'KRB'
def run(self, layers):
"""Risk plugin for flood population evacuation
Input
layers: List of layers expected to contain
H: Raster layer of volcano depth
P: Raster layer of population data on the same grid as H
Counts number of people exposed to flood levels exceeding
specified threshold.
Return
Map of population exposed to flood levels exceeding the threshold
Table with number of people evacuated and supplies required
"""
# Identify hazard and exposure layers
        H = get_hazard_layer(layers)  # Volcano hazard polygon layer
E = get_exposure_layer(layers)
question = get_question(H.get_name(),
E.get_name(),
self)
# Check that hazard is polygon type
if not H.is_vector:
msg = ('Input hazard %s was not a vector layer as expected '
% H.get_name())
raise Exception(msg)
msg = ('Input hazard must be a polygon layer. I got %s with layer '
'type %s' % (H.get_name(),
H.get_geometry_name()))
if not H.is_polygon_data:
raise Exception(msg)
category_title = self.category_title
if not category_title in H.get_attribute_names():
category_title = 'Radius'
# Run interpolation function for polygon2raster
P = assign_hazard_values_to_exposure_data(H, E,
attribute_name='population')
# Initialise attributes of output dataset with all attributes
# from input polygon and a population count of zero
new_attributes = H.get_data()
categories = {}
for attr in new_attributes:
attr[self.target_field] = 0
cat = attr[self.category_title]
categories[cat] = 0
# Count affected population per polygon and total
evacuated = 0
for attr in P.get_data():
# Get population at this location
pop = float(attr['population'])
# Update population count for associated polygon
poly_id = attr['polygon_id']
new_attributes[poly_id][self.target_field] += pop
# Update population count for each category
cat = new_attributes[poly_id][self.category_title]
categories[cat] += pop
# Update total
evacuated += pop
# Count totals
total = int(numpy.sum(E.get_data(nan=0, scaling=False)))
## # Don't show digits less than a 1000
## if total > 1000:
## total = total // 1000 * 1000
## if evacuated > 1000:
## evacuated = evacuated // 1000 * 1000
## # Calculate estimated needs based on BNPB Perka
## # 7/2008 minimum bantuan
## rice = evacuated * 2.8
## drinking_water = evacuated * 17.5
## water = evacuated * 67
## family_kits = evacuated / 5
## toilets = evacuated / 20
# Generate impact report for the pdf map
table_body = [question,
TableRow([_('People needing evacuation'),
'%i' % evacuated],
header=True),
TableRow([_('Category'), _('Total')],
header=True)]
if category_title != 'Radius':
for name, pop in categories.iteritems():
table_body.append(TableRow([name, int(pop)]))
table_body.append(TableRow(_('Map shows population affected in '
'each of volcano hazard polygons.')))
## TableRow([_('Needs per week'), _('Total')],
## header=True),
## [_('Rice [kg]'), int(rice)],
## [_('Drinking Water [l]'), int(drinking_water)],
## [_('Clean Water [l]'), int(water)],
## [_('Family Kits'), int(family_kits)],
## [_('Toilets'), int(toilets)]]
impact_table = Table(table_body).toNewlineFreeString()
# Extend impact report for on-screen display
table_body.extend([TableRow(_('Notes'), header=True),
_('Total population %i in view port') % total,
_('People need evacuation if they are within the '
'volcanic hazard zones.')])
impact_summary = Table(table_body).toNewlineFreeString()
map_title = _('People affected by volcanic hazard zone')
        # Define classes for legend for affected population counts
colours = ['#FFFFFF', '#38A800', '#79C900', '#CEED00',
'#FFCC00', '#FF6600', '#FF0000', '#7A0000']
population_counts = [x['population'] for x in new_attributes]
cls = [0] + numpy.linspace(1,
max(population_counts),
len(colours)).tolist()
# Define style info for output polygons showing population counts
style_classes = []
for i, colour in enumerate(colours):
lo = cls[i]
hi = cls[i + 1]
if i == 0:
label = _('0')
else:
label = _('%i - %i') % (lo, hi)
entry = dict(label=label, colour=colour, min=lo, max=hi,
transparency=0, size=1)
style_classes.append(entry)
# Override style info with new classes and name
style_info = dict(target_field=self.target_field,
style_classes=style_classes,
legend_title=_('Population Count'))
# Create vector layer and return
V = Vector(data=new_attributes,
projection=H.get_projection(),
geometry=H.get_geometry(),
name=_('Population affected by volcanic hazard zone'),
keywords={'impact_summary': impact_summary,
'impact_table': impact_table,
'map_title': map_title},
style_info=style_info)
return V
| gpl-3.0 | 2,129,394,776,019,534,600 | 37.956989 | 77 | 0.532846 | false | 4.453596 | false | false | false |
sixpearls/django-mediacracy | mediacracy/widgets.py | 1 | 1103 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from django import forms
from django.core import urlresolvers
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from markitup.widgets import AdminMarkItUpWidget, MarkItUpWidget
from django.conf import settings as site_settings
use_mm = False
if 'massmedia' in site_settings.INSTALLED_APPS:
use_mm = True
class TextifyMarkitupAdminWidget(AdminMarkItUpWidget):
def render(self,*args,**kwargs):
attrs_copy = kwargs['attrs'].copy()
html = super(MarkItUpWidget,self).render(*args,**kwargs)
html += '<script type="text/javascript">'
html += render_to_string('mediacracy/markitup_helper.js',{ 'id': attrs_copy['id'], 'use_mm': use_mm })
html += '</script>'
return mark_safe(html)
def _media(self):
return super(TextifyMarkitupAdminWidget,self).media + forms.Media(
css={'all': ('mediacracy/markitup/markitup_helper.css',),},
js=("mediacracy/js/mediacracy_ajax_csrf.js",)
)
media = property(_media)
| bsd-2-clause | 3,709,738,566,038,608,400 | 34.580645 | 110 | 0.672711 | false | 3.512739 | false | false | false |
dmsovetov/dreemchest | Source/CLI/command_line/env.py | 1 | 5399 | #################################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Dmitry Sovetov
#
# https://github.com/dmsovetov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#################################################################################
import os
import tempfile
# Environment variable that points to a Dreemchest home directory
DREEMCHEST_HOME = 'DREEMCHEST_HOME'
# Environemt variable that points to a CMake folder used by Dreemchest
DREEMCHEST_CMAKE = 'DREEMCHEST_CMAKE'
# Environment variable that points to a CMake bin folder
DREEMCHEST_CMAKE_BIN = 'DREEMCHEST_CMAKE_BIN'
# Environment variable that points to Android SDK used by Dreemchest
DREEMCHEST_ANDROID = 'DREEMCHEST_ANDROID'
# Environment variable that points to Emscripten SDK used by Dreemchest
DREEMCHEST_EMSCRIPTEN = 'DREEMCHEST_EMSCRIPTEN'
class Configuration:
"""An active environment configuration"""
def __init__(self):
# Load home directory
if DREEMCHEST_HOME not in os.environ.keys():
raise Exception("'%s' environment variable should point to a Dreemchest home directory." % DREEMCHEST_HOME)
        self._home = os.environ[DREEMCHEST_HOME]
# Load CMake directory
if DREEMCHEST_CMAKE_BIN not in os.environ.keys():
raise Exception("'%s' environment variable should point to a CMake directory." % DREEMCHEST_CMAKE_BIN)
self._cmake = os.environ[DREEMCHEST_CMAKE_BIN]
# Load Android SDK directory
self._android = None
if DREEMCHEST_ANDROID in os.environ.keys():
self._android = os.environ[DREEMCHEST_ANDROID]
# Load Emscripten SDK directory
self._emscripten = None
if DREEMCHEST_EMSCRIPTEN in os.environ.keys():
self._emscripten = os.environ[DREEMCHEST_EMSCRIPTEN]
@property
def home(self):
"""Returns the Dreemchest home directory"""
return self._home
@property
def cmake(self):
"""Returns CMake home directory"""
return self._cmake
@property
def emscripten(self):
"""Returns the Emscripten SDK home directory"""
return self._emscripten
@property
def emscripten_toolchain(self):
if self.emscripten is None:
return None
return os.path.join(self.emscripten, 'cmake', 'Modules', 'Platform', 'Emscripten.cmake')
@property
def ios_toolchain(self):
"""Returns an iOS toolchain file"""
return os.path.join(self.home, 'CMake', 'Toolchains', 'iOS.cmake')
@property
def android_toolchain(self):
"""Returns an Android toolchain file"""
return os.path.join(self.home, 'CMake', 'Toolchains', 'Android.cmake')
@property
def android(self):
"""Returns the Android SDK home directory"""
return self._android
@property
def android_ndk(self):
"""Returns the Android NDK home directory"""
return os.path.join(self.android, 'ndk-bundle')
@property
def dependencies(self):
"""Returns a directory where all precompiled dependencies are stored"""
return os.path.join(self.build_dir, 'Dependencies')
@property
def build_dir(self):
"""Returns a build directory"""
return os.path.join(self.home, 'Build')
@property
def prebuilt(self):
"""Returns an prebuilt directory path"""
return os.path.join(self.build_dir, 'Prebuilt')
@property
def externals(self):
"""Returns externals source directory"""
return os.path.join(self.home, 'Externals')
@property
def source(self):
"""Returns engine source directory"""
return os.path.join(self.home, 'Source')
@property
def projects(self):
"""Returns the Projects directory path"""
return os.path.join(self.home, 'Projects')
@property
def bootstrap_temp_dir(self):
"""Returns a temporary directory where to store all intermediate artifacts for bootstrap process"""
return os.path.join(tempfile.gettempdir(), 'Bootstrap')
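# Usage sketch (assumes the DREEMCHEST_HOME and DREEMCHEST_CMAKE_BIN environment
# variables are set before this module is used):
#   config = load()
#   print(config.home, config.source, config.build_dir)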
def load():
"""Loads an active configuration from environment"""
return Configuration() | mit | -1,461,173,353,538,070,500 | 33.394904 | 119 | 0.667346 | false | 4.087055 | true | false | false |
wordpress-mobile/WordPress-iOS | Scripts/localize.py | 1 | 5499 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This program is free software. It comes without any warranty, to
# the extent permitted by applicable law. You can redistribute it
# and/or modify it under the terms of the Do What The Fuck You Want
# To Public License, Version 2, as published by Sam Hocevar. See
# http://sam.zoy.org/wtfpl/COPYING for more details.
#
# Localize.py - Incremental localization on XCode projects
# João Moreno 2009
# http://joaomoreno.com/
from sys import argv
from codecs import open
from re import compile
from copy import copy
import os
re_translation = compile(r'^"(.+)" = "(.+)";$')
re_comment_single = compile(r'^/(/.*|\*.*\*/)$')
re_comment_start = compile(r'^/\*.*$')
re_comment_end = compile(r'^.*\*/$')
def print_help():
print u"""Usage: merge.py merged_file old_file new_file
Xcode localizable strings merger script. João Moreno 2009."""
class LocalizedString():
def __init__(self, comments, translation):
self.comments, self.translation = comments, translation
self.key, self.value = re_translation.match(self.translation).groups()
def __unicode__(self):
return u'%s%s\n' % (u''.join(self.comments), self.translation)
class LocalizedFile():
def __init__(self, fname=None, auto_read=False):
self.fname = fname
self.strings = []
self.strings_d = {}
if auto_read:
self.read_from_file(fname)
def read_from_file(self, fname=None):
fname = self.fname if fname == None else fname
try:
f = open(fname, encoding='utf_16', mode='r')
except:
print 'File %s does not exist.' % fname
exit(-1)
line = f.readline()
while line and line == u'\n':
line = f.readline()
while line:
comments = [line]
if not re_comment_single.match(line):
while line and not re_comment_end.match(line):
line = f.readline()
comments.append(line)
line = f.readline()
if line and re_translation.match(line):
translation = line
else:
raise Exception('invalid file: %s' % line)
line = f.readline()
while line and line == u'\n':
line = f.readline()
string = LocalizedString(comments, translation)
self.strings.append(string)
self.strings_d[string.key] = string
f.close()
def save_to_file(self, fname=None):
fname = self.fname if fname == None else fname
try:
f = open(fname, encoding='utf_16', mode='w')
except:
print 'Couldn\'t open file %s.' % fname
exit(-1)
for string in self.strings:
f.write(string.__unicode__())
f.close()
def merge_with(self, new):
merged = LocalizedFile()
for string in new.strings:
if self.strings_d.has_key(string.key):
new_string = copy(self.strings_d[string.key])
new_string.comments = string.comments
string = new_string
merged.strings.append(string)
merged.strings_d[string.key] = string
return merged
def merge(merged_fname, old_fname, new_fname):
try:
old = LocalizedFile(old_fname, auto_read=True)
new = LocalizedFile(new_fname, auto_read=True)
except Exception as e:
print 'Error: input files have invalid format. old: %s, new: %s' % (old_fname, new_fname)
        print e
        return
merged = old.merge_with(new)
merged.save_to_file(merged_fname)
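# Example call (paths are illustrative):
#   merge('en.lproj/Localizable.strings',
#         'en.lproj/Localizable.strings.old',
#         'en.lproj/Localizable.strings.new')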
STRINGS_FILE = 'Localizable.strings'
def localize(path, language, include_pods_and_frameworks):
if "Scripts" in path:
print "Must run script from the root folder"
quit()
os.chdir(path)
language = os.path.join(path, language)
original = merged = language + os.path.sep + STRINGS_FILE
old = original + '.old'
new = original + '.new'
# TODO: This is super ugly, we have to come up with a better way of doing it
if include_pods_and_frameworks:
find_cmd = 'find . ../Pods/WordPress* ../Pods/WPMediaPicker ../WordPressShared/WordPressShared ../Pods/Gutenberg -name "*.m" -o -name "*.swift" | grep -v Vendor | grep -v ./WordPressTest/I18n.swift | grep -v ./WordPressStatsWidgets/Views/Localization/LocalizedStringKey+extension.swift | grep -v Secrets.swift'
else:
find_cmd = 'find . -name "*.m" -o -name "*.swift" | grep -v Vendor | grep -v ./WordPressTest/I18n.swift | grep -v ./WordPressStatsWidgets/Views/Localization/LocalizedStringKey+extension.swift | grep -v Secrets.swift'
filelist = os.popen(find_cmd).read().strip().split('\n')
filelist = '"{0}"'.format('" "'.join(filelist))
if os.path.isfile(original):
os.rename(original, old)
os.system('genstrings -q -o "%s" %s' % (language, filelist))
os.rename(original, new)
merge(merged, old, new)
os.remove(new)
os.remove(old)
else:
os.system('genstrings -q -o "%s" %s' % (language, filelist))
if __name__ == '__main__':
basedir = os.getcwd()
localize(os.path.join(basedir, 'WordPress'), 'Resources/en.lproj', True)
localize(os.path.join(basedir, 'WordPress', 'WordPressTodayWidget'), 'Base.lproj', False)
localize(os.path.join(basedir, 'WordPress', 'WordPressShareExtension'), 'Base.lproj', False)
| gpl-2.0 | 3,877,940,906,966,676,000 | 33.791139 | 318 | 0.599418 | false | 3.611695 | false | false | false |
cts-admin/cts | cts/members/management/commands/send_renewal_emails.py | 1 | 1296 | import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.template.loader import render_to_string
from ...models import CorporateMember
from home.tasks import mail_task
class Command(BaseCommand):
def handle(self, *args, **options):
thirty_days_from_now = datetime.date.today() + datetime.timedelta(days=30)
for member in CorporateMember.objects.filter(inactive=False):
if member.get_expiry_date() == thirty_days_from_now:
mail_task(
'Expiring Conservation Technology Solutions Membership for %s' % member.display_name,
render_to_string('members/corporate_member_renewal_email.txt', {
'contact_name': member.contact_name,
'member_name': member.display_name,
'expiry_date': member.get_expiry_date(),
'renewal_link': member.get_renewal_link(),
}),
settings.DEFAULT_FROM_EMAIL,
[
settings.DEFAULT_FROM_EMAIL,
member.contact_email,
'[email protected]'
],
)
| gpl-3.0 | 1,292,665,148,696,779,800 | 40.806452 | 105 | 0.56713 | false | 4.5 | false | false | false |
kmike/django-admin-user-stats | admin_user_stats/base_modules.py | 1 | 2779 | # -*- coding: utf-8 -*-
from datetime import timedelta
from django.db.models import Count
from django.utils.translation import ugettext_lazy as _
try:
from django.utils.timezone import now
except ImportError:
from datetime import datetime
now = datetime.now
from qsstats import QuerySetStats
from admin_tools.dashboard import modules
class BaseChart(modules.DashboardModule):
"""
Dashboard module with user registration charts.
With default values it is suited best for 2-column dashboard layouts.
"""
title = _('Registration chart')
template = 'admin_user_stats/modules/chart.html'
chart_size = "580x100"
days = None
values_count = 30
interval = 'days'
queryset = None
date_field = 'date_joined'
aggregate = Count('id')
def is_empty(self):
return False
def __init__(self, *args, **kwargs):
super(BaseChart, self).__init__(*args, **kwargs)
if self.days is None:
self.days = {'days': self.values_count, 'weeks': self.values_count*7, 'months': self.values_count*30, 'years': self.values_count*365}[self.interval]
self.data = self.get_data(self.interval, self.days)
self.prepare_template_data(self.data)
def get_caption(self, dt):
return {
'days': dt.day,
'months': dt.strftime("%b"),
'weeks': dt.strftime('%W'),
'years': dt.strftime('%Y'),
}[self.interval]
# @cached(60*5)
def get_data(self, interval, days):
""" Returns an array with new users count per interval """
stats = QuerySetStats(self.queryset, self.date_field, aggregate = self.aggregate)
today = now()
begin = today - timedelta(days=days-1)
return stats.time_series(begin, today+timedelta(days=1), interval)
def prepare_template_data(self, data):
""" Prepares data for template (it is passed as module attributes) """
self.captions = [self.get_caption(t[0]) for t in data]
self.values = [t[1] for t in data]
self.max_value = max(self.values)
class BaseCharts(modules.Group):
""" Group module with 3 default registration charts """
title = _('New Users')
chart_model = BaseChart
def __init__(self, *args, **kwargs):
kwargs.setdefault('children', self.get_charts())
super(BaseCharts, self).__init__(*args, **kwargs)
def get_charts(self):
""" Returns 3 basic chart modules (per-day, per-week and per-month) """
return [
self.chart_model(_('By Day'), interval='days'),
self.chart_model(_('By Week'), interval='weeks'),
self.chart_model(_('By Month'), interval='months'),
self.chart_model(_('By Year'), interval='years'),
]
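# Usage sketch (the model import and subclass names are illustrative, not part
# of this module):
#   from django.contrib.auth.models import User
#
#   class UserRegistrationChart(BaseChart):
#       title = _('User registrations')
#       queryset = User.objects.all()
#       date_field = 'date_joined'
#
#   class UserRegistrationCharts(BaseCharts):
#       title = _('New users')
#       chart_model = UserRegistrationChart
#
# The group can then be appended to an admin_tools dashboard's children, e.g.
# in Dashboard.init_with_context(): self.children.append(UserRegistrationCharts())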
| mit | 5,017,720,829,543,766,000 | 32.481928 | 160 | 0.617488 | false | 3.854369 | false | false | false |
eirannejad/pyRevit | pyrevitlib/rpw/__revit.py | 1 | 5945 | """
The main rpw namespace and rpw.revit provide you with most of the imports will
need.
>>> from rpw import revit, db, ui
>>> db.Element(SomeElement)
>>> ui.Selection()
>>> revit.doc
>>> revit.uidoc.ActiveView
Revit Namespaces are also available:
>>> from rpw import DB, UI
>>> DB.ElementId(00000)
>>> UI.TaskDialog
In summary, if you use rpw, this could potentially be the only import line
you would need:
>>> from rpw import revit, db, ui, DB, UI
""" #
import rpw
from rpw.utils.dotnet import clr, Process
from rpw.utils.logger import logger
from rpw.base import BaseObject
class Revit(BaseObject):
"""
Revit Application Wrapper
Note:
The module path for the Revit Wrapper and its namespaces is ``rpw.__revit.Revit``.
However, the ``Revit()`` is always instantiated on the initialization of rpw,
and is stored along with the ``DB`` and ``UI`` namespaces in the
root of rpw module.
In other words, to use this wrapper all you need is to import
``from rpw import revit``
>>> from rpw import revit
>>> revit.doc
<Autodesk.Revit.DB.Document>
>>> revit.username
gtalarico
>>> revit.host
'Dynamo'
"""
class HOSTS():
RPS = 'RPS'
DYNAMO = 'Dynamo'
def __init__(self):
try:
self.uiapp = __revit__
self._host = Revit.HOSTS.RPS
except NameError:
try:
# Try Getting handler from Dynamo RevitServices
self.uiapp = self.find_dynamo_uiapp()
self._host = Revit.HOSTS.DYNAMO
except Exception as errmsg:
logger.warning('Revit Application handle could not be found')
try:
# Add DB UI Import to globals so it can be imported by rpw
clr.AddReference('RevitAPI')
clr.AddReference('RevitAPIUI')
from Autodesk.Revit import DB, UI
globals().update({'DB': DB, 'UI': UI})
except Exception:
# Replace Globals with Mock Objects for Sphinx and ipy direct exec.
logger.warning('RevitAPI References could not be added')
from rpw.utils.sphinx_compat import MockObject
globals().update({'DB': MockObject(fullname='Autodesk.Revit.DB'),
'UI': MockObject(fullname='Autodesk.Revit.DB')})
self.uiapp = MockObject(fullname='Autodesk.Revit.UI.UIApplication')
self._host = None
def find_dynamo_uiapp(self):
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
import sys
sys.path.append(r'C:\Program Files (x86)\IronPython 2.7\Lib')
return DocumentManager.Instance.CurrentUIApplication
@property
def host(self):
""" Host is set based on how revit handle was found.
Returns:
Host (str): Revit Application Host ['RPS', 'Dynamo']
"""
return self._host
def open(self, path):
""" Opens New Document """
@property
def doc(self):
""" Returns: uiapp.ActiveUIDocument.Document """
return getattr(self.uiapp.ActiveUIDocument, 'Document', None)
@property
def uidoc(self):
""" Returns: uiapp.ActiveUIDocument """
return getattr(self.uiapp, 'ActiveUIDocument', None)
@property
def active_view(self):
""" Returns: uidoc.ActiveView """
return rpw.db.Element(self.uidoc.ActiveView)
@active_view.setter
def active_view(self, view_reference):
self.uidoc.ActiveView = view_reference
@property
def app(self):
""" Returns: uidoc.Application """
return self.uiapp.Application
@property
def docs(self):
""" Returns: uidoc.Application.Documents """
return [doc for doc in self.app.Documents]
@property
def username(self):
""" Returns: uidoc.Application.Username """
return self.uiapp.Application.Username
@property
def version(self):
""" Returns: uidoc.Application.Username """
return RevitVersion(self.uiapp)
@property
def process(self):
""" Returns: Process.GetCurrentProcess() """
return Process.GetCurrentProcess()
@property
def process_id(self):
""" Returns: Process.GetCurrentProcess() """
return self.process.Id
@property
def process_name(self):
""" Returns: Process.GetCurrentProcess() """
return self.process.ProcessName
def __repr__(self):
return '<{version} [{process}:{pid}]>'.format(version=self.version,
process=self.process_name,
pid=self.process_id)
# Check what this is
# @property
# def process(self):
# clr.AddReferenceByPartialName('System.Windows.Forms')
# # noinspection PyUnresolvedReferences
# from System.Windows.Forms import Screen
# return Screen.FromHandle(Process.GetCurrentProcess().MainWindowHandle)
class RevitVersion():
def __init__(self, uiapp):
self.uiapp = uiapp
@property
def year(self):
return self.uiapp.Application.VersionNumber
@property
def name(self):
return self.uiapp.Application.VersionName
@property
def build(self):
return self.uiapp.Application.VersionBuild
def __lt__(self, other):
""" Handle Version Comparison Logic"""
        raise NotImplementedError
def __gt__(self, other):
""" Handle Version Comparison Logic"""
        raise NotImplementedError
def __repr__(self):
return '<Version: {year}: {build}>'.format(year=self.name,
build=self.build)
def __str__(self):
return '{name}:{build}'.format(name=self.name, build=self.build)
revit = Revit()
| gpl-3.0 | 7,026,033,917,477,913,000 | 28.142157 | 90 | 0.601346 | false | 4.035981 | false | false | false |
LAST-EBD/Consultas | RandomlyMovePolygons.py | 1 | 5556 | import fiona, shapely, logging, sys, os, random
from shapely import affinity, speedups
from shapely.geometry import mapping, shape, Polygon
speedups.enable()
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
class randomly_move():
    '''This class randomly moves a set of polygons so that they end up inside a frame shapefile.'''
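    # Usage sketch (paths are illustrative):
    #   mover = randomly_move(r'C:\data\plots.shp', r'C:\data\frame.shp')
    #   mover.new_geom()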
def __init__(self, shape, marco):
self.shape = shape
self.marco = marco
self.out = os.path.join(r'O:\consultas\shapes_moved', os.path.split(self.shape)[1])
self.moves = {0: 'NW', 1: 'NE', 2: 'SW', 3: 'SE'}
self.rndm = random.randrange(0,4)
self.diffX = 0
self.diffY = 0
self.count = 1
        print('shape:', self.shape, '\nframe: ', self.marco, '\noutput:', self.out)
        print('Moving towards:', self.moves[self.rndm])
def get_extent_shapely(self, shp):
shp = fiona.open(shp)
#print('zone:', self.moves[self.rndm])
#GETTING THE GEOMETRY (COORDINATES)
feature1 = shp.next()
geom1 = feature1['geometry']
a1 = Polygon(geom1['coordinates'][0])
Oeste, Este, Norte, Sur = a1.bounds[0], a1.bounds[2], a1.bounds[3], a1.bounds[1]
#return(Oeste, Este, Norte, Sur)
move = {'NW': (Norte, Oeste), 'NE': (Norte, Este), 'SW': (Sur, Oeste), 'SE': (Sur, Este)}
return move[self.moves[self.rndm]]
def get_diff(self):
        ''' This method computes the difference between the shape's coordinates and the frame
        (in whichever random direction was chosen). It returns a tuple (X, Y) with the difference,
        so get_diff()[0] is the difference in X and get_diff()[1] is the difference in Y.'''
frameM = self.get_extent_shapely(self.marco)
#print(frameM)
NorteM, OesteM = frameM[0], frameM[1]
frameS = self.get_extent_shapely(self.shape)
#print(frameS)
NorteS, OesteS = frameS[0], frameS[1]
self.diffX = OesteM - OesteS
self.diffY = NorteM - NorteS
return(self.diffX, self.diffY)
def new_geom(self):
with fiona.open(self.shape, 'r') as source:
# **source.meta is a shortcut to get the crs, driver, and schema
# keyword arguments from the source Collection.
with fiona.open(self.out, 'w', **source.meta) as sink:
for f in source:
#print(f)
try:
feature1 = f['geometry']['coordinates'][0]
#geom1 = feature1['geometry']['coordinates']
#print(feature1)
#coords = geom1['coordinates'][0]
                        # Compute a random offset to move the shape
X_offset = random.uniform(0.1, self.get_diff()[0])
Y_offset = random.uniform(0.1, self.get_diff()[1])
#print(X_offset, Y_offset)
                        # Build the new list of coordinates for the moved shape
#geom2 = [(X_offset + i[0], Y_offset + i[1]) for i in feature1]
new_shape = Polygon(feature1)
                        # Rotate the shape by a random angle
rotated_a = affinity.rotate(new_shape, random.randint(0, 360))
                        # Translate it with shapely (both approaches work)
rotated_b = shapely.affinity.translate(rotated_a, X_offset, Y_offset)
                        # Check that it lies inside the frame without intersecting it
if self.check(rotated_b) == True:
f['geometry'] = mapping(rotated_b)
sink.write(f)
else:
self.count += 1
f['geometry'] = mapping(rotated_b)
sink.write(f)
self.new_geom()
                            #print('intersects')
except Exception as e:
# Writing uncleanable features to a different shapefile
# is another option.
print('error', e)
logging.exception("Error cleaning feature %s:", f['id'])
def check(self, ncoords):
        '''Check whether the given geometry (ncoords) is contained inside the frame shapefile.'''
shape2 = fiona.open(self.marco)
feature2 = shape2.next()
geom2 = feature2['geometry']['coordinates'][0]
a2 = Polygon(geom2)
return(ncoords.within(a2))
def run(self):
        # Build the input shape's polygon and test whether it lies inside the frame
        shp = fiona.open(self.shape)
        feature = shp.next()
        poly = Polygon(feature['geometry']['coordinates'][0])
        if self.check(poly):
            print('The shape is completely inside the frame. The difference is:')
            print(self.get_diff())
        else:
            print('The shape is not inside the frame') | mit | 1,503,735,368,378,538,500 | 37.324138 | 125 | 0.474622 | false | 3.932059 | false | false | false |
wadester/wh_test_py | gdbm_test.py | 1 | 1061 | #!/usr/bin/env python
# Module: gdbm_test.py
# Purpose: gdbm test
# Date: N/A
# Notes:
# 1) Reference:
# https://docs.python.org/2/library/gdbm.html
#
import gdbm
import random as r
mydb="testdb.gdbm"
rcnt=10
print "creating test db",mydb, "with ", rcnt, "records"
db=gdbm.open(mydb, 'c')
for x in range(0,rcnt):
key="%03d" % x
val="%10f" % r.random()
print "K[v]=", key, '[', val, ']'
db[key]=val
print "using some dict methods"
keys=db.keys()
print "Keys=", keys
ll=len(db)
print "DB size=", ll
print "testing for key 000"
if ('000' in db):
print "Key 000 found"
else:
print "key 000 not found"
print "deleting key"
del db['000']
if ('000' in db):
print "Key 000 found"
else:
print "key 000 not found"
ll=len(db)
print "DB size=", ll
# shrink the DB
db.reorganize()
db.close()
print "opening and looking through all keys"
db=gdbm.open(mydb, 'r')
# use iteritems with anydbm
#for k,v in db.iteritems():
k=db.firstkey()
while k != None:
v=db[k]
print k, ' ', v
k = db.nextkey(k)
db.close()
| gpl-2.0 | 1,135,733,845,140,344,400 | 15.578125 | 55 | 0.618285 | false | 2.581509 | false | false | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/py_compile.py | 1 | 6128 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: py_compile.py
"""Routine to "compile" a .py file to a .pyc (or .pyo) file.
This module has intimate knowledge of the format of .pyc files.
"""
import __builtin__
import imp
import marshal
import os
import sys
import traceback
MAGIC = imp.get_magic()
__all__ = [
'compile', 'main', 'PyCompileError']
class PyCompileError(Exception):
"""Exception raised when an error occurs while attempting to
compile the file.
To raise this exception, use
raise PyCompileError(exc_type,exc_value,file[,msg])
where
exc_type: exception type to be used in error message
    type name can be accessed as class variable
    'exc_type_name'
    exc_value: exception value to be used in error message
    can be accessed as class variable 'exc_value'
    file: name of file being compiled to be used in error message
    can be accessed as class variable 'file'
    msg: string message to be written as error message
    If no value is given, a default exception message will be given,
    consistent with 'standard' py_compile output.
    message (or default) can be accessed as class variable 'msg'
"""
def __init__(self, exc_type, exc_value, file, msg=''):
exc_type_name = exc_type.__name__
if exc_type is SyntaxError:
tbtext = ''.join(traceback.format_exception_only(exc_type, exc_value))
errmsg = tbtext.replace('File "<string>"', 'File "%s"' % file)
else:
errmsg = 'Sorry: %s: %s' % (exc_type_name, exc_value)
Exception.__init__(self, msg or errmsg, exc_type_name, exc_value, file)
self.exc_type_name = exc_type_name
self.exc_value = exc_value
self.file = file
self.msg = msg or errmsg
def __str__(self):
return self.msg
def wr_long(f, x):
"""Internal; write a 32-bit int to a file in little-endian order."""
f.write(chr(x & 255))
f.write(chr(x >> 8 & 255))
f.write(chr(x >> 16 & 255))
f.write(chr(x >> 24 & 255))
def compile(file, cfile=None, dfile=None, doraise=False):
"""Byte-compile one Python source file to Python bytecode.
Arguments:
file: source filename
cfile: target filename; defaults to source with 'c' or 'o' appended
('c' normally, 'o' in optimizing mode, giving .pyc or .pyo)
dfile: purported filename; defaults to source (this is the filename
that will show up in error messages)
doraise: flag indicating whether or not an exception should be
raised when a compile error is found. If an exception
occurs and this flag is set to False, a string
indicating the nature of the exception will be printed,
and the function will return to the caller. If an
exception occurs and this flag is set to True, a
PyCompileError exception will be raised.
Note that it isn't necessary to byte-compile Python modules for
execution efficiency -- Python itself byte-compiles a module when
it is loaded, and if it can, writes out the bytecode to the
corresponding .pyc (or .pyo) file.
However, if a Python installation is shared between users, it is a
good idea to byte-compile all modules upon installation, since
other users may not be able to write in the source directories,
and thus they won't be able to write the .pyc/.pyo file, and then
they would be byte-compiling every module each time it is loaded.
This can slow down program start-up considerably.
See compileall.py for a script/module that uses this module to
byte-compile all installed files (or all files in selected
directories).
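    Example (the filename is illustrative)::
        import py_compile
        py_compile.compile('example.py', doraise=True)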
"""
with open(file, 'U') as f:
try:
timestamp = long(os.fstat(f.fileno()).st_mtime)
except AttributeError:
timestamp = long(os.stat(file).st_mtime)
codestring = f.read()
try:
codeobject = __builtin__.compile(codestring, dfile or file, 'exec')
except Exception as err:
py_exc = PyCompileError(err.__class__, err.args, dfile or file)
if doraise:
raise py_exc
else:
sys.stderr.write(py_exc.msg + '\n')
return
if cfile is None:
cfile = file + (__debug__ and 'c' or 'o')
with open(cfile, 'wb') as fc:
fc.write('\x00\x00\x00\x00')
wr_long(fc, timestamp)
marshal.dump(codeobject, fc)
fc.flush()
fc.seek(0, 0)
fc.write(MAGIC)
return
def main(args=None):
"""Compile several source files.
The files named in 'args' (or on the command line, if 'args' is
not specified) are compiled and the resulting bytecode is cached
in the normal manner. This function does not search a directory
structure to locate source files; it only compiles files named
explicitly. If '-' is the only parameter in args, the list of
files is taken from standard input.
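    Example (module names are illustrative)::
        python -m py_compile module_a.py module_b.py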
"""
if args is None:
args = sys.argv[1:]
rv = 0
if args == ['-']:
while True:
filename = sys.stdin.readline()
if not filename:
break
filename = filename.rstrip('\n')
try:
compile(filename, doraise=True)
except PyCompileError as error:
rv = 1
sys.stderr.write('%s\n' % error.msg)
except IOError as error:
rv = 1
sys.stderr.write('%s\n' % error)
else:
for filename in args:
try:
compile(filename, doraise=True)
except PyCompileError as error:
rv = 1
sys.stderr.write(error.msg)
return rv
if __name__ == '__main__':
sys.exit(main()) | unlicense | 8,292,061,942,146,150,000 | 33.627119 | 84 | 0.598074 | false | 4.036891 | false | false | false |
loveisbug/liveshow-sh | roll.py | 1 | 1679 | # -*- coding: utf-8 -*-
import urllib
from urllib.request import urlopen
import html.parser as h
from bs4 import BeautifulSoup
import sys
import time
import io
import re
reg =re.compile(r'\d+')
list = ['0', '10', '20']
for s in list:
url = ('https://site.douban.com/maosh/widget/events/1441569/?start='+s)
urlrequest = urlopen(url)
parser = BeautifulSoup(urlrequest, "html.parser")
elist = parser.find('div', 'events-list-s').findAll('li', 'item')
for event in elist:
urlevent = event.findNext('a')['href']
with open('aaa.txt', 'a', encoding='utf-8') as detail:
print(urlevent, file=detail)
detailrequest = urlopen(urlevent)
Detailparser = BeautifulSoup(detailrequest, 'html.parser')
DetailInfolist = Detailparser.find('div', 'event-info')
x = DetailInfolist.contents[1]
x1 = DetailInfolist.findAll('div', 'event-detail')
print (DetailInfolist.findNext('h1'). text.strip(),file=detail)
print (DetailInfolist.findNext('li','calendar-str-item ').text,file=detail)
# print(x.find('h1'))
# print (x1[3].reg)
# print (x1[2].text.split('\n').split(' '))
print (x1[2].text.replace('\t','').replace('\n','').replace(' ','').replace('\xa0','').split('\n'), file=detail)
print('\n', file=detail)
        # # This line prints the price; it has a syntax error and would keep the rest of the program from running normally;
# print (DetailInfolist.findNext('span', 'tickets-info-price').text.split(' ')[1]+'\n',file=detail)
# print (DetailInfolist.find(span={itemprop:'tickets-info-price'}).text,file=detail) | mit | -884,590,390,812,692,500 | 41.947368 | 124 | 0.599632 | false | 3.229703 | false | false | false |
nkalodimas/invenio | modules/websearch/lib/search_engine_utils.py | 1 | 7053 | # -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio search engine utilities."""
import string
from invenio.config import CFG_BIBFORMAT_HIDDEN_TAGS
from invenio.dbquery import run_sql
from invenio.intbitset import intbitset
def get_fieldvalues(recIDs, tag, repetitive_values=True, sort=True, split_by=0):
"""
Return list of field values for field TAG for the given record ID
or list of record IDs. (RECIDS can be both an integer or a list
of integers.)
If REPETITIVE_VALUES is set to True, then return all values even
if they are doubled. If set to False, then return unique values
only.
"""
out = []
try:
recIDs = int(recIDs)
except:
pass
if isinstance(recIDs, (int, long)):
recIDs = [recIDs,]
if not isinstance(recIDs, (list, tuple, intbitset)):
return []
if len(recIDs) == 0:
return []
if tag == "001___":
# We have asked for tag 001 (=recID) that is not stored in bibXXx
# tables.
out = [str(recID) for recID in recIDs]
else:
# we are going to look inside bibXXx tables
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
return []
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
if not repetitive_values:
queryselect = "DISTINCT(bx.value)"
else:
queryselect = "bx.value"
if sort:
sort_sql = "ORDER BY bibx.field_number, bx.tag ASC"
else:
sort_sql = ""
def get_res(recIDs):
query = "SELECT %s FROM %s AS bx, %s AS bibx " \
"WHERE bibx.id_bibrec IN (%s) AND bx.id=bibx.id_bibxxx AND " \
"bx.tag LIKE %%s %s" % \
(queryselect, bx, bibx, ("%s,"*len(recIDs))[:-1], sort_sql)
return [i[0] for i in run_sql(query, tuple(recIDs) + (tag,))]
#print not sort and split_by>0 and len(recIDs)>split_by
if sort or split_by<=0 or len(recIDs)<=split_by:
return get_res(recIDs)
else:
return [i for res in map(get_res, zip(*[iter(recIDs)]*split_by)) for i in res]
return out
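# Illustrative usage (assumes a populated Invenio installation; the tag values
# below are ordinary MARC field/subfield codes, shown only as examples):
#   get_fieldvalues(10, "245__a")        # -> e.g. ['Some record title']
#   get_fieldvalues([10, 11], "100__a")  # -> author subfields for both records
#   get_fieldvalues(10, "001___")        # -> ['10'], i.e. the recID itself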
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
"""Return buffer of ALEPH sequential-like textual format with fields found
in the list TAGS_IN for record RECID.
If can_see_hidden is True, just print everything. Otherwise hide fields
from CFG_BIBFORMAT_HIDDEN_TAGS.
"""
out = ""
if type(tags_in) is not list:
tags_in = [tags_in,]
if len(tags_in) == 1 and len(tags_in[0]) == 6:
## case A: one concrete subfield asked, so print its value if found
## (use with care: can mislead if field has multiple occurrences)
out += string.join(get_fieldvalues(recID, tags_in[0]),"\n")
else:
## case B: print our "text MARC" format; works safely all the time
# find out which tags to output:
dict_of_tags_out = {}
if not tags_in:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
else:
for tag in tags_in:
if len(tag) == 0:
for i in range(0, 10):
for j in range(0, 10):
dict_of_tags_out["%d%d%%" % (i, j)] = 1
elif len(tag) == 1:
for j in range(0, 10):
dict_of_tags_out["%s%d%%" % (tag, j)] = 1
elif len(tag) < 5:
dict_of_tags_out["%s%%" % tag] = 1
                else:    # len(tag) >= 5; keep only the first five characters
dict_of_tags_out[tag[0:5]] = 1
tags_out = dict_of_tags_out.keys()
tags_out.sort()
# search all bibXXx tables as needed:
for tag in tags_out:
digits = tag[0:2]
try:
intdigits = int(digits)
if intdigits < 0 or intdigits > 99:
raise ValueError
except ValueError:
# invalid tag value asked for
continue
if tag.startswith("001") or tag.startswith("00%"):
if out:
out += "\n"
out += "%09d %s %d" % (recID, "001__", recID)
bx = "bib%sx" % digits
bibx = "bibrec_bib%sx" % digits
query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
"WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
"ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
res = run_sql(query, (recID, str(tag)+'%'))
# go through fields:
field_number_old = -999
field_old = ""
for row in res:
field, value, field_number = row[0], row[1], row[2]
ind1, ind2 = field[3], field[4]
printme = True
#check the stuff in hiddenfields
if not can_see_hidden:
for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
ltag = len(htag)
samelenfield = field[0:ltag]
if samelenfield == htag:
printme = False
if ind1 == "_":
ind1 = ""
if ind2 == "_":
ind2 = ""
# print field tag
if printme:
if field_number != field_number_old or field[:-1] != field_old[:-1]:
if out:
out += "\n"
out += "%09d %s " % (recID, field[:5])
field_number_old = field_number
field_old = field
# print subfield value
if field[0:2] == "00" and field[-1:] == "_":
out += value
else:
out += "$$%s%s" % (field[-1:], value)
return out
| gpl-2.0 | 3,573,369,764,786,767,400 | 37.752747 | 90 | 0.510563 | false | 3.798061 | false | false | false |
comptech/atrex | Software/myPeakTableWidget.py | 1 | 3328 |
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.uic import *
from myPeaks import *
from myDetector import *
from peakEditDlg import *
class myPeakTableWidget (QTableWidget) :
numPeaks = 0
#headList = QString("Num;H;K;L;len(XYZ)^-1;2-theta;Gonio[5];nu").split(";")
headList = ['Num','H','K','L','len(xyz)^-1','2-theta','Gonio[5]','nu']
myDet = 0
peaks = 0
imname =''
numPeaks = 0
def __init__(self, parent=None) :
QTableWidget.__init__(self, parent)
hhead = QHeaderView (Qt.Horizontal)
hhead.setVisible(False)
self.setHorizontalHeader (hhead)
self.setColumnCount (8)
self.setRowCount(8)
#self.setHorizontalHeader (hhead)
# peak table widget
#hhead.setVisible(True)
self.setHorizontalHeaderLabels(self.headList)
#for i in range(8) :
#for j in range(8) :
#newItem = QTableWidgetItem("%d %d"%(j,i))
#self.setItem(i, j, newItem)
self.setSelectionBehavior (QAbstractItemView.SelectRows)
self.cellDoubleClicked.connect (self.peakEdit)
def setImageFileName (self, imname) :
self.imfile = imname
def setDetector (self, det) :
self.myDet = det
def setPeaks (self, peaks) :
self.peaks = peaks
count = 0
self.setRowCount (len(peaks))
for p in peaks :
# redo this to match IDL routine
str = '%d'%count
self.setItem (count, 0, QTableWidgetItem(str))
str = '%d'%p.HKL[0]
self.setItem (count, 1, QTableWidgetItem(str))
str = '%d'%p.HKL[1]
self.setItem (count, 2, QTableWidgetItem(str))
str = '%d'%p.HKL[2]
self.setItem (count, 3, QTableWidgetItem(str))
val = vlength (p.XYZ)
xy = p.DetXY
str = '%.2f'%(1./val)
self.setItem (count, 4, QTableWidgetItem(str))
str = '%.2f'%p.tth
self.setItem (count, 5, QTableWidgetItem(str))
#tthval = self.myDet.calculate_tth_from_pixels(xy, self.myDet.gonio)
# xyz = self.myDet.calculate_xyz_from_pixels (xy, self.myDet.gonio)
str = '%.3f'%p.Gonio[5]
self.setItem (count, 6, QTableWidgetItem(str))
str = '%.3f'%p.nu
self.setItem (count, 7, QTableWidgetItem(str))
count = count + 1
self.numPeaks = count
self.resizeColumnsToContents()
def addPeak (self, peak) :
xy = peak.DetXY
str = '%d'%xy[0]
self.setItem (self.numPeaks, 0, QTableWidgetItem(str))
str = '%d'%xy[1]
self.setItem (self.numPeaks, 1, QTableWidgetItem(str))
tthval = self.myDet.calculate_tth_from_pixels(xy, self.myDet.gonio)
str = '%f'%tthval
self.setItem (self.numPeaks, 2, QTableWidgetItem(str))
self.numPeaks += 1
""" peakEdit
method called by dbl clicking of the peakTableWidget item
will open a dialog to further edit the peak parameters
"""
def peakEdit (self, row, col):
#open peakEditDlg
curpeak = self.peaks[row]
pedit_dlg = peakEditDlg (curpeak, row)
pedit_dlg.setImageFile (self.imfile)
pedit_dlg.exec_()
| lgpl-3.0 | -1,797,646,228,205,733,000 | 31.627451 | 80 | 0.574219 | false | 3.395918 | false | false | false |
Chilledheart/windycode | python/vimsupport.py | 1 | 6475 | #!/usr/bin/env python
import vim
import os
# Given an object, returns a str object that's utf-8 encoded.
def ToUtf8IfNeeded(value):
if isinstance(value, unicode):
return value.encode('utf8')
if isinstance(value, str):
return value
return str(value)
def PresentYesOrNoDialog(message):
return int(vim.eval('confirm("%s", "&Yes\n&No")' % message)) == 1;
def CurrentLineAndColumn():
"""Returns the 0-based current line and 0-based current column."""
# See the comment in CurrentColumn about the calculation for the line and
# column number
line, column = vim.current.window.cursor
return line, column + 1
def CurrentLine():
return vim.current.line
def CurrentBuffer():
return vim.current.buffer
def CurrentBufferFileName():
file_name = vim.current.buffer.name
  if file_name is None:
EchoMessage('empty buffer name')
return file_name
def CurrentFileTypes():
return vim.eval("&filetype").split('.')
#TODO refine this
def EscapeForVim(text):
return text.replace("'", "''")
def FiletypesForBuffer(buffer_object):
# NOTE: Getting &ft for other buffers only works when the buffer has been
# visited by the user at least once, which is true for modified buffers
return GetBufferOption(buffer_object, 'ft').split('.')
def GetBufferOption(buffer_object, option):
to_eval = 'getbufvar({0}, "&{1}")'.format(buffer_object.number, option)
return GetVariableValue(to_eval)
def GetVariableValue(variable):
return vim.eval(variable)
def GetBoolValue(variable):
return bool(int(vim.eval(variable)))
def GetIntValue(variable):
return int(vim.eval(variable))
def GetBufferNumberForFilename(filename, open_file_if_needed = True):
return GetIntValue(u"bufnr('{0}', {1})".format(
EscapeForVim(os.path.realpath(filename)),
int(open_file_if_needed)))
# clean all signs for existing buffer
# FIXME clean WdcSigns only
def UnplaceAllSigns():
buffer_num = vim.current.buffer.number
vim.command('sign unplace * buffer=%d' % buffer_num)
def PlaceSignForErrorMessage(buffer_num, index, diagnostic):
if diagnostic['severity'] >= 3:
sign_name = 'WdcError'
else:
sign_name = 'WdcWarning'
vim.command('sign place %d line=%d name=%s buffer=%d' % (index, diagnostic['lnum'], sign_name, buffer_num))
def PlaceSignForErrorMessageArray(diagnostics):
buffer_num = vim.current.buffer.number
index = 1
for line_num in diagnostics:
PlaceSignForErrorMessage(buffer_num, index, diagnostics[line_num])
index += 1
def ConvertDiagnosticsToQfList(diagnostics):
retval = []
num = len(diagnostics);
for i in xrange(0, num):
diagnostic = diagnostics[i]
location = diagnostic.location
line = location.line
column = location.column - 1
    # when the error is "too many errors occurred"
if line == 0 or column == 0:
continue;
retval.append({
'bufnr' : GetBufferNumberForFilename(location.file_name),
'lnum' : line,
'col' : column,
'text' : ToUtf8IfNeeded(diagnostic.spelling),
'full_text' : ToUtf8IfNeeded(diagnostic.full_spelling),
'type' : diagnostic.category,
'valid' : 1,
'severity' : diagnostic.severity
})
return retval
def EchoMessage(text):
for line in str(text).split('\n'):
vim.command('{0} \'{1}\''.format('echom', EscapeForVim(line)))
def EchoText(text):
for line in str(text).split('\n'):
vim.command('{0} \'{1}\''.format('echo', EscapeForVim(line)))
def EchoTextH(text):
for line in str(text).split('\n'):
vim.command('{0} \'{1}\''.format('echoh', EscapeForVim(line)))
def EchoTruncatedText(text):
width = int(vim.eval('&columns')) - 3
if width <= 0:
return
saved_ruler = vim.eval('&ruler')
saved_showcmd = vim.eval('&showcmd')
vim.command('set noruler noshowcmd')
truncated = str(text)[:width]
EchoText(truncated)
  # restore the options that were temporarily disabled above
  vim.command('let &ruler = %s' % saved_ruler)
  vim.command('let &showcmd = %s' % saved_showcmd)
def ClearWdcSyntaxMatches():
matches = vim.eval('getmatches()')
for match in matches:
if match['group'].startswith('Wdc'):
vim.eval('matchdelete({0})'.format(match['id']))
def AddDiagnosticSyntaxMatch(line_num,
column_num,
line_end_num = None,
column_end_num = None,
is_error = True):
group = 'WdcErrorSection' if is_error else 'WdcWarningSection'
if not line_end_num:
line_end_num = line_num
line_num, column_num = LineAndColumnNumbersClamped(line_num, column_num)
line_end_num, column_end_num = LineAndColumnNumbersClamped(line_end_num,
column_end_num)
if not column_end_num:
return GetIntValue(
"matchadd('{0}', '\%{1}l\%{2}c')".format(group, line_num, column_num))
else:
return GetIntValue(
"matchadd('{0}', '\%{1}l\%{2}c\_.\\{{-}}\%{3}l\%{4}c')".format(
group, line_num, column_num, line_end_num, column_end_num))
def LineAndColumnNumbersClamped(line_num, column_num):
new_line_num = line_num
new_column_num = column_num
max_line = len(vim.current.buffer)
if line_num and line_num > max_line:
new_line_num = max_line
max_column = len(vim.current.buffer[new_line_num - 1])
if column_num and column_num > max_column:
new_column_num = max_column
return new_line_num, new_column_num
def GotoOpenedBuffer(filename, line, column):
filepath = os.path.realpath(filename)
for tab in vim.tabpages:
for win in tab.windows:
if win.buffer.name == filepath:
vim.current.tabpage = tab
vim.current.window = win
vim.current.window.cursor = (line, column - 1)
# Center the screen on the jumped-to location
vim.command('normal! zz')
return True
return False
def GotoBuffer(filename, line, column):
# Add an entry to the jumplist
vim.command("normal! m'")
if filename != CurrentBufferFileName():
if GotoOpenedBuffer(filename, line, column):
return
buf = vim.current.buffer;
usable = not buf.options['modified'] or buf.options['bufhidden']
if usable:
command = 'edit'
else:
command = 'split'
vim.command('keepjumps {0} {1}'.format(command, filename.replace( ' ' , r'\ ' )))
vim.current.window.cursor = (line, column - 1)
# Center the screen on the jumped-to location
vim.command('normal! zz')
| bsd-3-clause | -5,430,061,958,010,127,000 | 28.83871 | 109 | 0.658069 | false | 3.353185 | false | false | false |
SKIRT/PTS | modeling/config/expand.py | 1 | 3493 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import the relevant PTS classes and modules
from pts.core.remote.host import find_host_ids
from pts.modeling.core.environment import load_modeling_environment_cwd
from pts.modeling.config.component import definition
# -----------------------------------------------------------------
# Copy the definition
definition = definition.copy()
# -----------------------------------------------------------------
# Set the modeling path
environment = load_modeling_environment_cwd()
runs = environment.fitting_runs
# -----------------------------------------------------------------
# Remote hosts
all_host_ids = find_host_ids()
has_remotes = len(all_host_ids) > 0
# -----------------------------------------------------------------
# The fitting run for which to explore the parameter space
if runs.empty: raise RuntimeError("No fitting runs are present")
elif runs.has_single: definition.add_fixed("run", "name of the fitting run", runs.single_name)
else: definition.add_required("run", "string", "name of the fitting run", choices=runs.names)
# Generation name
definition.add_required("generation", "string", "generation name")
# -----------------------------------------------------------------
# Parameters in which to expand
definition.add_required("parameters", "string_list", "parameters for which to expand the range") # choices are supposed to be the free parameters of a fitting run
definition.add_required("direction", "string_or_string_string_dictionary", "direction in which to expand") #choices=directions)
definition.add_required("npoints", "integer_or_string_integer_dictionary", "number of grid points to add")
# -----------------------------------------------------------------
# Remote or local execution
#if has_remotes: definition.add_positional_optional("remotes", "string_list", "remote hosts to use", default=environment.modeling_configuration.fitting_host_ids, choices=all_host_ids)
#else: definition.add_fixed("remotes", [])
definition.add_positional_optional("host", "host", "remote host to use")
definition.add_flag("local", "run everything locally")
# -----------------------------------------------------------------
# Options
definition.add_flag("attached", "run remote simulations in attached mode")
definition.add_flag("group", "group simulations in larger jobs")
definition.add_optional("walltime", "real", "the preferred walltime per job (for schedulers)")
# -----------------------------------------------------------------
# Update flags
definition.add_flag("update_individuals", "update the individuals table", True)
definition.add_flag("update_parameters", "update the parameters table", True)
definition.add_flag("update_info", "update the generation info", True)
definition.add_flag("update_generations", "update the generations table", True)
# -----------------------------------------------------------------
# Parallelization
definition.add_optional("parallelization", "parallelization", "parallelization scheme for the simulations")
definition.add_optional("nnodes", "positive_integer", "number of computation nodes to use for the simulations")
# -----------------------------------------------------------------
| agpl-3.0 | 8,462,161,034,693,216,000 | 44.947368 | 183 | 0.579324 | false | 4.488432 | false | false | false |
unidesigner/microcircuit | microcircuit/dataset/testconnectome001.py | 1 | 1589 | """ Retrieve C.elegans connectivity from Web and parse appropriately
"""
# see data/
# http://mit.edu/lrv/www/elegans/
from scipy.io import matlab
import os.path as op
import networkx as nx
import microcircuit.constants as const
from microcircuit.connectome import Connectome
mat = matlab.loadmat(op.join(op.dirname(op.abspath(__file__)), 'data', 'ConnOrdered_040903.mat'))
# Keys expected in the loaded .mat file:
# ['A_init_t_ordered', '__header__', '__globals__', 'Q_sorted',
#  'Neuron_ordered', 'Ag_t_ordered', '__version__']
metadata = {'name': 'testconnectome001',
'neuronmap': {}}
for i,label in enumerate(mat['Neuron_ordered']):
metadata['neuronmap'][i+1] = {'name': label[0][0]}
gap = mat['Ag_t_ordered']
gap[94,94]=0.0
gap[106,106]=0.0
gap[216,216]=0.0
graphgap = nx.from_numpy_matrix(gap.todense(), create_using=nx.DiGraph())
graphgap = nx.relabel_nodes(graphgap, (lambda x:x+1))
for u,v,d in graphgap.edges_iter(data=True):
d[const.CONNECTOME_ELECTRICAL_SYNAPSE] = d['weight']
del d['weight']
chem = mat['A_init_t_ordered']
graphchem = nx.from_numpy_matrix(chem.todense(), create_using=nx.DiGraph())
graphchem = nx.relabel_nodes(graphchem, (lambda x:x+1))
for u,v,d in graphchem.edges_iter(data=True):
d[const.CONNECTOME_CHEMICAL_SYNAPSE] = d['weight']
del d['weight']
# TODO: problem with merge
for u,v,d in graphchem.edges_iter(data=True):
# TODO: how does it go over digraphs?
    # In addition
graphgap.add_edge(u,v, {const.CONNECTOME_CHEMICAL_SYNAPSE:d[const.CONNECTOME_CHEMICAL_SYNAPSE]})
testconnectome = Connectome(metadata=metadata['neuronmap'], graph=graphgap) | bsd-3-clause | -2,411,533,777,501,062,000 | 29.576923 | 100 | 0.696665 | false | 2.817376 | false | false | false |
rtrouton/Recipes-for-AutoPkg | Shared_Processors/SubDirectoryList.py | 1 | 3324 | #!/usr/bin/python
#
# Copyright 2013 Jesse Peterson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""See docstring for SubDirectoryList class"""
import os
from autopkglib import Processor, ProcessorError
__all__ = ["SubDirectoryList"]
class SubDirectoryList(Processor):
  '''Recursively lists the files and directories beneath root_path
  for use in other Processors.
'''
input_variables = {
'root_path': {
'description': 'Path to start looking for files.',
'required': True,
},
'suffix_string': {
'description': ("String to append to each found item name in dir."
"Defaults to ','"),
'default': ',',
'required': False,
}
}
output_variables = {
'found_filenames': {
'description': ('String containing a list of all files found '
'relative to root_path, separated by '
'suffix_string.')
},
'found_directories': {
      'description': ('String containing a list of all directories '
'found relative to root_path, separated by '
'suffix_string.')
},
'relative_root': {
'description': ('Relative root path')
}
}
description = __doc__
def main(self):
sip_dirs = [
'usr',
'usr/local',
'private',
'private/etc',
'Library'
]
format_string = '%s' % self.env['suffix_string']
# search_string = ' \'{0}\''
search_string = '{0}'
dir_list = list()
file_list = list()
if not os.path.isdir(self.env['root_path']):
raise ProcessorError("Can't find root path!")
for dirName, subdirList, fileList in os.walk(self.env['root_path']):
relative_path = os.path.relpath(dirName, self.env['root_path'])
# We need to remove the SIP folders so Chef doesn't try to create them
if not relative_path == '.' and not (relative_path in sip_dirs):
dir_list.append(relative_path)
# search_string.format(format_string.join(dirName)).strip()
for fname in fileList:
if '.DS_Store' in fname:
continue
# print('\t%s' % fname)
relpath = os.path.relpath(os.path.join(fname, dirName),
self.env['root_path'])
self.output("Relative path: %s" % relpath)
if relpath == ".":
# we want to avoid prepending './' to files at root dir
relpath = ''
# print "Real relative path: %s" % relpath
file_list.append(os.path.join(relpath, fname))
self.env['found_directories'] = search_string.format(
format_string.join(dir_list)).strip()
self.env['found_filenames'] = search_string.format(
format_string.join(file_list)).strip()
if __name__ == '__main__':
PROCESSOR = SubDirectoryList()
PROCESSOR.execute_shell()
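# Rough usage sketch (hypothetical recipe fragment, shown here as Python data;
# the argument names simply mirror input_variables above):
#
#   {"Processor": "SubDirectoryList",
#    "Arguments": {"root_path": "%RECIPE_CACHE_DIR%/unpacked",
#                  "suffix_string": ","}}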
| bsd-3-clause | -6,133,843,226,604,085,000 | 32.24 | 76 | 0.610409 | false | 3.887719 | false | false | false |
ayiis/python | better_tcp_sniffer.py | 1 | 7186 | # -*- coding:utf-8 -*-
import socket
import struct
import pcap
import traceback
from aytool.common.print_table import PrettyTable
"""
┌──────────────────────────────────┬──────────────────────────────────┬──────────┐
│ Destination Mac │ Source Mac │Ether type│
└──────────────────────────────────┴──────────────────────────────────┴──────────┘
┌──────┬──────┬──────────┬─────────────────────┐ ┌────────────────────────┬──────────────────┐
│ Ver │ IHL │ TOS │ Total length │ │ Source Port │ Destination Port │
├──────┴──────┴──────────┼──────┬──────────────┤ ├────────────────────────┴──────────────────┤
│ Identification │ Flags│FragmentOffset│ │ Sequence Number │
├─────────────┬──────────┼──────┴──────────────┤ ├───────────────────────────────────────────┤
│ TTL │ Protocol │ Header Checksum │ │ Acknowledgement Number │
├─────────────┴──────────┴─────────────────────┤ ├──────┬────────┬────────┬──────────────────┤
│ Source IP │ │ HL │Reserved│ UAPRSF │ Window Size │
├──────────────────────────────────────────────┤ ├──────┴────────┴────────┼──────────────────┤
│ Destination IP │ │ Checksum │ Urgent Pointer │
├──────────────────────────────────────────────┤ ├────────────────────────┴──────────────────┤
│ Options │ │ Options │
└──────────────────────────────────────────────┘ └───────────────────────────────────────────┘
┌─────────┬───────────────────────────────────────────────────────────────────────────────────┐
│ Data Len│ TCP Data│
└─────────┴───────────────────────────────────────────────────────────────────────────────────┘
"""
_eth_addr = lambda a: "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % ((a[0]), (a[1]), (a[2]), (a[3]), (a[4]), (a[5]))
def better_print(packet):
eth_hlen = 14
    # The IP header length is typically 20 bytes plus options
ip_hlen = packet[eth_hlen] << 2 & 0b111100
ip_hdata_raw = packet[eth_hlen: ip_hlen + eth_hlen]
ip_hdata = struct.unpack("!BBHHHBBH4s4s", ip_hdata_raw[:20])
ip_ver = ip_hdata[0] >> 4 # ??
# ip_hlen = ip_hdata[0] << 2 & 0b111100
ip_dlen = ip_hdata[2]
    # The TCP header length is typically 20 bytes plus options
tcp_hlen = packet[eth_hlen + ip_hlen + 2 + 2 + 4 + 4] >> 4 << 2
tcp_hdata_raw = packet[eth_hlen + ip_hlen: eth_hlen + ip_hlen + tcp_hlen]
tcp_hdata = struct.unpack("!HHLLBBHHH", tcp_hdata_raw[:20])
tcp_dlen = ip_dlen - ip_hlen - tcp_hlen
tcp_data = packet[eth_hlen + ip_hlen + tcp_hlen: eth_hlen + ip_dlen]
# ETH
pt = PrettyTable()
pt.add_line((32, 32, 8), [_eth_addr(packet[6:12]), _eth_addr(packet[0:6]), "0x" + packet[12:14].hex()])
# if not(tcp_hdata[0] == 10002 or tcp_hdata[1] == 10002):
# return None
    # right-align the output
print("\n".join(["%s%s" % (x, y) for x, y in zip([" "] * 3, pt.get_table())]))
# IP
pt1 = PrettyTable()
pt1.add_line((4, 4, 8, 16), [ip_ver, ip_hlen, packet[15:16].hex(), ip_dlen])
pt1.add_line((16, 4, 12), ["0x" + packet[18:20].hex(), format(packet[20] >> 5, "03b"), (packet[20] & 0b0001111) << 4 + packet[21]])
pt1.add_line((8, 8, 16), [ip_hdata[5], ip_hdata[6], hex(ip_hdata[7])])
pt1.add_line((32, ), [socket.inet_ntoa(ip_hdata[8])])
pt1.add_line((32, ), [socket.inet_ntoa(ip_hdata[9])])
pt1.add_line((32, ), [("0x" + ip_hdata_raw[20:].hex()) if ip_hlen > 20 else ""])
# TCP
pt2 = PrettyTable()
pt2.add_line((16, 16), [tcp_hdata[0], tcp_hdata[1]])
pt2.add_line((32, ), [tcp_hdata[2]])
pt2.add_line((32, ), [tcp_hdata[3]])
pt2.add_line((4, 6, 6, 16), [tcp_hlen, format(tcp_hdata_raw[2 + 2 + 4 + 4] & 0b1111, "04b") + format(tcp_hdata_raw[2 + 2 + 4 + 4 + 1], "08b")[:2], format(tcp_hdata_raw[2 + 2 + 4 + 4 + 1], "08b")[2:], tcp_hdata[6]])
pt2.add_line((16, 16), [hex(tcp_hdata[7]), tcp_hdata[8]])
pt2.add_line((32, ), [("0x" + tcp_hdata_raw[20:].hex()) if tcp_hlen > 20 else ""])
    # print the IP and TCP tables side by side
print("\n".join(["%s %s" % (x, y) for x, y in zip(pt1.get_table(), pt2.get_table())]))
# DATA
pt3 = PrettyTable()
pt3.add_line((7, 81), [tcp_dlen, tcp_data if tcp_dlen > 0 else ""])
pt3.print_table()
def main():
sniffer = pcap.pcap(name=None, promisc=True, immediate=True, timeout_ms=50)
while True:
try:
ts, packet = next(sniffer, (None, None))
if packet[12:14] == b"\x08\x00" and packet[23:24] == b"\x06": # IP & TCP
better_print(packet)
except Exception:
print(traceback.format_exc())
# #
# TEST #
# #
# Python 3.7 #
# Mac #
if __name__ == "__main__":
main()
| mit | -6,738,308,824,205,303,000 | 42.756522 | 218 | 0.382949 | false | 2.314627 | false | false | false |
mariusbaumann/pyload | module/plugins/hoster/FastixRu.py | 1 | 1986 | # -*- coding: utf-8 -*-
import re
from random import randrange
from urllib import unquote
from module.common.json_layer import json_loads
from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo
class FastixRu(MultiHoster):
__name__ = "FastixRu"
__type__ = "hoster"
__version__ = "0.08"
__pattern__ = r'http://(?:www\.)?fastix\.(ru|it)/file/\w{24}'
__description__ = """Fastix hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Massimo Rosamilia", "[email protected]")]
def getFilename(self, url):
try:
name = unquote(url.rsplit("/", 1)[1])
except IndexError:
name = "Unknown_Filename..."
if name.endswith("..."): # incomplete filename, append random stuff
name += "%s.tmp" % randrange(100, 999)
return name
def setup(self):
self.chunkLimit = 3
def handlePremium(self):
api_key = self.account.getAccountData(self.user)
api_key = api_key['api']
page = self.load("http://fastix.ru/api_v2/",
get={'apikey': api_key, 'sub': "getdirectlink", 'link': self.pyfile.url})
data = json_loads(page)
self.logDebug("Json data", data)
if "error\":true" in page:
self.offline()
else:
self.link = data['downloadlink']
if self.link != self.pyfile.url:
self.logDebug("New URL: %s" % self.link)
if self.pyfile.name.startswith("http") or self.pyfile.name.startswith("Unknown"):
#only use when name wasnt already set
self.pyfile.name = self.getFilename(self.link)
def checkFile(self):
super(FastixRu, self).checkFile()
        if self.checkDownload({"error": "<title>An error occurred while processing your request</title>"}) == "error":
self.retry(wait_time=60, reason=_("An error occurred while generating link"))
getInfo = create_getInfo(FastixRu)
| gpl-3.0 | 5,678,757,568,383,446,000 | 28.205882 | 118 | 0.58711 | false | 3.610909 | false | false | false |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/vta/apps/tsim_example/python/tsim.py | 1 | 1886 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import ctypes
import os.path as osp
from sys import platform
def get_ext():
return ".dylib" if platform == "darwin" else ".so"
def load_dll(dll):
try:
return [ctypes.CDLL(dll, ctypes.RTLD_GLOBAL)]
except OSError:
return []
def load_sw():
cur_path = osp.dirname(osp.abspath(osp.expanduser(__file__)))
sw_libname = "libsw" + get_ext()
sw_lib = osp.join(cur_path, "..", "build", sw_libname)
load_dll(sw_lib)
def init(hw_backend):
"""Init hardware and software shared library for accelerator
Parameters
------------
hw_backend : str
Hardware backend can be verilog or chisel
"""
cur_path = osp.dirname(osp.abspath(osp.expanduser(__file__)))
hw_libname = "libhw" + get_ext()
if hw_backend in ("verilog", "chisel"):
hw_lib = osp.join(cur_path, "..", "hardware", hw_backend, "build", hw_libname)
m = tvm.module.load(hw_lib, "vta-tsim")
load_sw()
f = tvm.get_global_func("tvm.vta.tsim.init")
f(m)
def load_module():
load_sw()
return tvm.get_global_func("tvm.vta.driver")
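# Minimal usage sketch (assumes the matching hardware library has already been
# built under ../hardware/<backend>/build and the sw library under ../build):
#
#   import tsim
#   tsim.init("chisel")      # load hw + sw libraries and register the hw module
#   f = tsim.load_module()   # returns the "tvm.vta.driver" packed function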
| apache-2.0 | -4,434,247,329,592,706,000 | 31.517241 | 86 | 0.677094 | false | 3.441606 | false | false | false |
nerdvegas/rez | src/rez/data/tests/release/build.py | 1 | 1566 | from __future__ import print_function
import shutil
import os.path
import os
import sys
def build(source_path, build_path, install_path, targets):
def _copy(src, dest):
print("copying %s to %s..." % (src, dest))
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
# build
src = os.path.join(source_path, "data")
dest = os.path.join(build_path, "data")
_copy(src, dest)
if "install" not in (targets or []):
return
# install
src = os.path.join(build_path, "data")
dest = os.path.join(install_path, "data")
_copy(src, dest)
if __name__ == '__main__':
build(
source_path=os.environ['REZ_BUILD_SOURCE_PATH'],
build_path=os.environ['REZ_BUILD_PATH'],
install_path=os.environ['REZ_BUILD_INSTALL_PATH'],
targets=sys.argv[1:]
)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 | -7,258,360,811,447,583,000 | 28.54717 | 79 | 0.667944 | false | 3.591743 | false | false | false |
zbqf109/goodo | openerp/service/report.py | 1 | 5140 | # -*- coding: utf-8 -*-
import base64
import logging
import sys
import threading
import openerp
import openerp.report
from openerp import tools
from openerp.exceptions import UserError
import security
_logger = logging.getLogger(__name__)
# TODO: set a maximum report number per user to avoid DOS attacks
#
# Report state:
# False -> True
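# Typical flow, as implemented below: exp_report() spawns a rendering thread and
# immediately returns a report id; the client polls exp_report_get() with that id
# until 'state' is True, at which point the (optionally zlib-compressed,
# base64-encoded) result is returned and the entry is dropped from self_reports.
# exp_render_report() performs the same rendering synchronously.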
self_reports = {}
self_id = 0
self_id_protect = threading.Semaphore()
def dispatch(method, params):
(db, uid, passwd ) = params[0:3]
threading.current_thread().uid = uid
params = params[3:]
if method not in ['report', 'report_get', 'render_report']:
raise KeyError("Method not supported %s" % method)
security.check(db,uid,passwd)
openerp.modules.registry.RegistryManager.check_registry_signaling(db)
fn = globals()['exp_' + method]
res = fn(db, uid, *params)
openerp.modules.registry.RegistryManager.signal_caches_change(db)
return res
def exp_render_report(db, uid, object, ids, datas=None, context=None):
if not datas:
datas={}
if not context:
context={}
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
cr = openerp.registry(db).cursor()
try:
result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
if not result:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
self_reports[id]['result'] = result
self_reports[id]['format'] = format
self_reports[id]['state'] = True
except Exception, exception:
_logger.exception('Exception: %s\n', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self_reports[id]['state'] = True
cr.connection.commit()
cr.close()
return _check_report(id)
def exp_report(db, uid, object, ids, datas=None, context=None):
if not datas:
datas={}
if not context:
context={}
self_id_protect.acquire()
global self_id
self_id += 1
id = self_id
self_id_protect.release()
self_reports[id] = {'uid': uid, 'result': False, 'state': False, 'exception': None}
def go(id, uid, ids, datas, context):
with openerp.api.Environment.manage():
cr = openerp.registry(db).cursor()
try:
result, format = openerp.report.render_report(cr, uid, ids, object, datas, context)
if not result:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException('RML is not available at specified location or not enough data to print!', tb)
self_reports[id]['result'] = result
self_reports[id]['format'] = format
self_reports[id]['state'] = True
except Exception, exception:
_logger.exception('Exception: %s\n', exception)
if hasattr(exception, 'name') and hasattr(exception, 'value'):
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.ustr(exception.name), tools.ustr(exception.value))
else:
tb = sys.exc_info()
self_reports[id]['exception'] = openerp.exceptions.DeferredException(tools.exception_to_unicode(exception), tb)
self_reports[id]['state'] = True
cr.connection.commit()
cr.close()
return True
threading.Thread(target=go, args=(id, uid, ids, datas, context)).start()
return id
def _check_report(report_id):
result = self_reports[report_id]
exc = result['exception']
if exc:
raise UserError('%s: %s' % (exc.message, exc.traceback))
res = {'state': result['state']}
if res['state']:
if tools.config['reportgz']:
import zlib
res2 = zlib.compress(result['result'])
res['code'] = 'zlib'
else:
#CHECKME: why is this needed???
if isinstance(result['result'], unicode):
res2 = result['result'].encode('latin1', 'replace')
else:
res2 = result['result']
if res2:
res['result'] = base64.encodestring(res2)
res['format'] = result['format']
del self_reports[report_id]
return res
def exp_report_get(db, uid, report_id):
if report_id in self_reports:
if self_reports[report_id]['uid'] == uid:
return _check_report(report_id)
else:
raise Exception, 'AccessDenied'
else:
raise Exception, 'ReportNotFound'
| gpl-3.0 | 5,155,048,508,241,609,000 | 34.205479 | 167 | 0.603307 | false | 3.93267 | false | false | false |
energyPATHWAYS/energyPATHWAYS | model_building_tools/create_map_keys_from_drivers/map_key_from_driver.py | 1 | 2834 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 07 19:20:05 2016
@author: ryandrewjones
"""
import sys
import signal
import click
import os
import cPickle as pickle
import energyPATHWAYS.config as cfg
import energyPATHWAYS.util as util
from energyPATHWAYS.pathways_model import PathwaysModel
import energyPATHWAYS.shape as shape
from energyPATHWAYS.outputs import Output
import csv
import time
import datetime
import logging
import cProfile
import traceback
import pandas as pd
# set up a dummy model
path = os.getcwd()
config = 'config.INI'
scenario_id = 1
cfg.initialize_config(path, config, _log_name='log.log')
cfg.primary_geography = 'intersection_id'
model = PathwaysModel(scenario_id, api_run=False)
# model.run(scenario_id, solve_demand=False, solve_supply=False, save_models=False, append_results=False)
demand = model.demand
demand.add_drivers()
existing_geo_map_key_ids, existing_geo_map_key_names = zip(*util.sql_read_table('GeographyMapKeys'))
next_map_key_id = max(existing_geo_map_key_ids)+1
next_geo_map_id = max(util.sql_read_table('GeographyMap', 'id'))+1
###############################################
# user inputs
driver_ids_to_make_map_keys = [
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61]
basis_year_for_map_key = int(cfg.cfgfile.get('case', 'current_year'))
###############################################
# make our new map keys
GeographyMapKeys = [['id', 'name']]
GeographyMap_columns = ['intersection_id', 'geography_map_key_id', 'value', 'id']
GeographyMap = []
for driver_id in driver_ids_to_make_map_keys:
driver = demand.drivers[driver_id]
demand.remap_driver(driver) # remaps to our new super detailed geography
values = util.df_slice(driver.values, basis_year_for_map_key, 'year')
if values.index.nlevels>1:
levels_to_remove = [n for n in values.index.names if n!='intersection_id']
values = util.remove_df_levels(values, levels_to_remove)
new_key_name = driver.name
if new_key_name in existing_geo_map_key_names:
raise ValueError('driver name {} is already in the existing map keys, please rename driver id {}'.format(driver.name, driver.id))
GeographyMapKeys.append([next_map_key_id, new_key_name])
values = values.reset_index()
values['id'] = range(next_geo_map_id, next_geo_map_id+len(values))
values['geography_map_key_id'] = next_map_key_id
GeographyMap.append(values)
next_geo_map_id += len(values)
next_map_key_id+=1
output = pd.concat(GeographyMap)[GeographyMap_columns]
output.to_csv(os.path.join(path, 'outputs', 'GeographyMap.csv'), index=False)
with open(os.path.join(path, 'outputs', 'GeographyMapKeys.csv'), 'wb') as outfile:
csvwriter = csv.writer(outfile, delimiter=',')
for row in GeographyMapKeys:
csvwriter.writerow(row)
| mit | 2,169,229,182,338,889,700 | 25.240741 | 137 | 0.690543 | false | 3.044039 | false | false | false |
nitely/ochDownloader | plugins/filefactory_com/anonym_download.py | 1 | 1919 | #python libs
import logging
logger = logging.getLogger(__name__)
#Libs
from addons.captcha.recaptcha import PluginRecaptcha
#CONNECTION_RETRY = 3
BASE_URL = "http://www.filefactory.com"
WAITING = 60
class PluginDownload(PluginRecaptcha):
def parse(self):
link = self.link
page = self.get_page(link)
err_list = ('All free download slots are in use.', )
self.validate(err_list, page)
#
m_pattern = 'check:[^\']+\'(?P<check>[^\']+)'
m = self.get_match(m_pattern, page, "Captcha not found")
c_pattern = 'Recaptcha\.create[^"]+"(?P<key>[^"]+)'
extra_fields = [("check", m.group('check')), ]
self.recaptcha_post_link = "%s/file/checkCaptcha.php" % BASE_URL
page = self.recaptcha(c_pattern, page, extra_fields)
#
m_pattern = '"path":"(?P<path>.*?)"'
m = self.get_match(m_pattern, page, "No path found")
link2 = "%s%s" % (BASE_URL, m.group('path').replace("\\", ""))
page = self.get_page(link2)
#"all slots are taken" may appear here.
cn_pattern = 'countdown">(?P<count>[^<]+)'
self.countdown(cn_pattern, page, 320, WAITING)
#
file_id = self.link.split("/file/")[-1].split("/")[0]
s_pattern = '<a href="(?P<link>[^"]+/%s/[^"]+)' % file_id
#s_pattern = 'id="downloadLinkTarget[^<]+<a href="(?P<link>[^"]+)'
self.source = self.click(s_pattern, page, False)
def recaptcha_success(self, pattern, page):
#overriden
if '"status":"ok"' in page:
return True
else:
return False
if __name__ == "__main__":
import re
page = """<p id="downloadLinkTarget" style="display: none;">
"""
pattern = 'id="downloadLinkTarget.*?<a href="(?P<link>.*?)"'
m = re.search(pattern, page, re.S)
if m is not None:
print m.groups()
else:
print 'not found' | lgpl-3.0 | -5,499,612,315,312,826,000 | 32.684211 | 74 | 0.549766 | false | 3.314335 | false | false | false |
yaniv14/OpenCommunity | src/acl/default_roles.py | 1 | 3262 | from django.utils.translation import ugettext_lazy as _
class DefaultRoles(object):
VIEWER = 'viewer'
OBSERVER = 'observer'
PARTICIPANT = 'participant'
PROPOSER = 'proposer'
CONTRIBUTOR = 'contributor'
EDITOR = 'editor'
OPERATOR = 'operator'
DECIDER = 'decider'
MANAGER = 'manager'
choices = (
(VIEWER, _('Viewer')),
(OBSERVER, _('Observer')),
(PARTICIPANT, _('Participant')),
(PROPOSER, _('Proposer')),
(CONTRIBUTOR, _('Contributor')),
(EDITOR, _('Editor')),
(OPERATOR, _('Operator')),
(DECIDER, _('Decider')),
(MANAGER, _('Manager')),
)
permissions = {}
permissions[VIEWER] = [
'access_community',
'access_committee',
'viewclosed_issue',
'viewclosed_proposal',
'view_meeting',
]
permissions[OBSERVER] = permissions[VIEWER] + [
'viewopen_issue',
'viewopen_proposal',
'viewupcoming_community',
'vote',
'proposal_board_vote_self',
'vote_ranking',
]
permissions[PARTICIPANT] = permissions[OBSERVER] + [
'view_proposal_in_discussion',
'viewupcoming_draft',
'view_referendum_results',
'view_update_status',
'view_straw_vote_result',
'view_confidential',
]
permissions[PROPOSER] = permissions[PARTICIPANT] + [
'add_proposal',
]
permissions[CONTRIBUTOR] = permissions[PROPOSER] + [
'add_issue',
]
permissions[EDITOR] = permissions[CONTRIBUTOR] + [
'editopen_issue',
'editopen_proposal',
'edittask_proposal',
]
permissions[OPERATOR] = permissions[CONTRIBUTOR] + [
'add_issuecomment',
'edittask_proposal',
'editupcoming_community',
'editparticipants_community',
'editsummary_community', # ???
'invite_member',
'move_to_referendum',
'proposal_board_vote',
]
permissions[DECIDER] = permissions[OPERATOR] + [
'editopen_issuecomment',
'editagenda_community',
'acceptopen_proposal',
'add_meeting', # == Close Meeting
'edit_referendum',
'chairman_vote',
'show_member_profile',
]
permissions[MANAGER] = permissions[DECIDER] + [
'editopen_issue',
'editclosed_issue',
'editclosed_issuecomment',
'editopen_proposal',
'editclosed_proposal',
'acceptclosed_proposal',
]
class DefaultGroups(object):
MEMBER = "member"
BOARD = "board"
SECRETARY = "secretary"
CHAIRMAN = "chairman"
builtin = {
MEMBER: [DefaultRoles.OBSERVER],
BOARD: [DefaultRoles.PARTICIPANT],
SECRETARY: [DefaultRoles.OPERATOR],
CHAIRMAN: [DefaultRoles.DECIDER, DefaultRoles.EDITOR]
}
permissions = {
k: frozenset(
[p for role in roles for p in DefaultRoles.permissions[role]])
for k, roles in builtin.items()
}
CHOICES = (
(MEMBER, _("member")),
(BOARD, _("board")),
(SECRETARY, _("secretary")),
(CHAIRMAN, _("chairman")),
)
ALL_PERMISSIONS = frozenset(
[p for perms in DefaultGroups.permissions.values() for p in perms])
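# For illustration: DefaultGroups.permissions flattens each group's roles into a
# single permission set, e.g. permissions[DefaultGroups.MEMBER] is the frozenset
# built from DefaultRoles.permissions[DefaultRoles.OBSERVER], i.e. the viewer
# permissions plus 'viewopen_issue', 'viewopen_proposal', 'vote', and so on.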
| bsd-3-clause | -7,700,405,619,576,067,000 | 24.484375 | 74 | 0.568363 | false | 3.690045 | false | false | false |
jensck/fluidity | fluidity/managers.py | 1 | 38146 | #-*- coding:utf-8 -*-
#
# Copyright (C) 2012 - Jens Knutson <jens.knutson at gmail dot com>
# This software is licensed under the GNU General Public License
# version 3 or later (see the file COPYING).
#pylint: disable-msg=W0201
"""Collection of "manager" classes, which handle various aspects of Fluidity."""
from __future__ import absolute_import, division, print_function
__author__ = 'Jens Knutson'
try:
import cPickle as pickle
except ImportError:
import pickle
import datetime
import glob
import json
import operator
import os
import shutil
import subprocess
import time
import gio
import gobject
import pango
import yaml
from kiwi.ui.objectlist import Column
from xdg import BaseDirectory
from fluidity import defs
from fluidity import gee_tee_dee
from fluidity import inbox_items
from fluidity import magic_machine
from fluidity import app_utils
from fluidity.first_time import FirstTimeBot
from fluidity.note import ProjectNote
class DataManager(object):
def __init__(self):
self.pickle_path = defs.USER_DATA_MAIN_FILE
#first, make sure we have our data file - if not, invoke FirstTimeBot
if (not os.path.exists(self.pickle_path) or
not os.path.exists(defs.NOTE_SLIDER_FOLDER)):
bot = FirstTimeBot()
bot.create_initial_files_and_paths()
del(bot) # Thank you for your service, bot. Rest in peace.
try:
with open(self.pickle_path, 'r') as pfile:
self.top_data = pickle.load(pfile)
except EOFError:
# probably the main app in the middle of saving its file.
# Wait a couple seconds, then try again.
time.sleep(2)
# If it _still_ fails, something is really screwed - not
# accommodating this, at least not yet.
with open(self.pickle_path, 'r') as pfile:
self.top_data = pickle.load(pfile)
self.aofs = self.top_data['areas_of_focus']
self.prjs = self.top_data['projects']
self.single_notes = self.top_data['single_notes']
self.queued_singletons = self.top_data['queued_singletons']
self._file_toady = FileSystemManager()
self._magic_maker = magic_machine.MagicMachine()
self.rebuild_aof_cache()
# PUBLIC METHODS
def activate_due_queued(self):
app_utils.log_line("Running activate_due_queued()", datetime.datetime.now())
for p in self.prjs:
prj = self.prjs[p]
if prj.status == "queued":
# FIXME: if the status is queued, we should /always/ have a
# queue date. What's the fallback?
if prj.queue_date:
if datetime.date.today() >= prj.queue_date:
self.change_project_status(prj, "active")
for na in self.queued_singletons:
if na.queue_date <= datetime.date.today():
self.prjs['singletons'].next_actions.append(na)
self.queued_singletons.remove(na)
def activate_nas(self, nas, prj_key):
"""Move the given NextActions to the Project's next_actions list"""
project = self.prjs[prj_key]
self.__move_na(nas, project.next_actions,
(project.unordered_next_actions,
project.incubating_next_actions))
def add_na_to_prj(self, na, prj_key):
self.prjs[prj_key].next_actions.append(na)
def add_queued_singleton(self, na):
self.queued_singletons.append(na)
self.save_data()
def aof_names(self):
return [self.aofs[k]['name'] for k in self.aofs.keys()]
def archive_completed_singletons(self):
#FIXME: total crap. fix later.
# the .format("") below is on purpose - look at the path for
# defs.USER_DATA_PATH in your filesystem, it'll make more sense.
pkl_path = os.path.join(defs.USER_DATA_PATH,
defs.ARCHIVED_SINGLETONS_FNAME.format(""))
try:
with open(pkl_path, 'r') as pkl_read:
nas_to_archive = pickle.load(pkl_read)
now = datetime.datetime.now().strftime(
defs.ARCHIVED_SINGLETONS_TIME_TMPLT)
# back up the old data file, just in case...
backup_file_name = defs.ARCHIVED_SINGLETONS_FNAME.format(now)
shutil.copy2(pkl_path, os.path.join(defs.BACKUPS_PATH, backup_file_name))
except IOError:
nas_to_archive = []
singletons = self.prjs['singletons'].next_actions
for na in singletons:
if na.complete:
nas_to_archive.append(na)
for na in nas_to_archive:
if na in singletons:
singletons.remove(na)
with open(pkl_path, 'wb') as pkl_write:
pickle.dump(nas_to_archive, pkl_write, pickle.HIGHEST_PROTOCOL)
self.save_data()
def autosave(self):
# FIXME: ZOMG this is so ghetto-tastic. fix it. srsly.
self.save_data()
return True
def change_project_status(self, prj, new_status, queue_date=None):
self._file_toady.move_project_folder(prj.summary, prj.status, new_status)
prj_ = prj
note = ProjectNote(prj=prj_)
note.change_prj_status(new_status)
if new_status == "queued":
prj.queue_date = queue_date
prj.status = new_status
self.save_data()
def cleanup_before_exit(self):
self.save_data()
def copy_to_project_folder(self, file_name, prj):
self._file_toady.copy_to_project_folder(file_name, prj.summary, prj.status)
def create_new_aof(self, new_name):
key_name = app_utils.format_for_dict_key(new_name)
self.aofs[key_name] = {'name': new_name, 'projects': []}
self.rebuild_aof_cache()
self.save_data()
return self.aofs
def delete_na(self, na, prj):
prj.next_actions.remove(na)
self.save_data()
def delete_prj(self, prj):
app_utils.log_line("Deleting project: " + str(prj), datetime.datetime.now())
# trash the folders first
self._file_toady.trash_project_folder(prj.summary, prj.status)
# then ditch the project notes
prj_ = prj
ProjectNote(prj=prj_).delete()
#this is /almost certainly/ The Hard Way...
for a in self.aofs.keys():
matches = []
# Welcome to my entry in the "Obfuscated Python" contest!
for p in xrange(len(self.aofs[a]['projects'])):
if self.aofs[a]['projects'][p] == prj.key_name:
matches.append({'aof': a, 'p_index': p})
for i in matches:
del(self.aofs[i['aof']]['projects'][i['p_index']])
del(self.prjs[prj.key_name])
self.save_data()
def delete_stuff_note(self, note_obj):
DUHLETED = False
i = 0
while not DUHLETED and i < len(self.single_notes):
if self.single_notes[i]['summary'] == note_obj.summary:
del(self.single_notes[i])
DUHLETED = True
i += 1
def dump_processed_stuff_notes(self, stuff_list):
# cull out the InboxFile items - unneeded.
real_list = []
for stuff in stuff_list:
if not isinstance(stuff, inbox_items.InboxFile):
real_list.append(stuff)
processed_path = \
os.path.join(defs.USER_DATA_PATH,
defs.PROCESSED_STUFF_FILE_NAME + str(time.time()))
with open(processed_path, 'wb') as pfile:
pickle.dump(real_list, pfile, pickle.HIGHEST_PROTOCOL)
gf = gio.File(processed_path)
gf.trash()
def file_stuff_as_read_review(self, stuff, rr_path):
stuff_path = os.path.split(stuff.path)[1]
shutil.move(stuff.path, os.path.join(rr_path, stuff_path))
def get_contexts(self):
contexts = []
for pk in self.prjs.keys():
p = self.prjs[pk]
if p.status == "active":
for na in p.next_actions:
if na.context != "" and na.context != None:
if not na.context in contexts:
contexts.append(na.context)
contexts.sort()
return contexts
def get_file_list_for_prj(self, prj):
return self._file_toady.get_file_list_for_prj(prj.summary, prj.status)
def get_inbox_files(self):
hiddens = os.path.join(defs.INBOX_FOLDER, ".hidden")
if os.path.exists(hiddens):
with open(hiddens, 'r') as dot_hidden:
hidden = dot_hidden.read()
else:
hidden = ""
hidden += "\n".join(defs.IGNORED_INBOX_PATHS)
for file_ in os.listdir(defs.INBOX_FOLDER):
if file_ not in hidden and not file_.startswith('.'):
yield inbox_items.InboxFile(os.path.join(defs.INBOX_FOLDER,
file_))
def get_inbox_notes(self):
return self.single_notes
def get_current_nas_for_each_active_prj(self):
active_nas = []
for p in self.prjs.keys():
prj = self.prjs[p]
if prj.status == "active" and prj.summary != 'singletons':
for na in prj.next_actions:
if not na.complete:
active_nas.append(na)
break
active_nas.extend([na for na in prj.unordered_next_actions
if not na.complete])
for na in self.prjs['singletons'].next_actions:
if not na.complete:
active_nas.append(na)
return active_nas
def get_nas_for_prj(self, prj_key):
try:
return self.prjs[prj_key].next_actions
except AttributeError:
return []
def get_prj_aof_names(self, prj):
aof_list = []
if len(prj.aofs) == 0:
aof_list.append(defs.NO_AOF_ASSIGNED)
else:
for a in prj.aofs:
aof_list.append(self.aofs[a]['name'])
return sorted(aof_list)
def get_prjs_by_aof(self, area, review_filter):
prj_list = []
# "incomplete" is just used by Slider, so far"
if review_filter == "incomplete":
for p in sorted(self.prjs.keys()):
prj = self.prjs[p]
if prj.status != "completed":
prj_list.append(prj)
else:
if area == "All":
prj_list.extend([prj for prj in self.prjs.values() if prj.status == review_filter])
elif area == defs.NO_AOF_ASSIGNED:
for p in sorted(self.prjs.keys()):
prj = self.prjs[p]
if prj.status == review_filter and len(prj.aofs) == 0:
prj_list.append(prj)
else:
area_key = app_utils.format_for_dict_key(area)
if self.aofs[area_key]['projects']:
prj_keys = self.aofs[area_key]['projects']
prj_list.extend([prj for prj in self.prjs.values()
if prj.status == review_filter and prj.key_name in prj_keys])
return sorted(prj_list, key=operator.attrgetter('summary'))
def get_project_folder_uri(self, prj):
return self._file_toady.get_project_folder_uri(prj.summary, prj.status)
def incubate_nas(self, nas, prj_key):
"""Move the given NextActions to the Project's incubating_next_actions."""
project = self.prjs[prj_key]
self.__move_na(nas, project.incubating_next_actions,
(project.next_actions, project.unordered_next_actions))
def move_nas_to_ordered_actions(self, nas, prj_key):
project = self.prjs[prj_key]
self.__move_na(nas, project.next_actions,
(project.unordered_next_actions, project.incubating_next_actions))
def move_nas_to_unordered_actions(self, nas, prj_key):
project = self.prjs[prj_key]
self.__move_na(nas, project.unordered_next_actions,
(project.next_actions, project.incubating_next_actions))
def __move_na(self, nas, add_to, remove_from):
for na in nas:
add_to.append(na)
for na_list in remove_from:
try:
na_list.remove(na)
except ValueError:
# HACK to work around the fact that we don't know which
# list it's coming _from_.
pass
def queue_singleton_na(self, na, queue_date_str):
try:
self.prjs['singletons'].next_actions.remove(na)
na.queue_date = self._magic_maker.get_magic_date(queue_date_str)
self.add_queued_singleton(na)
except ValueError:
# don't freak out if someone tries queuing a NA that isn't in singletons
pass
def rebuild_aof_cache(self):
for aof in self.aofs:
del(self.aofs[aof]['projects'][:])
for prj in self.prjs.keys():
for aof_key in self.prjs[prj].aofs:
if prj not in self.aofs[aof_key]['projects']:
self.aofs[aof_key]['projects'].append(prj)
def remove_file_from_prj(self, file_name, prj):
self._file_toady.remove_from_project_folder(file_name, prj.summary,
prj.status)
def reparent_project(self, prj, new_parent):
"""Make `new_parent` the parent object of `prj`."""
new_parent.subprojects.append(prj.uuid)
prj.parent_project = new_parent.uuid
def save_data(self):
# utils.log_line("Saving main data file.", datetime.datetime.now())
backup_path = os.path.join(defs.BACKUPS_PATH,
defs.USER_DATA_MAIN_FNAME + str(time.time()))
shutil.copy(self.pickle_path, backup_path)
with open(self.pickle_path, 'wb') as pfile:
pickle.dump(self.top_data, pfile, pickle.HIGHEST_PROTOCOL)
return True
def search(self, query, include_completed=False, include_nas=False):
query = query.lower()
formatter = lambda x: "<b>{0}</b>".format(x) # pylint: disable-msg=W0108
results = []
for prj in self.prjs.values():
if include_nas and (include_completed or prj.status != 'completed'):
for na in prj.next_actions:
score = magic_machine.score(na.summary, query)
if score > 0.4:
# fuck me, this is ugly: "flat is better than nested."
summary_formatted = magic_machine.format_common_substrings(
na.summary, query, format_match=formatter)
results.append(
SearchResult(na.summary, summary_formatted,
prj.key_name, score, na.uuid))
if include_completed:
score = magic_machine.score(prj.summary, query)
if score > 0.4:
formatted = magic_machine.format_common_substrings(
prj.summary, query, format_match=formatter)
results.append(SearchResult(prj.summary, formatted,
prj.key_name, score))
else:
if prj.status != 'completed':
score = magic_machine.score(prj.summary, query)
if score > 0.4:
formatted = magic_machine.format_common_substrings(
prj.summary, query,
format_match=formatter)
results.append(SearchResult(prj.summary, formatted,
prj.key_name, score))
results.sort(key=operator.attrgetter('score'), reverse=True)
return results
def set_prj_aofs(self, prj, aof_text):
if aof_text == defs.NO_AOF_ASSIGNED:
del(prj.aofs[:])
else:
for aof in self._parse_aof_text(aof_text):
del(prj.aofs[:])
if prj.key_name not in self.aofs[aof]['projects']:
self.aofs[aof]['projects'].append(prj.key_name)
prj.aofs.append(aof)
self.save_data()
return self.get_prj_aof_names(prj)
def add_slider_items(self, na_list, note_list, queued_list, note_strings):
self._take_these_fucking_nas(na_list)
self._take_these_fucking_notes(note_list)
self._take_these_fucking_queues(queued_list)
for note_str in note_strings:
note = {'summary': note_str, 'details': ""}
self.single_notes.append(note)
# Confirm that we made it to the step of saving
return self.save_data()
def _take_these_fucking_nas(self, na_list):
na_objs = [self._ploader(na_file) for na_file in na_list]
for na in na_objs:
self.prjs[na['prj_key']].next_actions.append(na['na_obj'])
def _take_these_fucking_notes(self, note_list):
note_objs = []
for note in note_list:
note_objs.append(self._ploader(note))
for notey in note_objs:
self.single_notes.append(notey)
def _take_these_fucking_queues(self, queued_list):
q_objs = []
for q_file in queued_list:
q_objs.append(self._ploader(q_file))
for obj in q_objs:
self.queued_singletons.append(obj['na_obj'])
self.activate_due_queued()
def _parse_aof_text(self, atext):
if atext == '':
return [app_utils.format_for_dict_key(defs.NO_AOF_ASSIGNED)]
else:
return [app_utils.format_for_dict_key(atext)]
def _ploader(self, pfile_path):
with open(pfile_path, 'r') as pfile:
pcontent = pickle.load(pfile)
return pcontent
# PROPERTIES
def engage_na_deleter(self, uuid):
"""Find the NA with the UID of uid arg, and delete it."""
for prj in self.prjs.values():
# only look at active projects, since this is for Engage
if prj.status == "active":
for na in prj.next_actions:
if na.uuid == uuid:
prj.next_actions.remove(na)
return True
# uh-oh. we REALLY shouldn't have gotten here.
# FIXME: this ought to throw an exception, really
return False
class FileSystemManager(object):
"""Filesystem manager for Fluidity"""
def __init__(self):
pass
def copy_to_project_folder(self, fname, prj_summary, prj_status):
full_path = self._get_path_for_type(prj_status) + \
self._sanitize_path(prj_summary)
# Does the project folder exist yet? If not, create it. If that fails,
# return False right away.
if not os.path.exists(full_path):
# try creating the right folder. if it fails, return False
if not self._create_project_folder(full_path):
return False
if fname.startswith('/'):
base_name = os.path.split(fname)[1]
else:
base_name = fname
# We got this far; now we can try the copy or move operation - which
# path will need to depend on if fname is a folder or not
if os.path.isdir(fname):
if fname.startswith(defs.INBOX_FOLDER):
shutil.move(fname, os.path.join(full_path, base_name))
else:
shutil.copytree(fname, os.path.join(full_path, base_name))
else:
if fname.startswith(defs.INBOX_FOLDER):
# more Evil(TM)... to be fixed with the signals rewrite
try:
shutil.move(fname, os.path.join(full_path, base_name))
except IOError:
# this might have "completed processing" already,
# so maybe it's in the trash...
base_name = os.path.split(fname)[1]
trash_path = BaseDirectory.xdg_data_home + "/Trash/files"
fname = os.path.join(trash_path, base_name)
shutil.move(fname, os.path.join(full_path, base_name))
else:
shutil.copy(fname, os.path.join(full_path, base_name))
return True
def move_project_folder(self, prj_summary, old_status, new_status):
sanitized_summary = self._sanitize_path(prj_summary)
full_path = self._get_path_for_type(old_status) + sanitized_summary
new_path = self._get_path_for_type(new_status) + sanitized_summary
if os.path.exists(full_path):
if full_path != new_path:
shutil.move(full_path, new_path)
def remove_from_project_folder(self, fname, prj_summary, prj_status):
full_path = os.path.join(self._get_path_for_type(prj_status),
self._sanitize_path(prj_summary), fname)
gf = gio.File(full_path)
gf.trash()
gf = None
del(gf)
def get_project_folder_uri(self, prj_summary, prj_status, create=True):
# this method assumes that if you're asking for the URI, you must want
# there to be a prj folder, so if there isn't one yet, just make one.
# However, if you don't want that, just set 'create' to False
full_path = self._get_path_for_type(prj_status) + \
self._sanitize_path(prj_summary)
if create:
if not os.path.exists(full_path):
# try creating the right folder. if it fails, return False
if not self._create_project_folder(full_path):
return ""
uri = "file://" + full_path
return uri
def get_file_list_for_prj(self, prj_summary, prj_status):
path = self.get_project_folder_uri(prj_summary, prj_status, create=False)
path = path.replace("file://", '')
path += os.sep
if os.path.exists(path):
return [path + f for f in os.listdir(path)]
else:
return []
def trash_project_folder(self, prj_summary, prj_status):
full_path = self._get_path_for_type(prj_status) + \
self._sanitize_path(prj_summary)
if os.path.exists(full_path):
gf = gio.File(full_path)
gf.trash()
gf = None
del(gf)
def _create_project_folder(self, path):
os.mkdir(path)
if os.path.exists(path):
return True
else:
return False
def _sanitize_path(self, fname):
# I might want to extend this behavior later, which is why I made a custom
# method instead of just doing the raw replacement below each time
return fname.replace('/', '-')
def _get_path_for_type(self, prj_status):
if prj_status == "active":
return defs.ACTIVE_FOLDER + os.sep
elif prj_status == "queued":
return defs.QUEUED_FOLDER + os.sep
elif prj_status == "waiting_for":
return defs.WAITING_FOR_FOLDER + os.sep
elif prj_status == 'incubating':
return defs.INCUBATING_FOLDER + os.sep
elif prj_status == 'completed':
return defs.COMPLETED_FOLDER + os.sep
class InboxManager(object):
# CHOCK FULL OF PROFANITY! I'm a juvenile, easily frustrated asshole.
# Get used to it.
def __init__(self, caller, obj_tree, fucdkingdatamanager):
# I also write shitty code, get used to that, too.
self._caller = caller
self._tree = obj_tree
self._fsm = FileSystemManager()
self.dm = fucdkingdatamanager
col = [Column('summary', data_type=str, searchable=True,
ellipsize=pango.ELLIPSIZE_END, expand=True),]
self._tree.set_columns(col)
self._fill_rows()
self._tree.set_headers_visible(False)
# automagically import new Slider items
inbox_monitor = gio.File(defs.NOTE_SLIDER_FOLDER).monitor_directory()
inbox_monitor.connect('changed', self.process_slider_inbox_changes)
def _fill_rows(self):
# FIXME: fix this FFS, use some actual polymorphism
#FIXME: reenable these later
self._row_inbox_folder = CategoryRow("Inbox Folder")
# self._row_email_inbox = CategoryRow("Emails")
# i.e.: Tomboy, e-d-s inbox "tasks", & collected items from Slider
self._row_single_notes = CategoryRow("Single notes")
self._row_processed_stuff = CategoryRow("Processed Stuff")
#FIXME: and re-enable these , too.
self._tree.append(None, self._row_inbox_folder)
# self._tree.append(None, self._row_email_inbox)
self._tree.append(None, self._row_single_notes)
self._tree.append(None, self._row_processed_stuff)
def add_actual_shit_to_columns(self):
notes = self.dm.get_inbox_notes()
notes.sort(key=operator.itemgetter('summary'))
# FIXME: this clears everything in "Processed Stuff", and it probably
# shouldn't - that should live in its own method.
self._tree.clear()
self._fill_rows()
for note in notes:
self._tree.append(self._row_single_notes,
inbox_items.InboxNote(note['summary'], note['details']))
for file_ in sorted(self.dm.get_inbox_files(),
key=operator.attrgetter('summary')):
self._tree.append(self._row_inbox_folder, file_)
# def add_inbox_files_to_clarify(self):
# note, file_, files = None, None, None
# for file_ in files:
# self._tree.append(self._row_single_notes,
# inbox_items.InboxNote(note['summary'], note['details']))
def complete_processing(self, obj):
# FIXME: wtf is this doing in here? this is GUI shit!
if isinstance(obj, inbox_items.InboxStuff):
selected_row = self._tree.get_selected_row_number()
self._tree.remove(obj)
self._tree.append(self._row_processed_stuff, obj)
if isinstance(obj, inbox_items.InboxNote):
self.dm.delete_stuff_note(obj)
elif isinstance(obj, inbox_items.InboxFile):
try:
obj.trash()
except gio.Error as error:
msg = ("Can't trash file (called from InboxManager."
"complete_processing): {0} -- error: {1}")
app_utils.log_line(msg.format(obj.summary, error))
self._tree.refresh()
self._tree.select_paths((selected_row, 0))
gobject.idle_add(self._tree.grab_focus)
def gather_slider_items(self):
na_list = []
note_list = []
queued_list = []
filenames = [os.path.join(defs.NOTE_SLIDER_FOLDER, f)
for f in os.listdir(defs.NOTE_SLIDER_FOLDER)
if f.endswith('.pkl')]
for n in filenames:
if n.endswith('-note.pkl'):
note_list.append(n)
elif n.endswith('-na.pkl'):
na_list.append(n)
elif n.endswith("-queued_na.pkl"):
queued_list.append(n)
gtasks_note_strings = self._import_google_tasks_inbox()
# only delete the actual files if we got confirmation that
# the data from them was saved successfully
if self.dm.add_slider_items(na_list, note_list, queued_list, gtasks_note_strings):
for f in note_list + na_list + queued_list:
gio.File(f).trash()
def _import_google_tasks_inbox(self):
# Yep, hardcoded paths specific to my machine. This is all temporary throwaway code now, so IDGAF
gtasks_raw_output = subprocess.check_output(["python3", "/home/jensck/Code/TasksSample/tasks_test.py"])
notes_list = json.loads(gtasks_raw_output)
return notes_list
def process_slider_inbox_changes(self, gfile_mon, gfile, other_file, event): # IGNORE:W0613
if event.value_nick == 'changes-done-hint':
self.gather_slider_items()
self.add_actual_shit_to_columns()
class RecurrenceManager(object):
def __init__(self, dm):
self._data_lumbergh = dm
def place_recurring_tasks(self):
app_utils.log_line("Running place_recurring_tasks()", datetime.datetime.now())
self._load_data(defs.RECURRENCE_DATA)
data = self._recur_data
today = datetime.date.today()
if self._recur_data['last_run'] < today:
self._place_daily_tasks(today, data)
self._place_monthly_tasks(today, data)
self._place_weekly_tasks(today, data)
self._recur_data['last_run'] = today
self._save_data(defs.RECURRENCE_DATA)
def _create_na(self, task):
na = gee_tee_dee.NextAction(task['summary'])
na_attrs = ('priority', 'context', 'notes', 'url', 'time_est',
'energy_est')
for attr in na_attrs:
if attr in task:
na.__setattr__(attr, task[attr])
if 'due_date' in task:
na.due_date = datetime.date.today() + \
datetime.timedelta(task['due_date'])
return na
# everyXDays: 1 # integer
#- summary: # the task's description in e-d-s
# priority: # "gnite syntax": ! and + are high, - is low, blank is normal
# context: # String, enclosed in quotes
# notes: # probably ought to be a block I guess. until then, string.
# url: # url, enclosed in quotes
# due_date: # integer - X days after placement
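    # An illustrative (made-up) recurrence entry matching the notes above; the
    # exact schema is inferred from _place_daily_tasks() and friends:
    #
    #   last_run: 2011-06-15
    #   daily:
    #   - summary: "Water the plants"
    #     everyXDays: 3
    #     context: "@home"
    #   monthly:
    #   - summary: "Pay rent"
    #     days: [1]
    #   weekly:
    #   - summary: "Weekly review"
    #     weekdays: [4]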
def _load_data(self, data_file_path):
self._recur_data = None
self._recur_data = self._yloader(data_file_path)
# FIXME: datamanager is a fucking mess. clean it up.
self._singleton_nas = self._data_lumbergh.get_nas_for_prj('singletons')
def _place_daily_tasks(self, today, data):
for t in data['daily']:
if 'last_seen' not in t:
na = self._create_na(t)
self._data_lumbergh.add_na_to_prj(na, 'singletons')
t['last_seen'] = today
else:
delta = datetime.timedelta(t['everyXDays'])
found = False
index = 0
while found == False and index < len(self._singleton_nas):
if self._singleton_nas[index].summary == t['summary']:
if not self._singleton_nas[index].complete:
found = True
t['last_seen'] = today
index += 1
if found == False and today >= t['last_seen'] + delta:
na = self._create_na(t)
self._data_lumbergh.add_na_to_prj(na, 'singletons')
t['last_seen'] = today
def _place_monthly_tasks(self, today, data):
last = data['last_run']
for t in data['monthly']:
for day in t['days']:
# FIXME: make more generic wrt weekly tasks, too.
task_date = datetime.date(today.year, today.month, day)
if last < task_date <= today:
found = False
index = 0
while found == False and index < len(self._singleton_nas):
if self._singleton_nas[index].summary == t['summary']:
if not self._singleton_nas[index].complete:
found = True
index += 1
if found == False:
na = self._create_na(t)
self._data_lumbergh.add_na_to_prj(na, 'singletons')
def _place_weekly_tasks(self, today, data):
for t in data['weekly']:
for day in t['weekdays']:
# FIXME: make more generic wrt weekly tasks, too.
if day == today.weekday():
# FIXME: bah, I suck. make this work properly when we haven't run
# on a given day, make it run everything since the last time we ran.
# the following should help I guess...
# (today + datetime.timedelta(7 - (today - last_day).days)).weekday()
found = False
index = 0
while found == False and index < len(self._singleton_nas):
if self._singleton_nas[index].summary == t['summary']:
if not self._singleton_nas[index].complete:
found = True
index += 1
if found == False:
na = self._create_na(t)
self._data_lumbergh.add_na_to_prj(na, 'singletons')
def _save_data(self, data_file_path):
#FIXME: create a backup copy?
with open(data_file_path, 'w') as yfile:
print("Saving recurrence data")
yaml.dump(self._recur_data, yfile, Dumper=defs.YAML_DUMPER,
default_flow_style=False)
def _yloader(self, yfile_path):
with open(yfile_path, 'r') as yfile:
print("calling yaml.load()")
ycontent = yaml.load(yfile, Loader=defs.YAML_LOADER)
return ycontent
class BackupJesus(object):
"""BackupJesus saaaaaaaaaaaves the righteous among thy backup files from the
fiery damnation of the void which is /dev/null!
(Actually, /dev/null has nothing to do with this code actually, I just
use gio.File.delete(), but that wouldn't be as funny. ;P)
"""
BACKUPS_PATH = defs.BACKUPS_PATH
FITY_EPOCH = defs.FITY_EPOCH
def __init__(self):
self.now = datetime.datetime.now()
# I'm lazy.
delta = datetime.timedelta
self.backup_policies = (# First four hours of *all* backups
{'start_time': self.now - delta(hours=4),
'end_time': self.now,
'interval': delta(0)},
# every hour of the past week
{'start_time': self.now - delta(weeks=1),
'end_time': self.now - delta(hours=4),
'interval': delta(hours=1)},
# every day of the past month
{'start_time': self.now - delta(weeks=4),
'end_time': self.now - delta(weeks=1),
'interval': delta(1)},
# every month since Fluidity's "epoch"
{'start_time': datetime.datetime.fromtimestamp(
defs.FITY_EPOCH),
'end_time': self.now - delta(weeks=4),
'interval': delta(weeks=4)})
def kill_stale_backups(self, dry_run=False):
pattern = os.path.join(defs.BACKUPS_PATH, 'fluidity*.pkl*')
kill_list = sorted(glob.glob(pattern))
the_book_of_life = []
for policy in self.backup_policies:
the_book_of_life += self._find_saved_indexes(kill_list, **policy)
the_book_of_life.sort()
doomed = self._delete_doomed_files(kill_list, the_book_of_life, dry_run)
elderly = [d for d in sorted(doomed) if self._is_senior_citizen(d)]
message = "Damned {0} backups to the void; {1} were senior citizens."
app_utils.log_line(message.format(len(doomed), len(elderly)),
datetime.datetime.now())
def _delete_doomed_files(self, klist, saved_indexes, keep_the_safety_on):
doomed = []
for idx, victim in enumerate(klist):
if idx not in saved_indexes:
doomed.append(self._get_creation_time(victim))
if not keep_the_safety_on:
gfile = gio.File(victim)
gfile.trash()
return doomed
def _find_saved_indexes(self, klist, start_time, end_time, interval):
saved = []
for idx, backup_file in enumerate(klist):
creation_time = self._get_creation_time(backup_file)
if start_time < creation_time < end_time:
saved.append(idx)
start_time = creation_time + interval
return saved
def _get_creation_time(self, path):
file_name = path.replace(defs.BACKUPS_PATH + '/', '')
time_float = float(file_name.replace('fluidity.pkl', ''))
return datetime.datetime.fromtimestamp(time_float)
def _is_senior_citizen(self, dt):
return dt < datetime.datetime.now() - datetime.timedelta(weeks=9)
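# --- Illustrative usage sketch (not part of the original module) ------------
# A minimal example of how BackupJesus might be driven, assuming
# defs.BACKUPS_PATH holds backups named like "fluidity.pkl<unix timestamp>".
# With dry_run=True nothing is trashed; doomed files are only counted/logged.
def _example_prune_backups(dry_run=True):
    """Hedged sketch: apply the tiered retention policy to old backup files."""
    pruner = BackupJesus()
    # Keeps everything from the last 4 hours, hourly copies for a week,
    # daily copies for a month, and monthly copies back to FITY_EPOCH.
    pruner.kill_stale_backups(dry_run=dry_run)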
class CategoryRow(object):
def __init__(self, summary):
self.summary = summary
class SearchResult(object):
"""Simple "row" class for use with Kiwi's ObjectList"""
def __init__(self, summary, summary_formatted, prj_key, score, na_uuid=None):
"""Initialize this SearchResult.
Args:
summary: a plain-text string of the result content
summary_formatted: a string formatted with pango markup
prj_key: ...I can't even remember what this does anymore. FML.
score: the 'score' returned by the relevance module
na_uuid: if this is a NextAction, give its uuid so we can jump to it;
defaults to None
"""
self.summary = summary
self.prj_key = prj_key
self.score = score
self.summary_formatted = summary_formatted
self.na_uuid = na_uuid
if self.na_uuid:
self.result_type = "na"
self.result_type_formatted = "<i>Next Action</i>"
else:
self.result_type = "prj"
self.result_type_formatted = "<i>Project</i>"
| gpl-3.0 | -1,707,985,159,558,534,700 | 40.826754 | 111 | 0.554055 | false | 3.741638 | false | false | false |
macosforge/ccs-calendarserver | calendarserver/tools/resources.py | 1 | 4349 | #!/usr/bin/env python
##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
__all__ = [
"migrateResources",
]
from getopt import getopt, GetoptError
import os
import sys
from calendarserver.tools.cmdline import utilityMain, WorkerService
from twext.python.log import Logger
from twisted.internet.defer import inlineCallbacks, returnValue
from txdav.who.directory import CalendarDirectoryRecordMixin
from twext.who.directory import DirectoryRecord as BaseDirectoryRecord
from txdav.who.idirectory import RecordType
log = Logger()
class ResourceMigrationService(WorkerService):
@inlineCallbacks
def doWork(self):
try:
from txdav.who.opendirectory import (
DirectoryService as OpenDirectoryService
)
except ImportError:
returnValue(None)
sourceService = OpenDirectoryService()
sourceService.recordType = RecordType
destService = self.store.directoryService()
yield migrateResources(sourceService, destService)
def usage(e=None):
    if e:
        print(e)
        print("")
name = os.path.basename(sys.argv[0])
print("usage: %s [options] " % (name,))
print("")
print(" Migrates resources and locations from OD to Calendar Server")
print("")
print("options:")
print(" -h --help: print this help and exit")
print(" -f --config <path>: Specify caldavd.plist configuration path")
print(" -v --verbose: print debugging information")
print("")
sys.exit(0)
def main():
try:
        (optargs, _ignore_args) = getopt(
            sys.argv[1:], "hf:v", [
                "help",
                "config=",
                "verbose",
            ],
        )
except GetoptError, e:
usage(e)
#
# Get configuration
#
configFileName = None
verbose = False
    for opt, arg in optargs:
        if opt in ("-h", "--help"):
            usage()
        elif opt in ("-f", "--config"):
            configFileName = arg
        elif opt in ("-v", "--verbose"):
            verbose = True
        else:
            raise NotImplementedError(opt)
utilityMain(configFileName, ResourceMigrationService, verbose=verbose)
class DirectoryRecord(BaseDirectoryRecord, CalendarDirectoryRecordMixin):
pass
@inlineCallbacks
def migrateResources(sourceService, destService, verbose=False):
"""
Fetch all the locations and resources from sourceService that are not
already in destService and copy them into destService.
"""
destRecords = []
for recordType in (
RecordType.resource,
RecordType.location,
):
records = yield sourceService.recordsWithRecordType(recordType)
for sourceRecord in records:
destRecord = yield destService.recordWithUID(sourceRecord.uid)
if destRecord is None:
if verbose:
print(
"Migrating {recordType} {uid}".format(
recordType=recordType.name,
uid=sourceRecord.uid
)
)
fields = sourceRecord.fields.copy()
fields[destService.fieldName.recordType] = destService.recordType.lookupByName(recordType.name)
# Only interested in these fields:
fn = destService.fieldName
interestingFields = [
fn.recordType, fn.shortNames, fn.uid, fn.fullNames, fn.guid
]
for key in fields.keys():
if key not in interestingFields:
del fields[key]
destRecord = DirectoryRecord(destService, fields)
destRecords.append(destRecord)
if destRecords:
yield destService.updateRecords(destRecords, create=True)
if __name__ == "__main__":
main()
| apache-2.0 | -4,358,202,407,898,562,000 | 27.801325 | 111 | 0.626811 | false | 4.516096 | true | false | false |
KatolaZ/mammult | structure/activity/node_degree_vectors.py | 1 | 2498 | # This file is part of MAMMULT: Metrics And Models for Multilayer Networks
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
####
##
## Take as input the layers of a multiplex, and provide as output a
## file where the n-th line contains the degrees of the n-th node at
## each layer, separated by a space, in the format:
##
## node1_deglay1 node1_deglay2 .... node1_deglayM
## node2_deglay1 node2_deglay2 .... node2_deglayM
## ..............................................
## nodeN_deglay1 nodeN_deglay2 .... nodeN_deglayM
##
##
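##
## Example (illustrative, with made-up file names): given two layer files
## "layer1.txt" and "layer2.txt", each listing one edge per line as
## "src dst", running
##
##    python node_degree_vectors.py layer1.txt layer2.txt > degrees.txt
##
## writes one line per node, e.g. "3 1" for a node with degree 3 in the
## first layer and degree 1 in the second.
##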
import sys
if len(sys.argv) < 2:
print "Usage: %s <layer1> [<layer2>...]" % sys.argv[0]
sys.exit(1)
node_degrees = {}
max_N = -1
num_layer = 0
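# node_degrees maps node id -> {layer index: degree of that node in the layer};
# max_N tracks the largest node id seen in any layer.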
for layer in sys.argv[1:]:
with open(layer, "r") as lines:
for l in lines:
if l[0] == "#":
continue
            s, d = [int(x) for x in l.strip().split()[:2]]
if s > max_N:
max_N = s
if d > max_N:
max_N = d
if s in node_degrees:
if num_layer in node_degrees[s]:
node_degrees[s][num_layer] += 1
else:
node_degrees[s][num_layer] = 1
else:
node_degrees[s] = {}
node_degrees[s][num_layer] = 1
if d in node_degrees:
if num_layer in node_degrees[d]:
node_degrees[d][num_layer] += 1
else:
node_degrees[d][num_layer] = 1
else:
node_degrees[d] = {}
node_degrees[d][num_layer] = 1
num_layer += 1
for n in range(max_N+1):
for i in range(num_layer):
if n in node_degrees:
if i in node_degrees[n]:
print node_degrees[n][i],
else:
print 0,
else:
print 0,
print
| gpl-3.0 | 6,631,259,597,424,604,000 | 29.463415 | 74 | 0.542834 | false | 3.641399 | false | false | false |
petewarden/tensorflow | tensorflow/python/keras/engine/data_adapter.py | 1 | 57975 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adapter module that convert different input data objects into tf.dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import functools
import itertools
import math
import random
import numpy as np
import six
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import training_utils
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import dataset_creator
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
keras_data_adapter_gauge = monitoring.BoolGauge(
"/tensorflow/api/keras/data_adapters", "keras data adapter usage", "method")
try:
from scipy import sparse as scipy_sparse # pylint: disable=g-import-not-at-top
except ImportError:
scipy_sparse = None
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pd = None
@six.add_metaclass(abc.ABCMeta)
class DataAdapter(object):
"""Base class for input data adapter.
  In TF 2.0, tf.data is the preferred API for users to feed in data. In order
  to simplify the training code path, all input data objects will be
  converted to `tf.data.Dataset` if possible.
  Note that since this class is mainly targeted at TF 2.0, it might make a lot
  of assumptions under the hood, e.g. eager context by default, distribution
  strategy, etc. In the meantime, some legacy feature support might be dropped,
  e.g. the Iterator from the dataset API in v1.
The sample usage of this class is like:
```
x = tf.data.Dataset.range(100)
adapter_cls = [NumpyArrayDataAdapter, ..., DatasetAdapter]
applicable_adapters = [cls for cls in adapter_cls if cls.can_handle(x)]
if len(applicable_adapters) != 1:
raise ValueError("Expect only one adapter class to handle the input")
dataset = applicable_adapters[0](x).get_dataset()
for data in dataset:
# training
```
"""
@staticmethod
def can_handle(x, y=None):
"""Whether the current DataAdapter could handle the input x and y.
    Structure-wise, x and y can be a single object, a list of objects if there
    are multiple inputs/outputs, or a dictionary of objects when the
    inputs/outputs are named.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
Returns:
boolean
"""
raise NotImplementedError
@abc.abstractmethod
def __init__(self, x, y=None, **kwargs):
"""Create a DataAdapter based on data inputs.
    The caller must make sure to call `can_handle()` first before invoking this
    method. Providing an unsupported data type will result in unexpected
    behavior.
Args:
x: input features.
y: target labels. Note that y could be None in the case of prediction.
**kwargs: Other keyword arguments for DataAdapter during the construction
of the tf.dataset.Dataset. For example:
- Numpy data might have `sample_weights` which will be used for
weighting the loss function during training.
- Numpy data might need to have `batch_size` parameter when constructing
the dataset and iterator.
- Certain input might need to be distribution strategy aware. When
`distribution_strategy` is passed, the created dataset need to respect
the strategy.
        DataAdapter might choose to ignore any keyword argument if it doesn't
        use it, or raise an exception if any required argument is not provided.
"""
if not self.can_handle(x, y):
raise ValueError("{} Cannot handle input {}, {}".format(
self.__class__, x, y))
@abc.abstractmethod
def get_dataset(self):
"""Get a dataset instance for the current DataAdapter.
    Note that the dataset returned does not repeat across epochs, so the caller
    might need to create a new iterator for the same dataset at the beginning
    of each epoch. This behavior might change in the future.
    Returns:
      A `tf.data.Dataset`. The caller might use the dataset in different
      contexts, e.g. `iter(dataset)` in eager mode to get the values directly,
      or, in graph mode, providing the iterator tensor to a Keras model
      function.
"""
raise NotImplementedError
@abc.abstractmethod
def get_size(self):
"""Return the size (number of batches) for the dataset created.
    For certain types of input data, the number of batches is known, e.g. for
    Numpy data the size is (number_of_elements / batch_size). Whereas for a
    dataset or a python generator, the size is unknown since it may or may not
    have an end state.
Returns:
int, the number of batches for the dataset, or None if it is unknown. The
caller could use this to control the loop of training, show progress bar,
or handle unexpected StopIteration error.
"""
raise NotImplementedError
@abc.abstractmethod
def batch_size(self):
"""Return the batch size of the dataset created.
    For certain types of input data, the batch size is known, and even
    required, e.g. for a numpy array. Whereas for a dataset, the batch size is
    unknown unless we take a peek.
Returns:
int, the batch size of the dataset, or None if it is unknown.
"""
raise NotImplementedError
def representative_batch_size(self):
"""Return a representative size for batches in the dataset.
This is not guaranteed to be the batch size for all batches in the
dataset. It just needs to be a rough approximation for batch sizes in
the dataset.
Returns:
int, a representative size for batches found in the dataset,
or None if it is unknown.
"""
return self.batch_size()
@abc.abstractmethod
def has_partial_batch(self):
"""Whether the dataset has partial batch at the end."""
raise NotImplementedError
@abc.abstractmethod
def partial_batch_size(self):
"""The size of the final partial batch for dataset.
Will return None if has_partial_batch is False or batch_size is None.
"""
raise NotImplementedError
@abc.abstractmethod
def should_recreate_iterator(self):
"""Returns whether a new iterator should be created every epoch."""
raise NotImplementedError
def get_samples(self):
"""Returns number of samples in the data, or `None`."""
if not self.get_size() or not self.batch_size():
return None
total_sample = self.get_size() * self.batch_size()
if self.has_partial_batch():
total_sample -= (self.batch_size() - self.partial_batch_size())
return total_sample
def on_epoch_end(self):
"""A hook called after each epoch."""
pass
class TensorLikeDataAdapter(DataAdapter):
"""Adapter that handles Tensor-like objects, e.g. EagerTensor and NumPy."""
@staticmethod
def can_handle(x, y=None):
# TODO(kaftan): Check performance implications of using a flatten
# here for other types of inputs.
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
def _is_tensor(v):
if isinstance(v, tensor_types):
return True
return False
return all(_is_tensor(v) for v in flat_inputs)
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
epochs=1,
steps=None,
shuffle=False,
**kwargs):
super(TensorLikeDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs)).pop()
_check_data_cardinality(inputs)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
num_full_batches = int(num_samples // batch_size)
self._partial_batch_size = num_samples % batch_size
if isinstance(shuffle, str):
shuffle = shuffle.lower()
self._shuffle = shuffle
# Vectorized version of shuffle.
# This is a performance improvement over using `from_tensor_slices`.
# The indices of the data are shuffled and batched, and these indices
# are then zipped with the data and used to extract a batch of the data
# at each step. The performance improvements here come from:
# 1. vectorized batch using gather
# 2. parallelized map
# 3. pipelined permutation generation
# 4. optimized permutation batching
# 5. disabled static optimizations
indices_dataset = dataset_ops.DatasetV2.range(1)
if shuffle != "batch":
indices_dataset = indices_dataset.repeat(epochs)
def permutation(_):
# It turns out to be more performant to make a new set of indices rather
# than reusing the same range Tensor. (presumably because of buffer
# forwarding.)
indices = math_ops.range(num_samples, dtype=dtypes.int64)
if shuffle and shuffle != "batch":
indices = random_ops.random_shuffle(indices)
return indices
# We prefetch a single element. Computing large permutations can take quite
# a while so we don't want to wait for prefetching over an epoch boundary to
# trigger the next permutation. On the other hand, too many simultaneous
# shuffles can contend on a hardware level and degrade all performance.
indices_dataset = indices_dataset.map(permutation).prefetch(1)
def slice_batch_indices(indices):
"""Convert a Tensor of indices into a dataset of batched indices.
This step can be accomplished in several ways. The most natural is to
slice the Tensor in a Dataset map. (With a condition on the upper index to
handle the partial batch.) However it turns out that coercing the Tensor
into a shape which is divisible by the batch size (and handling the last
partial batch separately) allows for a much more favorable memory access
pattern and improved performance.
Args:
indices: Tensor which determines the data order for an entire epoch.
Returns:
A Dataset of batched indices.
"""
num_in_full_batch = num_full_batches * batch_size
first_k_indices = array_ops.slice(indices, [0], [num_in_full_batch])
first_k_indices = array_ops.reshape(
first_k_indices, [num_full_batches, batch_size])
flat_dataset = dataset_ops.DatasetV2.from_tensor_slices(first_k_indices)
if self._partial_batch_size:
index_remainder = dataset_ops.DatasetV2.from_tensors(array_ops.slice(
indices, [num_in_full_batch], [self._partial_batch_size]))
flat_dataset = flat_dataset.concatenate(index_remainder)
if shuffle == "batch":
# 1024 is a magic constant that has not been properly evaluated
flat_dataset = flat_dataset.shuffle(1024).repeat(epochs)
return flat_dataset
indices_dataset = indices_dataset.flat_map(slice_batch_indices)
dataset = self.slice_inputs(indices_dataset, inputs)
if shuffle == "batch":
def shuffle_batch(*batch):
return nest.map_structure(random_ops.random_shuffle, batch)
dataset = dataset.map(shuffle_batch)
self._dataset = dataset
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
dataset = dataset_ops.DatasetV2.zip((
indices_dataset,
dataset_ops.DatasetV2.from_tensors(inputs).repeat()
))
def grab_batch(i, data):
return nest.map_structure(lambda d: array_ops.gather(d, i, axis=0), data)
dataset = dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
# Default optimizations are disabled to avoid the overhead of (unnecessary)
# input pipeline graph serialization and deserialization
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
if self._shuffle:
# See b/141490660 for more details.
options.experimental_external_state_policy = (
distribute_options.ExternalStatePolicy.IGNORE)
dataset = dataset.with_options(options)
return dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._partial_batch_size > 0
def partial_batch_size(self):
return self._partial_batch_size or None
def should_recreate_iterator(self):
# An infinite dataset is always created here.
return False
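# --- Illustrative sketch (not part of the original module) ------------------
# A minimal example of how TensorLikeDataAdapter is typically exercised: NumPy
# features/labels go in and a batched (optionally shuffled) tf.data.Dataset
# comes out. The shapes and batch size below are arbitrary.
def _example_tensor_like_adapter():
  x = np.arange(100.0).reshape(50, 2)  # 50 samples, 2 features
  y = np.arange(50)
  adapter = TensorLikeDataAdapter(x, y, batch_size=8, shuffle=True)
  # ceil(50 / 8) == 7 batches; the last one is partial with 50 % 8 == 2 samples.
  assert adapter.get_size() == 7
  assert adapter.has_partial_batch()
  assert adapter.partial_batch_size() == 2
  return adapter.get_dataset()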
class GenericArrayLikeDataAdapter(TensorLikeDataAdapter):
"""Adapter that handles array-like data without forcing it into memory.
This adapter handles array-like datasets that may be too big to fully
fit into memory.
Specifically, this adapter handles any Python class which implements:
  `__getitem__`, `__len__`, `shape`, and `dtype` with the same meanings
as Numpy, but it ignores any case where all the inputs are Tensors or Numpy
arrays (because that case is handled by the base TensorLikeDataAdapter).
It ignores scipy sparse matrices and Composite Tensors because those are
handled by the CompositeTensorDataAdapter.
It also does not handle lists/tuples of scalars, because those are handled
by the ListsOfScalarsDataAdapter.
"""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_array_like(v):
"""Return True if v is a Tensor, array, or is array-like."""
return (
hasattr(v, "__getitem__") and
hasattr(v, "shape") and
hasattr(v, "dtype") and
hasattr(v, "__len__")
)
if (not TensorLikeDataAdapter.can_handle(x, y) and
not CompositeTensorDataAdapter.can_handle(x, y)):
return all(_is_array_like(v) for v in flat_inputs)
else:
return False
def __init__(self, *args, **kwargs):
logging.warn(
"Keras is training/fitting/evaluating on array-like data. Keras may "
"not be optimized for this format, so if your input data format is "
"supported by TensorFlow I/O (https://github.com/tensorflow/io) we "
"recommend using that to load a Dataset instead.")
super(GenericArrayLikeDataAdapter, self).__init__(*args, **kwargs)
def slice_inputs(self, indices_dataset, inputs):
"""Slice inputs into a Dataset of batches.
Given a Dataset of batch indices and the unsliced inputs,
this step slices the inputs in a parallelized fashion
and produces a dataset of input batches.
Args:
indices_dataset: A Dataset of batched indices
inputs: A python data structure that contains the inputs, targets,
and possibly sample weights.
Returns:
A Dataset of input batches matching the batch indices.
"""
flat_inputs = nest.flatten(inputs)
def dynamic_shape_like(t):
shape = list(t.shape)
shape[0] = None
return tuple(shape)
flat_dtypes = [inp.dtype for inp in flat_inputs]
contiguous = True
if self._shuffle and self._shuffle != "batch":
contiguous = False
def grab_batch(indices):
"""Grab a batch of data from the inputs."""
# This uses a py_function to avoid converting the array-like
# into a Tensor before slicing it, because converting the array-like
      # to a Tensor may force it into memory.
def py_method(ind):
def slice_array(data):
return training_utils.slice_arrays(data, ind.numpy(),
contiguous=contiguous)
return [slice_array(inp) for inp in flat_inputs]
flat_out = script_ops.eager_py_func(py_method, [indices], flat_dtypes)
for v, original_inp in zip(flat_out, flat_inputs):
v.set_shape(dynamic_shape_like(original_inp))
return nest.pack_sequence_as(inputs, flat_out)
dataset = indices_dataset.map(
grab_batch, num_parallel_calls=dataset_ops.AUTOTUNE)
return dataset
class DatasetCreatorAdapter(DataAdapter):
"""Adapter that handles dataset functions."""
def __init__(self, x, *args, **kwargs):
super(DatasetCreatorAdapter, self).__init__(x, *args, **kwargs)
if not isinstance(x, dataset_creator.DatasetCreator):
raise TypeError("The input of a `DatasetCreatorAdapter` should be a "
"`DatasetCreator` but it received type {}.".format(
type(x)))
self.dataset_creator = x
self.strategy = kwargs.get("distribution_strategy", None)
@staticmethod
def can_handle(x, y=None):
if isinstance(x, dataset_creator.DatasetCreator):
assert y is None
return True
def should_recreate_iterator(self):
# We expect users to shuffle the dataset in their `dataset_fn` supplied to
# `DatasetCreator`. Since that is a buffered shuffle, we intend to not reset
# the dataset so the batches that are not shuffled can still be pulled.
return False
def get_size(self):
return None # To be inferred by `DataHandler`.
def get_dataset(self):
return self.strategy.distribute_datasets_from_function(self.dataset_creator)
def batch_size(self):
raise NotImplementedError()
def has_partial_batch(self):
raise NotImplementedError()
def partial_batch_size(self):
raise NotImplementedError()
class CompositeTensorDataAdapter(DataAdapter):
"""Adapter that handles composite tensor."""
@staticmethod
def can_handle(x, y=None):
flat_inputs = nest.flatten(x)
if y is not None:
flat_inputs += nest.flatten(y)
def _is_composite(v):
# Dataset/iterator/DistributedDataset inherits from CompositeTensor but
# should be handled by DatasetAdapter and GeneratorAdapter.
if (tf_utils.is_extension_type(v) and
not isinstance(v,
(dataset_ops.DatasetV2, iterator_ops.IteratorBase)) and
not _is_distributed_dataset(v)):
return True
# Support Scipy sparse tensors if scipy is installed
if scipy_sparse is not None and scipy_sparse.issparse(v):
return True
return False
def _is_tensor_or_composite(v):
if isinstance(v, (ops.Tensor, np.ndarray)):
return True
return _is_composite(v)
return (any(_is_composite(v) for v in flat_inputs) and
all(_is_tensor_or_composite(v) for v in flat_inputs))
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
steps=None,
shuffle=False,
**kwargs):
super(CompositeTensorDataAdapter, self).__init__(x, y, **kwargs)
x, y, sample_weights = _process_tensorlike((x, y, sample_weights))
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
# If sample_weights are not specified for an output use 1.0 as weights.
(sample_weights, _, _) = training_utils.handle_partial_sample_weights(
y, sample_weights, sample_weight_modes, check_all_flat=True)
inputs = pack_x_y_sample_weight(x, y, sample_weights)
dataset = dataset_ops.DatasetV2.from_tensor_slices(inputs)
num_samples = int(nest.flatten(x)[0].shape[0])
if shuffle:
dataset = dataset.shuffle(num_samples)
# If batch_size is not passed but steps is, calculate from the input data.
# Default to 32 for backwards compat.
if not batch_size:
batch_size = int(math.ceil(num_samples / steps)) if steps else 32
dataset = dataset.batch(batch_size)
self._size = int(math.ceil(num_samples / batch_size))
self._batch_size = batch_size
self._has_partial_batch = (self._size != (num_samples // batch_size))
self._partial_batch_size = None
if self._has_partial_batch:
self._partial_batch_size = (
num_samples - (self._size - 1) * self._batch_size)
self._dataset = dataset
def get_dataset(self):
return self._dataset
def get_size(self):
return self._size
def batch_size(self):
return self._batch_size
def has_partial_batch(self):
return self._has_partial_batch
def partial_batch_size(self):
return self._partial_batch_size
def should_recreate_iterator(self):
return True
class ListsOfScalarsDataAdapter(DataAdapter):
"""Adapter that handles lists of scalars and lists of lists of scalars."""
@staticmethod
def can_handle(x, y=None):
handles_x = ListsOfScalarsDataAdapter._is_list_of_scalars(x)
handles_y = True
if y is not None:
handles_y = ListsOfScalarsDataAdapter._is_list_of_scalars(y)
return handles_x and handles_y
@staticmethod
def _is_list_of_scalars(inp):
if isinstance(inp, (float, int, str, bytes, bytearray)):
return True
if isinstance(inp, (list, tuple)) and inp:
return ListsOfScalarsDataAdapter._is_list_of_scalars(inp[0])
return False
def __init__(self,
x,
y=None,
sample_weights=None,
sample_weight_modes=None,
batch_size=None,
shuffle=False,
**kwargs):
super(ListsOfScalarsDataAdapter, self).__init__(x, y, **kwargs)
x = np.asarray(x)
if y is not None:
y = np.asarray(y)
if sample_weights is not None:
sample_weights = np.asarray(sample_weights)
sample_weight_modes = broadcast_sample_weight_modes(
sample_weights, sample_weight_modes)
self._internal_adapter = TensorLikeDataAdapter(
x,
y=y,
sample_weights=sample_weights,
sample_weight_modes=sample_weight_modes,
batch_size=batch_size,
shuffle=shuffle,
**kwargs)
def get_dataset(self):
return self._internal_adapter.get_dataset()
def get_size(self):
return self._internal_adapter.get_size()
def batch_size(self):
return self._internal_adapter.batch_size()
def has_partial_batch(self):
return self._internal_adapter.has_partial_batch()
def partial_batch_size(self):
return self._internal_adapter.partial_batch_size()
def should_recreate_iterator(self):
return True
class DatasetAdapter(DataAdapter):
"""Adapter that handles `tf.data.Dataset`."""
@staticmethod
def can_handle(x, y=None):
return (isinstance(x, (dataset_ops.DatasetV1, dataset_ops.DatasetV2)) or
_is_distributed_dataset(x))
def __init__(self,
x,
y=None,
sample_weights=None,
steps=None,
**kwargs):
super(DatasetAdapter, self).__init__(x, y, **kwargs)
    # Note that the dataset instance is immutable, so it's fine to reuse the
    # user-provided dataset.
self._dataset = x
# The user-provided steps.
self._user_steps = steps
self._validate_args(y, sample_weights, steps)
def get_dataset(self):
return self._dataset
def get_size(self):
return # Inferred in `DataHandler`.
def batch_size(self):
return None
def has_partial_batch(self):
return False
def partial_batch_size(self):
return None
def should_recreate_iterator(self):
# Since DistributedDatasets have no cardinality, the user must provide
# all steps that need to be run, calling `.repeat()` as needed.
if _is_distributed_dataset(self._dataset):
return False
# If user doesn't supply `steps`, or if they supply `steps` that
# exactly equals the size of the `Dataset`, create a new iterator
# each epoch.
return (self._user_steps is None or
cardinality.cardinality(self._dataset).numpy() == self._user_steps)
def _validate_args(self, y, sample_weights, steps):
"""Validates `__init__` arguments."""
# Arguments that shouldn't be passed.
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"dataset as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"dataset as input.")
if steps is None:
if _is_distributed_dataset(self._dataset):
raise ValueError("When providing a distributed dataset, you must "
"specify the number of steps to run.")
size = cardinality.cardinality(self._dataset).numpy()
if size == cardinality.INFINITE and steps is None:
raise ValueError(
"When providing an infinite dataset, you must specify "
"the number of steps to run (if you did not intend to "
"create an infinite dataset, make sure to not call "
"`repeat()` on the dataset).")
class GeneratorDataAdapter(DataAdapter):
"""Adapter that handles python generators and iterators."""
@staticmethod
def can_handle(x, y=None):
return ((hasattr(x, "__next__") or hasattr(x, "next"))
and hasattr(x, "__iter__")
and not isinstance(x, data_utils.Sequence))
def __init__(self,
x,
y=None,
sample_weights=None,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
# Generators should never shuffle as exhausting the generator in order to
# shuffle the batches is inefficient.
kwargs.pop("shuffle", None)
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"python generator as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"python generator as input.")
super(GeneratorDataAdapter, self).__init__(x, y, **kwargs)
# Since we have to know the dtype of the python generator when we build the
# dataset, we have to look at a batch to infer the structure.
peek, x = self._peek_and_restore(x)
peek = self._standardize_batch(peek)
peek = _process_tensorlike(peek)
# Need to build the Model on concrete input shapes.
if model is not None and not model.built:
concrete_x, _, _ = unpack_x_y_sample_weight(peek)
model.distribute_strategy.run(
lambda x: model(x, training=False), args=(concrete_x,))
self._first_batch_size = int(nest.flatten(peek)[0].shape[0])
def _get_dynamic_shape(t):
shape = t.shape
# Unknown number of dimensions, `as_list` cannot be called.
if shape.rank is None:
return shape
return tensor_shape.TensorShape([None for _ in shape.as_list()])
output_shapes = nest.map_structure(_get_dynamic_shape, peek)
output_types = nest.map_structure(lambda t: t.dtype, peek)
# Note that dataset API takes a callable that creates a generator object,
# rather than generator itself, which is why we define a function here.
generator_fn = self._handle_multiprocessing(x, workers, use_multiprocessing,
max_queue_size)
def wrapped_generator():
for data in generator_fn():
yield self._standardize_batch(data)
dataset = dataset_ops.DatasetV2.from_generator(
wrapped_generator, output_types, output_shapes=output_shapes)
if workers == 1 and not use_multiprocessing:
dataset = dataset.prefetch(1)
self._dataset = dataset
def _standardize_batch(self, data):
"""Standardizes a batch output by a generator."""
# Removes `None`s.
x, y, sample_weight = unpack_x_y_sample_weight(data)
data = pack_x_y_sample_weight(x, y, sample_weight)
data = nest.list_to_tuple(data)
def _convert_dtype(t):
if (isinstance(t, np.ndarray) and issubclass(t.dtype.type, np.floating)):
return np.array(t, dtype=backend.floatx())
return t
data = nest.map_structure(_convert_dtype, data)
return data
@staticmethod
def _peek_and_restore(x):
peek = next(x)
return peek, itertools.chain([peek], x)
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
"""Create a callable, possibly including an Enqueuer."""
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
enqueuer = data_utils.GeneratorEnqueuer(
x, use_multiprocessing=use_multiprocessing)
enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return enqueuer.get()
else:
generator_fn = lambda: x
return generator_fn
def get_dataset(self):
return self._dataset
def get_size(self):
return None
def batch_size(self):
return None
def representative_batch_size(self):
return self._first_batch_size
def has_partial_batch(self):
return False
def partial_batch_size(self):
return
def should_recreate_iterator(self):
return False
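# --- Illustrative sketch (not part of the original module) ------------------
# A minimal example of wrapping a plain Python generator. The adapter peeks at
# one element to infer dtypes/shapes, so the generator must yield at least one
# batch; the sizes below are arbitrary.
def _example_generator_adapter():
  def gen():
    while True:
      yield np.zeros((8, 3), dtype="float32"), np.zeros((8,), dtype="float32")
  adapter = GeneratorDataAdapter(gen())
  # Neither the number of batches nor the batch size is knowable up front.
  assert adapter.get_size() is None
  assert adapter.batch_size() is None
  assert adapter.representative_batch_size() == 8
  return adapter.get_dataset()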
class KerasSequenceAdapter(GeneratorDataAdapter):
"""Adapter that handles `keras.utils.Sequence`."""
@staticmethod
def can_handle(x, y=None):
return isinstance(x, data_utils.Sequence)
def __init__(self,
x,
y=None,
sample_weights=None,
shuffle=False,
workers=1,
use_multiprocessing=False,
max_queue_size=10,
model=None,
**kwargs):
if not is_none_or_empty(y):
raise ValueError("`y` argument is not supported when using "
"`keras.utils.Sequence` as input.")
if not is_none_or_empty(sample_weights):
raise ValueError("`sample_weight` argument is not supported when using "
"`keras.utils.Sequence` as input.")
self._size = len(x)
self._shuffle_sequence = shuffle
self._keras_sequence = x
self._enqueuer = None
super(KerasSequenceAdapter, self).__init__(
x,
        shuffle=False,  # Shuffle is handled in the _make_callable override.
workers=workers,
use_multiprocessing=use_multiprocessing,
max_queue_size=max_queue_size,
model=model,
**kwargs)
@staticmethod
def _peek_and_restore(x):
return x[0], x
def _handle_multiprocessing(self, x, workers, use_multiprocessing,
max_queue_size):
if workers > 1 or (workers > 0 and use_multiprocessing):
def generator_fn():
self._enqueuer = data_utils.OrderedEnqueuer(
x, use_multiprocessing=use_multiprocessing,
shuffle=self._shuffle_sequence)
self._enqueuer.start(workers=workers, max_queue_size=max_queue_size)
return self._enqueuer.get()
else:
def generator_fn():
order = range(len(x))
if self._shuffle_sequence:
# Match the shuffle convention in OrderedEnqueuer.
order = list(order)
random.shuffle(order)
for i in order:
yield x[i]
return generator_fn
def get_size(self):
return self._size
def should_recreate_iterator(self):
return True
def on_epoch_end(self):
if self._enqueuer:
self._enqueuer.stop()
self._keras_sequence.on_epoch_end()
ALL_ADAPTER_CLS = [
ListsOfScalarsDataAdapter, TensorLikeDataAdapter,
GenericArrayLikeDataAdapter, DatasetAdapter, GeneratorDataAdapter,
KerasSequenceAdapter, CompositeTensorDataAdapter, DatasetCreatorAdapter
]
def select_data_adapter(x, y):
"""Selects a data adapter than can handle a given x and y."""
adapter_cls = [cls for cls in ALL_ADAPTER_CLS if cls.can_handle(x, y)]
if not adapter_cls:
# TODO(scottzhu): This should be a less implementation-specific error.
raise ValueError(
"Failed to find data adapter that can handle "
"input: {}, {}".format(
_type_name(x), _type_name(y)))
elif len(adapter_cls) > 1:
raise RuntimeError(
"Data adapters should be mutually exclusive for "
"handling inputs. Found multiple adapters {} to handle "
"input: {}, {}".format(
adapter_cls, _type_name(x), _type_name(y)))
# Instrument the data adapter usage before returning it
keras_data_adapter_gauge.get_cell(adapter_cls[0].__name__).set(True)
return adapter_cls[0]
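# --- Illustrative sketch (not part of the original module) ------------------
# select_data_adapter() resolves exactly one adapter class for a given (x, y)
# pair; e.g. in-memory NumPy arrays resolve to TensorLikeDataAdapter.
def _example_select_adapter():
  x = np.zeros((4, 3))
  y = np.zeros((4,))
  assert select_data_adapter(x, y) is TensorLikeDataAdapter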
def _type_name(x):
"""Generates a description of the type of an object."""
if isinstance(x, dict):
key_types = set(_type_name(key) for key in x.keys())
val_types = set(_type_name(key) for key in x.values())
return "({} containing {} keys and {} values)".format(
type(x), key_types, val_types)
if isinstance(x, (list, tuple)):
types = set(_type_name(val) for val in x)
return "({} containing values of types {})".format(
type(x), types)
return str(type(x))
def _process_tensorlike(inputs):
"""Process tensor-like inputs.
This function:
(1) Converts `Numpy` arrays to `Tensor`s.
(2) Converts `Scipy` sparse matrices to `SparseTensor`s.
  (3) Converts `list`s to `tuple`s (for `tf.data` support).
Args:
inputs: Structure of `Tensor`s, `NumPy` arrays, or tensor-like.
Returns:
Structure of `Tensor`s or tensor-like.
"""
def _convert_numpy_and_scipy(x):
if isinstance(x, np.ndarray):
dtype = None
if issubclass(x.dtype.type, np.floating):
dtype = backend.floatx()
return ops.convert_to_tensor_v2_with_dispatch(x, dtype=dtype)
elif scipy_sparse and scipy_sparse.issparse(x):
return _scipy_sparse_to_sparse_tensor(x)
return x
inputs = nest.map_structure(_convert_numpy_and_scipy, inputs)
return nest.list_to_tuple(inputs)
def is_none_or_empty(inputs):
  # Util method to check if the input is None or an empty list.
  # The python "not" check would raise an error like the one below if the
  # input were a numpy array:
# "The truth value of an array with more than one element is ambiguous.
# Use a.any() or a.all()"
return inputs is None or not nest.flatten(inputs)
def broadcast_sample_weight_modes(target_structure, sample_weight_modes):
"""Match sample_weight_modes structure with output structure."""
if target_structure is None or not nest.flatten(target_structure):
return sample_weight_modes
if isinstance(sample_weight_modes, str):
if isinstance(target_structure, dict):
return {key: sample_weight_modes for key in target_structure.keys()}
return [sample_weight_modes for _ in target_structure]
if sample_weight_modes:
try:
nest.assert_same_structure(
training_utils.list_to_tuple(target_structure),
training_utils.list_to_tuple(sample_weight_modes))
except (ValueError, TypeError):
target_str = str(nest.map_structure(lambda _: "...", target_structure))
mode_str = str(nest.map_structure(lambda _: "...", sample_weight_modes))
# Attempt to coerce sample_weight_modes to the target structure. This
# implicitly depends on the fact that Model flattens outputs for its
# internal representation.
try:
sample_weight_modes = nest.pack_sequence_as(
target_structure, nest.flatten(sample_weight_modes))
logging.warning(
"sample_weight modes were coerced from\n {}\n to \n {}"
.format(target_str, mode_str))
except (ValueError, TypeError):
raise ValueError(
"Unable to match target structure and sample_weight_modes "
"structure:\n {}\n to \n {}".format(target_str, mode_str))
return sample_weight_modes
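# --- Illustrative sketch (not part of the original module) ------------------
# A single string mode is broadcast to match the structure of the targets; the
# target values below are placeholders.
def _example_broadcast_modes():
  modes = broadcast_sample_weight_modes({"a": [1], "b": [2]}, "temporal")
  assert modes == {"a": "temporal", "b": "temporal"}
  assert broadcast_sample_weight_modes([[1], [2]], "temporal") == [
      "temporal", "temporal"]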
class DataHandler(object):
"""Handles iterating over epoch-level `tf.data.Iterator` objects."""
def __init__(self,
x,
y=None,
sample_weight=None,
batch_size=None,
steps_per_epoch=None,
initial_epoch=0,
epochs=1,
shuffle=False,
class_weight=None,
max_queue_size=10,
workers=1,
use_multiprocessing=False,
model=None,
steps_per_execution=None,
distribute=True):
"""Initializes a `DataHandler`.
Arguments:
x: See `Model.fit`.
y: See `Model.fit`.
sample_weight: See `Model.fit`.
batch_size: See `Model.fit`.
steps_per_epoch: See `Model.fit`.
initial_epoch: See `Model.fit`.
epochs: See `Model.fit`.
shuffle: See `Model.fit`.
class_weight: See `Model.fit`.
max_queue_size: See `Model.fit`.
workers: See `Model.fit`.
use_multiprocessing: See `Model.fit`.
model: The `Model` instance. Needed in order to correctly `build` the
`Model` using generator-like inputs (see `GeneratorDataAdapter`).
steps_per_execution: See `Model.compile`.
distribute: Whether to distribute the `tf.dataset`.
`PreprocessingLayer.adapt` does not support distributed datasets,
`Model` should always set this to `True`.
"""
self._initial_epoch = initial_epoch
self._epochs = epochs
self._insufficient_data = False
self._model = model
# `steps_per_execution_value` is the cached initial value.
# `steps_per_execution` is mutable and may be changed by the DataAdapter
# to handle partial executions.
if steps_per_execution is None:
self._steps_per_execution = 1
self._steps_per_execution_value = 1
else:
self._steps_per_execution = steps_per_execution
self._steps_per_execution_value = steps_per_execution.numpy().item()
adapter_cls = select_data_adapter(x, y)
self._verify_data_adapter_compatibility(adapter_cls)
self._adapter = adapter_cls(
x,
y,
batch_size=batch_size,
steps=steps_per_epoch,
epochs=epochs - initial_epoch,
sample_weights=sample_weight,
shuffle=shuffle,
max_queue_size=max_queue_size,
workers=workers,
use_multiprocessing=use_multiprocessing,
distribution_strategy=ds_context.get_strategy(),
model=model)
strategy = ds_context.get_strategy()
self._current_step = 0
self._step_increment = self._steps_per_execution_value - 1
self._insufficient_data = False
self._configure_dataset_and_inferred_steps(strategy, x, steps_per_epoch,
class_weight, distribute)
def _verify_data_adapter_compatibility(self, adapter_cls):
pass
def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch,
class_weight, distribute):
"""Configure the `_dataset` and `_inferred_steps` attributes."""
del x
dataset = self._adapter.get_dataset()
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
self._inferred_steps = self._infer_steps(steps_per_epoch, dataset)
# `PreprocessingLayer.adapt` does not currently support distributed
# datasets, so we pass `distribute=False` there.
if distribute and not _is_distributed_dataset(dataset):
dataset = strategy.experimental_distribute_dataset(dataset)
self._dataset = dataset
self._validate_data_handler()
def enumerate_epochs(self):
"""Yields `(epoch, tf.data.Iterator)`."""
with self._truncate_execution_to_epoch():
data_iterator = iter(self._dataset)
for epoch in range(self._initial_epoch, self._epochs):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
if self._adapter.should_recreate_iterator():
data_iterator = iter(self._dataset)
yield epoch, data_iterator
self._adapter.on_epoch_end()
@contextlib.contextmanager
def _truncate_execution_to_epoch(self):
"""Truncates steps per execution to at most one epoch."""
should_truncate = (
self._inferred_steps is not None and
self._steps_per_execution_value > self._inferred_steps)
original_value = self._steps_per_execution_value
try:
if should_truncate:
self._steps_per_execution.assign(self._inferred_steps)
self._steps_per_execution_value = self._inferred_steps
yield
finally:
if should_truncate:
self._steps_per_execution.assign(original_value)
self._steps_per_execution_value = original_value
def sync(self):
context.async_wait()
@contextlib.contextmanager
def catch_stop_iteration(self):
"""Catches errors when an iterator runs out of data."""
try:
yield
self.sync()
except (StopIteration, errors.OutOfRangeError):
if self._inferred_steps is None:
self._inferred_steps = self._current_step
else:
self._insufficient_data = True
total_epochs = self._epochs - self._initial_epoch
logging.warning(
"Your input ran out of data; interrupting training. "
"Make sure that your dataset or generator can generate at "
"least `steps_per_epoch * epochs` batches (in this case, "
"{} batches). You may need to use the repeat() function "
"when building your dataset.".format(total_epochs *
self._inferred_steps))
def steps(self):
"""Yields steps for the current epoch."""
self._current_step = 0
# `self._inferred_steps` can be changed by `catch_stop_iteration`.
while (self._inferred_steps is None or
self._current_step < self._inferred_steps):
if self._insufficient_data: # Set by `catch_stop_iteration`.
break
can_run_full_execution = (
self._steps_per_execution_value == 1 or
self._inferred_steps is None or
self._inferred_steps - self._current_step >=
self._steps_per_execution_value)
if can_run_full_execution:
self._step_increment = self._steps_per_execution_value - 1
yield self._current_step
self._current_step += self._steps_per_execution_value
else:
# Last partial execution.
steps_remaining = self._inferred_steps - self._current_step
self._steps_per_execution.assign(steps_remaining)
self._step_increment = steps_remaining - 1
yield self._current_step
self._current_step += steps_remaining
self._steps_per_execution.assign(self._steps_per_execution_value)
@property
def step_increment(self):
"""The number to increment the step for `on_batch_end` methods."""
return self._step_increment
@property
def inferred_steps(self):
"""The inferred steps per epoch of the created `Dataset`.
This will be `None` in the case where:
(1) A `Dataset` of unknown cardinality was passed to the `DataHandler`, and
(2) `steps_per_epoch` was not provided, and
(3) The first epoch of iteration has not yet completed.
Returns:
The inferred steps per epoch of the created `Dataset`.
"""
return self._inferred_steps
@property
def should_sync(self):
# Catch OutOfRangeError for Datasets of unknown size.
# This blocks until the batch has finished executing.
# TODO(b/150292341): Allow multiple async steps here.
return self._inferred_steps is None
def _infer_steps(self, steps, dataset):
"""Infers steps_per_epoch needed to loop through a dataset."""
if steps is not None:
return steps
adapter_steps = self._adapter.get_size()
if adapter_steps is not None:
return adapter_steps
size = cardinality.cardinality(dataset)
if size == cardinality.INFINITE and steps is None:
raise ValueError("When passing an infinitely repeating dataset, you "
"must specify how many steps to draw.")
if size >= 0:
return size.numpy().item()
return None
@property
def _samples(self):
return self._adapter.get_samples()
def _validate_data_handler(self):
# TODO(b/152094471): Support this with DistIter.get_next_as_optional.
if self._steps_per_execution_value > 1 and self._inferred_steps is None:
raise ValueError(
"Could not infer the size of the data. With "
"`steps_per_execution > 1`, you must specify the number of steps "
"to run.")
def resolve_logs(self, logs):
return logs
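# Editor's illustrative sketch (not part of the original TensorFlow module):
# a minimal, hedged example of the iteration protocol `Model.fit` follows with
# `DataHandler` -- enumerate epochs, then steps, and guard `next()` with
# `catch_stop_iteration`. The helper is defined for illustration only and is
# never called at import time; the toy shapes below are assumptions.
def _demo_data_handler_usage():
  handler = DataHandler(
      np.ones((16, 3)), np.zeros((16, 1)), batch_size=4, epochs=2)
  for _epoch, iterator in handler.enumerate_epochs():
    for _step in handler.steps():
      with handler.catch_stop_iteration():
        x_batch, y_batch = next(iterator)  # one batch per step
  return handler.inferred_steps  # 4 for this toy input (16 samples / batch 4)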
class _ClusterCoordinatorDataHandler(DataHandler):
"""A `DataHandler` that is compatible with `ClusterCoordinator`."""
def _verify_data_adapter_compatibility(self, adapter_cls):
if adapter_cls != DatasetCreatorAdapter:
raise NotImplementedError("Only `DatasetCreator` input is supported in "
"`ParameterServerStrategy` at this time.")
def _configure_dataset_and_inferred_steps(self, strategy, x, steps_per_epoch,
class_weight, distribute):
if not isinstance(x, dataset_creator.DatasetCreator):
raise TypeError("When using `ParameterServerStrategy`, `x` must be a "
"`DatasetCreator`.")
def per_worker_dataset_fn():
return strategy.distribute_datasets_from_function(x)
self._dataset = self._model._cluster_coordinator.create_per_worker_dataset( # pylint: disable=protected-access
per_worker_dataset_fn)
if steps_per_epoch is None:
raise ValueError(
"`steps_per_epoch` must be specified with `ParameterServerStrategy`.")
self._inferred_steps = steps_per_epoch
def sync(self):
self._model._cluster_coordinator.join() # pylint: disable=protected-access
def resolve_logs(self, logs):
return logs.fetch()
def get_data_handler(*args, **kwargs):
if getattr(kwargs["model"], "_cluster_coordinator", None):
return _ClusterCoordinatorDataHandler(*args, **kwargs)
return DataHandler(*args, **kwargs)
def _make_class_weight_map_fn(class_weight):
"""Applies class weighting to a `Dataset`.
The `Dataset` is assumed to be in format `(x, y)` or `(x, y, sw)`, where
`y` must be a single `Tensor`.
Args:
class_weight: A map where the keys are integer class ids and values are
the class weights, e.g. `{0: 0.2, 1: 0.6, 2: 0.3}`
Returns:
A function that can be used with `tf.data.Dataset.map` to apply class
weighting.
"""
class_ids = list(sorted(class_weight.keys()))
expected_class_ids = list(range(len(class_ids)))
if class_ids != expected_class_ids:
error_msg = (
"Expected `class_weight` to be a dict with keys from 0 to one less "
"than the number of classes, found {}").format(class_weight)
raise ValueError(error_msg)
class_weight_tensor = ops.convert_to_tensor_v2_with_dispatch(
[class_weight[int(c)] for c in class_ids])
def _class_weights_map_fn(*data):
"""Convert `class_weight` to `sample_weight`."""
x, y, sw = unpack_x_y_sample_weight(data)
if nest.is_nested(y):
raise ValueError(
"`class_weight` is only supported for Models with a single output.")
if y.shape.rank > 2:
raise ValueError("`class_weight` not supported for "
"3+ dimensional targets.")
y_classes = smart_cond.smart_cond(
y.shape.rank == 2 and backend.shape(y)[1] > 1,
lambda: backend.argmax(y, axis=1),
lambda: math_ops.cast(backend.reshape(y, (-1,)), dtypes.int64))
cw = array_ops.gather_v2(class_weight_tensor, y_classes)
if sw is not None:
cw = math_ops.cast(cw, sw.dtype)
sw, cw = expand_1d((sw, cw))
# `class_weight` and `sample_weight` are multiplicative.
sw = sw * cw
else:
sw = cw
return x, y, sw
return _class_weights_map_fn
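# Editor's illustrative sketch (not part of the original module): mapping the
# generated function over a batched `(x, y)` dataset turns `class_weight` into
# a per-sample weight column, yielding `(x, y, sample_weight)` elements. The
# toy data below is an assumption for illustration; the helper is never called.
def _demo_class_weight_map_fn():
  dataset = dataset_ops.DatasetV2.from_tensor_slices(
      (np.ones((4, 3), dtype="float32"), np.array([0, 1, 1, 2])))
  dataset = dataset.batch(2).map(
      _make_class_weight_map_fn({0: 1.0, 1: 2.0, 2: 3.0}))
  return dataset  # elements are now `(x, y, sample_weight)` triples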
def expand_1d(data):
"""Expands 1-dimensional `Tensor`s into 2-dimensional `Tensor`s."""
def _expand_single_1d_tensor(t):
# Leaves `CompositeTensor`s as-is.
if (isinstance(t, ops.Tensor) and
isinstance(t.shape, tensor_shape.TensorShape) and t.shape.rank == 1):
return array_ops.expand_dims_v2(t, axis=-1)
return t
return nest.map_structure(_expand_single_1d_tensor, data)
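# Editor's illustrative sketch (not part of the original module): `expand_1d`
# only touches rank-1 `Tensor`s, e.g. turning shape (8,) into (8, 1) so sample
# weights broadcast cleanly against per-sample losses. Never called at import.
def _demo_expand_1d():
  sw = ops.convert_to_tensor_v2_with_dispatch(np.ones((8,)))
  return expand_1d(sw)  # shape (8,) -> shape (8, 1)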
def train_validation_split(arrays, validation_split):
"""Split arrays into train and validation subsets in deterministic order.
The last part of data will become validation data.
Args:
arrays: Tensors to split. Allowed inputs are arbitrarily nested structures
of Tensors and NumPy arrays.
validation_split: Float between 0 and 1. The proportion of the dataset to
include in the validation split. The rest of the dataset will be included
in the training split.
Returns:
`(train_arrays, validation_arrays)`
"""
def _can_split(t):
tensor_types = (ops.Tensor, np.ndarray)
if pd:
tensor_types = (ops.Tensor, np.ndarray, pd.Series, pd.DataFrame)
return isinstance(t, tensor_types) or t is None
flat_arrays = nest.flatten(arrays)
unsplitable = [type(t) for t in flat_arrays if not _can_split(t)]
if unsplitable:
raise ValueError(
"`validation_split` is only supported for Tensors or NumPy "
"arrays, found following types in the input: {}".format(unsplitable))
if all(t is None for t in flat_arrays):
return arrays, arrays
first_non_none = None
for t in flat_arrays:
if t is not None:
first_non_none = t
break
# Assumes all arrays have the same batch shape or are `None`.
batch_dim = int(first_non_none.shape[0])
split_at = int(math.floor(batch_dim * (1. - validation_split)))
if split_at == 0 or split_at == batch_dim:
raise ValueError(
"Training data contains {batch_dim} samples, which is not sufficient "
"to split it into a validation and training set as specified by "
"`validation_split={validation_split}`. Either provide more data, or a "
"different value for the `validation_split` argument." .format(
batch_dim=batch_dim, validation_split=validation_split))
def _split(t, start, end):
if t is None:
return t
return t[start:end]
train_arrays = nest.map_structure(
functools.partial(_split, start=0, end=split_at), arrays)
val_arrays = nest.map_structure(
functools.partial(_split, start=split_at, end=batch_dim), arrays)
return train_arrays, val_arrays
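# Editor's illustrative sketch (not part of the original module): the split is
# deterministic and always keeps the *last* fraction of the data for
# validation. The helper below is never called at import time.
def _demo_train_validation_split():
  features = np.arange(10).reshape((10, 1))
  labels = np.arange(10)
  train, val = train_validation_split((features, labels), validation_split=0.2)
  return train, val  # first 8 rows for training, last 2 for validation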
@keras_export("keras.utils.unpack_x_y_sample_weight", v1=[])
def unpack_x_y_sample_weight(data):
"""Unpacks user-provided data tuple.
This is a convenience utility to be used when overriding
`Model.train_step`, `Model.test_step`, or `Model.predict_step`.
This utility makes it easy to support data of the form `(x,)`,
`(x, y)`, or `(x, y, sample_weight)`.
Standalone usage:
>>> features_batch = tf.ones((10, 5))
>>> labels_batch = tf.zeros((10, 5))
>>> data = (features_batch, labels_batch)
>>> # `y` and `sample_weight` will default to `None` if not provided.
>>> x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
>>> sample_weight is None
True
Example in overridden `Model.train_step`:
```python
class MyModel(tf.keras.Model):
def train_step(self, data):
# If `sample_weight` is not provided, all samples will be weighted
# equally.
x, y, sample_weight = tf.keras.utils.unpack_x_y_sample_weight(data)
with tf.GradientTape() as tape:
y_pred = self(x, training=True)
loss = self.compiled_loss(
y, y_pred, sample_weight, regularization_losses=self.losses)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
self.compiled_metrics.update_state(y, y_pred, sample_weight)
return {m.name: m.result() for m in self.metrics}
```
Args:
data: A tuple of the form `(x,)`, `(x, y)`, or `(x, y, sample_weight)`.
Returns:
The unpacked tuple, with `None`s for `y` and `sample_weight` if they are not
provided.
"""
if not isinstance(data, tuple):
return (data, None, None)
elif len(data) == 1:
return (data[0], None, None)
elif len(data) == 2:
return (data[0], data[1], None)
elif len(data) == 3:
return (data[0], data[1], data[2])
else:
error_msg = ("Data is expected to be in format `x`, `(x,)`, `(x, y)`, "
"or `(x, y, sample_weight)`, found: {}").format(data)
raise ValueError(error_msg)
@keras_export("keras.utils.pack_x_y_sample_weight", v1=[])
def pack_x_y_sample_weight(x, y=None, sample_weight=None):
"""Packs user-provided data into a tuple.
This is a convenience utility for packing data into the tuple formats
that `Model.fit` uses.
Standalone usage:
>>> x = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x)
>>> isinstance(data, tf.Tensor)
True
>>> y = tf.ones((10, 1))
>>> data = tf.keras.utils.pack_x_y_sample_weight(x, y)
>>> isinstance(data, tuple)
True
>>> x, y = data
Args:
x: Features to pass to `Model`.
y: Ground-truth targets to pass to `Model`.
sample_weight: Sample weight for each element.
Returns:
Tuple in the format used in `Model.fit`.
"""
if y is None:
# For single x-input, we do no tuple wrapping since in this case
# there is no ambiguity. This also makes NumPy and Dataset
# consistent in that the user does not have to wrap their Dataset
    # data in an unnecessary tuple.
if not nest.is_nested(x):
return x
else:
return (x,)
elif sample_weight is None:
return (x, y)
else:
return (x, y, sample_weight)
def single_batch_iterator(strategy,
x,
y=None,
sample_weight=None,
class_weight=None):
"""Creates a single-batch dataset."""
x, y, sample_weight = _process_tensorlike((x, y, sample_weight))
if y is None:
data = (x,)
elif sample_weight is None:
data = (x, y)
else:
data = (x, y, sample_weight)
_check_data_cardinality(data)
dataset = dataset_ops.DatasetV2.from_tensors(data)
if class_weight:
dataset = dataset.map(_make_class_weight_map_fn(class_weight))
dataset = strategy.experimental_distribute_dataset(dataset)
return iter(dataset)
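# Editor's illustrative sketch (not part of the original module): roughly how a
# caller builds a one-batch iterator, e.g. for `*_on_batch` style methods. Uses
# the default (no-op) distribution strategy; never called at import time.
def _demo_single_batch_iterator():
  strategy = ds_context.get_strategy()
  iterator = single_batch_iterator(strategy, np.ones((4, 3)), np.zeros((4, 1)))
  return next(iterator)  # a single `(x, y)` batch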
def _check_data_cardinality(data):
num_samples = set(int(i.shape[0]) for i in nest.flatten(data))
if len(num_samples) > 1:
msg = "Data cardinality is ambiguous:\n"
for label, single_data in zip(["x", "y", "sample_weight"], data):
msg += " {} sizes: {}\n".format(
label, ", ".join(str(i.shape[0]) for i in nest.flatten(single_data)))
msg += "Make sure all arrays contain the same number of samples."
raise ValueError(msg)
def _scipy_sparse_to_sparse_tensor(t):
"""Converts a SciPy sparse matrix to a SparseTensor."""
sparse_coo = t.tocoo()
row, col = sparse_coo.row, sparse_coo.col
data, shape = sparse_coo.data, sparse_coo.shape
if issubclass(data.dtype.type, np.floating):
data = data.astype(backend.floatx())
indices = np.concatenate(
(np.expand_dims(row, axis=1), np.expand_dims(col, axis=1)), axis=1)
return sparse_tensor.SparseTensor(indices, data, shape)
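# Editor's illustrative sketch (not part of the original module): converting a
# random SciPy CSR matrix. Assumes SciPy is installed; never called at import.
def _demo_scipy_sparse_conversion():
  from scipy import sparse  # local import: SciPy is an optional dependency
  matrix = sparse.random(5, 4, density=0.5, format="csr")
  return _scipy_sparse_to_sparse_tensor(matrix)  # a SparseTensor of floatx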
def _is_distributed_dataset(ds):
return isinstance(ds, input_lib.DistributedDatasetInterface)
| apache-2.0 | -7,670,201,060,302,608,000 | 33.778044 | 115 | 0.658404 | false | 3.911943 | false | false | false |
yhalk/vw_challenge_ECR | src/ev3/Sensors/sensors.py | 1 | 1535 | import paho.mqtt.client as mqtt
import config
import ev3dev.ev3 as ev3
import IR.IR_control as remoteControl
import ev3control.master as master
from ev3control.messages import *
import time
from MotionCtrl.actuators_simple import actuators
from Sensors.odometry_ev3 import Odometry
#Set IR sensor to remote control mode
ir = remoteControl.InfraredSensor()
ir.mode = "IR-REMOTE"
#Get odometer
odometer = Odometry()
publishable_names_dict = { "IR_control":ir,
config.LARGE_MOTOR_A:actuators[0],
config.LARGE_MOTOR_B:actuators[1],
config.LARGE_MOTOR_D:actuators[2],
config.MEDIUM_MOTOR:actuators[3],
"Odometer":odometer
}
# Make dict where key is sensor name and value
# is a list of all properties of this sensor
items_to_publish = {pub: [] for pub in list(publishable_names_dict.keys())}
for pub_name in list(items_to_publish.keys()):
pub_obj = publishable_names_dict[pub_name]
for member, dtype in pub_obj.__class__.__dict__.items():
if isinstance(dtype, property):
items_to_publish[pub_name].append(member)
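# Editor's illustrative sketch (not part of the original file): a small helper
# showing the shape of items_to_publish built above, e.g.
# "Odometer -> ['x', 'y', ...]". The exact property names depend on the ev3dev
# and Odometry classes at runtime, so the output is only indicative. The helper
# is never called on import.
def _print_publishable_properties():
    for device_name, props in items_to_publish.items():
        print("{0} -> {1}".format(device_name, props))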
def addSensorDevices(client,topic,qos):
    #Use same names as in publishable_names_dict
#Add remote controller
master.publish_cmd(client,topic, AddDeviceMessage("IR_control", "remoteControl.InfraredSensor()"),1,qos=qos)
master.publish_cmd(client,topic, SetAttrMessage("IR_control", "mode","IR-REMOTE"),1,qos=qos)
| apache-2.0 | 4,616,542,250,256,086,000 | 35.547619 | 112 | 0.661889 | false | 3.488636 | false | false | false |
JMoravec/unkRadnet | fitToCurve/pyeq2/Examples/Complex/ListAllStandardEquations_2D.py | 1 | 1067 | # Version info: $Id: ListAllStandardEquations_2D.py 1 2012-01-07 22:20:43Z [email protected] $
import os, sys, inspect
if os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '../..') not in sys.path:
sys.path.append(os.path.join(sys.path[0][:sys.path[0].rfind(os.sep)], '../..'))
import pyeq2, pyeq2.ExtendedVersionHandlers
if __name__ == "__main__":
for submodule in inspect.getmembers(pyeq2.Models_2D):
if inspect.ismodule(submodule[1]):
for equationClass in inspect.getmembers(submodule[1]):
if inspect.isclass(equationClass[1]):
for extendedVersionName in ['Default', 'Offset']:
if (-1 != extendedVersionName.find('Offset')) and (equationClass[1].autoGenerateOffsetForm == False):
continue
equation = equationClass[1]('SSQABS', extendedVersionName)
print '2D ' + submodule[0] + ' --- ' + equation.GetDisplayName()
print 'Done.' | bsd-3-clause | 7,471,303,952,131,567,000 | 43.5 | 125 | 0.564199 | false | 3.797153 | false | false | false |
backmari/moose | python/peacock/Input/ParamsByType.py | 1 | 5164 | from PyQt5.QtWidgets import QWidget, QComboBox, QStackedWidget
from PyQt5.QtCore import pyqtSignal
from peacock.base.MooseWidget import MooseWidget
from peacock.utils import WidgetUtils
from ParamsByGroup import ParamsByGroup
class ParamsByType(QWidget, MooseWidget):
"""
Has a QComboBox for the different allowed types.
On switching type a new ParamsByGroup is shown.
"""
needBlockList = pyqtSignal(list)
blockRenamed = pyqtSignal(object, str)
changed = pyqtSignal()
def __init__(self, block, **kwds):
"""
Constructor.
Input:
block[BlockInfo]: The block to show.
"""
super(ParamsByType, self).__init__(**kwds)
self.block = block
self.combo = QComboBox()
self.types = []
self.type_params_map = {}
self.table_stack = QStackedWidget()
self.type_table_map = {}
for t in sorted(self.block.types.keys()):
self.types.append(t)
params_list = []
for p in self.block.parameters_list:
params_list.append(self.block.parameters[p])
t_block = self.block.types[t]
for p in t_block.parameters_list:
params_list.append(t_block.parameters[p])
self.type_params_map[t] = params_list
self.combo.addItems(sorted(self.block.types.keys()))
self.combo.currentTextChanged.connect(self.setBlockType)
self.top_layout = WidgetUtils.addLayout(vertical=True)
self.top_layout.addWidget(self.combo)
self.top_layout.addWidget(self.table_stack)
self.setLayout(self.top_layout)
self.user_params = []
self.setDefaultBlockType()
self.setup()
def _syncUserParams(self, current, to):
"""
Sync user added parameters that are on the main block into
each type ParamsByGroup.
Input:
current[ParamsByGroup]: The current group parameter table
to[ParamsByGroup]: The new group parameter table
"""
ct = current.findTable("Main")
tot = to.findTable("Main")
if not ct or not tot or ct == tot:
return
# first remove user params in tot
tot.removeUserParams()
params = ct.getUserParams()
tot.addUserParams(params)
idx = ct.findRow("Name")
if idx >= 0:
name = ct.item(idx, 1).text()
idx = tot.findRow("Name")
if idx >= 0:
tot.item(idx, 1).setText(name)
def currentType(self):
return self.combo.currentText()
def save(self):
"""
        Look at the user params in self.block.parameters,
        update the type tables, and save the selected type on the block.
"""
t = self.getTable()
if t:
t.save()
self.block.setBlockType(self.combo.currentText())
def reset(self):
t = self.getTable()
t.reset()
def getOrCreateTypeTable(self, type_name):
"""
        Gets the table for the type name, creating it if it doesn't exist.
Input:
type_name[str]: Name of the type
Return:
ParamsByGroup: The parameters corresponding to the type
"""
t = self.type_table_map.get(type_name)
if t:
return t
t = ParamsByGroup(self.block, self.type_params_map.get(type_name, self.block.orderedParameters()))
t.needBlockList.connect(self.needBlockList)
t.blockRenamed.connect(self.blockRenamed)
t.changed.connect(self.changed)
self.type_table_map[type_name] = t
self.table_stack.addWidget(t)
return t
def setDefaultBlockType(self):
param = self.block.getParamInfo("type")
if param and param.value:
self.setBlockType(param.value)
elif self.block.types:
self.setBlockType(sorted(self.block.types.keys())[0])
def setBlockType(self, type_name):
if type_name not in self.block.types:
return
t = self.getOrCreateTypeTable(type_name)
t.updateWatchers()
self.combo.blockSignals(True)
self.combo.setCurrentText(type_name)
self.combo.blockSignals(False)
t.updateType(type_name)
current = self.table_stack.currentWidget()
self._syncUserParams(current, t)
self.table_stack.setCurrentWidget(t)
self.changed.emit()
def addUserParam(self, param):
t = self.table_stack.currentWidget()
t.addUserParam(param)
def setWatchedBlockList(self, path, children):
for i in range(self.table_stack.count()):
t = self.table_stack.widget(i)
t.setWatchedBlockList(path, children)
def updateWatchers(self):
for i in range(self.table_stack.count()):
t = self.table_stack.widget(i)
t.updateWatchers()
def getTable(self):
return self.table_stack.currentWidget()
def paramValue(self, name):
for i in range(self.table_stack.count()):
t = self.table_stack.widget(i)
if t.paramValue(name):
return t.paramValue(name)
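# Editor's note (illustrative, not part of the original module): a ParamsByType
# widget is normally created by peacock's block editor from a BlockInfo whose
# `types` mapping is populated, roughly:
#
#     app = QApplication([])            # from PyQt5.QtWidgets import QApplication
#     widget = ParamsByType(block_info) # block_info: a peacock BlockInfo object
#     widget.setBlockType("SomeType")
#     widget.save()                     # writes the chosen type back to the block
#
# The names `block_info` and "SomeType" are assumptions for illustration only.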
| lgpl-2.1 | -7,038,259,173,053,225,000 | 32.532468 | 106 | 0.599923 | false | 3.891485 | false | false | false |
eharney/cinder | cinder/volume/targets/tgt.py | 1 | 13042 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import textwrap
import time
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import fileutils
from cinder import exception
from cinder import utils
from cinder.volume.targets import iscsi
LOG = logging.getLogger(__name__)
class TgtAdm(iscsi.ISCSITarget):
"""Target object for block storage devices.
Base class for target object, where target
is data transport mechanism (target) specific calls.
This includes things like create targets, attach, detach
etc.
"""
VOLUME_CONF = textwrap.dedent("""
<target %(name)s>
backing-store %(path)s
driver %(driver)s
%(chap_auth)s
%(target_flags)s
write-cache %(write_cache)s
</target>
""")
def _get_target(self, iqn):
(out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line:
parsed = line.split()
tid = parsed[1]
return tid[:-1]
return None
def _verify_backing_lun(self, iqn, tid):
backing_lun = True
capture = False
target_info = []
(out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
lines = out.split('\n')
for line in lines:
if iqn in line and "Target %s" % tid in line:
capture = True
if capture:
target_info.append(line)
if iqn not in line and 'Target ' in line:
capture = False
if ' LUN: 1' not in target_info:
backing_lun = False
return backing_lun
def _recreate_backing_lun(self, iqn, tid, name, path):
        LOG.warning('Attempting to recreate the backing lun...')
# Since we think the most common case of this is a dev busy
# (create vol from snapshot) we're going to add a sleep here
# this will hopefully give things enough time to stabilize
# how long should we wait?? I have no idea, let's go big
# and error on the side of caution
time.sleep(10)
(out, err) = (None, None)
try:
(out, err) = utils.execute('tgtadm', '--lld', 'iscsi',
'--op', 'new', '--mode',
'logicalunit', '--tid',
tid, '--lun', '1', '-b',
path, run_as_root=True)
except putils.ProcessExecutionError as e:
LOG.error("Failed recovery attempt to create "
"iscsi backing lun for Volume "
"ID:%(vol_id)s: %(e)s",
{'vol_id': name, 'e': e})
finally:
LOG.debug('StdOut from recreate backing lun: %s', out)
LOG.debug('StdErr from recreate backing lun: %s', err)
def _get_iscsi_target(self, context, vol_id):
return 0
def _get_target_and_lun(self, context, volume):
lun = 1 # For tgtadm the controller is lun 0, dev starts at lun 1
iscsi_target = 0 # NOTE(jdg): Not used by tgtadm
return iscsi_target, lun
@utils.retry(putils.ProcessExecutionError)
def _do_tgt_update(self, name):
(out, err) = utils.execute('tgt-admin', '--update', name,
run_as_root=True)
LOG.debug("StdOut from tgt-admin --update: %s", out)
LOG.debug("StdErr from tgt-admin --update: %s", err)
@utils.retry(exception.NotFound)
def create_iscsi_target(self, name, tid, lun, path,
chap_auth=None, **kwargs):
# Note(jdg) tid and lun aren't used by TgtAdm but remain for
# compatibility
# NOTE(jdg): Remove this when we get to the bottom of bug: #1398078
# for now, since we intermittently hit target already exists we're
# adding some debug info to try and pinpoint what's going on
(out, err) = utils.execute('tgtadm',
'--lld',
'iscsi',
'--op',
'show',
'--mode',
'target',
run_as_root=True)
LOG.debug("Targets prior to update: %s", out)
fileutils.ensure_tree(self.volumes_dir)
vol_id = name.split(':')[1]
write_cache = self.configuration.get('iscsi_write_cache', 'on')
driver = self.iscsi_protocol
chap_str = ''
if chap_auth is not None:
chap_str = 'incominguser %s %s' % chap_auth
target_flags = self.configuration.get('iscsi_target_flags', '')
if target_flags:
target_flags = 'bsoflags ' + target_flags
volume_conf = self.VOLUME_CONF % {
'name': name, 'path': path, 'driver': driver,
'chap_auth': chap_str, 'target_flags': target_flags,
'write_cache': write_cache}
LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id)
volumes_dir = self.volumes_dir
volume_path = os.path.join(volumes_dir, vol_id)
if os.path.exists(volume_path):
LOG.debug(('Persistence file already exists for volume, '
'found file at: %s'), volume_path)
utils.robust_file_write(volumes_dir, vol_id, volume_conf)
LOG.debug(('Created volume path %(vp)s,\n'
'content: %(vc)s'),
{'vp': volume_path, 'vc': volume_conf})
old_persist_file = None
old_name = kwargs.get('old_name', None)
if old_name is not None:
LOG.debug('Detected old persistence file for volume '
'%(vol)s at %(old_name)s',
{'vol': vol_id, 'old_name': old_name})
old_persist_file = os.path.join(volumes_dir, old_name)
try:
# With the persistent tgts we create them
# by creating the entry in the persist file
# and then doing an update to get the target
# created.
self._do_tgt_update(name)
except putils.ProcessExecutionError as e:
if "target already exists" in e.stderr:
# Adding the additional Warning message below for a clear
# ER marker (Ref bug: #1398078).
LOG.warning('Could not create target because '
'it already exists for volume: %s', vol_id)
LOG.debug('Exception was: %s', e)
else:
LOG.error("Failed to create iscsi target for Volume "
"ID: %(vol_id)s: %(e)s",
{'vol_id': vol_id, 'e': e})
# Don't forget to remove the persistent file we created
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
# Grab targets list for debug
# Consider adding a check for lun 0 and 1 for tgtadm
# before considering this as valid
(out, err) = utils.execute('tgtadm',
'--lld',
'iscsi',
'--op',
'show',
'--mode',
'target',
run_as_root=True)
LOG.debug("Targets after update: %s", out)
iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
tid = self._get_target(iqn)
if tid is None:
LOG.warning("Failed to create iscsi target for Volume "
"ID: %(vol_id)s. It could be caused by problem "
"with concurrency. "
"Also please ensure your tgtd config "
"file contains 'include %(volumes_dir)s/*'",
{'vol_id': vol_id,
'volumes_dir': volumes_dir, })
raise exception.NotFound()
# NOTE(jdg): Sometimes we have some issues with the backing lun
# not being created, believe this is due to a device busy
# or something related, so we're going to add some code
# here that verifies the backing lun (lun 1) was created
# and we'll try and recreate it if it's not there
if not self._verify_backing_lun(iqn, tid):
try:
self._recreate_backing_lun(iqn, tid, name, path)
except putils.ProcessExecutionError:
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
# Finally check once more and if no go, fail and punt
if not self._verify_backing_lun(iqn, tid):
os.unlink(volume_path)
raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
if old_persist_file is not None:
fileutils.delete_if_exists(old_persist_file)
return tid
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
LOG.info('Removing iscsi_target for Volume ID: %s', vol_id)
vol_uuid_file = vol_name
volume_path = os.path.join(self.volumes_dir, vol_uuid_file)
if not os.path.exists(volume_path):
LOG.warning('Volume path %s does not exist, '
'nothing to remove.', volume_path)
return
if os.path.isfile(volume_path):
iqn = '%s%s' % (self.iscsi_target_prefix,
vol_uuid_file)
else:
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
try:
# NOTE(vish): --force is a workaround for bug:
# https://bugs.launchpad.net/cinder/+bug/1159948
utils.execute('tgt-admin',
'--force',
'--delete',
iqn,
run_as_root=True)
except putils.ProcessExecutionError as e:
non_fatal_errors = ("can't find the target",
"access control rule does not exist")
if any(error in e.stderr for error in non_fatal_errors):
LOG.warning("Failed target removal because target or "
"ACL's couldn't be found for iqn: %s.", iqn)
else:
LOG.error("Failed to remove iscsi target for Volume "
"ID: %(vol_id)s: %(e)s",
{'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
# NOTE(jdg): There's a bug in some versions of tgt that
# will sometimes fail silently when using the force flag
# https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343
# For now work-around by checking if the target was deleted,
# if it wasn't, try again without the force.
        # This will NOT do any good for the case of multiple sessions
        # which the force was added for, but it will however address
# the cases pointed out in bug:
# https://bugs.launchpad.net/cinder/+bug/1304122
if self._get_target(iqn):
try:
LOG.warning('Silent failure of target removal '
'detected, retry....')
utils.execute('tgt-admin',
'--delete',
iqn,
run_as_root=True)
except putils.ProcessExecutionError as e:
LOG.error("Failed to remove iscsi target for Volume "
"ID: %(vol_id)s: %(e)s",
{'vol_id': vol_id, 'e': e})
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
# NOTE(jdg): This *should* be there still but incase
# it's not we don't care, so just ignore it if was
# somehow deleted between entry of this method
# and here
if os.path.exists(volume_path):
os.unlink(volume_path)
else:
LOG.debug('Volume path %s not found at end, '
'of remove_iscsi_target.', volume_path)
| apache-2.0 | 3,726,398,437,732,719,000 | 40.403175 | 78 | 0.523463 | false | 4.252364 | false | false | false |
noogel/xyzStudyPython | python/qqwry/qq_ip_query.py | 1 | 9212 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
import os
import sys, _socket, mmap
from struct import unpack, pack
DataFileName = "qq_ip_database.Dat"
def _ip2ulong(ip):
'''ip(0.0.0.0) -> unsigned long'''
return unpack('>L', _socket.inet_aton(ip))[0]
def _ulong2ip(ip):
'''unsigned long -> ip(0.0.0.0)'''
return _socket.inet_ntoa(pack('>L', ip))
class QQIpQueryBase:
'''
    QQIpQueryBase provides the basic lookup functionality.
    Note that the returned country and area strings are not decoded: they are GB-encoded
    for the Simplified Chinese database and BIG5-encoded for the Traditional Chinese one.
'''
class ipInfo(tuple):
'''
        A convenience class for printing IP information.
ipInfo((sip, eip, country, area)) -> ipInfo object
'''
def __str__(self):
return str(self[0]).ljust(16) + ' - ' + str(self[1]).rjust(16) + ' ' + self[2] + self[3]
def normalize(self):
'''
            Convert the IP addresses to dotted-decimal notation.
'''
return QQIpQueryBase.ipInfo((_ulong2ip(self[0]), _ulong2ip(self[1]), self[2], self[3]))
def __init__(self, dbfile):
'''
QQIpQueryBase(dbfile) -> QQIpQueryBase object
        dbfile is the file object of the database file.
'''
self.f = dbfile
self.f.seek(0)
        self.indexBaseOffset = unpack('<L', self.f.read(4))[0]  # base offset of the index area
        self.Count = (unpack('<L', self.f.read(4))[0] - self.indexBaseOffset) / 7  # number of index entries - 1
def Lookup(self, ip):
'''
        x.Lookup(ip) -> (sip, eip, country, area) Look up the record for the given IP.
        ip, sip and eip are IP strings in dotted-decimal notation.
        sip and eip are the start and end IPs of the IP range that contains ip.
'''
return self.nLookup(_ip2ulong(ip))
def nLookup(self, ip):
'''
        x.nLookup(ip) -> (sip, eip, country, area) Look up the record for the given IP.
        ip is an unsigned long IP address.
        Otherwise identical to x.Lookup(ip).
'''
si = 0
ei = self.Count
if ip < self._readIndex(si)[0]:
raise StandardError('IP NOT Found.')
elif ip >= self._readIndex(ei)[0]:
si = ei
else: # keep si <= ip < ei
while (si + 1) < ei:
mi = (si + ei) // 2
if self._readIndex(mi)[0] <= ip:
si = mi
else:
ei = mi
ipinfo = self[si]
if ip > ipinfo[1]:
raise StandardError('IP NOT Found.')
else:
return ipinfo
def __str__(self):
tmp = []
tmp.append('RecCount:')
tmp.append(str(len(self)))
tmp.append('\nVersion:')
tmp.extend(self[self.Count].normalize()[2:])
return ''.join(tmp)
def __len__(self):
return self.Count + 1
def __getitem__(self, key):
'''
x[key]
        If key is an integer, return the key-th record (counting from 0; note this is
        not the same as x.nLookup(ip)).
        If key is a dotted-decimal IP string, behave like x.Lookup(key).
'''
if type(key) == type(0):
if (key >= 0) and (key <= self.Count):
index = self._readIndex(key)
sip = index[0]
self.f.seek(index[1])
eip = unpack('<L', self.f.read(4))[0]
(country, area) = self._readRec()
return QQIpQueryBase.ipInfo((sip, eip, country, area))
else:
raise KeyError('INDEX OUT OF RANGE.')
elif type(key) == type(''):
try:
return self.Lookup(key).normalize()
except StandardError, e:
if e.message == 'IP NOT Found.':
raise KeyError('IP NOT Found.')
else:
raise e
else:
raise TypeError('WRONG KEY TYPE.')
def __iter__(self):
        '''Return an iterator (generator).'''
for i in range(0, len(self)):
yield self[i]
def _read3ByteOffset(self):
        '''_read3ByteOffset() -> unsigned long Read a 3-byte offset from file f.'''
return unpack('<L', self.f.read(3) + '\x00')[0]
def _readCStr(self):
        '''x._readCStr() -> string Read a '\0'-terminated string.'''
if self.f.tell() == 0:
return 'Unknown'
tmp = []
ch = self.f.read(1)
while ch != '\x00':
tmp.append(ch)
ch = self.f.read(1)
return ''.join(tmp)
def _readIndex(self, n):
        '''x._readIndex(n) -> (ip, offset) Read the n-th index entry.'''
self.f.seek(self.indexBaseOffset + 7 * n)
return unpack('<LL', self.f.read(7) + '\x00')
def _readRec(self, onlyOne=False):
        '''x._readRec() -> (country, area) Read the information of a record.'''
mode = unpack('B', self.f.read(1))[0]
if mode == 0x01:
rp = self._read3ByteOffset()
bp = self.f.tell()
self.f.seek(rp)
result = self._readRec(onlyOne)
self.f.seek(bp)
return result
elif mode == 0x02:
rp = self._read3ByteOffset()
bp = self.f.tell()
self.f.seek(rp)
result = self._readRec(True)
self.f.seek(bp)
if not onlyOne:
result.append(self._readRec(True)[0])
return result
else: # string
self.f.seek(-1, 1)
result = [self._readCStr()]
if not onlyOne:
result.append(self._readRec(True)[0])
return result
pass # End of class QQIpQueryBase
class QQIpQuery(QQIpQueryBase):
    '''The QQIpQuery class.'''
def __init__(self, filename='qq_ip_database.Dat'):
'''QQIpQuery(filename) -> QQIpQuery object
        filename is the database file name.
'''
f = open(filename, 'rb')
QQIpQueryBase.__init__(self, f)
class MQQIpQuery(QQIpQueryBase):
    '''The MQQIpQuery class.
    A QQIpQuery variant that loads the database into memory; lookups are roughly twice as fast.
'''
def __init__(self, filename=DataFileName, dbfile=None):
'''MQQIpQuery(filename[,dbfile]) -> MQQIpQuery object
        filename is the database file name.
        A dbfile file object may also be supplied directly; in that case filename is ignored.
'''
if dbfile == None:
try:
UPLOAD_DIR = os.path.dirname(os.path.realpath(__file__))
filename = '%s/%s' % (UPLOAD_DIR, filename)
dbf = open(filename, 'rb')
except IOError:
print "ERROR:", filename, "is not exist!"
sys.exit(1)
else:
dbf = dbfile
bp = dbf.tell()
dbf.seek(0)
QQIpQueryBase.__init__(self, mmap.mmap(dbf.fileno(), 0, access=1))
dbf.seek(bp)
def _readCStr(self):
        '''x._readCStr() -> string Read a '\0'-terminated string.'''
pstart = self.f.tell()
if pstart == 0:
return 'unknown'
else:
pend = self.f.find('\x00', pstart)
if pend < 0:
raise StandardError('Fail To Read CStr.')
else:
self.f.seek(pend + 1)
return self.f[pstart:pend].decode('GBK').encode('UTF-8')
def _readIndex(self, n):
        '''x._readIndex(n) -> (ip, offset) Read the n-th index entry.'''
startp = self.indexBaseOffset + 7 * n
return unpack('<LL', self.f[startp:startp + 7] + '\x00')
# def get_ip_list(ip_list):
# try:
# Q = MQQIpQuery()
# results = []
#
# print ip_list
# for item in ip_list:
# address = str(item['address'])
# count = item['count']
# result = {}
# result['address'] = address
# result['region'] = ''.join(Q[address][2:])
# result['type'] = ''.join(Q[address][3:])
# result['count'] = count
# print "result:", result
# results.append(result)
# print results
# return results
# except Exception, err:
# print err
# return None
if __name__ == '__main__':
try:
        Q = MQQIpQuery()  # the database file is ./qq_ip_database.Dat
if len(sys.argv) == 1:
print Q
if len(sys.argv) == 2:
            if sys.argv[1] == '-':  # a single "-" argument: read the IP from standard input
print ''.join(Q[raw_input()][2:])
            elif sys.argv[1] in ('all', '-a', '-all'):  # example of iterating over all records
for i in Q:
print i.normalize()
            else:  # a single IP argument: print only brief information
print ''.join(Q[sys.argv[1]][2:]).decode("utf-8")
else:
for i in sys.argv[1:]:
print Q[i]
except StandardError, e:
if e.message != '':
print e.message
else:
raise e
finally:
pass
| apache-2.0 | 2,556,463,129,593,330,700 | 29.101449 | 100 | 0.463653 | false | 3.053718 | false | false | false |