Dataset schema (one record per source file; field, dtype, observed range):

  repo_name        stringlengths    5 to 92
  path             stringlengths    4 to 221
  copies           stringclasses    19 values
  size             stringlengths    4 to 6
  content          stringlengths    766 to 896k
  license          stringclasses    15 values
  hash             int64            -9,223,277,421,539,062,000 to 9,223,102,107B
  line_mean        float64          6.51 to 99.9
  line_max         int64            32 to 997
  alpha_frac       float64          0.25 to 0.96
  autogenerated    bool             1 class
  ratio            float64          1.5 to 13.6
  config_test      bool             2 classes
  has_no_keywords  bool             2 classes
  few_assignments  bool             1 class
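The field names and the stringlengths/stringclasses summaries above read like a Hugging Face `datasets` schema as reported by its dataset viewer. As a minimal sketch (an assumption, not something stated on this page), the same schema could be declared with the `datasets` library roughly as follows; the per-field comments are inferred from the sample records below, and mapping every string-like column to Value("string") is likewise an assumption.

# Hedged sketch: one way to declare the schema above with the Hugging Face
# `datasets` library. Field names come from the listing; the comments are
# inferences from the sample records, not documented definitions.
from datasets import Features, Value

code_features = Features({
    "repo_name": Value("string"),       # e.g. "nedbat/zellij"
    "path": Value("string"),            # file path inside the repository
    "copies": Value("string"),          # duplicate count, stored as a string
    "size": Value("string"),            # content length in bytes, stored as a string
    "content": Value("string"),         # full source text of the file
    "license": Value("string"),         # e.g. "apache-2.0", "mit", "gpl-3.0"
    "hash": Value("int64"),             # content hash
    "line_mean": Value("float64"),      # apparently the mean line length
    "line_max": Value("int64"),         # apparently the longest line length
    "alpha_frac": Value("float64"),     # apparently the alphabetic-character fraction
    "autogenerated": Value("bool"),     # heuristic quality flags follow
    "ratio": Value("float64"),
    "config_test": Value("bool"),
    "has_no_keywords": Value("bool"),
    "few_assignments": Value("bool"),
})

Each block of fields below is then one such record, with the content field holding the full source of the file.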
skandavivek/tracking-cars-highway
lk_track2.py
1
3415
#!/usr/bin/env python ''' Lucas-Kanade tracker ==================== Lucas-Kanade algorithm to track cars on a highway and save output lk_track2.py [<video_source>] ESC - exit ''' from __future__ import print_function import numpy as np import cv2 #import video from common import anorm2, draw_str from time import clock if __name__ == '__main__': import sys video_src='./cabrillo-1.asf' count=0 save=np.ndarray(shape=(1, 5), dtype=np.float) c1=np.ndarray(shape=(1, 5), dtype=np.float) print(__doc__) lk_params = dict( winSize = (15, 15), maxLevel = 2, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)) feature_params = dict( maxCorners = 500, qualityLevel = 0.3, minDistance = 25, blockSize = 25 ) #track_len = 50 detect_interval = 1 tracks = [] cam = cv2.VideoCapture(video_src) frame_idx = 0 while True: ret, frame = cam.read() frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) vis = frame.copy() if len(tracks) > 0: img0, img1 = prev_gray, frame_gray p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2) p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) d = abs(p0-p0r).reshape(-1, 2).max(-1) good = d < 1 new_tracks = [] for tr, (x, y), (dx, dy),good_flag in zip(tracks, p1.reshape(-1, 2), (p1-p0).reshape(-1, 2),good): if not good_flag: continue if y>200 and y<350 and x>300 and 500*x-500*y<125000 and np.sqrt(dx**2+dy**2)>.1: #which part of the road to track tr.append((x, y)) c1[:,0]=x c1[:,1]=y c1[:,2]=dx c1[:,3]=dy c1[:,4]=count save=np.r_[save,c1] new_tracks.append(tr) cv2.circle(vis, (x, y), 3, (0, 0, 255), -1) #if len(tr) > track_len: #del tr[0] tracks = new_tracks cv2.polylines(vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0),2) #cv2.line(vis,(750,500),(250,0),(0,255,0),3) nc=len(tracks)/6 draw_str(vis, (20, 20), 'track count: %d' % nc) if frame_idx % detect_interval == 0: mask = np.zeros_like(frame_gray) mask[:] = 255 for x, y in [np.int32(tr[-1]) for tr in tracks]: cv2.circle(mask, (x, y), 50, 0, -1) p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params) if p is not None: for x, y in np.float32(p).reshape(-1, 2): tracks.append([(x, y)]) frame_idx += 1 prev_gray = frame_gray cv2.imshow('lk_track', vis) cv2.imwrite('./output-lk/'+str(count)+'.jpg', vis) print(count) count=count+1 ch = 0xFF & cv2.waitKey(1) if count==int(cam.get(cv2.CAP_PROP_FRAME_COUNT))-1: np.savetxt('cabrillo-1-lk.txt',save,fmt='%9.3f') if ch == 27: break cv2.destroyAllWindows()
apache-2.0
-1,440,188,667,553,131,300
32.480392
119
0.489312
false
3.115876
false
false
false
windelbouwman/ppci-mirror
ppci/cli/yacc.py
1
1231
""" Parser generator utility. This script can generate a python script from a grammar description. Invoke the script on a grammar specification file: .. code:: $ ppci-yacc test.x -o test_parser.py And use the generated parser by deriving a user class: .. code:: import test_parser class MyParser(test_parser.Parser): pass p = MyParser() p.parse() Alternatively you can load the parser on the fly: .. code:: import yacc parser_mod = yacc.load_as_module('mygrammar.x') class MyParser(parser_mod.Parser): pass p = MyParser() p.parse() """ import argparse from .base import base_parser, LogSetup from ..lang.tools.yacc import transform parser = argparse.ArgumentParser( description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, parents=[base_parser], ) parser.add_argument( "source", type=argparse.FileType("r"), help="the parser specification" ) parser.add_argument( "-o", "--output", type=argparse.FileType("w"), required=True ) def yacc(args=None): args = parser.parse_args(args) with LogSetup(args): transform(args.source, args.output) args.output.close() if __name__ == "__main__": yacc()
bsd-2-clause
-3,877,802,558,502,337,000
18.854839
74
0.669374
false
3.620588
false
false
false
no-net/gr-winelo
python/channel/models/const_multi_cc.py
1
1510
from gnuradio import gr


class const_multi_cc(gr.hier_block2):
    """ Constant channel model. """
    def __init__(self, tx_id, rx_id,
                 k11=0.0, k12=1.0, k13=1.0,
                 k21=1.0, k22=0.0, k23=1.0,
                 k31=1.0, k32=1.0, k33=0.0):
        gr.hier_block2.__init__(
            self,
            "No HW model",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
        )
        ##################################################
        # Parameters
        ##################################################
        # Use Symmetric channels for this model
        #k21 = k12
        #k31 = k13
        #k32 = k23
        # No self-coupling
        #k11 = k22 = k33 = 0
        # Build the channel matrix
        self.k = [[k11, k12, k13],
                  [k21, k22, k23],
                  [k31, k32, k33]]

        ##################################################
        # Blocks
        ##################################################
        self.multiply = gr.multiply_const_cc(self.k[tx_id - 1][rx_id - 1])
        print "[INFO] WiNeLo - Channel model: Setting k = %s for clients %s "\
              "and %s" % (self.k[tx_id - 1][rx_id - 1], tx_id, rx_id)

        ##################################################
        # Connections
        ##################################################
        self.connect((self, 0), (self.multiply, 0))
        self.connect((self.multiply, 0), (self, 0))
gpl-3.0
-3,393,310,703,416,812,000
34.952381
78
0.368212
false
3.710074
false
false
false
fnp/wolnelektury
src/social/migrations/0012_auto_20210120_1444.py
1
1182
# Generated by Django 2.2.16 on 2021-01-20 13:44

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('social', '0011_auto_20190807_1056'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='carousel',
            options={'verbose_name': 'carousel', 'verbose_name_plural': 'carousels'},
        ),
        migrations.AddField(
            model_name='carousel',
            name='language',
            field=models.CharField(blank=True, choices=[('de', 'Deutsch'), ('en', 'English'), ('es', 'español'), ('fr', 'français'), ('it', 'italiano'), ('lt', 'lietuvių'), ('pl', 'polski'), ('ru', 'русский'), ('uk', 'українська')], default='', max_length=2, verbose_name='language'),
        ),
        migrations.AddField(
            model_name='carousel',
            name='priority',
            field=models.SmallIntegerField(default=0, verbose_name='priority'),
        ),
        migrations.AlterField(
            model_name='carousel',
            name='slug',
            field=models.SlugField(choices=[('main', 'main')], verbose_name='placement'),
        ),
    ]
agpl-3.0
3,994,508,156,126,134,000
35.3125
284
0.557659
false
3.642633
false
false
false
yeming233/rally
rally/plugins/openstack/verification/tempest/config.py
1
9370
# Copyright 2014: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import inspect import os from oslo_config import cfg import six from six.moves import configparser from six.moves.urllib import parse from rally.common import logging from rally import exceptions from rally.verification import utils CONF = cfg.CONF LOG = logging.getLogger(__name__) class TempestConfigfileManager(object): """Class to create a Tempest config file.""" def __init__(self, deployment): self.credential = deployment.get_credentials_for("openstack")["admin"] self.clients = self.credential.clients() self.available_services = self.clients.services().values() self.conf = configparser.ConfigParser() def _get_service_type_by_service_name(self, service_name): for s_type, s_name in self.clients.services().items(): if s_name == service_name: return s_type def _configure_auth(self, section_name="auth"): self.conf.set(section_name, "admin_username", self.credential.username) self.conf.set(section_name, "admin_password", self.credential.password) self.conf.set(section_name, "admin_project_name", self.credential.tenant_name) # Keystone v3 related parameter self.conf.set(section_name, "admin_domain_name", self.credential.user_domain_name or "Default") # Sahara has two service types: 'data_processing' and 'data-processing'. # 'data_processing' is deprecated, but it can be used in previous OpenStack # releases. So we need to configure the 'catalog_type' option to support # environments where 'data_processing' is used as service type for Sahara. 
def _configure_data_processing(self, section_name="data-processing"): if "sahara" in self.available_services: self.conf.set(section_name, "catalog_type", self._get_service_type_by_service_name("sahara")) def _configure_identity(self, section_name="identity"): self.conf.set(section_name, "region", self.credential.region_name) # discover keystone versions def get_versions(auth_url): from keystoneauth1 import discover from keystoneauth1 import session temp_session = session.Session( verify=(self.credential.https_cacert or not self.credential.https_insecure), timeout=CONF.openstack_client_http_timeout) data = discover.Discover(temp_session, auth_url).version_data() return dict([(v["version"][0], v["url"]) for v in data]) # check the original auth_url without cropping versioning to identify # the default version versions = get_versions(self.credential.auth_url) cropped_auth_url = self.clients.keystone._remove_url_version() if cropped_auth_url == self.credential.auth_url: # the given auth_url doesn't contain version if set(versions.keys()) == {2, 3}: # ok, both versions of keystone are enabled, we can take urls # there uri = versions[2] uri_v3 = versions[3] target_version = 3 elif set(versions.keys()) == {2} or set(versions.keys()) == {3}: # only one version is available while discovering, let's just # guess the second auth_url (it should not be used) # get the most recent version target_version = sorted(versions.keys())[-1] if target_version == 2: uri = self.credential.auth_url uri_v3 = parse.urljoin(uri, "/v3") else: uri_v3 = self.credential.auth_url uri = parse.urljoin(uri_v3, "/v2.0") else: # Does Keystone released new version of API ?! LOG.debug("Discovered keystone versions: %s", versions) raise exceptions.RallyException("Failed to discover keystone " "auth urls.") else: if self.credential.auth_url.rstrip("/").endswith("v2.0"): uri = self.credential.auth_url uri_v3 = uri.replace("/v2.0", "/v3") target_version = 2 else: uri_v3 = self.credential.auth_url uri = uri_v3.replace("/v3", "/v2.0") target_version = 3 self.conf.set(section_name, "auth_version", "v%s" % target_version) self.conf.set(section_name, "uri", uri) self.conf.set(section_name, "uri_v3", uri_v3) self.conf.set(section_name, "disable_ssl_certificate_validation", str(self.credential.https_insecure)) self.conf.set(section_name, "ca_certificates_file", self.credential.https_cacert) # The compute section is configured in context class for Tempest resources. # Options which are configured there: 'image_ref', 'image_ref_alt', # 'flavor_ref', 'flavor_ref_alt'. 
def _configure_network(self, section_name="network"): if "neutron" in self.available_services: neutronclient = self.clients.neutron() public_nets = [net for net in neutronclient.list_networks()["networks"] if net["status"] == "ACTIVE" and net["router:external"] is True] if public_nets: net_id = public_nets[0]["id"] net_name = public_nets[0]["name"] self.conf.set(section_name, "public_network_id", net_id) self.conf.set(section_name, "floating_network_name", net_name) else: novaclient = self.clients.nova() net_name = next(net.human_id for net in novaclient.networks.list() if net.human_id is not None) self.conf.set("compute", "fixed_network_name", net_name) self.conf.set("validation", "network_for_ssh", net_name) def _configure_network_feature_enabled( self, section_name="network-feature-enabled"): if "neutron" in self.available_services: neutronclient = self.clients.neutron() extensions = neutronclient.list_ext("extensions", "/extensions", retrieve_all=True) aliases = [ext["alias"] for ext in extensions["extensions"]] aliases_str = ",".join(aliases) self.conf.set(section_name, "api_extensions", aliases_str) def _configure_object_storage(self, section_name="object-storage"): self.conf.set(section_name, "operator_role", CONF.tempest.swift_operator_role) self.conf.set(section_name, "reseller_admin_role", CONF.tempest.swift_reseller_admin_role) def _configure_service_available(self, section_name="service_available"): services = ["cinder", "glance", "heat", "ironic", "neutron", "nova", "sahara", "swift"] for service in services: # Convert boolean to string because ConfigParser fails # on attempt to get option with boolean value self.conf.set(section_name, service, str(service in self.available_services)) def _configure_validation(self, section_name="validation"): if "neutron" in self.available_services: self.conf.set(section_name, "connect_method", "floating") else: self.conf.set(section_name, "connect_method", "fixed") def _configure_orchestration(self, section_name="orchestration"): self.conf.set(section_name, "stack_owner_role", CONF.tempest.heat_stack_owner_role) self.conf.set(section_name, "stack_user_role", CONF.tempest.heat_stack_user_role) def create(self, conf_path, extra_options=None): self.conf.read(os.path.join(os.path.dirname(__file__), "config.ini")) for name, method in inspect.getmembers(self, inspect.ismethod): if name.startswith("_configure_"): method() if extra_options: utils.add_extra_options(extra_options, self.conf) with open(conf_path, "w") as configfile: self.conf.write(configfile) raw_conf = six.StringIO() raw_conf.write("# Some empty values of options will be replaced while " "creating required resources (images, flavors, etc).\n") self.conf.write(raw_conf) return raw_conf.getvalue()
apache-2.0
5,861,679,213,484,211,000
43.198113
79
0.594023
false
4.162594
true
false
false
robocomp/robocomp-robolab
components/localization/UWBpublisher/src/genericworker.py
1
4050
#!/usr/bin/python3 # -*- coding: utf-8 -*- # # Copyright (C) 2020 by YOUR NAME HERE # # This file is part of RoboComp # # RoboComp is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # RoboComp is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with RoboComp. If not, see <http://www.gnu.org/licenses/>. import sys, Ice, os from PySide2 import QtWidgets, QtCore ROBOCOMP = '' try: ROBOCOMP = os.environ['ROBOCOMP'] except KeyError: print('$ROBOCOMP environment variable not set, using the default value /opt/robocomp') ROBOCOMP = '/opt/robocomp' preStr = "-I/opt/robocomp/interfaces/ -I"+ROBOCOMP+"/interfaces/ --all /opt/robocomp/interfaces/" Ice.loadSlice(preStr+"CommonBehavior.ice") import RoboCompCommonBehavior additionalPathStr = '' icePaths = [ '/opt/robocomp/interfaces' ] try: SLICE_PATH = os.environ['SLICE_PATH'].split(':') for p in SLICE_PATH: icePaths.append(p) additionalPathStr += ' -I' + p + ' ' icePaths.append('/opt/robocomp/interfaces') except: print('SLICE_PATH environment variable was not exported. Using only the default paths') pass ice_UWBSimple = False for p in icePaths: if os.path.isfile(p+'/UWBSimple.ice'): preStr = "-I/opt/robocomp/interfaces/ -I"+ROBOCOMP+"/interfaces/ " + additionalPathStr + " --all "+p+'/' wholeStr = preStr+"UWBSimple.ice" Ice.loadSlice(wholeStr) ice_UWBSimple = True break if not ice_UWBSimple: print('Couln\'t load UWBSimple') sys.exit(-1) from RoboCompUWB import * class GenericWorker(QtCore.QObject): kill = QtCore.Signal() #Signals for State Machine t_initialize_to_compute = QtCore.Signal() t_initialize_to_finalize = QtCore.Signal() t_compute_to_compute = QtCore.Signal() t_compute_to_finalize = QtCore.Signal() #------------------------- def __init__(self, mprx): super(GenericWorker, self).__init__() self.uwbsimple_proxy = mprx["UWBSimplePub"] self.mutex = QtCore.QMutex(QtCore.QMutex.Recursive) self.Period = 30 self.timer = QtCore.QTimer(self) #State Machine self.defaultMachine= QtCore.QStateMachine() self.compute_state = QtCore.QState(self.defaultMachine) self.initialize_state = QtCore.QState(self.defaultMachine) self.finalize_state = QtCore.QFinalState(self.defaultMachine) #------------------ #Initialization State machine self.initialize_state.addTransition(self.t_initialize_to_compute, self.compute_state) self.initialize_state.addTransition(self.t_initialize_to_finalize, self.finalize_state) self.compute_state.addTransition(self.t_compute_to_compute, self.compute_state) self.compute_state.addTransition(self.t_compute_to_finalize, self.finalize_state) self.compute_state.entered.connect(self.sm_compute) self.initialize_state.entered.connect(self.sm_initialize) self.finalize_state.entered.connect(self.sm_finalize) self.timer.timeout.connect(self.t_compute_to_compute) self.defaultMachine.setInitialState(self.initialize_state) #------------------ #Slots funtion State Machine @QtCore.Slot() def sm_compute(self): print("Error: lack sm_compute in Specificworker") sys.exit(-1) @QtCore.Slot() def sm_initialize(self): print("Error: lack sm_initialize in Specificworker") sys.exit(-1) @QtCore.Slot() def sm_finalize(self): print("Error: 
lack sm_finalize in Specificworker") sys.exit(-1) #------------------------- @QtCore.Slot() def killYourSelf(self): rDebug("Killing myself") self.kill.emit() # \brief Change compute period # @param per Period in ms @QtCore.Slot(int) def setPeriod(self, p): print("Period changed", p) self.Period = p self.timer.start(self.Period)
gpl-3.0
-1,514,458,303,238,705,700
28.136691
106
0.715556
false
3.115385
false
false
false
taiwenko/python
Visa.py
1
3504
#!c:\Python27\python import vxi11Device as vxi11 #import numpy import os,sys, time # FSW #['Rohde&Schwarz', 'FSW-50', '1312.8000K50/100970', '2.10\n'] #inst = rm.open_resource('TCPIP::10.0.0.160::INSTR') #SMW200A #['Rohde&Schwarz', 'SMW200A', '1412.0000K02/101575', '3.1.18.2-3.01.086.171_SP2\n'] #inst = rm.open_resource('TCPIP::10.0.0.225::INSTR') # Anritsu #['"Anritsu', 'MT8221B/31/541/542/546', '1350198', '1.77"'] #inst = rm.open_resource('TCPIP::10.0.0.189::INSTR') #inst = vxi11.Instrument("10.0.0.189") # Agilent Power Supply N6705B #['Agilent Technologies', 'N6705B', 'MY50001691', 'D.01.08\n'] #inst = rm.open_resource('TCPIP::10.0.0.193::INSTR') #inst = vxi11.Instrument("10.0.0.193") # Agilent VSG #['Agilent Technologies', ' E4438C', ' MY45093057', ' C.05.83\n'] #inst = vxi11.Vxi11Device("10.0.0.193","inst0") inst = vxi11.Vxi11Device(host="10.0.0.176",device="inst0") # R&S LTE DEMOD Software #['Rohde&Schwarz', 'K10x', '000000/000', 'Version 3.4 Beta 2\n'] #inst = rm.open_resource('TCPIP::127.0.0.1::INSTR') # JDSU #inst = rm.open_resource('TCPIP::10.0.0.137::INSTR') vxi11.timeout(15000) #idn = inst.query_ascii_values("*IDN?",converter="s") #print idn #quit() #inst.write("CONF:PRES") res = None try: res = inst.ask("*IDN?") except Exception,e: print "FAILED %s"%e print res #quit() def AnritsuMT8221B(): #inst.write("FREQuency:CENTer 2.68GHz") inst.write("FREQuency:CENTer 2.11GHz") inst.write("BANDWidth:RESolution 10") time.sleep(3) inst.write("CONF:RF SUMM") inst.write("CONF:DEMod SUMM") #print(inst.query(":MEAsure:DEMod:AVErage?")) time.sleep(10) #print(inst.query(":FETCh:SUMMary?")) #time.sleep(1) #inst.write("CONF:DEMod SUMM") #time.sleep(10) #print(inst.query(":FETCh:SUMMary?")) #print(inst.write("INIT")) #time.sleep(4) #inst.query(":FETCh:RF:ACLR?") #inst.write("DISP:TRAC:Y:RLEV:OFFS 49") #print(inst.query(":FETCh:SUMMary?")) #EVM (rms) in %, EVM (pk) in %,Ref Signal (RS) Power in dBm, Sync Signal (SS) Power in dBm, Carrier Frequency in MHz, Freq Error in Hz, Freq Error in ppm, the Cell ID, and the number of measurements average for Frequency Error. print(inst.ask(":FETCh:DEMod:CONStln?")) print(inst.ask("FETCh:RF:ACLR?")) def RS_SW(inst): ok = inst.write("CONF:PRES"); inst.write("CONF:LTE:DUP FDD") inst.write("CONF:LTE:LDIR DL") inst.write("FREQ:CENT 2.68GHZ") inst.write("DISP:TRAC:Y:RLEV:OFFS 49") inst.write("CONF:DL:MIMO:CONF TX2") res = dict() retry = 0 print "MEASURE..." run = True while run == True: print(inst.write("INIT")) #inst.write("INIT:REFR") time.sleep(2) retry += 1 stat = inst.query_ascii_values("SYNC:STAT?",converter="b") print("STATUS: ",stat," Retry:", retry) if (stat[0] == 1 & stat[1] == 1 & stat[2] == 1): run = False if retry > 3: print "Cannot Obtain Sync!" raise SystemExit break #for stat #print(stat) res['Power'] = inst.query_ascii_values("FETCh:SUMMary:OSTP?")[0] res['EVM'] = inst.query_ascii_values("FETC:SUMM:EVM?")[0] res['FreqError'] = inst.query_ascii_values("FETC:SUMM:FERR?")[0] res['RSPower'] = inst.query_ascii_values("FETCh:SUMMary:RSTP?")[0] print res print inst.query("SENSe:LTE:ANTenna:SELect?") print inst.query("CONFigure:LTE:DL:CC:SYNC:ANTenna?") #print inst.query("CONF:DL:SUBF2:ALL3:PREC:AP?") #print inst.query("TRACe:DATA?") #print "DONE!" raw_input() #RS_SW(inst) #AnritsuMT8221B()
mit
-6,667,128,671,122,640,000
27.694915
228
0.640982
false
2.26943
false
false
false
anarcher/enso-launcher-continued
enso/commands/suggestions.py
1
18589
# Copyright (c) 2008, Humanized, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of Enso nor the names of its contributors may # be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------------- # # enso.commands.suggestions # # ---------------------------------------------------------------------------- """ Classes for encapsulating suggestions (including auto-completions). """ # ---------------------------------------------------------------------------- # Imports # ---------------------------------------------------------------------------- import re import enso.utils.strings import enso.utils.xml_tools # This is used in loop so better to import the function directly to avoid lookup penalty from enso.utils.xml_tools import escape_xml # ---------------------------------------------------------------------------- # Suggestion Objects # ---------------------------------------------------------------------------- class Suggestion: """ An object the encapsulates a "suggestion". A "suggestion" is essentially a string from a list that is similar to some source string. Suggestion objects keep track of the original source string, and has utility methods for marking-up the suggestion to indicate similarities to the source string (i.e., which characters of the suggestion are the same as the original, which are added, and which are altered). """ def __init__( self, originalText, suggestedText, helpText = None, prefix_end=None, start=None, end=None, suggestedPrefix=None ): """ Initializes the Suggestion: suggestedText is the suggestion for originalText. """ assert isinstance( originalText, basestring ) assert isinstance( suggestedText, basestring ) # The "source" or "original" text is the text that the user # typed. self.__source = originalText # The "suggestion" is the text that very nearly matches # the user's typed text. self.__suggestion = suggestedText self.__suggestedPrefix = suggestedPrefix # The "help" text is text that is not actually part of the # suggestion, per-se, but should be displayed after the # suggestion to indicate that something should follow the # suggestion before it is complete and valid. 
self.__helpText = helpText # The xml representation of this suggestion; will not be # created until requested. self.__xml = None # The completion of the user text to the next word. #self.__completion = None self.__prefix_end = prefix_end self.__start = start self.__end = end # For performance reasons, compute the "nearness" value # and cache it. self._nearness = self.__getNearness() def getHelpText( self ): return self.__helpText """ TODO:This is broken because the __transform() function has been optimized and is not setting __completion variable. It is not used anywhere in the code anyway... def toNextWord( self ): "" Returns the simple string representation of the suggestion, i.e., the exact suggested text. Example: >>> s = Suggestion( 'fo', 'foo bar' ) >>> s.toNextWord() 'foo ' "" if self.__completion is None: self.__transform() return self.__completion """ def toText( self ): """ Returns the simple string representation of the suggestion, i.e., the exact suggested text. Example: >>> s = Suggestion( 'fo', 'foo' ) >>> s.toText() 'foo' """ return self.__suggestion def getSource( self ): """ Returns the "source" string, i.e., the string for which this object is a suggestion. Example: >>> s = Suggestion( 'fo', 'foo' ) >>> s.getSource() 'fo' """ return self.__source def getSuggestedPrefix( self ): """ """ return self.__suggestedPrefix def __getNearness( self ): """ Returns a number between 0 and 1 indicating how near the original string this suggestion is; 0 means totally different, and 1 means exactly the same. NOTE: As long as the return value remains as described, this method may be overridden to implement custom notions of "nearness". """ result = enso.utils.strings.stringRatio( self.__source, self.__suggestion ) assert (result >= 0) and (result <= 1),\ "string-ratio is not between 0 and 1: %0.1f" % result return result def __eq__( self, other ): """ Considers two suggestions to be equal if they suggest the same string. """ if not isinstance( other, Suggestion ): # The other object isn't a Suggestion, so they can't # possibly be equal. return False else: return self.toText() == other.toText() def __ne__( self, other ): """ Considers two suggestions to be unequal if they do not suggest the same text. """ # Simply return the inverse of __eq__ return not self.__eq__( other ) def __cmp__( self, other ): """ Compares two suggestions on the basis of nearness. """ # NOTE: This function is called SO OFTEN, that using getter's # for the nearness values incurs a NOTICEABLE performance # penalty. # Returning the inverse of the value, because 1 is near and 0 # is far. # Original: #return - cmp( self._nearness, other._nearness ) if self._nearness < other._nearness: #IGNORE:W0212 return 1 elif self._nearness > other._nearness: #IGNORE:W0212 return -1 else: # If the nearness is equal, return alphabetical order return cmp(self.__suggestion, other.__suggestion) #IGNORE:W0212 def toXml( self ): """ Transforms the suggestion into a simple XML string. There are three tags: <ins></ins> marks an "insertion", i.e., something added to the original text to make the suggestion. <alt></alt> marks an "alteration", i.e., a substring of the original string was replaced with a new substring to make the suggestion. <help></help> marks a "help" text, i.e., a string that indicates the suggestion should be followed by some additional text; this string is for the user's aid, and is not part of the suggestion proper. Anything not contained in these tags was part of the original text. 
NOTE: The return value does not have a "root" tag, and is therefore not well-formed XML. Here is a simple example using insertions and help text: >>> Suggestion( 'fo', 'foo', 'fooObject' ).toXml() 'fo<ins>o</ins><help>fooObject</help>' Here is a simple example using alterations: >>> Suggestion( 'fog', 'foo' ).toXml() 'fo<alt>o</alt>' The default implementation finds the the largest substring of the original text that (a) includes the first character of the original text and (b) is entirely contained in the suggestion. It then repeats this with the remainder of the original text. So, for instance, if our original text is 'foobar' and our suggestion text is 'foo the bar', the default implementation will first match 'foo' to part of the suggestion; at this point the remainder of the original text will be 'bar', which it will find a substring for in the suggestion text as well. This is shown in the following example: >>> Suggestion( 'foobar', 'foo the bar' ).toXml() 'foo<ins> the </ins>bar' Furthermore, if there is no initial substring of the original text in the suggestion text (i.e., condition 'a' from above) , the first character is removed from the original text and the algorithm proceeds as described above, marking a corresponding part of the suggestion string as an alteration, if applicable: >>> Suggestion( 'zzzfo', 'gfoo' ).toXml() '<alt>g</alt>fo<ins>o</ins>' >>> Suggestion( 'zzzfo', 'foo' ).toXml() 'fo<ins>o</ins>' Finally, if no substring of the original text matches the suggestion text, the entire suggestion text is returned as an alteration: >>> Suggestion( 'zzz', 'defghi' ).toXml() '<alt>defghi</alt>' NOTE: This method is intended to be overriden by subclasses that have specialized ways of determining what was original and what was inserted or altered. """ # This class is read-only; the only "setters" are through the # constructor. If we have previously computed the xml value, # return that cached value. if self.__xml == None: self.__transform() return self.__xml def __transform( self ): if self.__start is not None: #s = escape_xml(self.__suggestion) xmlText = "%s<ins>%s</ins>%s<ins>%s</ins>" % ( escape_xml(self.__suggestion[:self.__prefix_end]), escape_xml(self.__suggestion[self.__prefix_end:self.__prefix_end+self.__start]), escape_xml(self.__suggestion[self.__prefix_end+self.__start:self.__prefix_end+self.__end]), escape_xml(self.__suggestion[self.__prefix_end+self.__end:]) ) if self.__suggestedPrefix and xmlText.startswith(self.__suggestedPrefix): xmlText = "<prefix>%s</prefix>%s" % (escape_xml(self.__suggestedPrefix), xmlText[len(self.__suggestedPrefix):]) # Finally, add help text, if it exists. if self.__helpText is not None: xmlText = "%s<help>%s</help>" % (xmlText, escape_xml(self.__helpText)) self.__xml = xmlText return else: pass # We are going to "use up" both the source string and the # suggestion unusedSource = self.__source[:] unusedSuggestion = self.__suggestion[:] # The xml representation xmlText = "" # The "to the next word" completion. completion = "" # If we cannot match an initial substring of unusedSource, # then we are going to peel off characters one-by-one into # this variable. These characters have been lost in the # suggestion, and will cause "insertions" to instead be # "alterations". unmatchedChars = "" # BEGIN SOURCE-STRING LOOP # Each iteration of this loop should reduce the length of # unusedSource, and this loop ends when unusedSource is empty. while len(unusedSource) > 0: # Save a copy of unusedSource, so we know if it changes. 
oldUnusedSource = unusedSource[:] # Loop from the full length of unusedSource down to one # character for i in range( len(unusedSource), 0, -1 ): # The initial substring we are trying to locate. target = unusedSource[:i] # BEGIN TARGET-FOUND CONDITION if target in unusedSuggestion: # Search normally from begining index = unusedSuggestion.find( target ) # Search on word boundaries. This is different from \b in # that it considers also the underscore character as a word boundary. m = re.match(r".*[^0-9a-zA-Z](%s)" % re.escape(target), unusedSuggestion, re.I) if m and m.groups() and m.start(1) > index: # Prefer word boundary match index = m.start(1) # index, m.start(1) if index > 0: if len(unmatchedChars) > 0: # There were unused characters in the # source, and there were characters in the # unused suggestion before the target, so # the next "inserted" portion of the # suggestion becomes an "alteration" # instead. xmlFormat = "<alt>%s</alt>" else: xmlFormat = "<ins>%s</ins>" xmlText += xmlFormat % escape_xml( unusedSuggestion[:index] ) # NOTE: Do not add inserted characters to the # 'next word' completion. # Whether or not there were characters between # the start of the unused suggestion and "here", # any unmatched chars are now defunct. unmatchedChars = "" xmlText += escape_xml( target ) completion += target unusedSuggestion = unusedSuggestion[index+len(target):] unusedSource = unusedSource[i:] # The target was found and unusedSource was # modified; we exit the for-loop (to be entered # again if unusedSource is still nonempty). break # END TARGET-FOUND CONDITION # Either unusedSource is smaller, or it is the same as # oldUnusedSource. If it is the same as old unusedSource, # then there was no match of a beginning substring, so we # remove the first character and store it as an "unused # character", which will become part of an "altered # substring", if there is a match to a later substring. if unusedSource == oldUnusedSource: unmatchedChars += unusedSource[0] unusedSource = unusedSource[1:] assert len( unusedSource ) < len( oldUnusedSource ), \ "Potential infinite loop condition; failed to reduce"\ " the length of the unused portion of the source string"\ " in toXml()" # END SOURCE-STRING LOOP # The source-string loop above only guarantees to use up the # source string; there may be an unused portion of the # suggestion left. We append it to the xml string as an # insertion (or alteration, if appropriate). if len( unusedSuggestion ) > 0: if len( unmatchedChars ) > 0: format = "<alt>%s</alt>" else: format = "<ins>%s</ins>" unusedXml = escape_xml( unusedSuggestion ) xmlText += format % unusedXml completion += unusedSuggestion.split(" ")[0] if " " in unusedSuggestion: completion += " " # Finally, add the help text, if it exists. if self.__helpText != None: xmlText += "<help>%s</help>" % self.__helpText if self.__suggestedPrefix and xmlText.startswith(self.__suggestedPrefix): xmlText = "<prefix>%s</prefix>%s" % (escape_xml(self.__suggestedPrefix), xmlText[len(self.__suggestedPrefix):]) self.__xml = xmlText #print "COMPLETION: \"%s\"" % completion #self.__completion = completion class AutoCompletion( Suggestion ): """ Encapsulates a single auto-completed suggestion. Basically the same as a suggestion, except that it requires either (1) that each word of the original text be contained in the suggestion, or (2) that the suggestion be empty (indicating a failed autocompletion). 
""" def __init__( self, originalText, suggestedText, helpText=None, prefix_end=None, start=None, end=None ): """ Initializes the AutoCompletion. """ # Enforce the object's preconditions. if len( suggestedText ) > 0: assertionText = "Attempted to create AutoCompletion %s from %s, "\ "but %s was not found." words = originalText.split( " " ) # LONGTERM TODO: Don't handle this as a special case. if words[-1].endswith( "?" ): words[-1] = words[-1][:-1] words.append( "?" ) for word in words: assert word in suggestedText, \ assertionText % ( suggestedText, originalText, word) # The text matches one of the class's two required conditions, # so initialize self as a Suggestion. Suggestion.__init__( self, originalText, suggestedText, helpText, prefix_end, start, end ) def hasCompletion(self): return bool(self.toText())
bsd-3-clause
-5,472,798,759,257,362,000
37.97065
132
0.571467
false
4.644928
false
false
false
ffmmjj/desafio-dados-2016
data_preparation_pipeline/outliers_separation.py
1
1185
import luigi
import pandas as pd

from augment_data import AppendFeaturesAggregatedFromTeachersDatasetToSchool


class SplitSchoolOutliersData(luigi.Task):
    input_task = AppendFeaturesAggregatedFromTeachersDatasetToSchool()

    def requires(self):
        return self.input_task

    def output(self):
        return {'average': luigi.LocalTarget('./dados/2013/TS_ESCOLA_average.csv'),
                'outstanding': luigi.LocalTarget('./dados/2013/TS_ESCOLA_outstanding.csv')}

    def run(self):
        with self.input_task.output().open('r') as fp:
            escolas_pd = pd.read_csv(fp)

        escolas_statistics = escolas_pd['MEDIA_9EF_MT'].describe()
        math_avg, math_std = escolas_statistics.values[1], escolas_statistics.values[2]

        above_two_std_schools_indices = escolas_pd['MEDIA_9EF_MT'] > (math_avg + 2*math_std)
        below_two_std_schools_indices = escolas_pd['MEDIA_9EF_MT'] < (math_avg + 2*math_std)

        with self.output()['average'].open('w') as fp:
            escolas_pd[below_two_std_schools_indices].to_csv(fp)

        with self.output()['outstanding'].open('w') as fp:
            escolas_pd[above_two_std_schools_indices].to_csv(fp)
apache-2.0
-158,811,922,506,397,440
38.533333
92
0.665823
false
3.16
false
false
false
wehlutyk/Watson
scripts/fuzzer.py
1
1043
import arrow
import random

from watson import Watson

watson = Watson(frames=None, current=None)

projects = [
    ("apollo11", ["reactor", "module", "wheels", "steering", "brakes"]),
    ("hubble", ["lens", "camera", "transmission"]),
    ("voyager1", ["probe", "generators", "sensors", "antenna"]),
    ("voyager2", ["probe", "generators", "sensors", "antenna"]),
]

now = arrow.now()

for date in arrow.Arrow.range('day', now.replace(months=-1), now):
    if date.weekday() in (5, 6):
        # Weekend \o/
        continue

    start = date.replace(
        hour=9, minute=random.randint(0, 59), seconds=random.randint(0, 59)
    )

    while start.hour < random.randint(16, 19):
        project, tags = random.choice(projects)
        frame = watson.frames.add(
            project,
            start,
            start.replace(seconds=random.randint(60, 4 * 60 * 60)),
            tags=random.sample(tags, random.randint(0, len(tags)))
        )
        start = frame.stop.replace(seconds=random.randint(0, 1 * 60 * 60))

watson.save()
mit
4,692,943,791,232,215,000
27.972222
75
0.591563
false
3.259375
false
false
false
nedbat/zellij
tests/test_intersection.py
1
1510
from hypothesis import assume, given
from hypothesis.strategies import builds, lists, integers, tuples

from zellij.defuzz import Defuzzer
from zellij.euclid import collinear, Segment, BadGeometry
from zellij.intersection import segment_intersections
from zellij.postulates import all_pairs

nums = integers(min_value=-1000, max_value=1000)
points = tuples(nums, nums)
segments = builds(lambda l: Segment(*l), lists(points, min_size=2, max_size=2, unique=True))


@given(lists(segments, min_size=2, max_size=100, unique=True))
def test_intersections(segments):
    defuzz = Defuzzer().defuzz

    # Check that none of our segment pairs are pathological, and collect the
    # true answers the hard way, by checking pair-wise.
    true = set()
    for s1, s2 in all_pairs(segments):
        try:
            ipt = s1.intersect(s2)
            if ipt is not None:
                true.add(defuzz(ipt))
        except BadGeometry:
            # If two segments don't have an answer, then don't use this test
            # case.
            assume(False)

    # Run the actual function we care about.
    isects = segment_intersections(segments)

    for pt, segs in isects.items():
        # Property: the answer should be in the true answers we found the hard
        # way.
        assert defuzz(pt) in true

        # Property: every intersection should be collinear with the segment it
        # claims to be part of.
        for seg in segs:
            s1, s2 = seg
            assert collinear(s1, pt, s2)
apache-2.0
2,406,871,413,739,147,000
34.116279
92
0.662252
false
3.673966
false
false
false
riscmaster/risc_maap
risc_control/src/circles3_traj.py
2
3789
#!/usr/bin/env python '''====================================================== Created by: D. Spencer Maughan Last updated: July 2015 File name: circles3_traj.py Organization: RISC Lab, Utah State University ======================================================''' import roslib; roslib.load_manifest('ardrone_tutorials') roslib.load_manifest('risc_msgs') import rospy from math import * import numpy as np import time #=======================# # Messages Needed # #=======================# from risc_msgs.msg import * #========================# # Globals # #========================# start_time = 0 pub = rospy.Publisher('trajectory',Trajectories,queue_size = 200) # Trajectory Variables period = 8 # seconds a = 0 b = 0 c = 0 n = 1 w1 = 2*np.pi/period #====================================# # Update and Publish Trajectory # #====================================# def Datahandler(): global start_time, pub, period, a, b, c, n, w1 time_now = rospy.get_time() t = time_now-start_time WP = Trajectories() num_traj = 3 # number of trajectories WP.Obj = [Trajectory()]*num_traj d = 0.5 #Distance from origin #=================# # Trajectory # #=================# traj1 = Trajectory() # Position traj1.x = d*cos(0*np.pi/num_traj)+a*cos(w1*t) traj1.y = d*sin(0*np.pi/num_traj)+b*sin(w1*t) traj1.z = n+c*sin(w1*t) traj1.psi = w1*t # Velocity traj1.xdot = -a*w1*sin(w1*t) traj1.ydot = b*w1*cos(w1*t) traj1.zdot = c*w1*cos(w1*t) traj1.psidot = w1 # Acceleration traj1.xddot = -a*w1*w1*cos(w1*t) traj1.yddot = -b*w1*w1*sin(w1*t) traj1.zddot = -c*w1*w1*sin(w1*t) traj1.psiddot = 0 traj2 = Trajectory() # Position traj2.x = d*cos(2*1*np.pi/num_traj)+a*cos(w1*t+period/num_traj) traj2.y = d*sin(2*1*np.pi/num_traj)+b*sin(w1*t+period/num_traj) traj2.z = n+c*sin(w1*t+period/num_traj) traj2.psi = w1*t+period/num_traj # Velocity traj2.xdot = -a*w1*sin(w1*t+period/2) traj2.ydot = b*w1*cos(w1*t+period/2) traj2.zdot = c*w1*cos(w1*t+period/2) traj2.psidot = w1 # Acceleration traj2.xddot = -a*w1*w1*cos(w1*t+period/2) traj2.yddot = -b*w1*w1*sin(w1*t+period/2) traj2.zddot = -c*w1*w1*sin(w1*t+period/2) traj2.psiddot = 0 traj3 = Trajectory() # Position traj3.x = d*cos(2*2*np.pi/num_traj)+a*cos(w1*t+2*period/num_traj) traj3.y = d*sin(2*2*np.pi/num_traj)+b*sin(w1*t+2*period/num_traj) traj3.z = n+c*sin(w1*t+2*period/num_traj) traj3.psi = w1*t+2*period/num_traj # Velocity traj3.xdot = -a*w1*sin(w1*t+period/2) traj3.ydot = b*w1*cos(w1*t+period/2) traj3.zdot = c*w1*cos(w1*t+period/2) traj3.psidot = w1 # Acceleration traj3.xddot = -a*w1*w1*cos(w1*t+period/2) traj3.yddot = -b*w1*w1*sin(w1*t+period/2) traj3.zddot = -c*w1*w1*sin(w1*t+period/2) traj3.psiddot = 0 #==================# # Publish # #==================# WP.Obj = [traj1, traj2, traj3] pub.publish(WP) #===================# # Main # #===================# if __name__=='__main__': rospy.init_node('circles_traj') start_time = rospy.get_time() #=====================================# # Set up Publish/Subscribe Loop # #=====================================# r = rospy.Rate(200) while not rospy.is_shutdown(): Datahandler() r.sleep() rospy.loginfo("Trajectory Node Has Shutdown.") rospy.signal_shutdown(0)
bsd-2-clause
-1,619,490,676,580,143,600
27.704545
75
0.484033
false
2.582822
false
false
false
jeremykid/Algorithm_project
trial_division.py
1
1193
import math
import time


def primeGenerate(number):
    largest = number
    prime_list = largest*[1]
    if (number<4):
        return [2,3]
    prime_list[1] = 0
    for i in range(0,largest,2):
        prime_list[i] = 0
    prime_list[2] = 1
    for i in range(3,largest,2):
        if (prime_list[i] == 1):
            for j in range(2*i,largest,i):
                prime_list[j] == 1
    result = []
    # print (prime_list,number)
    for i in range(0,number):
        if(prime_list[i] == 1):
            result.append(i)
    return result


def trial_division(n):
    """Return a list of the prime factors for a natural number."""
    if n < 2:
        return []
    prime_factors = []
    for p in primeGenerate(int(n**0.5) + 1):
        if p*p > n:
            break
        while n % p == 0:
            prime_factors.append(p)
            n //= p
    if n > 1:
        prime_factors.append(n)
    return prime_factors


def runner():
    testcases = int(input("How many Testcases: "))
    for i in range(testcases):
        timeA = time.time()
        number = int(input("number:"))
        print trial_division(number)
        timeB = time.time()
        print (timeB - timeA)
mit
765,561,132,604,464,000
22.392157
66
0.524728
false
3.360563
false
false
false
sgibbes/carbon-budget
gain/utilities.py
1
4640
import subprocess import glob import sys sys.path.append('../') import constants_and_names as cn def s3_folder_download(source, dest): cmd = ['aws', 's3', 'cp', source, dest, '--recursive'] subprocess.check_call(cmd) def s3_file_download(source, dest): cmd = ['aws', 's3', 'cp', source, dest] subprocess.check_call(cmd) # Lists the tiles in a folder in s3 def tile_list(source): ## For an s3 folder in a bucket using AWSCLI # Captures the list of the files in the folder out = subprocess.Popen(['aws', 's3', 'ls', source], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, stderr = out.communicate() # Writes the output string to a text file for easier interpretation biomass_tiles = open("biomass_tiles.txt", "w") biomass_tiles.write(stdout) biomass_tiles.close() file_list = [] # Iterates through the text file to get the names of the tiles and appends them to list with open("biomass_tiles.txt", 'r') as tile: for line in tile: num = len(line.strip('\n').split(" ")) tile_name = line.strip('\n').split(" ")[num - 1] # Only tifs will be in the tile list if '.tif' in tile_name: # For stripping down standard tree biomass tiles to the tile id if '_biomass.tif' in tile_name: tile_short_name = tile_name.replace('_biomass.tif', '') file_list.append(tile_short_name) # For stripping down mangrove biomass tiles to the tile id if cn.pattern_mangrove_biomass_2000 in tile_name: tile_short_name = tile_name.replace('{}_'.format(cn.pattern_mangrove_biomass_2000), '') tile_short_name = tile_short_name.replace('.tif', '') file_list.append(tile_short_name) file_list = file_list[0:] return file_list # Gets the bounding coordinates of a tile def coords(tile_id): NS = tile_id.split("_")[0][-1:] EW = tile_id.split("_")[1][-1:] if NS == 'S': ymax =-1*int(tile_id.split("_")[0][:2]) else: ymax = int(str(tile_id.split("_")[0][:2])) if EW == 'W': xmin = -1*int(str(tile_id.split("_")[1][:3])) else: xmin = int(str(tile_id.split("_")[1][:3])) ymin = str(int(ymax) - 10) xmax = str(int(xmin) + 10) return ymax, xmin, ymin, xmax # Rasterizes the shapefile within the bounding coordinates of a tile def rasterize(in_shape, out_tif, xmin, ymin, xmax, ymax, tr=None, ot=None, gainEcoCon=None, anodata=None): cmd = ['gdal_rasterize', '-co', 'COMPRESS=LZW', # Input raster is ingested as 1024x1024 pixel tiles (rather than the default of 1 pixel wide strips '-co', 'TILED=YES', '-co', 'BLOCKXSIZE=1024', '-co', 'BLOCKYSIZE=1024', '-te', str(xmin), str(ymin), str(xmax), str(ymax), '-tr', tr, tr, '-ot', ot, '-a', gainEcoCon, '-a_nodata', anodata, in_shape, '{}.tif'.format(out_tif)] subprocess.check_call(cmd) return out_tif # Uploads tile to specified location def upload_final(upload_dir, tile_id, pattern): file = '{}_{}.tif'.format(tile_id, pattern) print "Uploading {}".format(file) cmd = ['aws', 's3', 'cp', file, upload_dir] try: subprocess.check_call(cmd) except: print "Error uploading output tile" ##### Not currently using the below functions def wgetloss(tile_id): print "download hansen loss tile" cmd = ['wget', r'http://glad.geog.umd.edu/Potapov/GFW_2015/tiles/{}.tif'.format(tile_id)] subprocess.check_call(cmd) def wget2015data(tile_id, filetype): outfile = '{0}_{1}_h.tif'.format(tile_id, filetype) website = 'https://storage.googleapis.com/earthenginepartners-hansen/GFC-2015-v1.3/Hansen_GFC-2015-v1.3_{0}_{1}.tif'.format(filetype, tile_id) cmd = ['wget', website, '-O', outfile] print cmd subprocess.check_call(cmd) return outfile def rasterize_shapefile(xmin, ymax, xmax, ymin, shapefile, output_tif, attribute_field): layer = 
shapefile.replace(".shp", "") # attribute_field = 'old_100' cmd= ['gdal_rasterize', '-te', str(xmin), str(ymin), str(xmax), str(ymax), '-a', attribute_field, '-co', 'COMPRESS=LZW', '-tr', '.00025', '.00025', '-tap', '-a_nodata', '0', '-l', layer, shapefile, output_tif] subprocess.check_call(cmd) return output_tif def resample_00025(input_tif, resampled_tif): # resample to .00025 cmd = ['gdal_translate', input_tif, resampled_tif, '-tr', '.00025', '.00025', '-co', 'COMPRESS=LZW'] subprocess.check_call(cmd)
apache-2.0
3,722,346,610,201,929,000
31.222222
213
0.603448
false
3.228949
false
false
false
dwhoman/CVPI
tests/hypothesis/testing_hypothesis.py
1
2209
import subprocess import numpy as np import hypothesis as h import hypothesis.strategies as st import hypothesis.extra.numpy as hnp #h.settings(buffer_size = 819200000) min_img_width = 1 min_img_height = 1 max_img_width = 10 max_img_height = 10 max_uint32 = 2**32 - 1 max_int32 = 2**31 - 1 min_int32 = -(2**31) max_short = 2**15 - 1 min_short = -(2**15) def thirty2to8s(np_num): return [int(i) for i in int(np_num).to_bytes(4, byteorder='big', signed=False)] def twoDto3d(np_array): return np.array([[[z for z in thirty2to8s(y)] for y in x] for x in np_array], dtype=np.uint64) def image_hex(np_array): return ''.join(["%02x" % (x) for x in np_array.flatten('C')]) @st.composite def np_images(draw, number, width=st.integers(min_img_width, max_img_width).example(), height=st.integers(min_img_height, max_img_height).example()): return draw(st.lists(hnp.arrays(np.uint32, (width,height), elements=st.integers(0,max_uint32)), min_size=number, max_size=number)) @h.given(np_images(2), st.integers(1, 5), st.integers(1, 5), st.floats(1.0, 1.0), st.floats(0, 0)) def test_add_images(images, a, b, scale, bias): assert len(images) == 2 assert images[0].shape == images[1].shape image_1 = twoDto3d(images[0]) image_2 = twoDto3d(images[1]) image_sum = np.clip(np.ceil(scale * (a * image_1 + b * image_2) + bias), 0, 255) compl_proc = subprocess.check_output([ "./cvpi_tests_hyp", "cvpi_image_add", image_hex(image_1), image_hex(image_2), str(images[0].shape[0]), str(images[0].shape[1]), str(a), str(b), format(scale, 'f'), format(bias, 'f')]) compl_proc_str = ''.join(map(chr, compl_proc)) numpy_image_str = image_hex(image_sum) + "\n" h.note(str(images[0].shape[0]) + " " + str(images[0].shape[1])) h.note(image_hex(image_1)) h.note(image_hex(image_2)) h.note("cvpi: " + compl_proc_str) h.note("numpy: " + numpy_image_str) assert numpy_image_str == compl_proc_str if __name__ == '__main__': test_add_images()
apache-2.0
-1,831,955,842,194,675,200
29.680556
98
0.583975
false
2.828425
false
false
false
theanalyst/cinder
cinder/db/sqlalchemy/api.py
1
94793
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of SQLAlchemy backend.""" import sys import threading import uuid import warnings from oslo.config import cfg from sqlalchemy.exc import IntegrityError from sqlalchemy import or_ from sqlalchemy.orm import joinedload, joinedload_all from sqlalchemy.orm import RelationshipProperty from sqlalchemy.sql.expression import literal_column from sqlalchemy.sql import func from cinder.common import sqlalchemyutils from cinder.db.sqlalchemy import models from cinder import exception from cinder.openstack.common.db import exception as db_exc from cinder.openstack.common.db import options from cinder.openstack.common.db.sqlalchemy import session as db_session from cinder.openstack.common import log as logging from cinder.openstack.common import timeutils from cinder.openstack.common import uuidutils CONF = cfg.CONF LOG = logging.getLogger(__name__) options.set_defaults(sql_connection='sqlite:///$state_path/cinder.sqlite', sqlite_db='cinder.sqlite') _LOCK = threading.Lock() _FACADE = None def _create_facade_lazily(): global _LOCK with _LOCK: global _FACADE if _FACADE is None: _FACADE = db_session.EngineFacade( CONF.database.connection, **dict(CONF.database.iteritems()) ) return _FACADE def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(**kwargs): facade = _create_facade_lazily() return facade.get_session(**kwargs) _DEFAULT_QUOTA_NAME = 'default' def get_backend(): """The backend is this module itself.""" return sys.modules[__name__] def is_admin_context(context): """Indicates if the request context is an administrator.""" if not context: warnings.warn(_('Use of empty request context is deprecated'), DeprecationWarning) raise Exception('die') return context.is_admin def is_user_context(context): """Indicates if the request context is a normal user.""" if not context: return False if context.is_admin: return False if not context.user_id or not context.project_id: return False return True def authorize_project_context(context, project_id): """Ensures a request has permission to access the given project.""" if is_user_context(context): if not context.project_id: raise exception.NotAuthorized() elif context.project_id != project_id: raise exception.NotAuthorized() def authorize_user_context(context, user_id): """Ensures a request has permission to access the given user.""" if is_user_context(context): if not context.user_id: raise exception.NotAuthorized() elif context.user_id != user_id: raise exception.NotAuthorized() def authorize_quota_class_context(context, class_name): """Ensures a request has permission to access the given quota class.""" if is_user_context(context): if not context.quota_class: raise exception.NotAuthorized() elif context.quota_class != class_name: 
raise exception.NotAuthorized() def require_admin_context(f): """Decorator to require admin request context. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]): raise exception.AdminRequired() return f(*args, **kwargs) return wrapper def require_context(f): """Decorator to require *any* user or admin context. This does no authorization for user or project access matching, see :py:func:`authorize_project_context` and :py:func:`authorize_user_context`. The first argument to the wrapped function must be the context. """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): raise exception.NotAuthorized() return f(*args, **kwargs) return wrapper def require_volume_exists(f): """Decorator to require the specified volume to exist. Requires the wrapped function to use context and volume_id as their first two arguments. """ def wrapper(context, volume_id, *args, **kwargs): volume_get(context, volume_id) return f(context, volume_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def require_snapshot_exists(f): """Decorator to require the specified snapshot to exist. Requires the wrapped function to use context and snapshot_id as their first two arguments. """ def wrapper(context, snapshot_id, *args, **kwargs): snapshot_get(context, snapshot_id) return f(context, snapshot_id, *args, **kwargs) wrapper.__name__ = f.__name__ return wrapper def model_query(context, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. :param context: context to query under :param session: if present, the session to use :param read_deleted: if present, overrides context's read_deleted field. :param project_only: if present and context is user-type, then restrict query to match the context's project_id. 
""" session = kwargs.get('session') or get_session() read_deleted = kwargs.get('read_deleted') or context.read_deleted project_only = kwargs.get('project_only') query = session.query(*args) if read_deleted == 'no': query = query.filter_by(deleted=False) elif read_deleted == 'yes': pass # omit the filter to include deleted and active elif read_deleted == 'only': query = query.filter_by(deleted=True) else: raise Exception( _("Unrecognized read_deleted value '%s'") % read_deleted) if project_only and is_user_context(context): query = query.filter_by(project_id=context.project_id) return query def _sync_volumes(context, project_id, session, volume_type_id=None, volume_type_name=None): (volumes, gigs) = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'volumes' if volume_type_name: key += '_' + volume_type_name return {key: volumes} def _sync_snapshots(context, project_id, session, volume_type_id=None, volume_type_name=None): (snapshots, gigs) = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'snapshots' if volume_type_name: key += '_' + volume_type_name return {key: snapshots} def _sync_gigabytes(context, project_id, session, volume_type_id=None, volume_type_name=None): (_junk, vol_gigs) = _volume_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) key = 'gigabytes' if volume_type_name: key += '_' + volume_type_name if CONF.no_snapshot_gb_quota: return {key: vol_gigs} (_junk, snap_gigs) = _snapshot_data_get_for_project( context, project_id, volume_type_id=volume_type_id, session=session) return {key: vol_gigs + snap_gigs} QUOTA_SYNC_FUNCTIONS = { '_sync_volumes': _sync_volumes, '_sync_snapshots': _sync_snapshots, '_sync_gigabytes': _sync_gigabytes, } ################### @require_admin_context def service_destroy(context, service_id): session = get_session() with session.begin(): service_ref = _service_get(context, service_id, session=session) service_ref.delete(session=session) @require_admin_context def _service_get(context, service_id, session=None): result = model_query( context, models.Service, session=session).\ filter_by(id=service_id).\ first() if not result: raise exception.ServiceNotFound(service_id=service_id) return result @require_admin_context def service_get(context, service_id): return _service_get(context, service_id) @require_admin_context def service_get_all(context, disabled=None): query = model_query(context, models.Service) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_all_by_topic(context, topic, disabled=None): query = model_query( context, models.Service, read_deleted="no").\ filter_by(topic=topic) if disabled is not None: query = query.filter_by(disabled=disabled) return query.all() @require_admin_context def service_get_by_host_and_topic(context, host, topic): result = model_query( context, models.Service, read_deleted="no").\ filter_by(disabled=False).\ filter_by(host=host).\ filter_by(topic=topic).\ first() if not result: raise exception.ServiceNotFound(service_id=None) return result @require_admin_context def service_get_all_by_host(context, host): return model_query( context, models.Service, read_deleted="no").\ filter_by(host=host).\ all() @require_admin_context def _service_get_all_topic_subquery(context, session, topic, subq, label): sort_value = getattr(subq.c, label) return model_query(context, models.Service, 
func.coalesce(sort_value, 0), session=session, read_deleted="no").\ filter_by(topic=topic).\ filter_by(disabled=False).\ outerjoin((subq, models.Service.host == subq.c.host)).\ order_by(sort_value).\ all() @require_admin_context def service_get_all_volume_sorted(context): session = get_session() with session.begin(): topic = CONF.volume_topic label = 'volume_gigabytes' subq = model_query(context, models.Volume.host, func.sum(models.Volume.size).label(label), session=session, read_deleted="no").\ group_by(models.Volume.host).\ subquery() return _service_get_all_topic_subquery(context, session, topic, subq, label) @require_admin_context def service_get_by_args(context, host, binary): result = model_query(context, models.Service).\ filter_by(host=host).\ filter_by(binary=binary).\ first() if not result: raise exception.HostBinaryNotFound(host=host, binary=binary) return result @require_admin_context def service_create(context, values): service_ref = models.Service() service_ref.update(values) if not CONF.enable_new_services: service_ref.disabled = True session = get_session() with session.begin(): service_ref.save(session) return service_ref @require_admin_context def service_update(context, service_id, values): session = get_session() with session.begin(): service_ref = _service_get(context, service_id, session=session) service_ref.update(values) ################### def _metadata_refs(metadata_dict, meta_class): metadata_refs = [] if metadata_dict: for k, v in metadata_dict.iteritems(): metadata_ref = meta_class() metadata_ref['key'] = k metadata_ref['value'] = v metadata_refs.append(metadata_ref) return metadata_refs def _dict_with_extra_specs(inst_type_query): """Convert type query result to dict with extra_spec and rate_limit. Takes a volume type query returned by sqlalchemy and returns it as a dictionary, converting the extra_specs entry from a list of dicts: 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] 
to a single dict: 'extra_specs' : {'k1': 'v1'} """ inst_type_dict = dict(inst_type_query) extra_specs = dict([(x['key'], x['value']) for x in inst_type_query['extra_specs']]) inst_type_dict['extra_specs'] = extra_specs return inst_type_dict ################### @require_admin_context def iscsi_target_count_by_host(context, host): return model_query(context, models.IscsiTarget).\ filter_by(host=host).\ count() @require_admin_context def iscsi_target_create_safe(context, values): iscsi_target_ref = models.IscsiTarget() for (key, value) in values.iteritems(): iscsi_target_ref[key] = value session = get_session() with session.begin(): try: iscsi_target_ref.save(session) return iscsi_target_ref except IntegrityError: return None ################### @require_context def _quota_get(context, project_id, resource, session=None): result = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.ProjectQuotaNotFound(project_id=project_id) return result @require_context def quota_get(context, project_id, resource): return _quota_get(context, project_id, resource) @require_context def quota_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.Quota, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_create(context, project_id, resource, limit): quota_ref = models.Quota() quota_ref.project_id = project_id quota_ref.resource = resource quota_ref.hard_limit = limit session = get_session() with session.begin(): quota_ref.save(session) return quota_ref @require_admin_context def quota_update(context, project_id, resource, limit): session = get_session() with session.begin(): quota_ref = _quota_get(context, project_id, resource, session=session) quota_ref.hard_limit = limit @require_admin_context def quota_destroy(context, project_id, resource): session = get_session() with session.begin(): quota_ref = _quota_get(context, project_id, resource, session=session) quota_ref.delete(session=session) ################### @require_context def _quota_class_get(context, class_name, resource, session=None): result = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaClassNotFound(class_name=class_name) return result @require_context def quota_class_get(context, class_name, resource): return _quota_class_get(context, class_name, resource) def quota_class_get_default(context): rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=_DEFAULT_QUOTA_NAME).all() result = {'class_name': _DEFAULT_QUOTA_NAME} for row in rows: result[row.resource] = row.hard_limit return result @require_context def quota_class_get_all_by_name(context, class_name): authorize_quota_class_context(context, class_name) rows = model_query(context, models.QuotaClass, read_deleted="no").\ filter_by(class_name=class_name).\ all() result = {'class_name': class_name} for row in rows: result[row.resource] = row.hard_limit return result @require_admin_context def quota_class_create(context, class_name, resource, limit): quota_class_ref = models.QuotaClass() quota_class_ref.class_name = class_name quota_class_ref.resource = resource quota_class_ref.hard_limit = 
limit session = get_session() with session.begin(): quota_class_ref.save(session) return quota_class_ref @require_admin_context def quota_class_update(context, class_name, resource, limit): session = get_session() with session.begin(): quota_class_ref = _quota_class_get(context, class_name, resource, session=session) quota_class_ref.hard_limit = limit @require_admin_context def quota_class_destroy(context, class_name, resource): session = get_session() with session.begin(): quota_class_ref = _quota_class_get(context, class_name, resource, session=session) quota_class_ref.delete(session=session) @require_admin_context def quota_class_destroy_all_by_name(context, class_name): session = get_session() with session.begin(): quota_classes = model_query(context, models.QuotaClass, session=session, read_deleted="no").\ filter_by(class_name=class_name).\ all() for quota_class_ref in quota_classes: quota_class_ref.delete(session=session) ################### @require_context def quota_usage_get(context, project_id, resource): result = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ filter_by(resource=resource).\ first() if not result: raise exception.QuotaUsageNotFound(project_id=project_id) return result @require_context def quota_usage_get_all_by_project(context, project_id): authorize_project_context(context, project_id) rows = model_query(context, models.QuotaUsage, read_deleted="no").\ filter_by(project_id=project_id).\ all() result = {'project_id': project_id} for row in rows: result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) return result @require_admin_context def _quota_usage_create(context, project_id, resource, in_use, reserved, until_refresh, session=None): quota_usage_ref = models.QuotaUsage() quota_usage_ref.project_id = project_id quota_usage_ref.resource = resource quota_usage_ref.in_use = in_use quota_usage_ref.reserved = reserved quota_usage_ref.until_refresh = until_refresh quota_usage_ref.save(session=session) return quota_usage_ref ################### def _reservation_create(context, uuid, usage, project_id, resource, delta, expire, session=None): reservation_ref = models.Reservation() reservation_ref.uuid = uuid reservation_ref.usage_id = usage['id'] reservation_ref.project_id = project_id reservation_ref.resource = resource reservation_ref.delta = delta reservation_ref.expire = expire reservation_ref.save(session=session) return reservation_ref ################### # NOTE(johannes): The quota code uses SQL locking to ensure races don't # cause under or over counting of resources. To avoid deadlocks, this # code always acquires the lock on quota_usages before acquiring the lock # on reservations. def _get_quota_usages(context, session, project_id): # Broken out for testability rows = model_query(context, models.QuotaUsage, read_deleted="no", session=session).\ filter_by(project_id=project_id).\ with_lockmode('update').\ all() return dict((row.resource, row) for row in rows) @require_context def quota_reserve(context, resources, quotas, deltas, expire, until_refresh, max_age, project_id=None): elevated = context.elevated() session = get_session() with session.begin(): if project_id is None: project_id = context.project_id # Get the current usages usages = _get_quota_usages(context, session, project_id) # Handle usage refresh work = set(deltas.keys()) while work: resource = work.pop() # Do we need to refresh the usage? 
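            # A refresh re-counts the resource via its registered sync
            # function (see QUOTA_SYNC_FUNCTIONS) and is comparatively
            # expensive, so it is only triggered when the usage row is
            # missing, looks desynced (negative in_use), has exhausted its
            # until_refresh countdown, or is older than max_age.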
refresh = False if resource not in usages: usages[resource] = _quota_usage_create(elevated, project_id, resource, 0, 0, until_refresh or None, session=session) refresh = True elif usages[resource].in_use < 0: # Negative in_use count indicates a desync, so try to # heal from that... refresh = True elif usages[resource].until_refresh is not None: usages[resource].until_refresh -= 1 if usages[resource].until_refresh <= 0: refresh = True elif max_age and usages[resource].updated_at is not None and ( (usages[resource].updated_at - timeutils.utcnow()).seconds >= max_age): refresh = True # OK, refresh the usage if refresh: # Grab the sync routine sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync] volume_type_id = getattr(resources[resource], 'volume_type_id', None) volume_type_name = getattr(resources[resource], 'volume_type_name', None) updates = sync(elevated, project_id, volume_type_id=volume_type_id, volume_type_name=volume_type_name, session=session) for res, in_use in updates.items(): # Make sure we have a destination for the usage! if res not in usages: usages[res] = _quota_usage_create( elevated, project_id, res, 0, 0, until_refresh or None, session=session ) # Update the usage usages[res].in_use = in_use usages[res].until_refresh = until_refresh or None # Because more than one resource may be refreshed # by the call to the sync routine, and we don't # want to double-sync, we make sure all refreshed # resources are dropped from the work set. work.discard(res) # NOTE(Vek): We make the assumption that the sync # routine actually refreshes the # resources that it is the sync routine # for. We don't check, because this is # a best-effort mechanism. # Check for deltas that would go negative unders = [r for r, delta in deltas.items() if delta < 0 and delta + usages[r].in_use < 0] # Now, let's check the quotas # NOTE(Vek): We're only concerned about positive increments. # If a project has gone over quota, we want them to # be able to reduce their usage without any # problems. overs = [r for r, delta in deltas.items() if quotas[r] >= 0 and delta >= 0 and quotas[r] < delta + usages[r].total] # NOTE(Vek): The quota check needs to be in the transaction, # but the transaction doesn't fail just because # we're over quota, so the OverQuota raise is # outside the transaction. If we did the raise # here, our usage updates would be discarded, but # they're not invalidated by being over-quota. # Create the reservations if not overs: reservations = [] for resource, delta in deltas.items(): reservation = _reservation_create(elevated, str(uuid.uuid4()), usages[resource], project_id, resource, delta, expire, session=session) reservations.append(reservation.uuid) # Also update the reserved quantity # NOTE(Vek): Again, we are only concerned here about # positive increments. Here, though, we're # worried about the following scenario: # # 1) User initiates resize down. # 2) User allocates a new instance. # 3) Resize down fails or is reverted. # 4) User is now over quota. # # To prevent this, we only update the # reserved value if the delta is positive. 
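                # Negative deltas (usage give-backs) never touch the
                # reserved counter here; they are applied to in_use only
                # when the reservation is committed in reservation_commit().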
if delta > 0: usages[resource].reserved += delta if unders: LOG.warning(_("Change will make usage less than 0 for the following " "resources: %s") % unders) if overs: usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved'])) for k, v in usages.items()) raise exception.OverQuota(overs=sorted(overs), quotas=quotas, usages=usages) return reservations def _quota_reservations(session, context, reservations): """Return the relevant reservations.""" # Get the listed reservations return model_query(context, models.Reservation, read_deleted="no", session=session).\ filter(models.Reservation.uuid.in_(reservations)).\ with_lockmode('update').\ all() @require_context def reservation_commit(context, reservations, project_id=None): session = get_session() with session.begin(): usages = _get_quota_usages(context, session, project_id) for reservation in _quota_reservations(session, context, reservations): usage = usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta usage.in_use += reservation.delta reservation.delete(session=session) @require_context def reservation_rollback(context, reservations, project_id=None): session = get_session() with session.begin(): usages = _get_quota_usages(context, session, project_id) for reservation in _quota_reservations(session, context, reservations): usage = usages[reservation.resource] if reservation.delta >= 0: usage.reserved -= reservation.delta reservation.delete(session=session) @require_admin_context def quota_destroy_all_by_project(context, project_id): session = get_session() with session.begin(): quotas = model_query(context, models.Quota, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ all() for quota_ref in quotas: quota_ref.delete(session=session) quota_usages = model_query(context, models.QuotaUsage, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ all() for quota_usage_ref in quota_usages: quota_usage_ref.delete(session=session) reservations = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter_by(project_id=project_id).\ all() for reservation_ref in reservations: reservation_ref.delete(session=session) @require_admin_context def reservation_expire(context): session = get_session() with session.begin(): current_time = timeutils.utcnow() results = model_query(context, models.Reservation, session=session, read_deleted="no").\ filter(models.Reservation.expire < current_time).\ all() if results: for reservation in results: if reservation.delta >= 0: reservation.usage.reserved -= reservation.delta reservation.usage.save(session=session) reservation.delete(session=session) ################### @require_admin_context def volume_allocate_iscsi_target(context, volume_id, host): session = get_session() with session.begin(): iscsi_target_ref = model_query(context, models.IscsiTarget, session=session, read_deleted="no").\ filter_by(volume=None).\ filter_by(host=host).\ with_lockmode('update').\ first() # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not iscsi_target_ref: raise exception.NoMoreTargets() iscsi_target_ref.volume_id = volume_id session.add(iscsi_target_ref) return iscsi_target_ref.target_num @require_admin_context def volume_attached(context, volume_id, instance_uuid, host_name, mountpoint): if instance_uuid and not uuidutils.is_uuid_like(instance_uuid): raise exception.InvalidUUID(uuid=instance_uuid) session = get_session() with session.begin(): volume_ref = 
_volume_get(context, volume_id, session=session) volume_ref['status'] = 'in-use' volume_ref['mountpoint'] = mountpoint volume_ref['attach_status'] = 'attached' volume_ref['instance_uuid'] = instance_uuid volume_ref['attached_host'] = host_name return volume_ref @require_context def volume_create(context, values): values['volume_metadata'] = _metadata_refs(values.get('metadata'), models.VolumeMetadata) if is_admin_context(context): values['volume_admin_metadata'] = \ _metadata_refs(values.get('admin_metadata'), models.VolumeAdminMetadata) elif values.get('volume_admin_metadata'): del values['volume_admin_metadata'] volume_ref = models.Volume() if not values.get('id'): values['id'] = str(uuid.uuid4()) volume_ref.update(values) session = get_session() with session.begin(): session.add(volume_ref) return _volume_get(context, values['id'], session=session) @require_admin_context def volume_data_get_for_host(context, host, count_only=False): if count_only: result = model_query(context, func.count(models.Volume.id), read_deleted="no").\ filter_by(host=host).\ first() return result[0] or 0 else: result = model_query(context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no").\ filter_by(host=host).\ first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def _volume_data_get_for_project(context, project_id, volume_type_id=None, session=None): query = model_query(context, func.count(models.Volume.id), func.sum(models.Volume.size), read_deleted="no", session=session).\ filter_by(project_id=project_id) if volume_type_id: query = query.filter_by(volume_type_id=volume_type_id) result = query.first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_admin_context def volume_data_get_for_project(context, project_id, volume_type_id=None): return _volume_data_get_for_project(context, project_id, volume_type_id) @require_admin_context def finish_volume_migration(context, src_vol_id, dest_vol_id): """Copy almost all columns from dest to source.""" session = get_session() with session.begin(): src_volume_ref = _volume_get(context, src_vol_id, session=session) dest_volume_ref = _volume_get(context, dest_vol_id, session=session) # NOTE(rpodolyaka): we should copy only column values, while model # instances also have relationships attributes, which # should be ignored def is_column(inst, attr): return attr in inst.__class__.__table__.columns for key, value in dest_volume_ref.iteritems(): if key == 'id' or not is_column(dest_volume_ref, key): continue elif key == 'migration_status': value = None elif key == '_name_id': value = dest_volume_ref['_name_id'] or dest_volume_ref['id'] setattr(src_volume_ref, key, value) @require_admin_context def volume_destroy(context, volume_id): session = get_session() now = timeutils.utcnow() with session.begin(): model_query(context, models.Volume, session=session).\ filter_by(id=volume_id).\ update({'status': 'deleted', 'deleted': True, 'deleted_at': now, 'updated_at': literal_column('updated_at')}) model_query(context, models.IscsiTarget, session=session).\ filter_by(volume_id=volume_id).\ update({'volume_id': None}) model_query(context, models.VolumeMetadata, session=session).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': now, 'updated_at': literal_column('updated_at')}) model_query(context, models.VolumeAdminMetadata, session=session).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': now, 'updated_at': 
literal_column('updated_at')}) model_query(context, models.Transfer, session=session).\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': now, 'updated_at': literal_column('updated_at')}) @require_admin_context def volume_detached(context, volume_id): session = get_session() with session.begin(): volume_ref = _volume_get(context, volume_id, session=session) # Hide status update from user if we're performing a volume migration if not volume_ref['migration_status']: volume_ref['status'] = 'available' volume_ref['mountpoint'] = None volume_ref['attach_status'] = 'detached' volume_ref['instance_uuid'] = None volume_ref['attached_host'] = None volume_ref['attach_time'] = None @require_context def _volume_get_query(context, session=None, project_only=False): if is_admin_context(context): return model_query(context, models.Volume, session=session, project_only=project_only).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_admin_metadata')).\ options(joinedload('volume_type')) else: return model_query(context, models.Volume, session=session, project_only=project_only).\ options(joinedload('volume_metadata')).\ options(joinedload('volume_type')) @require_context def _volume_get(context, volume_id, session=None): result = _volume_get_query(context, session=session, project_only=True).\ filter_by(id=volume_id).\ first() if not result: raise exception.VolumeNotFound(volume_id=volume_id) return result @require_context def volume_get(context, volume_id): return _volume_get(context, volume_id) @require_admin_context def volume_get_all(context, marker, limit, sort_key, sort_dir, filters=None): """Retrieves all volumes. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_key: single attributes by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) :param filters: Filters for the query. A filter key/value of 'no_migration_targets'=True causes volumes with either a NULL 'migration_status' or a 'migration_status' that does not start with 'target:' to be retrieved. :returns: list of matching volumes """ session = get_session() with session.begin(): # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_key, sort_dir, filters) # No volumes would match, return empty list if query == None: return [] return query.all() @require_admin_context def volume_get_all_by_host(context, host): return _volume_get_query(context).filter_by(host=host).all() @require_context def volume_get_all_by_project(context, project_id, marker, limit, sort_key, sort_dir, filters=None): """"Retrieves all volumes in a project. :param context: context to query under :param project_id: project for all volumes being retrieved :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_key: single attributes by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) :param filters: Filters for the query. A filter key/value of 'no_migration_targets'=True causes volumes with either a NULL 'migration_status' or a 'migration_status' that does not start with 'target:' to be retrieved. 
:returns: list of matching volumes """ session = get_session() with session.begin(): authorize_project_context(context, project_id) # Add in the project filter without modifying the given filters filters = filters.copy() if filters else {} filters['project_id'] = project_id # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_key, sort_dir, filters) # No volumes would match, return empty list if query == None: return [] return query.all() def _generate_paginate_query(context, session, marker, limit, sort_key, sort_dir, filters): """Generate the query to include the filters and the paginate options. Returns a query with sorting / pagination criteria added or None if the given filters will not yield any results. :param context: context to query under :param session: the session to use :param marker: the last item of the previous page; we returns the next results after this value. :param limit: maximum number of items to return :param sort_key: single attributes by which results should be sorted :param sort_dir: direction in which results should be sorted (asc, desc) :param filters: dictionary of filters; values that are lists, tuples, sets, or frozensets cause an 'IN' test to be performed, while exact matching ('==' operator) is used for other values :returns: updated query or None """ query = _volume_get_query(context, session=session) if filters: filters = filters.copy() # 'no_migration_targets' is unique, must be either NULL or # not start with 'target:' if ('no_migration_targets' in filters and filters['no_migration_targets'] == True): filters.pop('no_migration_targets') try: column_attr = getattr(models.Volume, 'migration_status') conditions = [column_attr == None, column_attr.op('NOT LIKE')('target:%')] query = query.filter(or_(*conditions)) except AttributeError: log_msg = _("'migration_status' column could not be found.") LOG.debug(log_msg) return None # Apply exact match filters for everything else, ensure that the # filter value exists on the model for key in filters.keys(): # metadata is unique, must be a dict if key == 'metadata': if not isinstance(filters[key], dict): log_msg = _("'metadata' filter value is not valid.") LOG.debug(log_msg) return None continue try: column_attr = getattr(models.Volume, key) # Do not allow relationship properties since those require # schema specific knowledge prop = getattr(column_attr, 'property') if isinstance(prop, RelationshipProperty): log_msg = (_("'%s' filter key is not valid, " "it maps to a relationship.")) % key LOG.debug(log_msg) return None except AttributeError: log_msg = _("'%s' filter key is not valid.") % key LOG.debug(log_msg) return None # Holds the simple exact matches filter_dict = {} # Iterate over all filters, special case the filter is necessary for key, value in filters.iteritems(): if key == 'metadata': # model.VolumeMetadata defines the backref to Volumes as # 'volume_metadata' or 'volume_admin_metadata', use those as # column attribute keys col_attr = getattr(models.Volume, 'volume_metadata') col_ad_attr = getattr(models.Volume, 'volume_admin_metadata') for k, v in value.iteritems(): query = query.filter(or_(col_attr.any(key=k, value=v), col_ad_attr.any(key=k, value=v))) elif isinstance(value, (list, tuple, set, frozenset)): # Looking for values in a list; apply to query directly column_attr = getattr(models.Volume, key) query = query.filter(column_attr.in_(value)) else: # OK, simple exact match; save for later filter_dict[key] = value # Apply simple exact matches if filter_dict: 
query = query.filter_by(**filter_dict) marker_volume = None if marker is not None: marker_volume = _volume_get(context, marker, session) return sqlalchemyutils.paginate_query(query, models.Volume, limit, [sort_key, 'created_at', 'id'], marker=marker_volume, sort_dir=sort_dir) @require_admin_context def volume_get_iscsi_target_num(context, volume_id): result = model_query(context, models.IscsiTarget, read_deleted="yes").\ filter_by(volume_id=volume_id).\ first() if not result: raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) return result.target_num @require_context def volume_update(context, volume_id, values): session = get_session() with session.begin(): metadata = values.get('metadata') if metadata is not None: _volume_user_metadata_update(context, volume_id, values.pop('metadata'), delete=True, session=session) admin_metadata = values.get('admin_metadata') if is_admin_context(context) and admin_metadata is not None: _volume_admin_metadata_update(context, volume_id, values.pop('admin_metadata'), delete=True, session=session) volume_ref = _volume_get(context, volume_id, session=session) volume_ref.update(values) return volume_ref #################### def _volume_x_metadata_get_query(context, volume_id, model, session=None): return model_query(context, model, session=session, read_deleted="no").\ filter_by(volume_id=volume_id) def _volume_x_metadata_get(context, volume_id, model, session=None): rows = _volume_x_metadata_get_query(context, volume_id, model, session=session).all() result = {} for row in rows: result[row['key']] = row['value'] return result def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec, session=None): result = _volume_x_metadata_get_query(context, volume_id, model, session=session).\ filter_by(key=key).\ first() if not result: raise notfound_exec(metadata_key=key, volume_id=volume_id) return result def _volume_x_metadata_update(context, volume_id, metadata, delete, model, notfound_exec, session=None): if not session: session = get_session() with session.begin(subtransactions=True): # Set existing metadata to deleted if delete argument is True if delete: original_metadata = _volume_x_metadata_get(context, volume_id, model, session=session) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = _volume_x_metadata_get_item(context, volume_id, meta_key, model, notfound_exec, session=session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _volume_x_metadata_get_item(context, volume_id, meta_key, model, notfound_exec, session=session) except notfound_exec: meta_ref = model() item.update({"key": meta_key, "volume_id": volume_id}) meta_ref.update(item) meta_ref.save(session=session) return _volume_x_metadata_get(context, volume_id, model) def _volume_user_metadata_get_query(context, volume_id, session=None): return _volume_x_metadata_get_query(context, volume_id, models.VolumeMetadata, session=session) @require_context @require_volume_exists def _volume_user_metadata_get(context, volume_id, session=None): return _volume_x_metadata_get(context, volume_id, models.VolumeMetadata, session=session) @require_context def _volume_user_metadata_get_item(context, volume_id, key, session=None): return _volume_x_metadata_get_item(context, volume_id, key, 
models.VolumeMetadata, exception.VolumeMetadataNotFound, session=session) @require_context @require_volume_exists def _volume_user_metadata_update(context, volume_id, metadata, delete, session=None): return _volume_x_metadata_update(context, volume_id, metadata, delete, models.VolumeMetadata, exception.VolumeMetadataNotFound, session=session) @require_context @require_volume_exists def volume_metadata_get_item(context, volume_id, key): return _volume_user_metadata_get_item(context, volume_id, key) @require_context @require_volume_exists def volume_metadata_get(context, volume_id): return _volume_user_metadata_get(context, volume_id) @require_context @require_volume_exists def volume_metadata_delete(context, volume_id, key): _volume_user_metadata_get_query(context, volume_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context @require_volume_exists def volume_metadata_update(context, volume_id, metadata, delete): return _volume_user_metadata_update(context, volume_id, metadata, delete) ################### def _volume_admin_metadata_get_query(context, volume_id, session=None): return _volume_x_metadata_get_query(context, volume_id, models.VolumeAdminMetadata, session=session) @require_admin_context @require_volume_exists def _volume_admin_metadata_get(context, volume_id, session=None): return _volume_x_metadata_get(context, volume_id, models.VolumeAdminMetadata, session=session) @require_admin_context @require_volume_exists def _volume_admin_metadata_update(context, volume_id, metadata, delete, session=None): return _volume_x_metadata_update(context, volume_id, metadata, delete, models.VolumeAdminMetadata, exception.VolumeAdminMetadataNotFound, session=session) @require_admin_context @require_volume_exists def volume_admin_metadata_get(context, volume_id): return _volume_admin_metadata_get(context, volume_id) @require_admin_context @require_volume_exists def volume_admin_metadata_delete(context, volume_id, key): _volume_admin_metadata_get_query(context, volume_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context @require_volume_exists def volume_admin_metadata_update(context, volume_id, metadata, delete): return _volume_admin_metadata_update(context, volume_id, metadata, delete) ################### @require_context def snapshot_create(context, values): values['snapshot_metadata'] = _metadata_refs(values.get('metadata'), models.SnapshotMetadata) if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): snapshot_ref = models.Snapshot() snapshot_ref.update(values) session.add(snapshot_ref) return _snapshot_get(context, values['id'], session=session) @require_admin_context def snapshot_destroy(context, snapshot_id): session = get_session() with session.begin(): model_query(context, models.Snapshot, session=session).\ filter_by(id=snapshot_id).\ update({'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) model_query(context, models.SnapshotMetadata, session=session).\ filter_by(snapshot_id=snapshot_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _snapshot_get(context, snapshot_id, session=None): result = model_query(context, models.Snapshot, session=session, project_only=True).\ options(joinedload('volume')).\ 
options(joinedload('snapshot_metadata')).\ filter_by(id=snapshot_id).\ first() if not result: raise exception.SnapshotNotFound(snapshot_id=snapshot_id) return result @require_context def snapshot_get(context, snapshot_id): return _snapshot_get(context, snapshot_id) @require_admin_context def snapshot_get_all(context): return model_query(context, models.Snapshot).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_all_for_volume(context, volume_id): return model_query(context, models.Snapshot, read_deleted='no', project_only=True).\ filter_by(volume_id=volume_id).\ options(joinedload('snapshot_metadata')).\ all() @require_context def snapshot_get_all_by_project(context, project_id): authorize_project_context(context, project_id) return model_query(context, models.Snapshot).\ filter_by(project_id=project_id).\ options(joinedload('snapshot_metadata')).\ all() @require_context def _snapshot_data_get_for_project(context, project_id, volume_type_id=None, session=None): authorize_project_context(context, project_id) query = model_query(context, func.count(models.Snapshot.id), func.sum(models.Snapshot.volume_size), read_deleted="no", session=session).\ filter_by(project_id=project_id) if volume_type_id: query = query.join('volume').filter_by(volume_type_id=volume_type_id) result = query.first() # NOTE(vish): convert None to 0 return (result[0] or 0, result[1] or 0) @require_context def snapshot_data_get_for_project(context, project_id, volume_type_id=None): return _snapshot_data_get_for_project(context, project_id, volume_type_id) @require_context def snapshot_get_active_by_window(context, begin, end=None, project_id=None): """Return snapshots that were active during window.""" query = model_query(context, models.Snapshot, read_deleted="yes") query = query.filter(or_(models.Snapshot.deleted_at == None, models.Snapshot.deleted_at > begin)) query = query.options(joinedload(models.Snapshot.volume)) if end: query = query.filter(models.Snapshot.created_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() @require_context def snapshot_update(context, snapshot_id, values): session = get_session() with session.begin(): snapshot_ref = _snapshot_get(context, snapshot_id, session=session) snapshot_ref.update(values) #################### def _snapshot_metadata_get_query(context, snapshot_id, session=None): return model_query(context, models.SnapshotMetadata, session=session, read_deleted="no").\ filter_by(snapshot_id=snapshot_id) @require_context @require_snapshot_exists def _snapshot_metadata_get(context, snapshot_id, session=None): rows = _snapshot_metadata_get_query(context, snapshot_id, session).all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context @require_snapshot_exists def snapshot_metadata_get(context, snapshot_id): return _snapshot_metadata_get(context, snapshot_id) @require_context @require_snapshot_exists def snapshot_metadata_delete(context, snapshot_id, key): _snapshot_metadata_get_query(context, snapshot_id).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _snapshot_metadata_get_item(context, snapshot_id, key, session=None): result = _snapshot_metadata_get_query(context, snapshot_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.SnapshotMetadataNotFound(metadata_key=key, snapshot_id=snapshot_id) return result @require_context @require_snapshot_exists 
def snapshot_metadata_update(context, snapshot_id, metadata, delete): session = get_session() with session.begin(): # Set existing metadata to deleted if delete argument is True if delete: original_metadata = _snapshot_metadata_get(context, snapshot_id, session) for meta_key, meta_value in original_metadata.iteritems(): if meta_key not in metadata: meta_ref = _snapshot_metadata_get_item(context, snapshot_id, meta_key, session) meta_ref.update({'deleted': True}) meta_ref.save(session=session) meta_ref = None # Now update all existing items with new values, or create new meta # objects for meta_key, meta_value in metadata.items(): # update the value whether it exists or not item = {"value": meta_value} try: meta_ref = _snapshot_metadata_get_item(context, snapshot_id, meta_key, session) except exception.SnapshotMetadataNotFound: meta_ref = models.SnapshotMetadata() item.update({"key": meta_key, "snapshot_id": snapshot_id}) meta_ref.update(item) meta_ref.save(session=session) return snapshot_metadata_get(context, snapshot_id) ################### @require_admin_context def volume_type_create(context, values): """Create a new instance type. In order to pass in extra specs, the values dict should contain a 'extra_specs' key/value pair: {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} """ if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): try: _volume_type_get_by_name(context, values['name'], session) raise exception.VolumeTypeExists(id=values['name']) except exception.VolumeTypeNotFoundByName: pass try: _volume_type_get(context, values['id'], session) raise exception.VolumeTypeExists(id=values['id']) except exception.VolumeTypeNotFound: pass try: values['extra_specs'] = _metadata_refs(values.get('extra_specs'), models.VolumeTypeExtraSpecs) volume_type_ref = models.VolumeTypes() volume_type_ref.update(values) session.add(volume_type_ref) except Exception as e: raise db_exc.DBError(e) return volume_type_ref @require_context def volume_type_get_all(context, inactive=False, filters=None): """Returns a dict describing all volume_types with name as key.""" filters = filters or {} read_deleted = "yes" if inactive else "no" rows = model_query(context, models.VolumeTypes, read_deleted=read_deleted).\ options(joinedload('extra_specs')).\ order_by("name").\ all() result = {} for row in rows: result[row['name']] = _dict_with_extra_specs(row) return result @require_context def _volume_type_get(context, id, session=None, inactive=False): read_deleted = "yes" if inactive else "no" result = model_query(context, models.VolumeTypes, session=session, read_deleted=read_deleted).\ options(joinedload('extra_specs')).\ filter_by(id=id).\ first() if not result: raise exception.VolumeTypeNotFound(volume_type_id=id) return _dict_with_extra_specs(result) @require_context def volume_type_get(context, id, inactive=False): """Return a dict describing specific volume_type.""" return _volume_type_get(context, id, None, inactive) @require_context def _volume_type_get_by_name(context, name, session=None): result = model_query(context, models.VolumeTypes, session=session).\ options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not result: raise exception.VolumeTypeNotFoundByName(volume_type_name=name) else: return _dict_with_extra_specs(result) @require_context def volume_type_get_by_name(context, name): """Return a dict describing specific volume_type.""" return _volume_type_get_by_name(context, name) @require_admin_context def 
volume_type_qos_associations_get(context, qos_specs_id, inactive=False): read_deleted = "yes" if inactive else "no" return model_query(context, models.VolumeTypes, read_deleted=read_deleted). \ filter_by(qos_specs_id=qos_specs_id).all() @require_admin_context def volume_type_qos_associate(context, type_id, qos_specs_id): session = get_session() with session.begin(): _volume_type_get(context, type_id, session) session.query(models.VolumeTypes). \ filter_by(id=type_id). \ update({'qos_specs_id': qos_specs_id, 'updated_at': timeutils.utcnow()}) @require_admin_context def volume_type_qos_disassociate(context, qos_specs_id, type_id): """Disassociate volume type from qos specs.""" session = get_session() with session.begin(): _volume_type_get(context, type_id, session) session.query(models.VolumeTypes). \ filter_by(id=type_id). \ filter_by(qos_specs_id=qos_specs_id). \ update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()}) @require_admin_context def volume_type_qos_disassociate_all(context, qos_specs_id): """Disassociate all volume types associated with specified qos specs.""" session = get_session() with session.begin(): session.query(models.VolumeTypes). \ filter_by(qos_specs_id=qos_specs_id). \ update({'qos_specs_id': None, 'updated_at': timeutils.utcnow()}) @require_admin_context def volume_type_qos_specs_get(context, type_id): """Return all qos specs for given volume type. result looks like: { 'qos_specs': { 'id': 'qos-specs-id', 'name': 'qos_specs_name', 'consumer': 'Consumer', 'specs': { 'key1': 'value1', 'key2': 'value2', 'key3': 'value3' } } } """ session = get_session() with session.begin(): _volume_type_get(context, type_id, session) row = session.query(models.VolumeTypes). \ options(joinedload('qos_specs')). \ filter_by(id=type_id). \ first() # row.qos_specs is a list of QualityOfServiceSpecs ref specs = _dict_with_qos_specs(row.qos_specs) if not specs: # turn empty list to None specs = None else: specs = specs[0] return {'qos_specs': specs} @require_admin_context def volume_type_destroy(context, id): session = get_session() with session.begin(): _volume_type_get(context, id, session) results = model_query(context, models.Volume, session=session). 
\ filter_by(volume_type_id=id).all() if results: msg = _('VolumeType %s deletion failed, VolumeType in use.') % id LOG.error(msg) raise exception.VolumeTypeInUse(volume_type_id=id) model_query(context, models.VolumeTypes, session=session).\ filter_by(id=id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) model_query(context, models.VolumeTypeExtraSpecs, session=session).\ filter_by(volume_type_id=id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_get_active_by_window(context, begin, end=None, project_id=None): """Return volumes that were active during window.""" query = model_query(context, models.Volume, read_deleted="yes") query = query.filter(or_(models.Volume.deleted_at == None, models.Volume.deleted_at > begin)) if end: query = query.filter(models.Volume.created_at < end) if project_id: query = query.filter_by(project_id=project_id) return query.all() #################### def _volume_type_extra_specs_query(context, volume_type_id, session=None): return model_query(context, models.VolumeTypeExtraSpecs, session=session, read_deleted="no").\ filter_by(volume_type_id=volume_type_id) @require_context def volume_type_extra_specs_get(context, volume_type_id): rows = _volume_type_extra_specs_query(context, volume_type_id).\ all() result = {} for row in rows: result[row['key']] = row['value'] return result @require_context def volume_type_extra_specs_delete(context, volume_type_id, key): session = get_session() with session.begin(): _volume_type_extra_specs_get_item(context, volume_type_id, key, session) _volume_type_extra_specs_query(context, volume_type_id, session).\ filter_by(key=key).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def _volume_type_extra_specs_get_item(context, volume_type_id, key, session=None): result = _volume_type_extra_specs_query( context, volume_type_id, session=session).\ filter_by(key=key).\ first() if not result: raise exception.VolumeTypeExtraSpecsNotFound( extra_specs_key=key, volume_type_id=volume_type_id) return result @require_context def volume_type_extra_specs_update_or_create(context, volume_type_id, specs): session = get_session() with session.begin(): spec_ref = None for key, value in specs.iteritems(): try: spec_ref = _volume_type_extra_specs_get_item( context, volume_type_id, key, session) except exception.VolumeTypeExtraSpecsNotFound: spec_ref = models.VolumeTypeExtraSpecs() spec_ref.update({"key": key, "value": value, "volume_type_id": volume_type_id, "deleted": False}) spec_ref.save(session=session) return specs #################### @require_admin_context def qos_specs_create(context, values): """Create a new QoS specs. :param values dictionary that contains specifications for QoS e.g. 
{'name': 'Name', 'qos_specs': { 'consumer': 'front-end', 'total_iops_sec': 1000, 'total_bytes_sec': 1024000 } } """ specs_id = str(uuid.uuid4()) session = get_session() with session.begin(): try: _qos_specs_get_by_name(context, values['name'], session) raise exception.QoSSpecsExists(specs_id=values['name']) except exception.QoSSpecsNotFound: pass try: # Insert a root entry for QoS specs specs_root = models.QualityOfServiceSpecs() root = dict(id=specs_id) # 'QoS_Specs_Name' is an internal reserved key to store # the name of QoS specs root['key'] = 'QoS_Specs_Name' root['value'] = values['name'] LOG.debug("DB qos_specs_create(): root %s", root) specs_root.update(root) specs_root.save(session=session) # Insert all specification entries for QoS specs for k, v in values['qos_specs'].iteritems(): item = dict(key=k, value=v, specs_id=specs_id) item['id'] = str(uuid.uuid4()) spec_entry = models.QualityOfServiceSpecs() spec_entry.update(item) spec_entry.save(session=session) except Exception as e: raise db_exc.DBError(e) return dict(id=specs_root.id, name=specs_root.value) @require_admin_context def _qos_specs_get_by_name(context, name, session=None, inactive=False): read_deleted = 'yes' if inactive else 'no' results = model_query(context, models.QualityOfServiceSpecs, read_deleted=read_deleted, session=session). \ filter_by(key='QoS_Specs_Name'). \ filter_by(value=name). \ options(joinedload('specs')).all() if not results: raise exception.QoSSpecsNotFound(specs_id=name) return results @require_admin_context def _qos_specs_get_ref(context, qos_specs_id, session=None, inactive=False): read_deleted = 'yes' if inactive else 'no' result = model_query(context, models.QualityOfServiceSpecs, read_deleted=read_deleted, session=session). \ filter_by(id=qos_specs_id). \ options(joinedload_all('specs')).all() if not result: raise exception.QoSSpecsNotFound(specs_id=qos_specs_id) return result def _dict_with_children_specs(specs): """Convert specs list to a dict.""" result = {} for spec in specs: # Skip deleted keys if not spec['deleted']: result.update({spec['key']: spec['value']}) return result def _dict_with_qos_specs(rows): """Convert qos specs query results to list. Qos specs query results are a list of quality_of_service_specs refs, some are root entry of a qos specs (key == 'QoS_Specs_Name') and the rest are children entry, a.k.a detailed specs for a qos specs. This function converts query results to a dict using spec name as key. """ result = [] for row in rows: if row['key'] == 'QoS_Specs_Name': member = {} member['name'] = row['value'] member.update(dict(id=row['id'])) if row.specs: spec_dict = _dict_with_children_specs(row.specs) member.update(dict(consumer=spec_dict['consumer'])) del spec_dict['consumer'] member.update(dict(specs=spec_dict)) result.append(member) return result @require_admin_context def qos_specs_get(context, qos_specs_id, inactive=False): rows = _qos_specs_get_ref(context, qos_specs_id, None, inactive) return _dict_with_qos_specs(rows)[0] @require_admin_context def qos_specs_get_all(context, inactive=False, filters=None): """Returns a list of all qos_specs. Results is like: [{ 'id': SPECS-UUID, 'name': 'qos_spec-1', 'consumer': 'back-end', 'specs': { 'key1': 'value1', 'key2': 'value2', ... } }, { 'id': SPECS-UUID, 'name': 'qos_spec-2', 'consumer': 'front-end', 'specs': { 'key1': 'value1', 'key2': 'value2', ... 
} }, ] """ filters = filters or {} #TODO(zhiteng) Add filters for 'consumer' read_deleted = "yes" if inactive else "no" rows = model_query(context, models.QualityOfServiceSpecs, read_deleted=read_deleted). \ options(joinedload_all('specs')).all() return _dict_with_qos_specs(rows) @require_admin_context def qos_specs_get_by_name(context, name, inactive=False): rows = _qos_specs_get_by_name(context, name, None, inactive) return _dict_with_qos_specs(rows)[0] @require_admin_context def qos_specs_associations_get(context, qos_specs_id): """Return all entities associated with specified qos specs. For now, the only entity that is possible to associate with a qos specs is volume type, so this is just a wrapper of volume_type_qos_associations_get(). But it's possible to extend qos specs association to other entities, such as volumes, sometime in future. """ # Raise QoSSpecsNotFound if no specs found _qos_specs_get_ref(context, qos_specs_id, None) return volume_type_qos_associations_get(context, qos_specs_id) @require_admin_context def qos_specs_associate(context, qos_specs_id, type_id): """Associate volume type from specified qos specs.""" return volume_type_qos_associate(context, type_id, qos_specs_id) @require_admin_context def qos_specs_disassociate(context, qos_specs_id, type_id): """Disassociate volume type from specified qos specs.""" return volume_type_qos_disassociate(context, qos_specs_id, type_id) @require_admin_context def qos_specs_disassociate_all(context, qos_specs_id): """Disassociate all entities associated with specified qos specs. For now, the only entity that is possible to associate with a qos specs is volume type, so this is just a wrapper of volume_type_qos_disassociate_all(). But it's possible to extend qos specs association to other entities, such as volumes, sometime in future. """ return volume_type_qos_disassociate_all(context, qos_specs_id) @require_admin_context def qos_specs_item_delete(context, qos_specs_id, key): session = get_session() with session.begin(): _qos_specs_get_item(context, qos_specs_id, key) session.query(models.QualityOfServiceSpecs). \ filter(models.QualityOfServiceSpecs.key == key). \ filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def qos_specs_delete(context, qos_specs_id): session = get_session() with session.begin(): _qos_specs_get_ref(context, qos_specs_id, session) session.query(models.QualityOfServiceSpecs).\ filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id, models.QualityOfServiceSpecs.specs_id == qos_specs_id)).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def _qos_specs_get_item(context, qos_specs_id, key, session=None): result = model_query(context, models.QualityOfServiceSpecs, session=session). \ filter(models.QualityOfServiceSpecs.key == key). \ filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \ first() if not result: raise exception.QoSSpecsKeyNotFound( specs_key=key, specs_id=qos_specs_id) return result @require_admin_context def qos_specs_update(context, qos_specs_id, specs): """Make updates to an existing qos specs. Perform add, update or delete key/values to a qos specs. 
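
    Example (hypothetical call, for illustration only; ``specs_id`` is a
    placeholder for an existing qos specs UUID):

        qos_specs_update(context, specs_id,
                         {'total_iops_sec': '2000',
                          'total_bytes_sec': '2048000'})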
""" session = get_session() with session.begin(): # make sure qos specs exists _qos_specs_get_ref(context, qos_specs_id, session) spec_ref = None for key in specs.keys(): try: spec_ref = _qos_specs_get_item( context, qos_specs_id, key, session) except exception.QoSSpecsKeyNotFound: spec_ref = models.QualityOfServiceSpecs() id = None if spec_ref.get('id', None): id = spec_ref['id'] else: id = str(uuid.uuid4()) value = dict(id=id, key=key, value=specs[key], specs_id=qos_specs_id, deleted=False) LOG.debug('qos_specs_update() value: %s' % value) spec_ref.update(value) spec_ref.save(session=session) return specs #################### @require_context def volume_type_encryption_get(context, volume_type_id, session=None): return model_query(context, models.Encryption, session=session, read_deleted="no").\ filter_by(volume_type_id=volume_type_id).first() @require_admin_context def volume_type_encryption_delete(context, volume_type_id): session = get_session() with session.begin(): encryption = volume_type_encryption_get(context, volume_type_id, session) encryption.update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_admin_context def volume_type_encryption_create(context, volume_type_id, values): session = get_session() with session.begin(): encryption = models.Encryption() if 'volume_type_id' not in values: values['volume_type_id'] = volume_type_id encryption.update(values) session.add(encryption) return encryption @require_admin_context def volume_type_encryption_update(context, volume_type_id, values): session = get_session() with session.begin(): encryption = volume_type_encryption_get(context, volume_type_id, session) if not encryption: raise exception.VolumeTypeEncryptionNotFound(type_id= volume_type_id) encryption.update(values) return encryption def volume_type_encryption_volume_get(context, volume_type_id, session=None): volume_list = _volume_get_query(context, session=session, project_only=False).\ filter_by(volume_type_id=volume_type_id).\ all() return volume_list #################### @require_context def volume_encryption_metadata_get(context, volume_id, session=None): """Return the encryption key id for a given volume.""" volume_ref = _volume_get(context, volume_id) encryption_ref = volume_type_encryption_get(context, volume_ref['volume_type_id']) return { 'encryption_key_id': volume_ref['encryption_key_id'], 'control_location': encryption_ref['control_location'], 'cipher': encryption_ref['cipher'], 'key_size': encryption_ref['key_size'], 'provider': encryption_ref['provider'], } #################### @require_context def _volume_glance_metadata_get_all(context, session=None): rows = model_query(context, models.VolumeGlanceMetadata, project_only=True, session=session).\ filter_by(deleted=False).\ all() return rows @require_context def volume_glance_metadata_get_all(context): """Return the Glance metadata for all volumes.""" return _volume_glance_metadata_get_all(context) @require_context @require_volume_exists def _volume_glance_metadata_get(context, volume_id, session=None): rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ filter_by(volume_id=volume_id).\ filter_by(deleted=False).\ all() if not rows: raise exception.GlanceMetadataNotFound(id=volume_id) return rows @require_context @require_volume_exists def volume_glance_metadata_get(context, volume_id): """Return the Glance metadata for the specified volume.""" return _volume_glance_metadata_get(context, volume_id) @require_context 
@require_snapshot_exists def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None): rows = model_query(context, models.VolumeGlanceMetadata, session=session).\ filter_by(snapshot_id=snapshot_id).\ filter_by(deleted=False).\ all() if not rows: raise exception.GlanceMetadataNotFound(id=snapshot_id) return rows @require_context @require_snapshot_exists def volume_snapshot_glance_metadata_get(context, snapshot_id): """Return the Glance metadata for the specified snapshot.""" return _volume_snapshot_glance_metadata_get(context, snapshot_id) @require_context @require_volume_exists def volume_glance_metadata_create(context, volume_id, key, value): """Update the Glance metadata for a volume by adding a new key:value pair. This API does not support changing the value of a key once it has been created. """ session = get_session() with session.begin(): rows = session.query(models.VolumeGlanceMetadata).\ filter_by(volume_id=volume_id).\ filter_by(key=key).\ filter_by(deleted=False).all() if len(rows) > 0: raise exception.GlanceMetadataExists(key=key, volume_id=volume_id) vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = key vol_glance_metadata.value = str(value) session.add(vol_glance_metadata) return @require_context @require_snapshot_exists def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id): """Update the Glance metadata for a snapshot. This copies all of the key:value pairs from the originating volume, to ensure that a volume created from the snapshot will retain the original metadata. """ session = get_session() with session.begin(): metadata = _volume_glance_metadata_get(context, volume_id, session=session) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.snapshot_id = snapshot_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(session=session) @require_context @require_volume_exists def volume_glance_metadata_copy_from_volume_to_volume(context, src_volume_id, volume_id): """Update the Glance metadata for a volume. This copies all all of the key:value pairs from the originating volume, to ensure that a volume created from the volume (clone) will retain the original metadata. """ session = get_session() with session.begin(): metadata = _volume_glance_metadata_get(context, src_volume_id, session=session) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(session=session) @require_context @require_volume_exists def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id): """Update the Glance metadata from a volume (created from a snapshot) by copying all of the key:value pairs from the originating snapshot. This is so that the Glance metadata from the original volume is retained. 
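
    Example (illustrative only; both identifiers are placeholders):

        volume_glance_metadata_copy_to_volume(context, new_volume['id'],
                                              snapshot['id'])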
""" session = get_session() with session.begin(): metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id, session=session) for meta in metadata: vol_glance_metadata = models.VolumeGlanceMetadata() vol_glance_metadata.volume_id = volume_id vol_glance_metadata.key = meta['key'] vol_glance_metadata.value = meta['value'] vol_glance_metadata.save(session=session) @require_context def volume_glance_metadata_delete_by_volume(context, volume_id): model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ filter_by(volume_id=volume_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def volume_glance_metadata_delete_by_snapshot(context, snapshot_id): model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\ filter_by(snapshot_id=snapshot_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) ############################### @require_context def backup_get(context, backup_id): result = model_query(context, models.Backup, project_only=True).\ filter_by(id=backup_id).\ first() if not result: raise exception.BackupNotFound(backup_id=backup_id) return result def _backup_get_all(context, filters=None): session = get_session() with session.begin(): # Generate the query query = model_query(context, models.Backup) if filters: query = query.filter_by(**filters) return query.all() @require_admin_context def backup_get_all(context, filters=None): return _backup_get_all(context, filters) @require_admin_context def backup_get_all_by_host(context, host): return model_query(context, models.Backup).filter_by(host=host).all() @require_context def backup_get_all_by_project(context, project_id, filters=None): authorize_project_context(context, project_id) if not filters: filters = {} else: filters = filters.copy() filters['project_id'] = project_id return _backup_get_all(context, filters) @require_context def backup_create(context, values): backup = models.Backup() if not values.get('id'): values['id'] = str(uuid.uuid4()) backup.update(values) session = get_session() with session.begin(): backup.save(session) return backup @require_context def backup_update(context, backup_id, values): session = get_session() with session.begin(): backup = model_query(context, models.Backup, session=session, read_deleted="yes").\ filter_by(id=backup_id).first() if not backup: raise exception.BackupNotFound( _("No backup with id %s") % backup_id) backup.update(values) return backup @require_admin_context def backup_destroy(context, backup_id): model_query(context, models.Backup).\ filter_by(id=backup_id).\ update({'status': 'deleted', 'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) ############################### @require_context def _transfer_get(context, transfer_id, session=None): query = model_query(context, models.Transfer, session=session).\ filter_by(id=transfer_id) if not is_admin_context(context): volume = models.Volume query = query.filter(models.Transfer.volume_id == volume.id, volume.project_id == context.project_id) result = query.first() if not result: raise exception.TransferNotFound(transfer_id=transfer_id) return result @require_context def transfer_get(context, transfer_id): return _transfer_get(context, transfer_id) def _translate_transfers(transfers): results = [] for transfer in transfers: r = {} r['id'] = transfer['id'] r['volume_id'] = transfer['volume_id'] r['display_name'] = transfer['display_name'] 
r['created_at'] = transfer['created_at'] r['deleted'] = transfer['deleted'] results.append(r) return results @require_admin_context def transfer_get_all(context): results = model_query(context, models.Transfer).all() return _translate_transfers(results) @require_context def transfer_get_all_by_project(context, project_id): authorize_project_context(context, project_id) query = model_query(context, models.Transfer).\ filter(models.Volume.id == models.Transfer.volume_id, models.Volume.project_id == project_id) results = query.all() return _translate_transfers(results) @require_context def transfer_create(context, values): if not values.get('id'): values['id'] = str(uuid.uuid4()) session = get_session() with session.begin(): volume_ref = _volume_get(context, values['volume_id'], session=session) if volume_ref['status'] != 'available': msg = _('Volume must be available') LOG.error(msg) raise exception.InvalidVolume(reason=msg) volume_ref['status'] = 'awaiting-transfer' transfer = models.Transfer() transfer.update(values) session.add(transfer) volume_ref.update(volume_ref) return transfer @require_context def transfer_destroy(context, transfer_id): session = get_session() with session.begin(): transfer_ref = _transfer_get(context, transfer_id, session=session) volume_ref = _volume_get(context, transfer_ref['volume_id'], session=session) # If the volume state is not 'awaiting-transfer' don't change it, but # we can still mark the transfer record as deleted. if volume_ref['status'] != 'awaiting-transfer': msg = _('Volume in unexpected state %s, ' 'expected awaiting-transfer') % volume_ref['status'] LOG.error(msg) else: volume_ref['status'] = 'available' volume_ref.update(volume_ref) volume_ref.save(session=session) model_query(context, models.Transfer, session=session).\ filter_by(id=transfer_id).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')}) @require_context def transfer_accept(context, transfer_id, user_id, project_id): session = get_session() with session.begin(): transfer_ref = _transfer_get(context, transfer_id, session) volume_id = transfer_ref['volume_id'] volume_ref = _volume_get(context, volume_id, session=session) if volume_ref['status'] != 'awaiting-transfer': msg = _('Transfer %(transfer_id)s: Volume id %(volume_id)s in ' 'unexpected state %(status)s, expected ' 'awaiting-transfer') % {'transfer_id': transfer_id, 'volume_id': volume_ref['id'], 'status': volume_ref['status']} LOG.error(msg) raise exception.InvalidVolume(reason=msg) volume_ref['status'] = 'available' volume_ref['user_id'] = user_id volume_ref['project_id'] = project_id volume_ref['updated_at'] = literal_column('updated_at') volume_ref.update(volume_ref) session.query(models.Transfer).\ filter_by(id=transfer_ref['id']).\ update({'deleted': True, 'deleted_at': timeutils.utcnow(), 'updated_at': literal_column('updated_at')})
apache-2.0
-7,665,454,611,863,467,000
32.830478
79
0.588356
false
4.157223
false
false
false
tomkralidis/pywps
pywps/processing/job.py
1
3570
################################################################## # Copyright 2018 Open Source Geospatial Foundation and others # # licensed under MIT, Please consult LICENSE.txt for details # ################################################################## import os import tempfile import pywps.configuration as config import logging LOGGER = logging.getLogger("PYWPS") class Job(object): """ :class:`Job` represents a processing job. """ def __init__(self, process, wps_request, wps_response): self.process = process self.method = '_run_process' self.wps_request = wps_request self.wps_response = wps_response @property def name(self): return self.process.identifier @property def workdir(self): return self.process.workdir @property def uuid(self): return self.process.uuid def dump(self): LOGGER.debug('dump job ...') import dill filename = tempfile.mkstemp(prefix='job_', suffix='.dump', dir=self.workdir)[1] with open(filename, 'w') as fp: dill.dump(self, fp) LOGGER.debug("dumped job status to {}".format(filename)) return filename return None @classmethod def load(cls, filename): LOGGER.debug('load job ...') import dill with open(filename) as fp: job = dill.load(fp) return job return None def run(self): getattr(self.process, self.method)(self.wps_request, self.wps_response) class JobLauncher(object): """ :class:`JobLauncher` is a command line tool to launch a job from a file with a dumped job state. Example call: ``joblauncher -c /etc/pywps.cfg job-1001.dump`` """ def create_parser(self): import argparse parser = argparse.ArgumentParser(prog="joblauncher") parser.add_argument("-c", "--config", help="Path to pywps configuration.") parser.add_argument("filename", help="File with dumped pywps job object.") return parser def run(self, args): if args.config: LOGGER.debug("using pywps_cfg={}".format(args.config)) os.environ['PYWPS_CFG'] = args.config self._run_job(args.filename) def _run_job(self, filename): job = Job.load(filename) # init config if 'PYWPS_CFG' in os.environ: config.load_configuration(os.environ['PYWPS_CFG']) # update PATH os.environ['PATH'] = "{0}:{1}".format( config.get_config_value('processing', 'path'), os.environ.get('PATH')) # cd into workdir os.chdir(job.workdir) # init logger ... code copied from app.Service if config.get_config_value('logging', 'file') and config.get_config_value('logging', 'level'): LOGGER.setLevel(getattr(logging, config.get_config_value('logging', 'level'))) if not LOGGER.handlers: # hasHandlers in Python 3.x fh = logging.FileHandler(config.get_config_value('logging', 'file')) fh.setFormatter(logging.Formatter(config.get_config_value('logging', 'format'))) LOGGER.addHandler(fh) else: # NullHandler if not LOGGER.handlers: LOGGER.addHandler(logging.NullHandler()) job.run() def launcher(): """ Run job launcher command line. """ job_launcher = JobLauncher() parser = job_launcher.create_parser() args = parser.parse_args() job_launcher.run(args)
mit
5,095,248,502,263,513,000
31.454545
102
0.582353
false
4.089347
true
false
false
jcmcclurg/serverpower
profiling/defaultGetSetpoints.py
1
1840
#!/usr/bin/python import numpy import re import cPickle as pickle import gzip import time def getSetpointsFromRaw(filename,verbose=False): printEvery = 1 if(verbose): startTime = time.time() print "Opened raw file %s."%(filename) else: startTime = 0 f=open(filename,'rb') data = [] for line in f: if re.search('^[0-9]+(\.[0-9]*)?,[0-9]+(\.[0-9]*)?$', line) != None: v = [ float(i) for i in line.strip().split(',') ] data.append(v) if verbose and (time.time() - startTime > printEvery): startTime = time.time() print "The list has %d blocks."%(len(data)) return numpy.array(data) def rawFileToSetpointsFile(oldFilename,newFilename,verbose=False): if verbose: print "Loading data from raw..." data = getSetpointsFromRaw(oldFilename,verbose) if verbose: print "Writing data (%d blocks) to setpoints file..."%(data.shape[0]) fp = gzip.open(newFilename,'wb') pickle.dump(data,fp,-1) fp.close() return data def readSetpointsFile(filename,verbose=False): try: if verbose: print "Loading data from setpoints file..." fp = gzip.open(filename+"_cache.gz","rb") data = pickle.load(fp) fp.close() except IOError as err: if verbose: print "Does not exist (%s). Attempting to create..."%(err) data = rawFileToSetpointsFile(filename, filename+"_cache.gz", verbose) if verbose: print "Got %d blocks."%(data.shape[0]) return data if __name__ == "__main__": exps = { 'stress': '1452722752.651508100', 'signal_insert_delays':'1452732970.201413700', 'rapl':'1452743186.881235700','powerclamp':'1452753403.717082000','cpufreq':'1452796934.955382300' } for exp in exps: date = exps[exp] print exp+": "+date d = "experiments/"+exp+"/"+date for i in [1,2,3,4]: print " server "+str(i) data = readSetpointsFile(d+"/server"+str(i)+"/"+date+".testlog",True) print ""
gpl-2.0
-3,381,506,806,175,974,000
25.285714
191
0.66413
false
2.830769
false
false
false
bacher09/gpackages-metadata
tests/test_news.py
1
1144
from .utils import TestCase, TESTDATA_DIR from packages_metadata.generic_metadata import news import os.path class TestNews(TestCase): @classmethod def setUpClass(cls): cls.news_dir = os.path.join(TESTDATA_DIR, 'news') cls.news_titles = [ 'ia64-java-removal', 'glib-228', 'gnustep-new-layout', 'gnome-232' ] def test_news(self): test_news = news.News(repo_path=self.news_dir, news_path=self.news_dir) news_list = list(test_news) news_titles = [item.title for item in news_list] self.assertSetEqual(set(news_titles), set(self.news_titles)) glib_news = None for item in news_list: if item.title == "glib-228": glib_news = item.default_news break self.assertEqual(glib_news.title.strip(), "Upgrade to GLIB 2.28") self.assertEqual(glib_news.revision, 1) authors = glib_news.authors self.assertEqual(len(authors), 1) self.assertEqual(authors[0].email, "[email protected]") self.assertTupleEqual(glib_news.if_installed, ("<dev-libs/glib-2.28",))
gpl-2.0
3,034,550,257,392,151,600
34.75
79
0.625
false
3.445783
true
false
false
djolent/WebApp
LifeSciences/AzureBlast/AzureBlast/BatchScripts/AnalysisJobManager.py
1
4604
import os import sys import datetime import time import azure.batch.batch_service_client as batch import azure.batch.batch_auth as batchauth import azure.batch.models as batchmodels from azure.storage.table import TableService, TableBatch from azure.storage.blob import BlockBlobService def get_analysis_state(all_tasks_complete, any_failures): if all_tasks_complete and any_failures: return 'Error' if all_tasks_complete: return 'Complete' return 'Running' def get_query_state(task): if task.state == batchmodels.TaskState.active: return 'Waiting' if task.state == batchmodels.TaskState.preparing: return 'Waiting' if task.state == batchmodels.TaskState.running: return 'Running' if task.state == batchmodels.TaskState.completed: if task.execution_info.exit_code == 0: return 'Success' return 'Error' def wait_for_tasks_to_complete( table_service, batch_client, entity_pk, entity_rk, job_id): """ Returns when all tasks in the specified job reach the Completed state. """ while True: entity = table_service.get_entity( 'AnalysisEntity', entity_pk, entity_rk) tasks = batch_client.task.list(job_id) incomplete_tasks = [task for task in tasks if task.id != 'JobManager' and task.state != batchmodels.TaskState.completed] complete_tasks = [task for task in tasks if task.id != 'JobManager' and task.state == batchmodels.TaskState.completed] failed_tasks = [task for task in complete_tasks if task.execution_info.exit_code != 0 or task.execution_info.scheduling_error is not None] queries = table_service.query_entities( 'AnalysisQueryEntity', filter="PartitionKey eq '{}'".format(entity.RowKey)) current_batch_count = 0 updateBatch = TableBatch() for task in tasks: matching_queries = [q for q in queries if q.RowKey == task.id] if not matching_queries: print('Could not find query {}'.format(task.id)) continue query = matching_queries[0] update = False state = get_query_state(task) if query._State != state: query._State = state update = True if task.state == batchmodels.TaskState.running: if not hasattr(query, 'StartTime'): query.StartTime = task.execution_info.start_time update = True if task.state == batchmodels.TaskState.completed: if not hasattr(query, 'EndTime'): query.EndTime = task.execution_info.end_time update = True if update: updateBatch.update_entity(query) current_batch_count += 1 if current_batch_count == 99: table_service.commit_batch('AnalysisQueryEntity', updateBatch) current_batch_count = 0 updateBatch = TableBatch() if current_batch_count > 0: table_service.commit_batch('AnalysisQueryEntity', updateBatch) all_tasks_complete = not incomplete_tasks any_failures = len(failed_tasks) > 0 entity.CompletedTasks = len(complete_tasks) entity._State = get_analysis_state(all_tasks_complete, any_failures) if not incomplete_tasks: entity.EndTime = datetime.datetime.utcnow() table_service.update_entity('AnalysisEntity', entity) return else: table_service.update_entity('AnalysisEntity', entity) time.sleep(5) if __name__ == '__main__': storage_account = sys.argv[1] storage_key = sys.argv[2] batch_account = sys.argv[3] batch_key = sys.argv[4] batch_url = sys.argv[5] job_id = sys.argv[6] entity_pk = sys.argv[7] entity_rk = sys.argv[8] table_service = TableService(account_name=storage_account, account_key=storage_key) blob_service = BlockBlobService(account_name=storage_account, account_key=storage_key) credentials = batchauth.SharedKeyCredentials(batch_account, batch_key) batch_client = batch.BatchServiceClient(credentials, base_url=batch_url) wait_for_tasks_to_complete(table_service, batch_client, entity_pk, entity_rk, 
job_id)
mit
1,266,419,782,913,377,800
35.251969
89
0.597524
false
4.223853
false
false
false
jelly/calibre
src/calibre/gui2/tweak_book/check.py
2
9562
#!/usr/bin/env python2 # vim:fileencoding=utf-8 from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>' import sys from PyQt5.Qt import ( QIcon, Qt, QSplitter, QListWidget, QTextBrowser, QPalette, QMenu, QListWidgetItem, pyqtSignal, QApplication, QStyledItemDelegate) from calibre.ebooks.oeb.polish.check.base import WARN, INFO, DEBUG, ERROR, CRITICAL from calibre.ebooks.oeb.polish.check.main import run_checks, fix_errors from calibre.gui2 import NO_URL_FORMATTING from calibre.gui2.tweak_book import tprefs from calibre.gui2.tweak_book.widgets import BusyCursor def icon_for_level(level): if level > WARN: icon = 'dialog_error.png' elif level == WARN: icon = 'dialog_warning.png' elif level == INFO: icon = 'dialog_information.png' else: icon = None return QIcon(I(icon)) if icon else QIcon() def prefix_for_level(level): if level > WARN: text = _('ERROR') elif level == WARN: text = _('WARNING') elif level == INFO: text = _('INFO') else: text = '' if text: text += ': ' return text class Delegate(QStyledItemDelegate): def initStyleOption(self, option, index): super(Delegate, self).initStyleOption(option, index) if index.row() == self.parent().currentRow(): option.font.setBold(True) option.backgroundBrush = self.parent().palette().brush(QPalette.AlternateBase) class Check(QSplitter): item_activated = pyqtSignal(object) check_requested = pyqtSignal() fix_requested = pyqtSignal(object) def __init__(self, parent=None): QSplitter.__init__(self, parent) self.setChildrenCollapsible(False) self.items = i = QListWidget(self) i.setContextMenuPolicy(Qt.CustomContextMenu) i.customContextMenuRequested.connect(self.context_menu) self.items.setSpacing(3) self.items.itemDoubleClicked.connect(self.current_item_activated) self.items.currentItemChanged.connect(self.current_item_changed) self.items.setSelectionMode(self.items.NoSelection) self.delegate = Delegate(self.items) self.items.setItemDelegate(self.delegate) self.addWidget(i) self.help = h = QTextBrowser(self) h.anchorClicked.connect(self.link_clicked) h.setOpenLinks(False) self.addWidget(h) self.setStretchFactor(0, 100) self.setStretchFactor(1, 50) self.clear_at_startup() state = tprefs.get('check-book-splitter-state', None) if state is not None: self.restoreState(state) def clear_at_startup(self): self.clear_help(_('Check has not been run')) self.items.clear() def context_menu(self, pos): m = QMenu() if self.items.count() > 0: m.addAction(QIcon(I('edit-copy.png')), _('Copy list of errors to clipboard'), self.copy_to_clipboard) if list(m.actions()): m.exec_(self.mapToGlobal(pos)) def copy_to_clipboard(self): items = [] for item in (self.items.item(i) for i in xrange(self.items.count())): msg = unicode(item.text()) msg = prefix_for_level(item.data(Qt.UserRole).level) + msg items.append(msg) if items: QApplication.clipboard().setText('\n'.join(items)) def save_state(self): tprefs.set('check-book-splitter-state', bytearray(self.saveState())) def clear_help(self, msg=None): if msg is None: msg = _('No problems found') self.help.setText('<h2>%s</h2><p><a style="text-decoration:none" title="%s" href="run:check">%s</a></p>' % ( msg, _('Click to run a check on the book'), _('Run check'))) def link_clicked(self, url): url = unicode(url.toString(NO_URL_FORMATTING)) if url == 'activate:item': self.current_item_activated() elif url == 'run:check': self.check_requested.emit() elif url == 'fix:errors': errors = [self.items.item(i).data(Qt.UserRole) for i in 
xrange(self.items.count())] self.fix_requested.emit(errors) elif url.startswith('fix:error,'): num = int(url.rpartition(',')[-1]) errors = [self.items.item(num).data(Qt.UserRole)] self.fix_requested.emit(errors) elif url.startswith('activate:item:'): index = int(url.rpartition(':')[-1]) self.location_activated(index) def next_error(self, delta=1): row = self.items.currentRow() num = self.items.count() if num > 0: row = (row + delta) % num self.items.setCurrentRow(row) self.current_item_activated() def current_item_activated(self, *args): i = self.items.currentItem() if i is not None: err = i.data(Qt.UserRole) if err.has_multiple_locations: self.location_activated(0) else: self.item_activated.emit(err) def location_activated(self, index): i = self.items.currentItem() if i is not None: err = i.data(Qt.UserRole) err.current_location_index = index self.item_activated.emit(err) def current_item_changed(self, *args): i = self.items.currentItem() self.help.setText('') def loc_to_string(line, col): loc = '' if line is not None: loc = _('line: %d') % line if col is not None: loc += _(' column: %d') % col if loc: loc = ' (%s)' % loc return loc if i is not None: err = i.data(Qt.UserRole) header = {DEBUG:_('Debug'), INFO:_('Information'), WARN:_('Warning'), ERROR:_('Error'), CRITICAL:_('Error')}[err.level] ifix = '' loc = loc_to_string(err.line, err.col) if err.INDIVIDUAL_FIX: ifix = '<a href="fix:error,%d" title="%s">%s</a><br><br>' % ( self.items.currentRow(), _('Try to fix only this error'), err.INDIVIDUAL_FIX) open_tt = _('Click to open in editor') fix_tt = _('Try to fix all fixable errors automatically. Only works for some types of error.') fix_msg = _('Try to correct all fixable errors automatically') run_tt, run_msg = _('Re-run the check'), _('Re-run check') header = '<style>a { text-decoration: none}</style><h2>%s [%d / %d]</h2>' % ( header, self.items.currentRow()+1, self.items.count()) msg = '<p>%s</p>' footer = '<div>%s<a href="fix:errors" title="%s">%s</a><br><br> <a href="run:check" title="%s">%s</a></div>' if err.has_multiple_locations: activate = [] for i, (name, lnum, col) in enumerate(err.all_locations): activate.append('<a href="activate:item:%d" title="%s">%s %s</a>' % ( i, open_tt, name, loc_to_string(lnum, col))) many = len(activate) > 2 activate = '<div>%s</div>' % ('<br>'.join(activate)) if many: activate += '<br>' template = header + ((msg + activate) if many else (activate + msg)) + footer else: activate = '<div><a href="activate:item" title="%s">%s %s</a></div>' % ( open_tt, err.name, loc) template = header + activate + msg + footer self.help.setText( template % (err.HELP, ifix, fix_tt, fix_msg, run_tt, run_msg)) def run_checks(self, container): with BusyCursor(): self.show_busy() QApplication.processEvents() errors = run_checks(container) self.hide_busy() for err in sorted(errors, key=lambda e:(100 - e.level, e.name)): i = QListWidgetItem('%s\xa0\xa0\xa0\xa0[%s]' % (err.msg, err.name), self.items) i.setData(Qt.UserRole, err) i.setIcon(icon_for_level(err.level)) if errors: self.items.setCurrentRow(0) self.current_item_changed() self.items.setFocus(Qt.OtherFocusReason) else: self.clear_help() def fix_errors(self, container, errors): with BusyCursor(): self.show_busy(_('Running fixers, please wait...')) QApplication.processEvents() changed = fix_errors(container, errors) self.run_checks(container) return changed def show_busy(self, msg=_('Running checks, please wait...')): self.help.setText(msg) self.items.clear() def hide_busy(self): self.help.setText('') self.items.clear() 
def keyPressEvent(self, ev): if ev.key() in (Qt.Key_Enter, Qt.Key_Return): self.current_item_activated() return super(Check, self).keyPressEvent(ev) def clear(self): self.items.clear() self.clear_help() def main(): from calibre.gui2 import Application from calibre.gui2.tweak_book.boss import get_container app = Application([]) # noqa path = sys.argv[-1] container = get_container(path) d = Check() d.run_checks(container) d.show() app.exec_() if __name__ == '__main__': main()
gpl-3.0
6,725,639,900,550,539,000
35.496183
131
0.566409
false
3.684778
false
false
false
alexliew/learn_python_the_hard_way
ex5.py
1
2837
my_name = "Alex Liew"
my_age = 25 # this is no lie
my_height = 174 # cm
my_weight = 65 # kg
my_eyes = 'Brown'
my_teeth = 'White'
my_hair = 'Black'

print("Let's talk about {0}.".format(my_name))
print("He's {0} centimeters tall.".format(my_height))
print("He's {0} kilograms heavy.".format(my_weight))
print("Actually that's not that heavy.")
print("He's got {0} eyes and {1} hair.".format(my_eyes, my_hair))
print("His teeth are usually {0} depending on the coffee.".format(my_teeth))

print("If I add {0}, {1}, and {2} I'd get {3}.".format(my_age, my_height, my_weight, my_age + my_height + my_weight))

print("Without 'my_' in front of the variables.")

name = "Alex Liew"
age = 25 # this is no lie
height = 174 # cm
weight = 65 # kg
eyes = 'Brown'
teeth = 'White'
hair = 'Black'

print("Let's talk about {0}.".format(name))
print("He's {0} centimeters tall.".format(height))
print("He's {0} kilograms heavy.".format(weight))
print("Actually that's not that heavy.")
print("He's got {0} eyes and {1} hair.".format(eyes, hair))
print("His teeth are usually {0} depending on the coffee.".format(teeth))

print("If I add {0}, {1}, and {2} I'd get {3}.".format(age, height, weight, age + height + weight))

# Additional Study Drills

# convert inches to centimeters (1 inch is 2.54 centimeters)
inches = 23
centimeters = inches * 2.54
print("{0} inches is equal to {1} centimeters.".format(inches, centimeters))

# convert pounds to kilograms (roughly 2.2 pounds per kilogram)
pounds = 22
kilograms = pounds / 2.2
print("{0} pounds is equal to {1} kilograms.".format(pounds, kilograms))

# You cannot switch between automatic and manual field numbering.
# print("The number {} in base 10 is equal to the number {0:b} in base 2.".format(5))

# You must include the field number if using a format specification in a string with multiple fields.
# print("The number {} in base 10 is equal to the number {:b} in base 2.".format(5))

num = 23
print("The number {0} in base 10 is equal to the number {0:b} in base 2.".format(num))
print("The number {0} in base 10 is equal to the number {0:o} in base 8.".format(num))
print("The number {0} in base 10 is equal to the number {0:x} in base 16.".format(num))

print("The unicode character represented by the integer {0} is {0:c}.".format(97))

print("The number {0} represented using exponent notation is {0:e}.".format(num))
print("The number {0} represented using fixed point notation is {0:f}.".format(num))

fnum = 123985.12376908
print("{0} is {0:-<+,.5f}".format(fnum))
print("{0} is {0:-<+20,.5f}".format(fnum))
print("{0} is {0:-<20,.5f}".format(fnum))
print("{0} is {0:->20,.5f}".format(fnum))
print("{0} is {0:-=20,.5f}".format(fnum))
print("{0} is {0:-^20,.5f}".format(fnum))

# thing = [1, 2, 3]
thing = 'a sentence'
print("{0} stringified is {0!s}".format(thing))
print("{0} reprified is {0!r}".format(thing))
print("{0} asciified is {0!a}".format(thing))
mit
7,507,327,738,741,249,000
35.844156
117
0.667607
false
2.709647
false
true
false
maxwward/SCOPEBak
askbot/views/readers.py
1
27155
# encoding:utf-8 """ :synopsis: views "read-only" for main textual content By main textual content is meant - text of Exercises, Problems and Comments. The "read-only" requirement here is not 100% strict, as for example "exercise" view does allow adding new comments via Ajax form post. """ import datetime import logging import urllib import operator from django.shortcuts import get_object_or_404 from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseNotAllowed from django.core.paginator import Paginator, EmptyPage, InvalidPage from django.template import Context from django.utils import simplejson from django.utils.html import escape from django.utils.translation import ugettext as _ from django.utils.translation import ungettext from django.utils import translation from django.views.decorators import csrf from django.core.urlresolvers import reverse from django.core import exceptions as django_exceptions from django.contrib.humanize.templatetags import humanize from django.http import QueryDict from django.conf import settings import askbot from askbot import exceptions from askbot.utils.diff import textDiff as htmldiff from askbot.forms import ProblemForm, ShowExerciseForm, AnswerForm from askbot import conf from askbot import models from askbot import schedules from askbot.models.tag import Tag from askbot import const from askbot.utils import functions from askbot.utils.html import sanitize_html from askbot.utils.decorators import anonymous_forbidden, ajax_only, get_only from askbot.search.state_manager import SearchState, DummySearchState from askbot.templatetags import extra_tags from askbot.conf import settings as askbot_settings from askbot.skins.loaders import render_into_skin, get_template #jinja2 template loading enviroment from askbot.views import context # used in index page #todo: - take these out of const or settings from askbot.models import Post, Vote INDEX_PAGE_SIZE = 30 INDEX_AWARD_SIZE = 15 INDEX_TAGS_SIZE = 25 # used in tags list DEFAULT_PAGE_SIZE = 60 # used in exercises # used in problems #refactor? - we have these #views that generate a listing of exercises in one way or another: #index, without_problem, exercises, search, tag #should we dry them up? #related topics - information drill-down, search refinement def index(request):#generates front page - shows listing of exercises sorted in various ways """index view mapped to the root url of the Q&A site """ return HttpResponseRedirect(reverse('exercises')) def exercises(request, **kwargs): """ List of Exercises, Tagged exercises, and Exercises without problems. 
matching search query or user selection """ #before = datetime.datetime.now() if request.method != 'GET': return HttpResponseNotAllowed(['GET']) search_state = SearchState( user_logged_in=request.user.is_authenticated(), **kwargs ) page_size = int(askbot_settings.DEFAULT_EXERCISES_PAGE_SIZE) qs, meta_data = models.Thread.objects.run_advanced_search( request_user=request.user, search_state=search_state ) if meta_data['non_existing_tags']: search_state = search_state.remove_tags(meta_data['non_existing_tags']) paginator = Paginator(qs, page_size) if paginator.num_pages < search_state.page: search_state.page = 1 page = paginator.page(search_state.page) page.object_list = list(page.object_list) # evaluate the queryset # INFO: Because for the time being we need exercise posts and thread authors # down the pipeline, we have to precache them in thread objects models.Thread.objects.precache_view_data_hack(threads=page.object_list) related_tags = Tag.objects.get_related_to_search( threads=page.object_list, ignored_tag_names=meta_data.get('ignored_tag_names',[]) ) tag_list_type = askbot_settings.TAG_LIST_FORMAT if tag_list_type == 'cloud': #force cloud to sort by name related_tags = sorted(related_tags, key = operator.attrgetter('name')) contributors = list( models.Thread.objects.get_thread_contributors( thread_list=page.object_list ).only('id', 'username', 'gravatar') ) paginator_context = { 'is_paginated' : (paginator.count > page_size), 'pages': paginator.num_pages, 'page': search_state.page, 'has_previous': page.has_previous(), 'has_next': page.has_next(), 'previous': page.previous_page_number(), 'next': page.next_page_number(), 'base_url' : search_state.query_string(), 'page_size' : page_size, } # We need to pass the rss feed url based # on the search state to the template. # We use QueryDict to get a querystring # from dicts and arrays. Much cleaner # than parsing and string formating. 
rss_query_dict = QueryDict("").copy() if search_state.query: # We have search string in session - pass it to # the QueryDict rss_query_dict.update({"q": search_state.query}) if search_state.tags: # We have tags in session - pass it to the # QueryDict but as a list - we want tags+ rss_query_dict.setlist("tags", search_state.tags) context_feed_url = '/%sfeeds/rss/?%s' % (settings.ASKBOT_URL, rss_query_dict.urlencode()) # Format the url with the QueryDict reset_method_count = len(filter(None, [search_state.query, search_state.tags, meta_data.get('author_name', None)])) if request.is_ajax(): q_count = paginator.count exercise_counter = ungettext('%(q_num)s exercise', '%(q_num)s exercises', q_count) exercise_counter = exercise_counter % {'q_num': humanize.intcomma(q_count),} if q_count > page_size: paginator_tpl = get_template('main_page/paginator.html', request) paginator_html = paginator_tpl.render(Context({ 'context': functions.setup_paginator(paginator_context), 'exercises_count': q_count, 'page_size' : page_size, 'search_state': search_state, })) else: paginator_html = '' exercises_tpl = get_template('main_page/exercises_loop.html', request) exercises_html = exercises_tpl.render(Context({ 'threads': page, 'search_state': search_state, 'reset_method_count': reset_method_count, 'request': request })) ajax_data = { 'query_data': { 'tags': search_state.tags, 'sort_order': search_state.sort, 'ask_query_string': search_state.ask_query_string(), }, 'paginator': paginator_html, 'exercise_counter': exercise_counter, 'faces': [],#[extra_tags.gravatar(contributor, 48) for contributor in contributors], 'feed_url': context_feed_url, 'query_string': search_state.query_string(), 'page_size' : page_size, 'exercises': exercises_html.replace('\n',''), 'non_existing_tags': meta_data['non_existing_tags'] } ajax_data['related_tags'] = [{ 'name': escape(tag.name), 'used_count': humanize.intcomma(tag.local_used_count) } for tag in related_tags] return HttpResponse(simplejson.dumps(ajax_data), mimetype = 'application/json') else: # non-AJAX branch template_data = { 'active_tab': 'exercises', 'author_name' : meta_data.get('author_name',None), 'contributors' : contributors, 'context' : paginator_context, 'is_without_problem' : False,#remove this from template 'interesting_tag_names': meta_data.get('interesting_tag_names', None), 'ignored_tag_names': meta_data.get('ignored_tag_names', None), 'subscribed_tag_names': meta_data.get('subscribed_tag_names', None), 'language_code': translation.get_language(), 'name_of_anonymous_user' : models.get_name_of_anonymous_user(), 'page_class': 'main-page', 'page_size': page_size, 'query': search_state.query, 'threads' : page, 'exercises_count' : paginator.count, 'reset_method_count': reset_method_count, 'scope': search_state.scope, 'show_sort_by_relevance': conf.should_show_sort_by_relevance(), 'search_tags' : search_state.tags, 'sort': search_state.sort, 'tab_id' : search_state.sort, 'tags' : related_tags, 'tag_list_type' : tag_list_type, 'font_size' : extra_tags.get_tag_font_size(related_tags), 'display_tag_filter_strategy_choices': conf.get_tag_display_filter_strategy_choices(), 'email_tag_filter_strategy_choices': const.TAG_EMAIL_FILTER_STRATEGY_CHOICES, 'update_avatar_data': schedules.should_update_avatar_data(request), 'query_string': search_state.query_string(), 'search_state': search_state, 'feed_url': context_feed_url, } return render_into_skin('main_page.html', template_data, request) def tags(request):#view showing a listing of available tags - plain list tag_list_type 
= askbot_settings.TAG_LIST_FORMAT if tag_list_type == 'list': stag = "" is_paginated = True sortby = request.GET.get('sort', 'used') try: page = int(request.GET.get('page', '1')) except ValueError: page = 1 stag = request.GET.get("query", "").strip() if stag != '': objects_list = Paginator( models.Tag.objects.filter( deleted=False, name__icontains=stag ).exclude( used_count=0 ), DEFAULT_PAGE_SIZE ) else: if sortby == "name": objects_list = Paginator(models.Tag.objects.all().filter(deleted=False).exclude(used_count=0).order_by("name"), DEFAULT_PAGE_SIZE) else: objects_list = Paginator(models.Tag.objects.all().filter(deleted=False).exclude(used_count=0).order_by("-used_count"), DEFAULT_PAGE_SIZE) try: tags = objects_list.page(page) except (EmptyPage, InvalidPage): tags = objects_list.page(objects_list.num_pages) paginator_data = { 'is_paginated' : is_paginated, 'pages': objects_list.num_pages, 'page': page, 'has_previous': tags.has_previous(), 'has_next': tags.has_next(), 'previous': tags.previous_page_number(), 'next': tags.next_page_number(), 'base_url' : reverse('tags') + '?sort=%s&amp;' % sortby } paginator_context = functions.setup_paginator(paginator_data) data = { 'active_tab': 'tags', 'page_class': 'tags-page', 'tags' : tags, 'tag_list_type' : tag_list_type, 'stag' : stag, 'tab_id' : sortby, 'keywords' : stag, 'paginator_context' : paginator_context, } else: stag = "" sortby = request.GET.get('sort', 'name') if request.method == "GET": stag = request.GET.get("query", "").strip() if stag != '': tags = models.Tag.objects.filter(deleted=False, name__icontains=stag).exclude(used_count=0) else: if sortby == "name": tags = models.Tag.objects.all().filter(deleted=False).exclude(used_count=0).order_by("name") else: tags = models.Tag.objects.all().filter(deleted=False).exclude(used_count=0).order_by("-used_count") font_size = extra_tags.get_tag_font_size(tags) data = { 'active_tab': 'tags', 'page_class': 'tags-page', 'tags' : tags, 'tag_list_type' : tag_list_type, 'font_size' : font_size, 'stag' : stag, 'tab_id' : sortby, 'keywords' : stag, 'search_state': SearchState(*[None for x in range(7)]) } return render_into_skin('tags.html', data, request) @csrf.csrf_protect #@cache_page(60 * 5) def exercise(request, id):#refactor - long subroutine. 
display exercise body, problems and comments """view that displays body of the exercise and all problems to it """ #process url parameters #todo: fix inheritance of sort method from exercises #before = datetime.datetime.now() default_sort_method = request.session.get('exercises_sort_method', 'votes') form = ShowExerciseForm(request.GET, default_sort_method) form.full_clean()#always valid show_problem = form.cleaned_data['show_problem'] show_comment = form.cleaned_data['show_comment'] show_page = form.cleaned_data['show_page'] problem_sort_method = form.cleaned_data['problem_sort_method'] #load exercise and maybe refuse showing deleted exercise #if the exercise does not exist - try mapping to old exercises #and and if it is not found again - then give up try: exercise_post = models.Post.objects.filter( post_type = 'exercise', id = id ).select_related('thread')[0] except IndexError: # Handle URL mapping - from old Q/A/C/ URLs to the new one try: exercise_post = models.Post.objects.filter( post_type='exercise', old_exercise_id = id ).select_related('thread')[0] except IndexError: raise Http404 if show_problem: try: old_problem = models.Post.objects.get_problems().get(old_problem_id=show_problem) return HttpResponseRedirect(old_problem.get_absolute_url()) except models.Post.DoesNotExist: pass elif show_comment: try: old_comment = models.Post.objects.get_comments().get(old_comment_id=show_comment) return HttpResponseRedirect(old_comment.get_absolute_url()) except models.Post.DoesNotExist: pass try: exercise_post.assert_is_visible_to(request.user) except exceptions.ExerciseHidden, error: request.user.message_set.create(message = unicode(error)) return HttpResponseRedirect(reverse('index')) #redirect if slug in the url is wrong if request.path.split('/')[-2] != exercise_post.slug: logging.debug('no slug match!') exercise_url = '?'.join(( exercise_post.get_absolute_url(), urllib.urlencode(request.GET) )) return HttpResponseRedirect(exercise_url) #resolve comment and problem permalinks #they go first because in theory both can be moved to another exercise #this block "returns" show_post and assigns actual comment and problem #to show_comment and show_problem variables #in the case if the permalinked items or their parents are gone - redirect #redirect also happens if id of the object's origin post != requested id show_post = None #used for permalinks if show_comment: #if url calls for display of a specific comment, #check that comment exists, that it belongs to #the current exercise #if it is an problem comment and the problem is hidden - #redirect to the default view of the exercise #if the exercise is hidden - redirect to the main page #in addition - if url points to a comment and the comment #is for the problem - we need the problem object try: show_comment = models.Post.objects.get_comments().get(id=show_comment) except models.Post.DoesNotExist: error_message = _( 'Sorry, the comment you are looking for has been ' 'deleted and is no longer accessible' ) request.user.message_set.create(message = error_message) return HttpResponseRedirect(exercise_post.thread.get_absolute_url()) if str(show_comment.thread._exercise_post().id) != str(id): return HttpResponseRedirect(show_comment.get_absolute_url()) show_post = show_comment.parent try: show_comment.assert_is_visible_to(request.user) except exceptions.ProblemHidden, error: request.user.message_set.create(message = unicode(error)) #use reverse function here because exercise is not yet loaded return HttpResponseRedirect(reverse('exercise', kwargs = 
{'id': id})) except exceptions.ExerciseHidden, error: request.user.message_set.create(message = unicode(error)) return HttpResponseRedirect(reverse('index')) elif show_problem: #if the url calls to view a particular problem to #exercise - we must check whether the exercise exists #whether problem is actually corresponding to the current exercise #and that the visitor is allowed to see it show_post = get_object_or_404(models.Post, post_type='problem', id=show_problem) if str(show_post.thread._exercise_post().id) != str(id): return HttpResponseRedirect(show_post.get_absolute_url()) try: show_post.assert_is_visible_to(request.user) except django_exceptions.PermissionDenied, error: request.user.message_set.create(message = unicode(error)) return HttpResponseRedirect(reverse('exercise', kwargs = {'id': id})) thread = exercise_post.thread logging.debug('problem_sort_method=' + unicode(problem_sort_method)) #load problems and post id's->athor_id mapping #posts are pre-stuffed with the correctly ordered comments updated_exercise_post, problems, post_to_author, published_problem_ids = thread.get_cached_post_data( sort_method = problem_sort_method, user = request.user ) exercise_post.set_cached_comments( updated_exercise_post.get_cached_comments() ) #Post.objects.precache_comments(for_posts=[exercise_post] + problems, visitor=request.user) user_votes = {} user_post_id_list = list() #todo: cache this query set, but again takes only 3ms! if request.user.is_authenticated(): user_votes = Vote.objects.filter( user=request.user, voted_post__id__in = post_to_author.keys() ).values_list('voted_post_id', 'vote') user_votes = dict(user_votes) #we can avoid making this query by iterating through #already loaded posts user_post_id_list = [ id for id in post_to_author if post_to_author[id] == request.user.id ] #resolve page number and comment number for permalinks show_comment_position = None if show_comment: show_page = show_comment.get_page_number(problem_posts=problems) show_comment_position = show_comment.get_order_number() elif show_problem: show_page = show_post.get_page_number(problem_posts=problems) objects_list = Paginator(problems, const.PROBLEMS_PAGE_SIZE) if show_page > objects_list.num_pages: return HttpResponseRedirect(exercise_post.get_absolute_url()) page_objects = objects_list.page(show_page) #count visits #import ipdb; ipdb.set_trace() if functions.not_a_robot_request(request): #todo: split this out into a subroutine #todo: merge view counts per user and per session #1) view count per session update_view_count = False if 'exercise_view_times' not in request.session: request.session['exercise_view_times'] = {} last_seen = request.session['exercise_view_times'].get(exercise_post.id, None) if thread.last_activity_by_id != request.user.id: if last_seen: if last_seen < thread.last_activity_at: update_view_count = True else: update_view_count = True request.session['exercise_view_times'][exercise_post.id] = \ datetime.datetime.now() #2) run the slower jobs in a celery task from askbot import tasks tasks.record_exercise_visit.delay( exercise_post = exercise_post, user = request.user, update_view_count = update_view_count ) paginator_data = { 'is_paginated' : (objects_list.count > const.PROBLEMS_PAGE_SIZE), 'pages': objects_list.num_pages, 'page': show_page, 'has_previous': page_objects.has_previous(), 'has_next': page_objects.has_next(), 'previous': page_objects.previous_page_number(), 'next': page_objects.next_page_number(), 'base_url' : request.path + '?sort=%s&amp;' % problem_sort_method, } 
paginator_context = functions.setup_paginator(paginator_data) #todo: maybe consolidate all activity in the thread #for the user into just one query? favorited = thread.has_favorite_by_user(request.user) is_cacheable = True if show_page != 1: is_cacheable = False elif show_comment_position > askbot_settings.MAX_COMMENTS_TO_SHOW: is_cacheable = False initial = { 'wiki': exercise_post.wiki and askbot_settings.WIKI_ON, 'email_notify': thread.is_followed_by(request.user) } #maybe load draft if request.user.is_authenticated(): #todo: refactor into methor on thread drafts = models.DraftProblem.objects.filter( author=request.user, thread=thread ) if drafts.count() > 0: initial['text'] = drafts[0].text problem_form = ProblemForm(initial) answer_form = AnswerForm(initial) user_can_post_comment = ( request.user.is_authenticated() and request.user.can_post_comment() ) user_already_gave_problem = False previous_problem = None if request.user.is_authenticated(): if askbot_settings.LIMIT_ONE_PROBLEM_PER_USER: for problem in problems: if problem.author == request.user: user_already_gave_problem = True previous_problem = problem break data = { 'is_cacheable': False,#is_cacheable, #temporary, until invalidation fix 'long_time': const.LONG_TIME,#"forever" caching 'page_class': 'exercise-page', 'active_tab': 'exercises', 'exercise' : exercise_post, 'thread': thread, 'thread_is_moderated': thread.is_moderated(), 'user_is_thread_moderator': thread.has_moderator(request.user), 'published_problem_ids': published_problem_ids, 'problem' : problem_form, 'problems' : page_objects.object_list, 'problem_count': thread.get_problem_count(request.user), 'category_tree_data': askbot_settings.CATEGORY_TREE, 'user_votes': user_votes, 'user_post_id_list': user_post_id_list, 'user_can_post_comment': user_can_post_comment,#in general 'user_already_gave_problem': user_already_gave_problem, 'previous_problem': previous_problem, 'tab_id' : problem_sort_method, 'favorited' : favorited, 'similar_threads' : thread.get_similar_threads(), 'language_code': translation.get_language(), 'paginator_context' : paginator_context, 'show_post': show_post, 'show_comment': show_comment, 'show_comment_position': show_comment_position, 'answer': answer_form, #'answers': answer_form, } #shared with ... 
if askbot_settings.GROUPS_ENABLED: data['sharing_info'] = thread.get_sharing_info() data.update(context.get_for_tag_editor()) return render_into_skin('exercise.html', data, request) def revisions(request, id, post_type = None): assert post_type in ('exercise', 'problem') post = get_object_or_404(models.Post, post_type=post_type, id=id) revisions = list(models.PostRevision.objects.filter(post=post)) revisions.reverse() for i, revision in enumerate(revisions): if i == 0: revision.diff = sanitize_html(revisions[i].html) revision.summary = _('initial version') else: revision.diff = htmldiff( sanitize_html(revisions[i-1].html), sanitize_html(revision.html) ) data = { 'page_class':'revisions-page', 'active_tab':'exercises', 'post': post, 'revisions': revisions, } return render_into_skin('revisions.html', data, request) @csrf.csrf_exempt @ajax_only @anonymous_forbidden @get_only def get_comment(request): """returns text of a comment by id via ajax response requires request method get and request must be ajax """ id = int(request.GET['id']) comment = models.Post.objects.get(post_type='comment', id=id) request.user.assert_can_edit_comment(comment) return {'text': comment.text} #@decorators.check_authorization_to_post(_('Please log in to post answers')) #@decorators.check_spam('text') @csrf.csrf_protect def new_answer_form(request, mid, pid): exercise_post = models.Post.objects.filter( post_type = 'exercise', id = mid ).select_related('thread')[0] problem_post = models.Post.objects.filter( post_type = 'problem', id = pid ).select_related('thread')[0] thread = exercise_post.thread initial = { 'wiki': exercise_post.wiki and askbot_settings.WIKI_ON, 'email_notify': thread.is_followed_by(request.user) } answer_form = AnswerForm(initial) # if exercise doesn't exist, redirect to main page data = { 'pid': pid, 'mid': mid, 'exercise': exercise_post, 'problem': problem_post, 'thread': thread, 'answer_form': answer_form } return render_into_skin('exercise/answer_form.html', data, request)
gpl-3.0
-1,405,857,867,789,381,400
39.409226
153
0.604198
false
4.011078
false
false
false
uptimerobot/uptimerobot-cli
uptimerobot/client.py
1
12485
from __future__ import absolute_import, division, print_function, unicode_literals import re import sys import json import requests from . import APIError, HTTPError from .monitor import Monitor from .alert_contact import AlertContact # Ensure that we can test against the appropriate string types. if sys.version_info < (3, 0): string = basestring else: string = (str, bytes) class Client(object): """An uptimerobot API client""" URL = "http://api.uptimerobot.com/" LIST_SEPARATOR = "-" ID_PATTERN = "^\d+$" def __init__(self, api_key): self.api_key = api_key def get(self, action, **values): payload = { "apiKey": self.api_key, "format": "json", "noJsonCallback": 1, } payload.update(values) response = requests.get(self.URL + action, params=payload) # Handle client/server errors with the request. if response.status_code != requests.codes.ok: try: raise response.raise_for_status() except Exception as ex: raise HTTPError(ex) # Parse the json in the correct response. data = json.loads(response.text) # Request went through, but was bad in some way. if data["stat"] == "fail": raise APIError(data["message"]) return data def get_monitors(self, ids=None, show_logs=False, show_log_alert_contacts=False, show_alert_contacts=False, custom_uptime_ratio=False, show_log_timezone=False): """ Args ids IDs of the monitors to list. If None, then get all contacts. [list<int>] logs Show logs [Boolean] alert_contacts Show alert contacts [Boolean] show_monitor_alert_contacts Show monitors alert contacts [Boolean] custom_uptime_ratio Number of days to calculate uptime over [list<int>] show_log_timezone Show the timezone for the log times [Boolean] Returns List of Monitor detail objects. """ variables = {} if ids: if any(not isinstance(id, string) for id in ids): raise TypeError("ids must be strings") if any(not re.match(self.ID_PATTERN, id) for id in ids): raise ValueError("ids must be numeric") variables["monitors"] = self.LIST_SEPARATOR.join(ids) if show_logs: variables["logs"] = "1" if show_log_timezone: variables["showTimezone"] = "1" if show_log_alert_contacts: variables["alertContacts"] = "1" if show_alert_contacts: variables["showMonitorAlertContacts"] = "1" if custom_uptime_ratio: if not all(isinstance(n, int) and n > 0 for n in custom_uptime_ratio): raise TypeError("custom_uptime_ratio must be a list of positive integers") variables["customUptimeRatio"] = self.LIST_SEPARATOR.join(str(n) for n in custom_uptime_ratio) data = self.get("getMonitors", **variables) monitors = [Monitor(mon, custom_uptime_ratio) for mon in data["monitors"]["monitor"]] return monitors def new_monitor(self, name, url, type, subtype=None, port=None, keyword_type=None, keyword=None, username=None, password=None, alert_contacts=None): """ Args name Human-readable name to assign to the monitor [str]. url URL [str] type Monitor type [int] subtype subtype of the monitor [int] keyword_type Type of keyword to use (requires keyword be set) [int] keyword Keyword to use (requires keyword_type be set) http_username Username to use for private site (requires http_password be set) http_password Password to use for private site (requires http_username be set) alert_contacts Alert contacts to give the monitor [list<int>] Returns ID of monitor created. 
""" if type not in Monitor.TYPES: raise ValueError("type must be one of %s" % ", ".join(str(m) for m in Monitor.TYPES.keys())) variables = { "monitorFriendlyName": name, "monitorURL": url, "monitorType": str(type), } if subtype is not None: if subtype not in Monitor.SUBTYPES: raise ValueError("subtype must be one of %s" % ", ".join(str(m) for m in Monitor.SUBTYPES.keys())) variables["monitorSubType"] = str(subtype) if port is not None: variables["monitorPort"] = str(port) if keyword_type and keyword: if keyword_type not in Monitor.KEYWORD_TYPES: raise ValueError("keyword_type must be one of %s" % ", ".join(str(m) for m in Monitor.KEYWORD_TYPES.keys())) variables["monitorKeywordType"] = str(keyword_type) variables["monitorKeywordValue"] = keyword elif keyword_type is not None or keyword is not None: raise ValueError("Requires both keyword_type and keyword if either are specified") if username is not None and password is not None: variables["monitorHTTPUsername"] = username variables["monitorHTTPPassword"] = password elif username is not None or password is not None: raise ValueError("Requires both username and password if either are specified") if alert_contacts: if any(not isinstance(id, string) for id in alert_contacts): raise TypeError("alert_contacts must be strings") if any(not re.match(self.ID_PATTERN, id) for id in alert_contacts): raise ValueError("alert_contacts must be numeric") variables["monitorAlertContacts"] = self.LIST_SEPARATOR.join(alert_contacts) data = self.get("newMonitor", **variables) return data["monitor"]["id"] def edit_monitor(self, id, status=None, name=None, url=None, type=None, subtype=None, port=None, keyword_type=None, keyword=None, username=None, password=None, alert_contacts=None): """ Args id ID number of the monitor to edit [str] status Status to set [int] name Human-readable name to assign to the monitor. url URL to monitor type Monitor type [int] subtype subtype of the monitor [int] keyword_type Type of keyword to use (requires keyword be set) [int] keyword Keyword to use (requires keyword_type be set) username Username to use for private site (requires http_password be set) password Password to use for private site (requires http_username be set) alert_contacts Alert contacts to give the monitor [list<int>] Returns ID of monitor edited. 
""" if not isinstance(id, string): raise TypeError("id must be a string") if not re.match(self.ID_PATTERN, id): raise ValueError("id must be numeric") variables = { "monitorID": id, } if status is not None: if status not in Monitor.STATUSES: raise ValueError("status must be one of %s" % ", ".join(str(m) for m in Monitor.STATUSES.keys())) variables["monitorStatus"] = str(status) if name is not None: variables["monitorFriendlyName"] = name if url is not None: variables["monitorURL"] = url if type is not None: if type not in Monitor.TYPES: raise ValueError("type must be one of %s" % ", ".join(str(m) for m in Monitor.TYPES.keys())) variables["monitorType"] = str(type) if subtype is not None: if subtype not in Monitor.SUBTYPES: raise ValueError("subtype must be one of %s" % ", ".join(str(m) for m in Monitor.SUBTYPES.keys())) variables["monitorSubType"] = str(subtype) if port is not None: variables["monitorPort"] = str(port) if keyword_type is not None: if keyword_type not in Monitor.KEYWORD_TYPES: raise ValueError("keyword_type must be one of %s" % ", ".join(str(m) for m in Monitor.KEYWORD_TYPES.keys())) variables["monitorKeywordType"] = str(keyword_type) if keyword: variables["monitorKeywordValue"] = keyword if username: variables["monitorHTTPUsername"] = username if password: variables["monitorHTTPPassword"] = password if alert_contacts: if any(not isinstance(id, string) for id in alert_contacts): raise TypeError("alert_contacts must be strings") if any(not re.match(self.ID_PATTERN, id) for id in alert_contacts): raise ValueError("alert_contacts must be numeric") variables["monitorAlertContacts"] = self.LIST_SEPARATOR.join(alert_contacts) data = self.get("editMonitor", **variables) return data["monitor"]["id"] def delete_monitor(self, id): """ Args id ID of the monitor to delete [str] Returns ID of monitor deleted [str] """ if not isinstance(id, string): raise TypeError("id must be a string") if not re.match(self.ID_PATTERN, id): raise ValueError("id must be numeric") data = self.get("deleteMonitor", monitorID=id) return data["monitor"]["id"] def get_alert_contacts(self, ids=None): """ Args ids IDs of the alert contacts to list. If None, then get all contacts [list. Returns List of AlertContact detail objects. """ variables = {} if ids is not None: if any(not isinstance(id, string) for id in ids): raise TypeError("ids must be strings") if any(not re.match(self.ID_PATTERN, id) for id in ids): raise ValueError("ids must be numeric") variables["alertcontacts"] = self.LIST_SEPARATOR.join(ids) data = self.get("getAlertContacts", **variables) alerts = [AlertContact(ac) for ac in data["alertcontacts"]["alertcontact"]] return alerts def new_alert_contact(self, type, value): """ Args type Type of the new alert to create [int] value email address (or ) to alert [str] Returns ID of alert contact created [str] """ if type not in AlertContact.TYPES: raise ValueError("type must be one of %s" % ", ".join(str(t) for t in AlertContact.TYPES)) if not isinstance(value, string): raise TypeError("value must be a string") data = self.get("newAlertContact", alertContactType=str(type), alertContactValue=value) return data["alertcontact"]["id"] def delete_alert_contact(self, id): """ Args id ID of the alert contact to delete [str] Returns ID of alert contact deleted [str] """ if not isinstance(id, string): raise TypeError("id must be a string") if not re.match(self.ID_PATTERN, id): raise ValueError("id must be numeric") data = self.get("deleteAlertContact", alertContactID=id) return data["alertcontact"]["id"]
gpl-3.0
-6,106,212,682,719,807,000
29.678133
124
0.546496
false
4.690083
false
false
false
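The monitor/alert-contact wrapper methods shown in the content above (new_monitor, edit_monitor, delete_monitor, get_alert_contacts, new_alert_contact, delete_alert_contact) are only an excerpt: the enclosing client class, its constructor and the numeric codes behind Monitor.TYPES, Monitor.STATUSES and AlertContact.TYPES are not visible here. The sketch below is therefore illustrative only; the class name MonitorClient, its api_key argument and the literal type/status codes are assumptions, not part of the shown code.

# Hedged usage sketch for the wrapper excerpt above.
client = MonitorClient(api_key="u123456-abcdef")                         # assumed class and constructor

contact_id = client.new_alert_contact(type=2, value="ops@example.com")   # 2: assumed e-mail contact code

monitor_id = client.new_monitor(
    name="example.com uptime",
    url="http://example.com",
    type=1,                                   # assumed HTTP monitor type code
    alert_contacts=[contact_id],              # ids are numeric strings, as the validation above requires
)

client.edit_monitor(monitor_id, name="example.com (www)", status=0)      # 0: assumed "paused" status code
client.delete_monitor(monitor_id)
client.delete_alert_contact(contact_id)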
Grumbel/dirtool
dirtools/cmd_desktop.py
1
2895
# dirtool.py - diff tool for directories # Copyright (C) 2018 Ingo Ruhnke <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import argparse import os import sys from xdg.DesktopEntry import DesktopEntry from xdg.BaseDirectory import xdg_data_dirs from dirtools.xdg_desktop import get_desktop_file # https://standards.freedesktop.org/desktop-entry-spec/latest/ def parse_args(args): parser = argparse.ArgumentParser(description="Query the systems .desktop files") group = parser.add_mutually_exclusive_group(required=True) group.add_argument("DESKTOP", nargs='?') group.add_argument('-l', '--list-dirs', action='store_true', default=False, help="List all directories scanned for .desktop files") group.add_argument('-L', '--list-files', action='store_true', default=False, help="List all .desktop files") parser.add_argument('-v', '--verbose', action='store_true', default=False, help="Be verbose") return parser.parse_args(args) def main(argv): args = parse_args(argv[1:]) if args.list_dirs: for directory in xdg_data_dirs: print(os.path.join(directory, "applications")) elif args.list_files: for directory in xdg_data_dirs: path = os.path.join(directory, "applications") try: for entry in os.listdir(path): if entry.endswith(".desktop"): if args.verbose: filename = os.path.join(path, entry) desktop = DesktopEntry(filename) print("{:70} {:40} {:40}".format(filename, desktop.getName(), desktop.getExec())) else: print(os.path.join(path, entry)) except FileNotFoundError: pass else: filename = get_desktop_file(args.DESKTOP) print(filename) desktop = DesktopEntry(filename) print("Name: {}".format(desktop.getName())) print("Exec: {}".format(desktop.getExec())) print("TryExec: {}".format(desktop.getTryExec())) print("Mime-Types: {}".format(desktop.getMimeTypes())) def main_entrypoint(): exit(main(sys.argv)) # EOF #
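A short usage sketch for the query script above, assuming it is importable as dirtools.cmd_desktop (matching the path recorded for this file); the desktop-entry name in the last call is a placeholder.

from dirtools.cmd_desktop import main

# List the applications/ directories that are scanned for .desktop files.
main(["cmd_desktop", "--list-dirs"])

# Verbose listing of every .desktop file with its Name= and Exec= fields.
main(["cmd_desktop", "--list-files", "--verbose"])

# Inspect a single entry (placeholder name; resolution is done by get_desktop_file).
main(["cmd_desktop", "firefox.desktop"])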
gpl-3.0
4,080,812,269,677,872,000
35.1875
111
0.629016
false
4.048951
false
false
false
quokkaproject/flask-htmlbuilder
setup.py
1
1061
#!/usr/bin/env python # -*- coding: utf-8 -*- from setuptools import setup setup( name='quokka-flask-htmlbuilder', version='0.13', url='http://github.com/quokkaproject/flask-htmlbuilder', license='MIT', author='QuokkaProject', author_email='[email protected]', description='Fork of Flexible Python-only HTML generation for Flask', long_description=__doc__, packages=['flask_htmlbuilder'], namespace_packages=['flask_htmlbuilder'], test_suite='nose.collector', zip_safe=False, platforms='any', install_requires=[ 'Flask' ], tests_require=[ 'nose' ], classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Python Modules' ] )
bsd-3-clause
-672,378,022,798,663,400
28.472222
73
0.6164
false
4.080769
false
false
false
Rfam/rfam-production
scripts/support/mirnas/validate_family.py
1
3027
import os
import json
import argparse
import urllib
import logging

# ------------------------------------------------------------------------------------------

search_dirs = ["/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch1_chunk1_searches",
               "/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch1_chunk2_searches",
               "/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch2/searches"]

MEMORY = 8000
CPU = 4
LSF_GROUP = "/family_srch"

REF_STRING = """RN [1]
RM 30423142
RT miRBase: from microRNA sequences to function.
RA Kozomara A, Birgaoanu M, Griffiths-Jones S;
RL Nucleic Acids Res. 2019;47:D155."""

# ------------------------------------------------------------------------------------------


def check_desc_reference_is_valid(desc_loc, ref_string):
    fp = open(desc_loc, 'r')
    desc_lines = fp.read()
    fp.close()

    # check if we can find the reference lines (ref_string argument) in DESC
    if desc_lines.find(ref_string) != -1:
        return True

    return False

# ------------------------------------------------------------------------------------------


def parse_arguments():
    parser = argparse.ArgumentParser()

    parser.add_argument("--mirna-list", help="A .json file containing all miRNAs to validate",
                        action="store")
    parser.add_argument("--desc", help="Only perform DESC validation",
                        action="store_true", default=False)
    parser.add_argument("--svn", help="Check family exists in the SVN repository",
                        action="store_true", default=False)
    parser.add_argument("--log", help="Creates a log file with all validated DESC files",
                        action="store_true", default=False)

    return parser

# ------------------------------------------------------------------------------------------


def get_mirna_directory_location(mirna_id):
    # append the "_relabelled" suffix only when it is missing, so that ids
    # which already carry it still produce a defined dir_label
    if mirna_id.find("_relabelled") == -1:
        dir_label = mirna_id + "_relabelled"
    else:
        dir_label = mirna_id

    for search_dir in search_dirs:
        family_dir_loc = os.path.join(search_dir, dir_label)
        if os.path.exists(family_dir_loc):
            return family_dir_loc

    return None

# ------------------------------------------------------------------------------------------


def check_family_exists_in_svn(rfam_acc):
    svn_url = "https://xfamsvn.ebi.ac.uk/svn/data_repos/trunk/Families/%s"
    status = False
    # Placeholder: check whether the entry exists in the SVN repo and set status=True
    return status

# ------------------------------------------------------------------------------------------


if __name__ == '__main__':

    parser = parse_arguments()
    args = parser.parse_args()

    fp = open(args.mirna_list, 'r')
    mirnas = json.load(fp)
    fp.close()

    # if args.log is True:

    for mirna in mirnas:
        mirna_dir_loc = get_mirna_directory_location(mirna)

        if mirna_dir_loc is not None:
            if args.desc is True:
                desc_loc = os.path.join(mirna_dir_loc, "DESC")
                if os.path.exists(desc_loc):
                    check = check_desc_reference_is_valid(desc_loc, REF_STRING)
                    if check is False:
                        print(mirna_dir_loc)
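Typical invocation of the validator above (file names are placeholders); the same flags can also be handed to the parser programmatically, for example from a test:

#   python validate_family.py --mirna-list mirnas_to_check.json --desc

parser = parse_arguments()
args = parser.parse_args(["--mirna-list", "mirnas_to_check.json", "--desc"])
assert args.desc is True and args.svn is False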
apache-2.0
5,354,791,960,162,409,000
26.27027
106
0.553023
false
3.326374
false
false
false
cmheisel/django-jamsession
jamsession/forms/admin.py
1
1149
from django import forms

from jamsession.forms.fields import SchemaField
from jamsession.models import Schema


class SchemaAdminForm(forms.Form):
    error_css_class = 'error'
    required_css_class = 'required'

    name = forms.CharField(
        required=True,
        widget=forms.TextInput(attrs={'class': 'vTextField'})
    )
    schema = SchemaField(widget=forms.Textarea, required=True)

    def __init__(self, *args, **kwargs):
        if 'instance' in kwargs:
            self.instance = kwargs['instance']
            del kwargs['instance']
        super(SchemaAdminForm, self).__init__(*args, **kwargs)

    class _meta(object):
        model = Schema

    def clean_name(self):
        data = self.cleaned_data['name'].strip()
        if not data:
            raise forms.ValidationError("Name is required.")
        if self._meta.model.objects.filter(name=data).count() >= 1:
            raise forms.ValidationError("Name must be unique.")
        return data

    def save(self):
        obj = self._meta.model(**self.cleaned_data)
        obj.save()
        return obj
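A hypothetical view-side use of SchemaAdminForm; the schema payload below is a guess, since the exact format accepted by SchemaField is not shown here.

form = SchemaAdminForm(data={"name": "Jam sessions", "schema": '{"artist": "string"}'})  # payload format assumed
if form.is_valid():
    obj = form.save()              # instantiates and persists a Schema
else:
    errors = form.errors           # e.g. "Name is required." or "Name must be unique."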
mit
-3,212,703,057,285,607,000
29.236842
67
0.585727
false
4.303371
false
false
false
rbiswas4/SNsims
snsims_previous/snsims/tmp/models.py
1
2804
#!/usr/bin/env python

import sncosmo.models
import numpy


class SEDFileSource(sncosmo.models.TimeSeriesSource):
    r"""A TimeSeriesSource stored in a 3-column ASCII file format, for PHASE,
    LAMBDA, and F_LAMBDA.  The hash symbol # is a comment line.

    The spectral flux density of this model is given by

    .. math::

       F(t, \lambda) = A \times M(t, \lambda)

    where _M_ is the flux defined on a grid in phase and wavelength and _A_
    (amplitude) is the single free parameter of the model. It should be noted
    that while t and \lambda are in the rest frame of the object, the flux
    density is defined at redshift zero. This means that for objects with the
    same intrinsic luminosity, the amplitude will be smaller for objects at
    larger luminosity distances.

    Parameters
    ----------
    filename : str
        Name of the filename that contains the Time Series
    zero_before : bool, optional
        If True, flux at phases before minimum phase will be zeroed. The
        default is False, in which case the flux at such phases will be equal
        to the flux at the minimum phase (``flux[0, :]`` in the input array).
    version : str, optional
        Version of the model. Default is `None`.

    Returns
    -------
    `~sncosmo.TimeSeriesSource` instance representing the TimeSeriesSource
    in file
    """

    _param_names = ['amplitude']
    param_names_latex = ['A']

    def __init__(self, filename, zero_before=False, version=None):
        phase, wave, flux = numpy.loadtxt(filename, unpack=True)

        # Convert 3-column format to the (phase, wavelength) grid expected
        # by TimeSeriesSource
        phase_u = numpy.unique(phase)
        wave_u = numpy.unique(wave)

        lenp = len(phase_u)
        lenw = len(wave_u)

        if lenp * lenw != len(flux):
            raise TypeError('File is not a TimeSeriesSource')

        i = numpy.zeros(len(flux), dtype='int')
        j = numpy.zeros(len(flux), dtype='int')

        for index, p in enumerate(phase_u):
            i[phase == p] = index
        for index, w in enumerate(wave_u):
            j[wave == w] = index

        flux = flux[i * lenw + j]
        flux = numpy.reshape(flux, (lenp, lenw))

        # forward the caller's zero_before/version options
        super(SEDFileSource, self).__init__(phase_u, wave_u, flux,
                                            zero_before=zero_before,
                                            name=filename, version=version)


if __name__ == '__main__':

    filename = '/Users/akim/project/SNDATA_ROOT/snsed/NON1A/SDSS-019323.SED'
    data = SEDFileSource(filename)

    sn = sncosmo.Model(source='snana-2007nc')
    print(sn.param_names)

    import matplotlib.pyplot as plt
    plt.plot(data._wave, data.flux(0, data._wave))
    plt.plot(sn.source._wave, sn.flux(0, sn.source._wave) * 0.95)
    plt.show()
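Building on the __main__ block above, this is how the file-backed source would typically be wrapped in an sncosmo.Model; the template path is a placeholder.

import sncosmo

source = SEDFileSource('my_template.sed')      # placeholder; 3-column ASCII: phase, wavelength, F_lambda
model = sncosmo.Model(source=source)

model.set(amplitude=2.0)                       # the single free parameter A in F = A * M
flux_at_peak = model.flux(0.0, source._wave)   # flux at phase 0 (z and t0 default to 0)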
mit
-6,002,426,233,363,240,000
32.783133
78
0.615906
false
3.763758
false
false
false
jendrikseipp/rednotebook-elementary
win/utils.py
1
1582
import logging import os import shutil import sys import subprocess import urllib.request def ensure_path(path): if not os.path.exists(path): os.mkdir(path) def confirm_overwrite(dir): if os.path.exists(dir): answer = input( 'The directory {} exists. Overwrite it? (Y/n): '.format(dir)).strip() if answer and answer.lower() != 'y': sys.exit('Aborting') shutil.rmtree(dir) def fast_copytree(src_dir, dest_dir): subprocess.check_call(['cp', '-r', src_dir, dest_dir]) def fetch(url, path): dirname = os.path.dirname(path) if not os.path.exists(dirname): os.mkdir(dirname) if not os.path.exists(path): logging.info('Fetch {0} to {1}'.format(url, path)) with urllib.request.urlopen(url) as response, open(path, 'wb') as out_file: shutil.copyfileobj(response, out_file) if not os.path.exists(path): sys.exit('Download unsuccessful.') def run(*args, **kwargs): logging.info('Run command: {0} ({1})'.format(args, kwargs)) retcode = subprocess.call(*args, **kwargs) if retcode != 0: sys.exit('Command failed.') def get_output(*args, **kwargs): return subprocess.check_output(*args, **kwargs).decode().strip() def install(path, use_wine): cmd = [] if use_wine: cmd.append('wine') if path.lower().endswith('.exe'): cmd.extend([path]) elif path.lower().endswith('.msi'): cmd.extend(['msiexec', '/i', path]) else: sys.exit('Don\'t know how to install {0}'.format(path)) run(cmd)
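Sketch of how these helpers compose in a build script; the module name utils, the URL and the file names are assumptions/placeholders.

from utils import ensure_path, fetch, install, run   # assumed import path for win/utils.py

ensure_path('build')
fetch('https://example.com/installer.msi', 'build/installer.msi')   # downloads only if the file is missing
install('build/installer.msi', use_wine=True)                       # dispatches to msiexec through wine
run(['ls', 'build'])                                                 # any non-zero exit aborts the script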
gpl-2.0
8,799,536,457,335,876,000
28.296296
83
0.609355
false
3.39485
false
false
false
shaftoe/home-assistant
homeassistant/const.py
1
12266
# coding: utf-8 """Constants used by Home Assistant components.""" MAJOR_VERSION = 0 MINOR_VERSION = 46 PATCH_VERSION = '0.dev0' __short_version__ = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION) __version__ = '{}.{}'.format(__short_version__, PATCH_VERSION) REQUIRED_PYTHON_VER = (3, 4, 2) REQUIRED_PYTHON_VER_WIN = (3, 5, 2) CONSTRAINT_FILE = 'package_constraints.txt' PROJECT_NAME = 'Home Assistant' PROJECT_PACKAGE_NAME = 'homeassistant' PROJECT_LICENSE = 'Apache License 2.0' PROJECT_AUTHOR = 'The Home Assistant Authors' PROJECT_COPYRIGHT = ' 2013, {}'.format(PROJECT_AUTHOR) PROJECT_URL = 'https://home-assistant.io/' PROJECT_EMAIL = '[email protected]' PROJECT_DESCRIPTION = ('Open-source home automation platform ' 'running on Python 3.') PROJECT_LONG_DESCRIPTION = ('Home Assistant is an open-source ' 'home automation platform running on Python 3. ' 'Track and control all devices at home and ' 'automate control. ' 'Installation in less than a minute.') PROJECT_CLASSIFIERS = [ 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3.4', 'Topic :: Home Automation' ] PROJECT_GITHUB_USERNAME = 'home-assistant' PROJECT_GITHUB_REPOSITORY = 'home-assistant' PYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_PACKAGE_NAME) GITHUB_PATH = '{}/{}'.format(PROJECT_GITHUB_USERNAME, PROJECT_GITHUB_REPOSITORY) GITHUB_URL = 'https://github.com/{}'.format(GITHUB_PATH) PLATFORM_FORMAT = '{}.{}' # Can be used to specify a catch all when registering state or event listeners. MATCH_ALL = '*' # If no name is specified DEVICE_DEFAULT_NAME = 'Unnamed Device' WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'] SUN_EVENT_SUNSET = 'sunset' SUN_EVENT_SUNRISE = 'sunrise' # #### CONFIG #### CONF_ABOVE = 'above' CONF_ACCESS_TOKEN = 'access_token' CONF_AFTER = 'after' CONF_ALIAS = 'alias' CONF_API_KEY = 'api_key' CONF_AUTHENTICATION = 'authentication' CONF_BASE = 'base' CONF_BEFORE = 'before' CONF_BELOW = 'below' CONF_BINARY_SENSORS = 'binary_sensors' CONF_BLACKLIST = 'blacklist' CONF_BRIGHTNESS = 'brightness' CONF_CODE = 'code' CONF_COLOR_TEMP = 'color_temp' CONF_COMMAND = 'command' CONF_COMMAND_CLOSE = 'command_close' CONF_COMMAND_OFF = 'command_off' CONF_COMMAND_ON = 'command_on' CONF_COMMAND_OPEN = 'command_open' CONF_COMMAND_STATE = 'command_state' CONF_COMMAND_STOP = 'command_stop' CONF_CONDITION = 'condition' CONF_COVERS = 'covers' CONF_CUSTOMIZE = 'customize' CONF_CUSTOMIZE_DOMAIN = 'customize_domain' CONF_CUSTOMIZE_GLOB = 'customize_glob' CONF_DEVICE = 'device' CONF_DEVICE_CLASS = 'device_class' CONF_DEVICES = 'devices' CONF_DISARM_AFTER_TRIGGER = 'disarm_after_trigger' CONF_DISCOVERY = 'discovery' CONF_DISPLAY_OPTIONS = 'display_options' CONF_DOMAIN = 'domain' CONF_DOMAINS = 'domains' CONF_EFFECT = 'effect' CONF_ELEVATION = 'elevation' CONF_EMAIL = 'email' CONF_ENTITIES = 'entities' CONF_ENTITY_ID = 'entity_id' CONF_ENTITY_NAMESPACE = 'entity_namespace' CONF_EVENT = 'event' CONF_EXCLUDE = 'exclude' CONF_FILE_PATH = 'file_path' CONF_FILENAME = 'filename' CONF_FRIENDLY_NAME = 'friendly_name' CONF_HEADERS = 'headers' CONF_HOST = 'host' CONF_HOSTS = 'hosts' CONF_ICON = 'icon' CONF_INCLUDE = 'include' CONF_ID = 'id' CONF_LATITUDE = 'latitude' CONF_LONGITUDE = 'longitude' CONF_MAC = 'mac' CONF_METHOD = 'method' CONF_MINIMUM = 'minimum' CONF_MAXIMUM = 'maximum' CONF_MONITORED_CONDITIONS = 'monitored_conditions' CONF_MONITORED_VARIABLES = 'monitored_variables' 
CONF_NAME = 'name' CONF_OFFSET = 'offset' CONF_OPTIMISTIC = 'optimistic' CONF_PACKAGES = 'packages' CONF_PASSWORD = 'password' CONF_PATH = 'path' CONF_PAYLOAD = 'payload' CONF_PAYLOAD_OFF = 'payload_off' CONF_PAYLOAD_ON = 'payload_on' CONF_PENDING_TIME = 'pending_time' CONF_PIN = 'pin' CONF_PLATFORM = 'platform' CONF_PORT = 'port' CONF_PREFIX = 'prefix' CONF_PROTOCOL = 'protocol' CONF_PROXY_SSL = 'proxy_ssl' CONF_QUOTE = 'quote' CONF_RECIPIENT = 'recipient' CONF_RESOURCE = 'resource' CONF_RESOURCES = 'resources' CONF_RGB = 'rgb' CONF_SCAN_INTERVAL = 'scan_interval' CONF_SENDER = 'sender' CONF_SENSOR_CLASS = 'sensor_class' CONF_SENSORS = 'sensors' CONF_SSL = 'ssl' CONF_STATE = 'state' CONF_STRUCTURE = 'structure' CONF_SWITCHES = 'switches' CONF_TEMPERATURE_UNIT = 'temperature_unit' CONF_TIME_ZONE = 'time_zone' CONF_TIMEOUT = 'timeout' CONF_TOKEN = 'token' CONF_TRIGGER_TIME = 'trigger_time' CONF_TYPE = 'type' CONF_UNIT_OF_MEASUREMENT = 'unit_of_measurement' CONF_UNIT_SYSTEM = 'unit_system' CONF_URL = 'url' CONF_USERNAME = 'username' CONF_VALUE_TEMPLATE = 'value_template' CONF_VERIFY_SSL = 'verify_ssl' CONF_WEEKDAY = 'weekday' CONF_WHITELIST = 'whitelist' CONF_WHITE_VALUE = 'white_value' CONF_XY = 'xy' CONF_ZONE = 'zone' # #### EVENTS #### EVENT_HOMEASSISTANT_START = 'homeassistant_start' EVENT_HOMEASSISTANT_STOP = 'homeassistant_stop' EVENT_HOMEASSISTANT_CLOSE = 'homeassistant_close' EVENT_STATE_CHANGED = 'state_changed' EVENT_TIME_CHANGED = 'time_changed' EVENT_CALL_SERVICE = 'call_service' EVENT_SERVICE_EXECUTED = 'service_executed' EVENT_PLATFORM_DISCOVERED = 'platform_discovered' EVENT_COMPONENT_LOADED = 'component_loaded' EVENT_SERVICE_REGISTERED = 'service_registered' EVENT_SERVICE_REMOVED = 'service_removed' EVENT_LOGBOOK_ENTRY = 'logbook_entry' # #### STATES #### STATE_ON = 'on' STATE_OFF = 'off' STATE_HOME = 'home' STATE_NOT_HOME = 'not_home' STATE_UNKNOWN = 'unknown' STATE_OPEN = 'open' STATE_CLOSED = 'closed' STATE_PLAYING = 'playing' STATE_PAUSED = 'paused' STATE_IDLE = 'idle' STATE_STANDBY = 'standby' STATE_ALARM_DISARMED = 'disarmed' STATE_ALARM_ARMED_HOME = 'armed_home' STATE_ALARM_ARMED_AWAY = 'armed_away' STATE_ALARM_PENDING = 'pending' STATE_ALARM_TRIGGERED = 'triggered' STATE_LOCKED = 'locked' STATE_UNLOCKED = 'unlocked' STATE_UNAVAILABLE = 'unavailable' # #### STATE AND EVENT ATTRIBUTES #### # Attribution ATTR_ATTRIBUTION = 'attribution' # Contains current time for a TIME_CHANGED event ATTR_NOW = 'now' # Contains domain, service for a SERVICE_CALL event ATTR_DOMAIN = 'domain' ATTR_SERVICE = 'service' ATTR_SERVICE_DATA = 'service_data' # Data for a SERVICE_EXECUTED event ATTR_SERVICE_CALL_ID = 'service_call_id' # Contains one string or a list of strings, each being an entity id ATTR_ENTITY_ID = 'entity_id' # String with a friendly name for the entity ATTR_FRIENDLY_NAME = 'friendly_name' # A picture to represent entity ATTR_ENTITY_PICTURE = 'entity_picture' # Icon to use in the frontend ATTR_ICON = 'icon' # The unit of measurement if applicable ATTR_UNIT_OF_MEASUREMENT = 'unit_of_measurement' CONF_UNIT_SYSTEM_METRIC = 'metric' # type: str CONF_UNIT_SYSTEM_IMPERIAL = 'imperial' # type: str # Temperature attribute ATTR_TEMPERATURE = 'temperature' TEMP_CELSIUS = '°C' TEMP_FAHRENHEIT = '°F' # Length units LENGTH_CENTIMETERS = 'cm' # type: str LENGTH_METERS = 'm' # type: str LENGTH_KILOMETERS = 'km' # type: str LENGTH_INCHES = 'in' # type: str LENGTH_FEET = 'ft' # type: str LENGTH_YARD = 'yd' # type: str LENGTH_MILES = 'mi' # type: str # Volume units VOLUME_LITERS = 'L' # type: 
str VOLUME_MILLILITERS = 'mL' # type: str VOLUME_GALLONS = 'gal' # type: str VOLUME_FLUID_OUNCE = 'fl. oz.' # type: str # Mass units MASS_GRAMS = 'g' # type: str MASS_KILOGRAMS = 'kg' # type: str MASS_OUNCES = 'oz' # type: str MASS_POUNDS = 'lb' # type: str # Contains the information that is discovered ATTR_DISCOVERED = 'discovered' # Location of the device/sensor ATTR_LOCATION = 'location' ATTR_BATTERY_LEVEL = 'battery_level' ATTR_WAKEUP = 'wake_up_interval' # For devices which support a code attribute ATTR_CODE = 'code' ATTR_CODE_FORMAT = 'code_format' # For devices which support an armed state ATTR_ARMED = 'device_armed' # For devices which support a locked state ATTR_LOCKED = 'locked' # For sensors that support 'tripping', eg. motion and door sensors ATTR_TRIPPED = 'device_tripped' # For sensors that support 'tripping' this holds the most recent # time the device was tripped ATTR_LAST_TRIP_TIME = 'last_tripped_time' # For all entity's, this hold whether or not it should be hidden ATTR_HIDDEN = 'hidden' # Location of the entity ATTR_LATITUDE = 'latitude' ATTR_LONGITUDE = 'longitude' # Accuracy of location in meters ATTR_GPS_ACCURACY = 'gps_accuracy' # If state is assumed ATTR_ASSUMED_STATE = 'assumed_state' ATTR_STATE = 'state' ATTR_OPTION = 'option' # Bitfield of supported component features for the entity ATTR_SUPPORTED_FEATURES = 'supported_features' # Class of device within its domain ATTR_DEVICE_CLASS = 'device_class' # #### SERVICES #### SERVICE_HOMEASSISTANT_STOP = 'stop' SERVICE_HOMEASSISTANT_RESTART = 'restart' SERVICE_TURN_ON = 'turn_on' SERVICE_TURN_OFF = 'turn_off' SERVICE_TOGGLE = 'toggle' SERVICE_RELOAD = 'reload' SERVICE_VOLUME_UP = 'volume_up' SERVICE_VOLUME_DOWN = 'volume_down' SERVICE_VOLUME_MUTE = 'volume_mute' SERVICE_VOLUME_SET = 'volume_set' SERVICE_MEDIA_PLAY_PAUSE = 'media_play_pause' SERVICE_MEDIA_PLAY = 'media_play' SERVICE_MEDIA_PAUSE = 'media_pause' SERVICE_MEDIA_STOP = 'media_stop' SERVICE_MEDIA_NEXT_TRACK = 'media_next_track' SERVICE_MEDIA_PREVIOUS_TRACK = 'media_previous_track' SERVICE_MEDIA_SEEK = 'media_seek' SERVICE_SHUFFLE_SET = 'shuffle_set' SERVICE_ALARM_DISARM = 'alarm_disarm' SERVICE_ALARM_ARM_HOME = 'alarm_arm_home' SERVICE_ALARM_ARM_AWAY = 'alarm_arm_away' SERVICE_ALARM_TRIGGER = 'alarm_trigger' SERVICE_LOCK = 'lock' SERVICE_UNLOCK = 'unlock' SERVICE_OPEN = 'open' SERVICE_CLOSE = 'close' SERVICE_CLOSE_COVER = 'close_cover' SERVICE_CLOSE_COVER_TILT = 'close_cover_tilt' SERVICE_OPEN_COVER = 'open_cover' SERVICE_OPEN_COVER_TILT = 'open_cover_tilt' SERVICE_SET_COVER_POSITION = 'set_cover_position' SERVICE_SET_COVER_TILT_POSITION = 'set_cover_tilt_position' SERVICE_STOP_COVER = 'stop_cover' SERVICE_STOP_COVER_TILT = 'stop_cover_tilt' SERVICE_SELECT_OPTION = 'select_option' # #### API / REMOTE #### SERVER_PORT = 8123 URL_ROOT = '/' URL_API = '/api/' URL_API_STREAM = '/api/stream' URL_API_CONFIG = '/api/config' URL_API_DISCOVERY_INFO = '/api/discovery_info' URL_API_STATES = '/api/states' URL_API_STATES_ENTITY = '/api/states/{}' URL_API_EVENTS = '/api/events' URL_API_EVENTS_EVENT = '/api/events/{}' URL_API_SERVICES = '/api/services' URL_API_SERVICES_SERVICE = '/api/services/{}/{}' URL_API_COMPONENTS = '/api/components' URL_API_ERROR_LOG = '/api/error_log' URL_API_LOG_OUT = '/api/log_out' URL_API_TEMPLATE = '/api/template' HTTP_OK = 200 HTTP_CREATED = 201 HTTP_MOVED_PERMANENTLY = 301 HTTP_BAD_REQUEST = 400 HTTP_UNAUTHORIZED = 401 HTTP_NOT_FOUND = 404 HTTP_METHOD_NOT_ALLOWED = 405 HTTP_UNPROCESSABLE_ENTITY = 422 HTTP_INTERNAL_SERVER_ERROR = 500 
HTTP_BASIC_AUTHENTICATION = 'basic' HTTP_DIGEST_AUTHENTICATION = 'digest' HTTP_HEADER_HA_AUTH = 'X-HA-access' HTTP_HEADER_ACCEPT_ENCODING = 'Accept-Encoding' HTTP_HEADER_CONTENT_TYPE = 'Content-type' HTTP_HEADER_CONTENT_ENCODING = 'Content-Encoding' HTTP_HEADER_VARY = 'Vary' HTTP_HEADER_CONTENT_LENGTH = 'Content-Length' HTTP_HEADER_CACHE_CONTROL = 'Cache-Control' HTTP_HEADER_EXPIRES = 'Expires' HTTP_HEADER_ORIGIN = 'Origin' HTTP_HEADER_X_REQUESTED_WITH = 'X-Requested-With' HTTP_HEADER_ACCEPT = 'Accept' HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN = 'Access-Control-Allow-Origin' HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS = 'Access-Control-Allow-Headers' ALLOWED_CORS_HEADERS = [HTTP_HEADER_ORIGIN, HTTP_HEADER_ACCEPT, HTTP_HEADER_X_REQUESTED_WITH, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_HA_AUTH] CONTENT_TYPE_JSON = 'application/json' CONTENT_TYPE_MULTIPART = 'multipart/x-mixed-replace; boundary={}' CONTENT_TYPE_TEXT_PLAIN = 'text/plain' # The exit code to send to request a restart RESTART_EXIT_CODE = 100 UNIT_NOT_RECOGNIZED_TEMPLATE = '{} is not a recognized {} unit.' # type: str LENGTH = 'length' # type: str MASS = 'mass' # type: str VOLUME = 'volume' # type: str TEMPERATURE = 'temperature' # type: str SPEED_MS = 'speed_ms' # type: str ILLUMINANCE = 'illuminance' # type: str
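Purely illustrative: composing a REST call against the legacy password-protected HTTP API from the constants above. The host, the password and the use of the requests library are assumptions, not part of this module.

import requests
from homeassistant.const import (SERVER_PORT, URL_API_STATES_ENTITY,
                                 HTTP_HEADER_HA_AUTH, CONTENT_TYPE_JSON, HTTP_OK)

url = 'http://localhost:{}'.format(SERVER_PORT) + URL_API_STATES_ENTITY.format('light.kitchen')
headers = {HTTP_HEADER_HA_AUTH: 'my-api-password',      # assumed API password
           'Content-Type': CONTENT_TYPE_JSON}

response = requests.get(url, headers=headers)
assert response.status_code == HTTP_OK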
apache-2.0
-7,999,569,583,938,235,000
28.480769
79
0.708741
false
2.953046
false
false
false
kyle-elsalhi/opencv-examples
CalibrationByChessboard/CalibrateCamera.py
1
5055
# System information: # - Linux Mint 18.1 Cinnamon 64-bit # - Python 2.7 with OpenCV 3.2.0 # Resources: # - OpenCV-Python tutorial for calibration: http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html # - Variable names were changed for clarity import numpy import cv2 import pickle import glob # Create arrays you'll use to store object points and image points from all images processed objpoints = [] # 3D point in real world space where chess squares are imgpoints = [] # 2D point in image plane, determined by CV2 # Chessboard variables CHESSBOARD_CORNERS_ROWCOUNT = 9 CHESSBOARD_CORNERS_COLCOUNT = 6 # Theoretical object points for the chessboard we're calibrating against, # These will come out like: # (0, 0, 0), (1, 0, 0), ..., # (CHESSBOARD_CORNERS_ROWCOUNT-1, CHESSBOARD_CORNERS_COLCOUNT-1, 0) # Note that the Z value for all stays at 0, as this is a printed out 2D image # And also that the max point is -1 of the max because we're zero-indexing # The following line generates all the tuples needed at (0, 0, 0) objp = numpy.zeros((CHESSBOARD_CORNERS_ROWCOUNT*CHESSBOARD_CORNERS_COLCOUNT,3), numpy.float32) # The following line fills the tuples just generated with their values (0, 0, 0), (1, 0, 0), ... objp[:,:2] = numpy.mgrid[0:CHESSBOARD_CORNERS_ROWCOUNT,0:CHESSBOARD_CORNERS_COLCOUNT].T.reshape(-1, 2) # Need a set of images or a video taken with the camera you want to calibrate # I'm using a set of images taken with the camera with the naming convention: # 'camera-pic-of-chessboard-<NUMBER>.jpg' images = glob.glob('./camera-pic-of-chessboard-*.jpg') # All images used should be the same size, which if taken with the same camera shouldn't be a problem imageSize = None # Determined at runtime # Loop through images glob'ed for iname in images: # Open the image img = cv2.imread(iname) # Grayscale the image gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Find chessboard in the image, setting PatternSize(2nd arg) to a tuple of (#rows, #columns) board, corners = cv2.findChessboardCorners(gray, (CHESSBOARD_CORNERS_ROWCOUNT,CHESSBOARD_CORNERS_COLCOUNT), None) # If a chessboard was found, let's collect image/corner points if board == True: # Add the points in 3D that we just discovered objpoints.append(objp) # Enhance corner accuracy with cornerSubPix corners_acc = cv2.cornerSubPix( image=gray, corners=corners, winSize=(11, 11), zeroZone=(-1, -1), criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)) # Last parameter is about termination critera imgpoints.append(corners_acc) # If our image size is unknown, set it now if not imageSize: imageSize = gray.shape[::-1] # Draw the corners to a new image to show whoever is performing the calibration # that the board was properly detected img = cv2.drawChessboardCorners(img, (CHESSBOARD_CORNERS_ROWCOUNT, CHESSBOARD_CORNERS_COLCOUNT), corners_acc, board) # Pause to display each image, waiting for key press cv2.imshow('Chessboard', img) cv2.waitKey(0) else: print("Not able to detect a chessboard in image: {}".format(iname)) # Destroy any open CV windows cv2.destroyAllWindows() # Make sure at least one image was found if len(images) < 1: # Calibration failed because there were no images, warn the user print("Calibration was unsuccessful. No images of chessboards were found. 
Add images of chessboards and use or alter the naming conventions used in this file.") # Exit for failure exit() # Make sure we were able to calibrate on at least one chessboard by checking # if we ever determined the image size if not imageSize: # Calibration failed because we didn't see any chessboards of the PatternSize used print("Calibration was unsuccessful. We couldn't detect chessboards in any of the images supplied. Try changing the patternSize passed into findChessboardCorners(), or try different pictures of chessboards.") # Exit for failure exit() # Now that we've seen all of our images, perform the camera calibration # based on the set of points we've discovered calibration, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera( objectPoints=objpoints, imagePoints=imgpoints, imageSize=imageSize, cameraMatrix=None, distCoeffs=None) # Print matrix and distortion coefficient to the console print(cameraMatrix) print(distCoeffs) # Save values to be used where matrix+dist is required, for instance for posture estimation # I save files in a pickle file, but you can use yaml or whatever works for you f = open('calibration.pckl', 'wb') pickle.dump((cameraMatrix, distCoeffs, rvecs, tvecs), f) f.close() # Print to console our success print('Calibration successful. Calibration file used: {}'.format('calibration.pckl'))
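To consume the calibration saved above, load calibration.pckl and undistort a new image taken with the same camera; the input file name is a placeholder.

import pickle
import cv2

with open('calibration.pckl', 'rb') as f:
    cameraMatrix, distCoeffs, rvecs, tvecs = pickle.load(f)

img = cv2.imread('camera-pic-of-chessboard-1.jpg')   # placeholder image from the same camera
h, w = img.shape[:2]

# Refine the matrix for this resolution, then remove lens distortion.
newCameraMatrix, roi = cv2.getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, (w, h), 1, (w, h))
undistorted = cv2.undistort(img, cameraMatrix, distCoeffs, None, newCameraMatrix)
cv2.imwrite('undistorted.jpg', undistorted)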
mit
-1,366,045,274,275,861,500
42.956522
212
0.712364
false
3.525105
false
false
false
Blackclaws/client
src/client/_clientwindow.py
1
52765
from functools import partial from PyQt4.QtCore import QUrl from PyQt4.QtGui import QLabel, QStyle from PyQt4.QtNetwork import QAbstractSocket import config import connectivity from base import Client from config import Settings import chat from client.player import Player from client.players import Players from client.updater import ClientUpdater import fa from connectivity.helper import ConnectivityHelper from fa import GameSession from fa.factions import Factions from fa.game_session import GameSessionState from ui.status_logo import StatusLogo ''' Created on Dec 1, 2011 @author: thygrrr ''' from PyQt4 import QtCore, QtGui, QtNetwork, QtWebKit from types import IntType, FloatType, ListType, DictType from client import ClientState, LOBBY_HOST, \ LOBBY_PORT, LOCAL_REPLAY_PORT import logging logger = logging.getLogger(__name__) import util import secondaryServer import json import sys import replays import time import random import notifications as ns FormClass, BaseClass = util.loadUiType("client/client.ui") class mousePosition(object): def __init__(self, parent): self.parent = parent self.onLeftEdge = False self.onRightEdge = False self.onTopEdge = False self.onBottomEdge = False self.cursorShapeChange = False self.warning_buttons = dict() self.onEdges = False def computeMousePosition(self, pos): self.onLeftEdge = pos.x() < 8 self.onRightEdge = pos.x() > self.parent.size().width() - 8 self.onTopEdge = pos.y() < 8 self.onBottomEdge = pos.y() > self.parent.size().height() - 8 self.onTopLeftEdge = self.onTopEdge and self.onLeftEdge self.onBottomLeftEdge = self.onBottomEdge and self.onLeftEdge self.onTopRightEdge = self.onTopEdge and self.onRightEdge self.onBottomRightEdge = self.onBottomEdge and self.onRightEdge self.onEdges = self.onLeftEdge or self.onRightEdge or self.onTopEdge or self.onBottomEdge def resetToFalse(self): self.onLeftEdge = False self.onRightEdge = False self.onTopEdge = False self.onBottomEdge = False self.cursorShapeChange = False def isOnEdge(self): return self.onEdges class ClientWindow(FormClass, BaseClass): ''' This is the main lobby client that manages the FAF-related connection and data, in particular players, games, ranking, etc. Its UI also houses all the other UIs for the sub-modules. ''' topWidget = QtGui.QWidget() # These signals are emitted when the client is connected or disconnected from FAF connected = QtCore.pyqtSignal() authorized = QtCore.pyqtSignal(object) disconnected = QtCore.pyqtSignal() state_changed = QtCore.pyqtSignal(object) # This signal is emitted when the client is done rezising doneresize = QtCore.pyqtSignal() # These signals notify connected modules of game state changes (i.e. 
reasons why FA is launched) viewingReplay = QtCore.pyqtSignal(QtCore.QUrl) # Game state controls gameEnter = QtCore.pyqtSignal() gameExit = QtCore.pyqtSignal() # These signals propagate important client state changes to other modules statsInfo = QtCore.pyqtSignal(dict) tourneyTypesInfo = QtCore.pyqtSignal(dict) tutorialsInfo = QtCore.pyqtSignal(dict) tourneyInfo = QtCore.pyqtSignal(dict) modInfo = QtCore.pyqtSignal(dict) gameInfo = QtCore.pyqtSignal(dict) modVaultInfo = QtCore.pyqtSignal(dict) coopInfo = QtCore.pyqtSignal(dict) avatarList = QtCore.pyqtSignal(list) playerAvatarList = QtCore.pyqtSignal(dict) usersUpdated = QtCore.pyqtSignal(list) localBroadcast = QtCore.pyqtSignal(str, str) autoJoin = QtCore.pyqtSignal(list) channelsUpdated = QtCore.pyqtSignal(list) replayVault = QtCore.pyqtSignal(dict) coopLeaderBoard = QtCore.pyqtSignal(dict) # These signals are emitted whenever a certain tab is activated showReplays = QtCore.pyqtSignal() showMaps = QtCore.pyqtSignal() showGames = QtCore.pyqtSignal() showTourneys = QtCore.pyqtSignal() showLadder = QtCore.pyqtSignal() showChat = QtCore.pyqtSignal() showMods = QtCore.pyqtSignal() showCoop = QtCore.pyqtSignal() matchmakerInfo = QtCore.pyqtSignal(dict) remember = Settings.persisted_property('user/remember', type=bool, default_value=True) login = Settings.persisted_property('user/login', persist_if=lambda self: self.remember) password = Settings.persisted_property('user/password', persist_if=lambda self: self.remember) gamelogs = Settings.persisted_property('game/logs', type=bool, default_value=False) useUPnP = Settings.persisted_property('game/upnp', type=bool, default_value=True) gamePort = Settings.persisted_property('game/port', type=int, default_value=6112) def __init__(self, *args, **kwargs): BaseClass.__init__(self, *args, **kwargs) logger.debug("Client instantiating") # Hook to Qt's application management system QtGui.QApplication.instance().aboutToQuit.connect(self.cleanup) # Init and wire the TCP Network socket to communicate with faforever.com self.socket = QtNetwork.QTcpSocket() self.socket.readyRead.connect(self.readFromServer) self.socket.disconnected.connect(self.disconnectedFromServer) self.socket.error.connect(self.socketError) self._client_updater = None self.blockSize = 0 self.uniqueId = None self.sendFile = False self.progress = QtGui.QProgressDialog() self.progress.setMinimum(0) self.progress.setMaximum(0) self.warning_buttons = {} # Tray icon self.tray = QtGui.QSystemTrayIcon() self.tray.setIcon(util.icon("client/tray_icon.png")) self.tray.show() self._state = ClientState.NONE self.auth_state = ClientState.NONE # Using ClientState for reasons self.session = None self._connection_attempts = 0 # Timer for resize events self.resizeTimer = QtCore.QTimer(self) self.resizeTimer.timeout.connect(self.resized) self.preferedSize = 0 self._receivers = {} # Process used to run Forged Alliance (managed in module fa) fa.instance.started.connect(self.startedFA) fa.instance.finished.connect(self.finishedFA) fa.instance.error.connect(self.errorFA) self.gameInfo.connect(fa.instance.processGameInfo) # Local Replay Server self.replayServer = fa.replayserver.ReplayServer(self) # GameSession self.game_session = None # type: GameSession # ConnectivityTest self.connectivity = None # type: ConnectivityHelper self.localIP = None # stat server self.statsServer = secondaryServer.SecondaryServer("Statistic", 11002, self) # create user interface (main window) and load theme self.setupUi(self) self.setStyleSheet(util.readstylesheet("client/client.css")) 
self.whatNewsView.setHtml("<body style='background-color: #000;'></body>") self.setWindowTitle("FA Forever " + util.VERSION_STRING) # Frameless self.setWindowFlags( QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMinimizeButtonHint) self.rubberBand = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle) self.mousePosition = mousePosition(self) self.installEventFilter(self) self.minimize = QtGui.QToolButton(self) self.minimize.setIcon(util.icon("client/minimize-button.png")) self.maximize = QtGui.QToolButton(self) self.maximize.setIcon(util.icon("client/maximize-button.png")) close = QtGui.QToolButton(self) close.setIcon(util.icon("client/close-button.png")) self.minimize.setMinimumHeight(10) close.setMinimumHeight(10) self.maximize.setMinimumHeight(10) close.setIconSize(QtCore.QSize(22, 22)) self.minimize.setIconSize(QtCore.QSize(22, 22)) self.maximize.setIconSize(QtCore.QSize(22, 22)) close.setProperty("windowControlBtn", True) self.maximize.setProperty("windowControlBtn", True) self.minimize.setProperty("windowControlBtn", True) self.logo = StatusLogo(self) self.logo.disconnect_requested.connect(self.disconnect) self.logo.reconnect_requested.connect(self.reconnect) self.logo.about_dialog_requested.connect(self.linkAbout) self.logo.connectivity_dialog_requested.connect(self.connectivityDialog) self.menu = self.menuBar() self.topLayout.addWidget(self.logo) titleLabel = QLabel("FA Forever" if not config.is_beta() else "FA Forever BETA") titleLabel.setProperty('titleLabel', True) self.topLayout.addWidget(titleLabel) self.topLayout.addStretch(500) self.topLayout.addWidget(self.menu) self.topLayout.addWidget(self.minimize) self.topLayout.addWidget(self.maximize) self.topLayout.addWidget(close) self.topLayout.setSpacing(0) self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed) self.maxNormal = False close.clicked.connect(self.close) self.minimize.clicked.connect(self.showSmall) self.maximize.clicked.connect(self.showMaxRestore) self.moving = False self.dragging = False self.draggingHover = False self.offset = None self.curSize = None sizeGrip = QtGui.QSizeGrip(self) self.mainGridLayout.addWidget(sizeGrip, 2, 2) # Wire all important signals self.mainTabs.currentChanged.connect(self.mainTabChanged) self.topTabs.currentChanged.connect(self.vaultTabChanged) # Handy reference to the Player object representing the logged-in user. self.me = None # FIXME: Move it elsewhere self.players = Players( self.me) # Players known to the client, contains the player_info messages sent by the server self.urls = {} self.power = 0 # current user power self.id = 0 # Initialize the Menu Bar according to settings etc. 
self.initMenus() # Load the icons for the tabs self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.whatNewTab), util.icon("client/feed.png")) self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.chatTab), util.icon("client/chat.png")) self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.gamesTab), util.icon("client/games.png")) self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.coopTab), util.icon("client/coop.png")) self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.vaultsTab), util.icon("client/mods.png")) self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.ladderTab), util.icon("client/ladder.png")) self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tourneyTab), util.icon("client/tourney.png")) self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.livestreamTab), util.icon("client/twitch.png")) self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.replaysTab), util.icon("client/replays.png")) self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tutorialsTab), util.icon("client/tutorials.png")) QtWebKit.QWebSettings.globalSettings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True) # for moderator self.modMenu = None @property def state(self): return self._state @state.setter def state(self, value): self._state = value self.state_changed.emit(value) @QtCore.pyqtSlot(bool) def on_actionSavegamelogs_toggled(self, value): self.gamelogs = value def eventFilter(self, obj, event): if (event.type() == QtCore.QEvent.HoverMove): self.draggingHover = self.dragging if self.dragging: self.resizeWidget(self.mapToGlobal(event.pos())) else: if self.maxNormal == False: self.mousePosition.computeMousePosition(event.pos()) else: self.mousePosition.resetToFalse() self.updateCursorShape(event.pos()) return False def updateCursorShape(self, pos): if self.mousePosition.onTopLeftEdge or self.mousePosition.onBottomRightEdge: self.mousePosition.cursorShapeChange = True self.setCursor(QtCore.Qt.SizeFDiagCursor) elif self.mousePosition.onTopRightEdge or self.mousePosition.onBottomLeftEdge: self.setCursor(QtCore.Qt.SizeBDiagCursor) self.mousePosition.cursorShapeChange = True elif self.mousePosition.onLeftEdge or self.mousePosition.onRightEdge: self.setCursor(QtCore.Qt.SizeHorCursor) self.mousePosition.cursorShapeChange = True elif self.mousePosition.onTopEdge or self.mousePosition.onBottomEdge: self.setCursor(QtCore.Qt.SizeVerCursor) self.mousePosition.cursorShapeChange = True else: if self.mousePosition.cursorShapeChange == True: self.unsetCursor() self.mousePosition.cursorShapeChange = False def showSmall(self): self.showMinimized() def showMaxRestore(self): if (self.maxNormal): self.maxNormal = False if self.curSize: self.setGeometry(self.curSize) else: self.maxNormal = True self.curSize = self.geometry() self.setGeometry(QtGui.QDesktopWidget().availableGeometry(self)) def mouseDoubleClickEvent(self, event): self.showMaxRestore() def mouseReleaseEvent(self, event): self.dragging = False self.moving = False if self.rubberBand.isVisible(): self.maxNormal = True self.curSize = self.geometry() self.setGeometry(self.rubberBand.geometry()) self.rubberBand.hide() # self.showMaxRestore() def mousePressEvent(self, event): if event.button() == QtCore.Qt.LeftButton: if self.mousePosition.isOnEdge() and self.maxNormal == False: self.dragging = True return else: self.dragging = False self.moving = True self.offset = event.pos() def mouseMoveEvent(self, event): if self.dragging and self.draggingHover == False: self.resizeWidget(event.globalPos()) elif self.moving and self.offset != None: desktop = 
QtGui.QDesktopWidget().availableGeometry(self) if event.globalPos().y() == 0: self.rubberBand.setGeometry(desktop) self.rubberBand.show() elif event.globalPos().x() == 0: desktop.setRight(desktop.right() / 2.0) self.rubberBand.setGeometry(desktop) self.rubberBand.show() elif event.globalPos().x() == desktop.right(): desktop.setRight(desktop.right() / 2.0) desktop.moveLeft(desktop.right()) self.rubberBand.setGeometry(desktop) self.rubberBand.show() else: self.rubberBand.hide() if self.maxNormal == True: self.showMaxRestore() self.move(event.globalPos() - self.offset) def resizeWidget(self, globalMousePos): if globalMousePos.y() == 0: self.rubberBand.setGeometry(QtGui.QDesktopWidget().availableGeometry(self)) self.rubberBand.show() else: self.rubberBand.hide() origRect = self.frameGeometry() left, top, right, bottom = origRect.getCoords() minWidth = self.minimumWidth() minHeight = self.minimumHeight() if self.mousePosition.onTopLeftEdge: left = globalMousePos.x() top = globalMousePos.y() elif self.mousePosition.onBottomLeftEdge: left = globalMousePos.x() bottom = globalMousePos.y() elif self.mousePosition.onTopRightEdge: right = globalMousePos.x() top = globalMousePos.y() elif self.mousePosition.onBottomRightEdge: right = globalMousePos.x() bottom = globalMousePos.y() elif self.mousePosition.onLeftEdge: left = globalMousePos.x() elif self.mousePosition.onRightEdge: right = globalMousePos.x() elif self.mousePosition.onTopEdge: top = globalMousePos.y() elif self.mousePosition.onBottomEdge: bottom = globalMousePos.y() newRect = QtCore.QRect(QtCore.QPoint(left, top), QtCore.QPoint(right, bottom)) if newRect.isValid(): if minWidth > newRect.width(): if left != origRect.left(): newRect.setLeft(origRect.left()) else: newRect.setRight(origRect.right()) if minHeight > newRect.height(): if top != origRect.top(): newRect.setTop(origRect.top()) else: newRect.setBottom(origRect.bottom()) self.setGeometry(newRect) def setup(self): import chat import tourneys import stats import vault import games import tutorials import downloadManager import modvault import coop from chat._avatarWidget import avatarWidget # download manager self.downloader = downloadManager.downloadManager(self) self.loadSettings() # Initialize chat self.chat = chat.Lobby(self) # Color table used by the following method # CAVEAT: This will break if the theme is loaded after the client package is imported chat.CHAT_COLORS = json.loads(util.readfile("client/colors.json")) # build main window with the now active client self.ladder = stats.Stats(self) self.games = games.Games(self) self.tourneys = tourneys.Tourneys(self) self.vault = vault.MapVault(self) self.modvault = modvault.ModVault(self) self.replays = replays.Replays(self) self.tutorials = tutorials.Tutorials(self) self.Coop = coop.Coop(self) self.notificationSystem = ns.Notifications(self) # set menu states self.actionNsEnabled.setChecked(self.notificationSystem.settings.enabled) # Other windows self.avatarAdmin = self.avatarSelection = avatarWidget(self, None) # warning setup self.warning = QtGui.QHBoxLayout() # live streams self.LivestreamWebView.setUrl(QtCore.QUrl("http://www.faforever.com/livestream")) self.warnPlayer = QtGui.QLabel(self) self.warnPlayer.setText( "A player of your skill level is currently searching for a 1v1 game. Click a faction to join them! 
") self.warnPlayer.setAlignment(QtCore.Qt.AlignHCenter) self.warnPlayer.setAlignment(QtCore.Qt.AlignVCenter) self.warnPlayer.setProperty("warning", True) self.warning.addStretch() self.warning.addWidget(self.warnPlayer) def add_warning_button(faction): button = QtGui.QToolButton(self) button.setMaximumSize(25, 25) button.setIcon(util.icon("games/automatch/%s.png" % faction.to_name())) button.clicked.connect(partial(self.games.startSearchRanked, faction)) self.warning.addWidget(button) return button self.warning_buttons = {faction: add_warning_button(faction) for faction in Factions} self.warning.addStretch() self.mainGridLayout.addLayout(self.warning, 2, 0) self.warningHide() def warningHide(self): ''' hide the warning bar for matchmaker ''' self.warnPlayer.hide() for i in self.warning_buttons.values(): i.hide() def warningShow(self): ''' show the warning bar for matchmaker ''' self.warnPlayer.show() for i in self.warning_buttons.values(): i.show() def disconnect(self): self.state = ClientState.DISCONNECTED self.socket.disconnectFromHost() self.chat.disconnect() @QtCore.pyqtSlot() def cleanup(self): ''' Perform cleanup before the UI closes ''' self.state = ClientState.SHUTDOWN self.progress.setWindowTitle("FAF is shutting down") self.progress.setMinimum(0) self.progress.setMaximum(0) self.progress.setValue(0) self.progress.setCancelButton(None) self.progress.show() # Important: If a game is running, offer to terminate it gently self.progress.setLabelText("Closing ForgedAllianceForever.exe") if fa.instance.running(): fa.instance.close() # Terminate Lobby Server connection if self.socket.state() == QtNetwork.QTcpSocket.ConnectedState: self.progress.setLabelText("Closing main connection.") self.socket.disconnectFromHost() # Clear UPnP Mappings... if self.useUPnP: self.progress.setLabelText("Removing UPnP port mappings") fa.upnp.removePortMappings() # Terminate local ReplayServer if self.replayServer: self.progress.setLabelText("Terminating local replay server") self.replayServer.close() self.replayServer = None # Clean up Chat if self.chat: self.progress.setLabelText("Disconnecting from IRC") self.chat.disconnect() self.chat = None # Get rid of the Tray icon if self.tray: self.progress.setLabelText("Removing System Tray icon") self.tray.deleteLater() self.tray = None # Terminate UI if self.isVisible(): self.progress.setLabelText("Closing main window") self.close() self.progress.close() def closeEvent(self, event): logger.info("Close Event for Application Main Window") self.saveWindow() if fa.instance.running(): if QtGui.QMessageBox.question(self, "Are you sure?", "Seems like you still have Forged Alliance running!<br/><b>Close anyway?</b>", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.No: event.ignore() return return QtGui.QMainWindow.closeEvent(self, event) def resizeEvent(self, size): self.resizeTimer.start(400) def resized(self): self.resizeTimer.stop() self.doneresize.emit() def initMenus(self): self.actionLink_account_to_Steam.triggered.connect(partial(self.open_url, Settings.get("STEAMLINK_URL"))) self.actionLinkWebsite.triggered.connect(partial(self.open_url, Settings.get("WEBSITE_URL"))) self.actionLinkWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL"))) self.actionLinkForums.triggered.connect(partial(self.open_url, Settings.get("FORUMS_URL"))) self.actionLinkUnitDB.triggered.connect(partial(self.open_url, Settings.get("UNITDB_URL"))) self.actionNsSettings.triggered.connect(lambda: self.notificationSystem.on_showSettings()) 
self.actionNsEnabled.triggered.connect(lambda enabled: self.notificationSystem.setNotificationEnabled(enabled)) self.actionWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL"))) self.actionReportBug.triggered.connect(partial(self.open_url, Settings.get("TICKET_URL"))) self.actionShowLogs.triggered.connect(self.linkShowLogs) self.actionTechSupport.triggered.connect(partial(self.open_url, Settings.get("SUPPORT_URL"))) self.actionAbout.triggered.connect(self.linkAbout) self.actionClearCache.triggered.connect(self.clearCache) self.actionClearSettings.triggered.connect(self.clearSettings) self.actionClearGameFiles.triggered.connect(self.clearGameFiles) self.actionSetGamePath.triggered.connect(self.switchPath) self.actionSetGamePort.triggered.connect(self.switchPort) # Toggle-Options self.actionSetAutoLogin.triggered.connect(self.updateOptions) self.actionSetAutoLogin.setChecked(self.remember) self.actionSetSoundEffects.triggered.connect(self.updateOptions) self.actionSetOpenGames.triggered.connect(self.updateOptions) self.actionSetJoinsParts.triggered.connect(self.updateOptions) self.actionSetLiveReplays.triggered.connect(self.updateOptions) self.actionSaveGamelogs.toggled.connect(self.on_actionSavegamelogs_toggled) self.actionSaveGamelogs.setChecked(self.gamelogs) self.actionColoredNicknames.triggered.connect(self.updateOptions) # Init themes as actions. themes = util.listThemes() for theme in themes: action = self.menuTheme.addAction(str(theme)) action.triggered.connect(self.switchTheme) action.theme = theme action.setCheckable(True) if util.getTheme() == theme: action.setChecked(True) # Nice helper for the developers self.menuTheme.addSeparator() self.menuTheme.addAction("Reload Stylesheet", lambda: self.setStyleSheet(util.readstylesheet("client/client.css"))) @QtCore.pyqtSlot() def updateOptions(self): self.remember = self.actionSetAutoLogin.isChecked() self.soundeffects = self.actionSetSoundEffects.isChecked() self.opengames = self.actionSetOpenGames.isChecked() self.joinsparts = self.actionSetJoinsParts.isChecked() self.livereplays = self.actionSetLiveReplays.isChecked() self.gamelogs = self.actionSaveGamelogs.isChecked() self.players.coloredNicknames = self.actionColoredNicknames.isChecked() self.saveChat() @QtCore.pyqtSlot() def switchTheme(self): util.setTheme(self.sender().theme, True) @QtCore.pyqtSlot() def switchPath(self): fa.wizards.Wizard(self).exec_() @QtCore.pyqtSlot() def switchPort(self): import loginwizards loginwizards.gameSettingsWizard(self).exec_() @QtCore.pyqtSlot() def clearSettings(self): result = QtGui.QMessageBox.question(None, "Clear Settings", "Are you sure you wish to clear all settings, login info, etc. 
used by this program?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) if (result == QtGui.QMessageBox.Yes): util.settings.clear() util.settings.sync() QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.") QtGui.QApplication.quit() @QtCore.pyqtSlot() def clearGameFiles(self): util.clearDirectory(util.BIN_DIR) util.clearDirectory(util.GAMEDATA_DIR) @QtCore.pyqtSlot() def clearCache(self): changed = util.clearDirectory(util.CACHE_DIR) if changed: QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.") QtGui.QApplication.quit() @QtCore.pyqtSlot(str) def open_url(self, url): QtGui.QDesktopServices.openUrl(QUrl(url)) @QtCore.pyqtSlot() def linkShowLogs(self): util.showInExplorer(util.LOG_DIR) @QtCore.pyqtSlot() def connectivityDialog(self): dialog = connectivity.ConnectivityDialog(self.connectivity) dialog.exec_() @QtCore.pyqtSlot() def linkAbout(self): dialog = util.loadUi("client/about.ui") dialog.version_label.setText("Version: {}".format(util.VERSION_STRING)) dialog.exec_() def saveWindow(self): util.settings.beginGroup("window") util.settings.setValue("geometry", self.saveGeometry()) util.settings.endGroup() def saveChat(self): util.settings.beginGroup("chat") util.settings.setValue("soundeffects", self.soundeffects) util.settings.setValue("livereplays", self.livereplays) util.settings.setValue("opengames", self.opengames) util.settings.setValue("joinsparts", self.joinsparts) util.settings.setValue("coloredNicknames", self.players.coloredNicknames) util.settings.endGroup() def loadSettings(self): self.loadChat() # Load settings util.settings.beginGroup("window") geometry = util.settings.value("geometry", None) if geometry: self.restoreGeometry(geometry) util.settings.endGroup() util.settings.beginGroup("ForgedAlliance") util.settings.endGroup() def loadChat(self): try: util.settings.beginGroup("chat") self.soundeffects = (util.settings.value("soundeffects", "true") == "true") self.opengames = (util.settings.value("opengames", "true") == "true") self.joinsparts = (util.settings.value("joinsparts", "false") == "true") self.livereplays = (util.settings.value("livereplays", "true") == "true") self.players.coloredNicknames = (util.settings.value("coloredNicknames", "false") == "true") util.settings.endGroup() self.actionColoredNicknames.setChecked(self.players.coloredNicknames) self.actionSetSoundEffects.setChecked(self.soundeffects) self.actionSetLiveReplays.setChecked(self.livereplays) self.actionSetOpenGames.setChecked(self.opengames) self.actionSetJoinsParts.setChecked(self.joinsparts) except: pass def doConnect(self): if not self.replayServer.doListen(LOCAL_REPLAY_PORT): return False # Begin connecting. 
self.socket.connected.connect(self.on_connected) self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1) self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT) return True def reconnect(self): """ Reconnect to the server :return: """ self._connection_attempts += 1 self.state = ClientState.RECONNECTING self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1) self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT) @QtCore.pyqtSlot() def on_connected(self): self.state = ClientState.ACCEPTED self.localIP = self.socket.localAddress() self.send(dict(command="ask_session", version=config.VERSION, user_agent="faf-client")) self.connected.emit() @property def can_login(self): return self.remember and self.password and self.login def show_login_wizard(self): from loginwizards import LoginWizard wizard = LoginWizard(self) wizard.accepted.connect(self.perform_login) wizard.exec_() def doLogin(self): self.state = ClientState.NONE if not self.can_login: self.show_login_wizard() def getColor(self, name): return chat.get_color(name) @QtCore.pyqtSlot() def startedFA(self): ''' Slot hooked up to fa.instance when the process has launched. It will notify other modules through the signal gameEnter(). ''' logger.info("FA has launched in an attached process.") self.gameEnter.emit() @QtCore.pyqtSlot(int) def finishedFA(self, exit_code): ''' Slot hooked up to fa.instance when the process has ended. It will notify other modules through the signal gameExit(). ''' if not exit_code: logger.info("FA has finished with exit code: " + str(exit_code)) else: logger.warn("FA has finished with exit code: " + str(exit_code)) self.gameExit.emit() @QtCore.pyqtSlot(int) def errorFA(self, error_code): ''' Slot hooked up to fa.instance when the process has failed to start. ''' if error_code == 0: logger.error("FA has failed to start") QtGui.QMessageBox.critical(self, "Error from FA", "FA has failed to start.") elif error_code == 1: logger.error("FA has crashed or killed after starting") else: text = "FA has failed to start with error code: " + str(error_code) logger.error(text) QtGui.QMessageBox.critical(self, "Error from FA", text) self.gameExit.emit() @QtCore.pyqtSlot(int) def mainTabChanged(self, index): ''' The main visible tab (module) of the client's UI has changed. In this case, other modules may want to load some data or cease particularly CPU-intensive interactive functionality. LATER: This can be rewritten as a simple Signal that each module can then individually connect to. 
''' new_tab = self.mainTabs.widget(index) if new_tab is self.gamesTab: self.showGames.emit() if new_tab is self.chatTab: self.showChat.emit() if new_tab is self.replaysTab: self.showReplays.emit() if new_tab is self.ladderTab: self.showLadder.emit() if new_tab is self.tourneyTab: self.showTourneys.emit() if new_tab is self.coopTab: self.showCoop.emit() @QtCore.pyqtSlot(int) def vaultTabChanged(self, index): new_tab = self.topTabs.widget(index) if new_tab is self.mapsTab: self.showMaps.emit() if new_tab is self.modsTab: self.showMods.emit() @QtCore.pyqtSlot() def joinGameFromURL(self, url): ''' Tries to join the game at the given URL ''' logger.debug("joinGameFromURL: " + url.toString()) if fa.instance.available(): add_mods = [] try: modstr = url.queryItemValue("mods") add_mods = json.loads(modstr) # should be a list except: logger.info("Couldn't load urlquery value 'mods'") if fa.check.game(self): uid, mod, map = url.queryItemValue('uid'), url.queryItemValue('mod'), url.queryItemValue('map') if fa.check.check(mod, map, sim_mods=add_mods): self.join_game(uid) def writeToServer(self, action, *args, **kw): ''' Writes data to the deprecated stream API. Do not use. ''' logger.debug("Client: " + action) block = QtCore.QByteArray() out = QtCore.QDataStream(block, QtCore.QIODevice.ReadWrite) out.setVersion(QtCore.QDataStream.Qt_4_2) out.writeUInt32(2 * len(action) + 4) out.writeQString(action) self.socket.write(block) @QtCore.pyqtSlot() def readFromServer(self): ins = QtCore.QDataStream(self.socket) ins.setVersion(QtCore.QDataStream.Qt_4_2) while ins.atEnd() == False: if self.blockSize == 0: if self.socket.bytesAvailable() < 4: return self.blockSize = ins.readUInt32() if self.socket.bytesAvailable() < self.blockSize: return action = ins.readQString() logger.debug("Server: '%s'" % action) if action == "PING": self.writeToServer("PONG") self.blockSize = 0 return try: self.dispatch(json.loads(action)) except: logger.error("Error dispatching JSON: " + action, exc_info=sys.exc_info()) self.blockSize = 0 @QtCore.pyqtSlot() def disconnectedFromServer(self): logger.warn("Disconnected from lobby server.") if self.state == ClientState.ACCEPTED: # Clear the online users lists oldplayers = self.players.keys() self.players = Players(self.me) self.urls = {} self.usersUpdated.emit(oldplayers) if self.state != ClientState.DISCONNECTED: self.state = ClientState.DROPPED if self._connection_attempts < 2: logger.info("Reconnecting immediately") self.reconnect() else: timer = QtCore.QTimer(self) timer.setSingleShot(True) timer.timeout.connect(self.reconnect) t = self._connection_attempts * 10000 timer.start(t) logger.info("Scheduling reconnect in {}".format(t / 1000)) self.disconnected.emit() @QtCore.pyqtSlot(QtNetwork.QAbstractSocket.SocketError) def socketError(self, error): if (error == QAbstractSocket.SocketTimeoutError or error == QAbstractSocket.NetworkError or error == QAbstractSocket.ConnectionRefusedError or error == QAbstractSocket.RemoteHostClosedError): logger.info("Timeout/network error: {}".format(self.socket.errorString())) self.disconnectedFromServer() else: self.state = ClientState.DISCONNECTED logger.error("Fatal TCP Socket Error: " + self.socket.errorString()) @QtCore.pyqtSlot() def forwardLocalBroadcast(self, source, message): self.localBroadcast.emit(source, message) def manage_power(self): ''' update the interface accordingly to the power of the user''' if self.power >= 1: if self.modMenu == None: self.modMenu = self.menu.addMenu("Administration") actionAvatar = QtGui.QAction("Avatar manager", 
self.modMenu) actionAvatar.triggered.connect(self.avatarManager) self.modMenu.addAction(actionAvatar) def requestAvatars(self, personal): if personal: self.send(dict(command="avatar", action="list_avatar")) else: self.send(dict(command="admin", action="requestavatars")) def joinChannel(self, username, channel): '''Join users to a channel''' self.send(dict(command="admin", action="join_channel", user_ids=[self.players[username].id], channel=channel)) def closeFA(self, username): '''Close FA remotly''' self.send(dict(command="admin", action="closeFA", user_id=self.players[username].id)) def closeLobby(self, username): '''Close lobby remotly''' self.send(dict(command="admin", action="closelobby", user_id=self.players[username].id)) def addFriend(self, friend_id): if friend_id in self.players: self.players.friends.add(friend_id) self.send(dict(command="social_add", friend=friend_id)) self.usersUpdated.emit([friend_id]) def addFoe(self, foe_id): if foe_id in self.players: self.players.foes.add(foe_id) self.send(dict(command="social_add", foe=foe_id)) self.usersUpdated.emit([foe_id]) def remFriend(self, friend_id): if friend_id in self.players: self.players.friends.remove(friend_id) self.send(dict(command="social_remove", friend=friend_id)) self.usersUpdated.emit([friend_id]) def remFoe(self, foe_id): if foe_id in self.players: self.players.foes.remove(foe_id) self.send(dict(command="social_remove", foe=foe_id)) self.usersUpdated.emit([foe_id]) def send(self, message): data = json.dumps(message) if message.get('command') == 'hello': logger.info('Logging in with {}'.format({ k: v for k, v in message.items() if k != 'password' })) else: logger.info("Outgoing JSON Message: " + data) self.writeToServer(data) def subscribe_to(self, target, receiver): self._receivers[target] = receiver def unsubscribe(self, target, receiver): del self._receivers[target] def dispatch(self, message): if "command" in message: cmd = "handle_" + message['command'] if "target" in message: receiver = self._receivers.get(message['target']) if hasattr(receiver, cmd): getattr(receiver, cmd)(message) elif hasattr(receiver, 'handle_message'): receiver.handle_message(message) else: logger.warn("No receiver for message {}".format(message)) else: if hasattr(self, cmd): getattr(self, cmd)(message) else: logger.error("Unknown JSON command: %s" % message['command']) raise ValueError else: logger.debug("No command in message.") def handle_session(self, message): self.session = str(message['session']) if self.remember and self.login and self.password: self.perform_login() @QtCore.pyqtSlot() def perform_login(self): self.uniqueId = util.uniqueID(self.login, self.session) self.send(dict(command="hello", login=self.login, password=self.password, unique_id=self.uniqueId, session=self.session)) return True def handle_invalid(self, message): self.state = ClientState.DISCONNECTED raise Exception(message) def handle_stats(self, message): self.statsInfo.emit(message) def handle_update(self, message): # Remove geometry settings prior to updating # could be incompatible with an updated client. 
Settings.remove('window/geometry') logger.warn("Server says we need an update") self.progress.close() self.state = ClientState.DISCONNECTED self._client_updater = ClientUpdater(message['update']) self._client_updater.exec_() def handle_welcome(self, message): self._connection_attempts = 0 self.id = message["id"] self.login = message["login"] self.me = Player(id=self.id, login=self.login) self.players[self.me.id] = self.me # FIXME self.players.me = self.me # FIXME self.players.login = self.login logger.debug("Login success") self.state = ClientState.ACCEPTED util.crash.CRASH_REPORT_USER = self.login if self.useUPnP: fa.upnp.createPortMapping(self.socket.localAddress().toString(), self.gamePort, "UDP") # update what's new page self.whatNewsView.setUrl(QtCore.QUrl( "http://www.faforever.com/?page_id=114&username={user}&pwdhash={pwdhash}".format(user=self.login, pwdhash=self.password))) self.updateOptions() self.state = ClientState.ONLINE self.authorized.emit(self.me) # Run an initial connectivity test and initialize a gamesession object # when done self.connectivity = ConnectivityHelper(self, self.gamePort) self.connectivity.connectivity_status_established.connect(self.initialize_game_session) self.connectivity.start_test() def initialize_game_session(self): self.game_session = GameSession(self, self.connectivity) def handle_registration_response(self, message): if message["result"] == "SUCCESS": self.auth_state = ClientState.CREATED return self.auth_state = ClientState.REJECTED self.handle_notice({"style": "notice", "text": message["error"]}) def search_ranked(self, faction): def request_launch(): msg = { 'command': 'game_matchmaking', 'mod': 'ladder1v1', 'state': 'start', 'gameport': self.gamePort, 'faction': faction } if self.connectivity.state == 'STUN': msg['relay_address'] = self.connectivity.relay_address self.send(msg) self.game_session.ready.disconnect(request_launch) if self.game_session: self.game_session.ready.connect(request_launch) self.game_session.listen() def host_game(self, title, mod, visibility, mapname, password): def request_launch(): msg = { 'command': 'game_host', 'title': title, 'mod': mod, 'visibility': visibility, 'mapname': mapname, 'password': password, } if self.connectivity.state == 'STUN': msg['relay_address'] = self.connectivity.relay_address self.send(msg) self.game_session.ready.disconnect(request_launch) if self.game_session: self.game_session.ready.connect(request_launch) self.game_session.listen() def join_game(self, uid, password=None): def request_launch(): msg = { 'command': 'game_join', 'uid': uid, 'gameport': self.gamePort } if password: msg['password'] = password if self.connectivity.state == "STUN": msg['relay_address'] = self.connectivity.relay_address self.send(msg) self.game_session.ready.disconnect(request_launch) if self.game_session: self.game_session.ready.connect(request_launch) self.game_session.listen() def handle_game_launch(self, message): if not self.game_session or not self.connectivity.is_ready: logger.error("Not ready for game launch") logger.info("Handling game_launch via JSON " + str(message)) silent = False # Do some special things depending of the reason of the game launch. rank = False # HACK: Ideally, this comes from the server, too. 
LATER: search_ranked message arguments = [] if message["mod"] == "ladder1v1": arguments.append('/' + Factions.to_name(self.games.race)) # Player 1v1 rating arguments.append('/mean') arguments.append(str(self.me.ladder_rating_mean)) arguments.append('/deviation') arguments.append(str(self.me.ladder_rating_deviation)) arguments.append('/players 2') # Always 2 players in 1v1 ladder arguments.append('/team 1') # Always FFA team # Launch the auto lobby self.game_session.init_mode = 1 else: # Player global rating arguments.append('/mean') arguments.append(str(self.me.rating_mean)) arguments.append('/deviation') arguments.append(str(self.me.rating_deviation)) if self.me.country is not None: arguments.append('/country ') arguments.append(self.me.country) # Launch the normal lobby self.game_session.init_mode = 0 if self.me.clan is not None: arguments.append('/clan') arguments.append(self.me.clan) # Ensure we have the map if "mapname" in message: fa.check.map(message['mapname'], force=True, silent=silent) if "sim_mods" in message: fa.mods.checkMods(message['sim_mods']) # UPnP Mapper - mappings are removed on app exit if self.useUPnP: fa.upnp.createPortMapping(self.socket.localAddress().toString(), self.gamePort, "UDP") info = dict(uid=message['uid'], recorder=self.login, featured_mod=message['mod'], launched_at=time.time()) fa.run(info, self.game_session.relay_port, arguments) def handle_coop_info(self, message): self.coopInfo.emit(message) def handle_tournament_types_info(self, message): self.tourneyTypesInfo.emit(message) def handle_tournament_info(self, message): self.tourneyInfo.emit(message) def handle_tutorials_info(self, message): self.tutorialsInfo.emit(message) def handle_mod_info(self, message): self.modInfo.emit(message) def handle_game_info(self, message): if 'games' in message: for game in message['games']: self.gameInfo.emit(game) else: self.gameInfo.emit(message) def handle_modvault_list_info(self, message): modList = message["modList"] for mod in modList: self.handle_modvault_info(mod) def handle_modvault_info(self, message): self.modVaultInfo.emit(message) def handle_replay_vault(self, message): self.replayVault.emit(message) def handle_coop_leaderboard(self, message): self.coopLeaderBoard.emit(message) def handle_matchmaker_info(self, message): if "action" in message: self.matchmakerInfo.emit(message) elif "potential" in message: if message["potential"]: self.warningShow() else: self.warningHide() def handle_avatar(self, message): if "avatarlist" in message: self.avatarList.emit(message["avatarlist"]) def handle_admin(self, message): if "avatarlist" in message: self.avatarList.emit(message["avatarlist"]) elif "player_avatar_list" in message: self.playerAvatarList.emit(message) def handle_social(self, message): if "friends" in message: self.players.friends = set(message["friends"]) self.usersUpdated.emit(self.players.keys()) if "foes" in message: self.players.foes = set(message["foes"]) self.usersUpdated.emit(self.players.keys()) if "channels" in message: # Add a delay to the notification system (insane cargo cult) self.notificationSystem.disabledStartup = False self.channelsUpdated.emit(message["channels"]) if "autojoin" in message: self.autoJoin.emit(message["autojoin"]) if "power" in message: self.power = message["power"] self.manage_power() def handle_player_info(self, message): players = message["players"] # Firstly, find yourself. Things get easier once "me" is assigned. 
for player in players: if player["id"] == self.id: self.me = Player(**player) for player in players: id = player["id"] new_player = Player(**player) self.players[id] = new_player self.usersUpdated.emit([player['login']]) if self.me.clan is not None and new_player.clan == self.me.clan: self.players.clanlist.add(player['login']) def avatarManager(self): self.requestAvatars(0) self.avatarSelection.show() def handle_authentication_failed(self, message): QtGui.QMessageBox.warning(self, "Authentication failed", message["text"]) self.state = ClientState.DISCONNECTED self.show_login_wizard() def handle_notice(self, message): if "text" in message: style = message.get('style', None) if style == "error": QtGui.QMessageBox.critical(self, "Error from Server", message["text"]) elif style == "warning": QtGui.QMessageBox.warning(self, "Warning from Server", message["text"]) elif style == "scores": self.tray.showMessage("Scores", message["text"], QtGui.QSystemTrayIcon.Information, 3500) self.localBroadcast.emit("Scores", message["text"]) else: QtGui.QMessageBox.information(self, "Notice from Server", message["text"]) if message["style"] == "kill": logger.info("Server has killed your Forged Alliance Process.") fa.instance.kill() if message["style"] == "kick": logger.info("Server has kicked you from the Lobby.") Client.register(ClientWindow)
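# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original client code above): the
# dispatch() method routes each incoming JSON message to a method named
# handle_<command> (e.g. "player_info" -> handle_player_info), after the
# length-prefixed frames have been decoded in readFromServer(). A minimal,
# framework-free version of that routing convention is shown below; the
# handler names and messages here are hypothetical and exist only for
# illustration.
# ---------------------------------------------------------------------------
import json


class MiniDispatcher(object):
    """Bare-bones handle_<command> routing, mirroring Client.dispatch above."""

    def dispatch(self, message):
        command = message.get("command")
        if command is None:
            return  # nothing to do without a command field
        handler = getattr(self, "handle_" + command, None)
        if handler is None:
            raise ValueError("Unknown command: %s" % command)
        handler(message)

    # Hypothetical handlers, for illustration only.
    def handle_notice(self, message):
        print("notice: %s" % message.get("text", ""))

    def handle_welcome(self, message):
        print("welcome, %s" % message.get("login", "unknown"))


# Example use:
#   MiniDispatcher().dispatch(json.loads('{"command": "welcome", "login": "p1"}'))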
gpl-3.0
6,156,876,616,512,699,000
36.02807
130
0.622458
false
4.063222
false
false
false
DigitalSlideArchive/cdsa_ipython_helpers
dsa_mongo_common_functions.py
1
38290
""" This contains various helper functions for the Digital Slide Archive""" import re, csv, os, sys, optparse import collections from PIL import Image import openslide from openslide.lowlevel import OpenSlideError import hashlib import subprocess import shutil,glob import random from functools import partial def md5sum(filename): with open(filename, mode='rb') as f: d = hashlib.md5() for buf in iter(partial(f.read, 128), b''): d.update(buf) return d.hexdigest() """Default Directories """ DEFAULT_WSI_DIR = '/NDPI_VAULT/ADRC/' DEFAULT_PYRAMID_DIR = '/bigdata3/PYRAMIDS/ADRC/' DEFAULT_DATABASE = 'adrc_slide_database' DEFAULT_IIP_SERVER_ADDRESS = "http://node15.cci.emory.edu/cgi-bin/iipsrv.fcgi?Zoomify="; """CDSA SPECIFIC VARIABLES AND PATHS """ tcga_tumor_types = [ 'acc','blca','blnp','blp','brca','cesc','cntl','coad','dlbc','esca','gbm','hnsc','kich','kirc','kirp','laml','lcll','lcml','lgg','lihc','luad',\ 'lusc','meso','ov','paad','pcpg','prad','read','sarc','skcm','stad','tgct','thca','ucec','ucs','uvm'] PATH_REPORT_ROOT_DIRS = ['/bcr/intgen.org/pathology_reports/reports/','/bcr/nationwidechildrens.org/pathology_reports/reports/'] CLIN_REPORT_ROOT = '/bcr/biotab/clin/' CLIN_REPORT_ROOT_DIRS = ['/bcr/biotab/clin/'] dl_dir = "/SYNOLOGY_TCGA_MIRROR/TCGA_LOCAL_MIRROR/" TCGA_LOCAL_ROOT_DIR = dl_dir + 'tcga-data.nci.nih.gov/tcgafiles/ftp_auth/distro_ftpusers/anonymous/tumor/' TCGA_HTTP_ROOT_URL = 'https://tcga-data.nci.nih.gov/tcgafiles/ftp_auth/distro_ftpusers/anonymous/tumor/' """PARAMETERS AND VARIABLE INITIALIZATION """ verbose = 0 default_level = ',0 ' ### default layer to use for ndpi2tiff ndpi_count = 0 _verbose = 0 _verbose = 1 script_id_num = 3800 ### going to increment from some number...maybe ill make this random later class LinePrinter(): """ Print things to stdout on one line dynamically """ def __init__(self,data): sys.stdout.write("\r\x1b[K"+data.__str__()) sys.stdout.flush() """ REGULAR EXPRESSION """ parse_tcga_tissue_and_stain_type = re.compile(r'org_(..*)\.(diagnostic|tissue)_images',re.IGNORECASE) parse_TCGA_SUBJECT_ID = re.compile(r'(TCGA-..-....)') parse_full_TCGA_ID = re.compile(r'(TCGA-..-....)-(\d\d)(.)-([^-]*)',re.IGNORECASE) adrc_pat_one = re.compile(r'(ADRC\d\d-\d+)_(...?)_(.*)\.ndpi$', re.IGNORECASE) adrc_pat_two = re.compile(r'(OS\d\d-\d+)_(\d+)_(.+)_(.*)\.ndpi$|(OS\d\d-\d+)_([^_]*)_(.*)\.ndpi$',re.IGNORECASE) adrc_pat_three = re.compile(r'(E\d\d-\d+)_(\d+)_([^_]+)_(.*)\.ndpi$',re.IGNORECASE) adrc_dzi_pat_one = re.compile(r'(ADRC\d\d-\d+)_(...?)_(.+)\.ndpi\.dzi\.tif$', re.IGNORECASE) adrc_dzi_pat_two = re.compile(r'(OS\d\d-\d+)_(\d+)_(.+)_(.*)\.ndpi\.dzi\.tif$|(OS\d\d-\d+)_([^_]*)_(.*)\.ndpi\.dzi\.tif',re.IGNORECASE) adrc_dzi_pat_three = re.compile(r'(E\d\d-\d+)_(\d?)_(.+)_(.*)\.ndpi\.dzi\.tif$',re.IGNORECASE) """ Output files and other logs """ f_out = open('corrupt_svs_files.txt','a+') def connect_to_db( host, user, passwd, db): """I will return two cursors to make my life easier """ try: db_dict = MySQLdb.connect(host, user, passwd, db, cursorclass=MySQLdb.cursors.DictCursor ) db_dict_cursor = db_dict.cursor() update_cursor = db_dict.cursor() return( db_dict_cursor, update_cursor) except: print "Could not connect to the database!!!",host,user,passwd,db sys.exit() return (None,None) def openslide_test_file(full_file_path,file_type,db_cursor): """This will use the openslide bindings to get the width, height and filesize for an image or return an Error otherwise""" width=height=filesize=orig_resolution=slide_title=md5 = None try: im = 
openslide.open_slide(full_file_path) (width, height) = im.dimensions base_file_name = os.path.basename(full_file_path) filesize = os.path.getsize(full_file_path) if(file_type== 'svs'): orig_resolution = im.properties['aperio.AppMag'] #md5 = md5Checksum(full_file_path) slide_name = os.path.basename(full_file_path) return(True,width,height,filesize,orig_resolution,slide_name,md5) except OpenSlideError, e: print "Openslide returned an error",full_file_path print >>sys.stderr, "Verify failed with:", repr(e.args) print "Openslide returned an error",full_file_path f_out.write(full_file_path+';\n') insert_corrupt_batch_stmt = "insert into `corrupt_or_unreadable_%s_files` (full_file_name,filesize) Values ('%s',%d) " print insert_corrupt_batch_stmt % (file_type,full_file_path,os.path.getsize(full_file_path) ) #update_cursor.execute( insert_corrupt_batch_stmt % (full_file_path,os.path.getsize(full_file_path) )) return(False,None,None,None,None,None,None) except StandardError, e: #file name likely not valid print >>sys.stderr, "Verify failed with:", repr(e.args) print "Openslide returned an error",full_file_path f_out.write(full_file_path+';\n') insert_corrupt_batch_stmt = "insert into `corrupt_or_unreadable_%s_files` (full_file_name,filesize) Values ('%s',%d) " print insert_corrupt_batch_stmt % (file_type,full_file_path,os.path.getsize(full_file_path) ) #update_cursor.execute( insert_corrupt_batch_stmt % (full_file_path,os.path.getsize(full_file_path) )) return(False,None,None,None,None,None,None) except: print "failed even earlier on",full_file_path """will log this to a file""" return(False,width,height,filesize,orig_resolution,slide_title,md5) return(False,width,height,filesize,orig_resolution,slide_title,md5) def check_image_status_in_db(full_file_path,filetype,db_cursor): """ this will do a lookup in the thumb database and see if the image is already there... if it is... 
I don't bother do any additional file lookups some of the metadata extraction can take a bit of time as I need to parse the PNG headers filetype can be svs, bigtiff image, ndpi, pyramid image """ v = _verbose >= 1; vv = _verbose >= 2 if filetype == 'svs': sql_lookup = "select count(*) as count from `svs_slide_info` where full_file_path='%s'" % (full_file_path) db_cursor.execute(sql_lookup) data = db_cursor.fetchone() if data['count'] == 0: if vv: print "Need to update entry" (valid_image,width,height,filesize,orig_resolution,base_file_name,md5) = openslide_test_file(full_file_path,'svs',db_cursor) if valid_image: slide_folder = str(full_file_path.split('/')[-2]) sql = "insert into `svs_slide_info` ( slide_filename, image_width,image_height, resolution, full_file_path, slide_folder, filesize ,md5sum ) " sql += " Values ('%s',%s,%s,%s,'%s', '%s',%d,'%s' ) " % ( base_file_name, width, height, orig_resolution, full_file_path, slide_folder, filesize ,md5) db_cursor.execute(sql) elif filetype == 'pyramid': sql_lookup = "select count(*) as count from `dzi_pyramid_info` where full_file_path like ('"+full_file_path+"')" db_cursor.execute(sql_lookup) data = db_cursor.fetchone() if data['count'] == 0: if vv: print "Need to update entry" (valid_image,width,height,filesize,orig_resolution,pyramid_file_name,md5) = openslide_test_file(full_file_path,'pyramid',db_cursor) if valid_image: slide_folder = str(full_file_path.split('/')[-2]) insert_sql = "insert into `dzi_pyramid_info` ( pyramid_filename, image_width, image_height, full_file_path, file_basename, filesize ,pyramid_folder) "\ + " Values ('%s',%d,%d,'%s','%s', %d, '%s' ) " % ( pyramid_file_name, width, height, full_file_path , slide_folder, filesize , slide_folder) print insert_sql db_cursor.execute(insert_sql) def set_active_archive_status(metadata_dict_cursor): """This will update and/or set the flag for a slide being an active archive from the TCGA data set""" select_stmt = " select * from `latest_archive_info`" print select_stmt metadata_dict_cursor.execute(select_stmt) result = metadata_dict_cursor.fetchall() active_slide_archive = [] for row in result: archive_name = row['ARCHIVE_NAME'] if 'slide' in archive_name or 'diagnostic' in archive_name or 'tissue' in archive_name: # print archive_name active_slide_archive.append(archive_name) print "I have found",len(active_slide_archive),"active slid archives" ## i should probably set all rchives to null first.. 
####first set the entire thing to not have update_stmt = "update svs_slide_info set active_tcga_slide='0'" print update_stmt metadata_dict_cursor.execute(update_stmt) for cur_archive in active_slide_archive: update_stmt = "update svs_slide_info set active_tcga_slide='1' where slide_folder='%s'" % cur_archive print update_stmt metadata_dict_cursor.execute(update_stmt) """Now need to check if file is on the filesystem result = metadata_dict_cursor.fetchall() null_rows = 0 for row in result: full_file_path = row['full_file_path'] patient_id = get_tcga_id( os.path.basename(full_file_path) ,False) """ def validate_slide_pyramid_linkage(db_cursor,db_cursor_two): select_stmt = " select * from `svs_slide_info`" db_cursor.execute(select_stmt) """Now need to check if file is on the filesystem""" result = db_cursor.fetchall() invalid_pyramid_link = 0 print len(result),"rows to process" for row in result: #print row invalid_row = False pyramid = (row['pyramid_filename']) if not os.path.isfile(pyramid): print "Pyramid is missing...",pyramid invalid_row = True svs = (row['full_file_path']) if not os.path.isfile(svs): print "SVS is missing",svs invalid_row = True if os.path.basename(pyramid).split('.')[0] != os.path.basename(svs).split('.')[0]: print svs,pyramid,"DONT SEEM TO MATCH" print os.path.basename(pyramid),os.path.basename(svs) invalid_row = True if invalid_row: del_sql = "delete from svs_slide_info where slide_id='%d'" % row['slide_id'] db_cursor_two.execute(del_sql) ##pyramid_file_name and full_file_path def generate_slide_pyramid_linkage(db_cursor,db_cursor_two): """ This will update the slide database and link the pyramids associated with the image.... will scan multiple tables """ v = _verbose >= 1; vv = _verbose >= 2 v= True vv = True """pyramid filenames match on slide_filename in the svs_slide_info table and slide_folder... there are the two main keys""" """ other fields of import include stain_type and main_project_name... this needs to be duplictable at some point since a slide can be in more than one project.... other key field is tissue_type and patient_id I may want to have this field iterate multiple fields one by one.... """ ## in the dzi_pyramid_info I have two fields that need to be dupdated...parent_slide_title and parent_slide_id ## probably only need one of these... 
other field thats relevant is pyramid_folder select_stmt = " select * from `svs_slide_info` where pyramid_generated is NULL" db_cursor.execute(select_stmt) """Now need to check if file is on the filesystem""" result = db_cursor.fetchall() null_rows = 0 matched_pyramids_found = 0 for row in result: null_rows += 1 matched_pyramid_file = row['full_file_path'].replace('/bigdata/RAW_SLIDE_LINKS/CDSA/','/bigdata2/PYRAMIDS/CDSA/')+'.dzi.tif' # print matched_pyramid_file if(os.path.isfile(matched_pyramid_file)): update_sql = "update svs_slide_info set pyramid_filename='%s',pyramid_generated='%d' where slide_id='%d'" % (matched_pyramid_file,True,row['slide_id']) db_cursor.execute(update_sql) matched_pyramids_found += 1 else: pass #//there should be a matching pyramid #patient_id = get_tcga_id( os.path.basename(full_file_path) ,False) # print patient_id # if not patient_id[0] == None: # else: # print "Found no patient id...",full_file_path print "there were",null_rows,"empty rows and",matched_pyramids_found,"matched pyramids" select_stmt = " select * from `svs_slide_info` where patient_id is NULL" db_cursor.execute(select_stmt) """Now need to check if file is on the filesystem""" result = db_cursor.fetchall() null_rows = 0 for row in result: full_file_path = row['full_file_path'] patient_id = get_tcga_id( os.path.basename(full_file_path) ,False) # print patient_id null_rows += 1 if not patient_id[0] == None: update_sql = "update svs_slide_info set patient_id='%s' where slide_id='%d'" % (patient_id[0],row['slide_id']) db_cursor.execute(update_sql) else: print "Found no patient id...",full_file_path print "there were",null_rows,"empty rows" select_stmt = " select * from `svs_slide_info` where stain_type is NULL and tissue_type is NULL" db_cursor.execute(select_stmt) """Now need to check if file is on the filesystem""" result = db_cursor.fetchall() null_rows = 0 for row in result: full_file_path = row['full_file_path'] (stain_type,tissue_type) = get_tcga_stain_type(full_file_path ) """I originally AND 'ed the sql statement and it caused it to crash.... i guess that's the logical operator""" null_rows += 1 if not stain_type == None and not tissue_type == None: update_sql = "update svs_slide_info set stain_type='%s', tissue_type='%s' where slide_id=%d" %\ (stain_type,tissue_type,row['slide_id']) db_cursor.execute(update_sql) else: print "Found no matching group type ...",full_file_path print "there were",null_rows,"empty rows" select_stmt = " select * from `dzi_pyramid_info` where parent_slide_id is NULL" db_cursor.execute(select_stmt) """Now need to check if file is on the filesystem""" result = db_cursor.fetchall() null_rows = 0 for row in result: full_file_path = row['full_file_path'] pyramid_folder = row['pyramid_folder'] pyramid_filename = row['pyramid_filename'] ### of note it is quite likely the pyramid filename does NOT match the ## origin slide filename but has extra crap at the end... ## and also this can be a one to many relationship.. i.e. 
i may have pyramidized a file ## multiple times pyramid_id = row['pyramid_id'] slide_filename = pyramid_filename.replace('.dzi.tif','') ### = row['pyramid_filename'] ### of note it is quite likely the pyramid filename does NOT match the the dzi.tif is the issue pyramid_to_orig_slide_match = "select * from svs_slide_info where slide_folder='%s' and slide_filename like '%s'" %(pyramid_folder,slide_filename) db_cursor_two.execute(pyramid_to_orig_slide_match) slide_match_result = db_cursor_two.fetchall() if slide_match_result: for slide_row in slide_match_result: print slide_row slide_id = slide_row['slide_id'] """so now that I found a match I need to reverse the lookup and get the pyramid id..""" # set_slide_match_sql = "update svs_slide_info select * from svs_slide_info where slide_folder='%s' and slide_filename like '%s'" %(pyramid_folder,slide_filename) set_pyramid_match_sql = "update dzi_pyramid_info set parent_slide_id='%d' where pyramid_id='%d'" %(slide_id,pyramid_id) db_cursor_two.execute( set_pyramid_match_sql) else: # print "No match for",slide_filename,"so found a null file set",pyramid_folder pass """ null_rows += 1 if not stain_type == None and not tissue_type == None: update_sql = "update svs_slide_info set stain_type='%s', tissue_type='%s' where slide_id=%d" %\ (stain_type,tissue_type,row['slide_id']) metadata_cursor.execute(update_sql) else: print "Found no matching group type ...",full_file_path print "there were",null_rows,"empty rows" """ def get_file_metadata ( input_file, file_type): """this function wil scan a system file and try axtract certain metadata about the file.. this will vary based on the root file type i.e. ndpi, svs, big tff, etc""" print input_file, file_type def find_clin_reports ( tumor_type ): """also grab all the clinical data.....""" clin_data = [] clin_data_struct = {} """ it seems like the clinical data reports are the cleanest with nationwidechildrens """ for clin_rpt_dir in CLIN_REPORT_ROOT_DIRS: path_base_dir = TCGA_LOCAL_ROOT_DIR+tumor_type+clin_rpt_dir #print path_base_dir for dpath, dnames, fnames in os.walk( path_base_dir, followlinks=True): for file in fnames: if '.txt' in file: filebase = file.rstrip('.txt') full_file_path = dpath+'/'+filebase #full_file_path = 'temp' web_path = full_file_path.replace(TCGA_LOCAL_ROOT_DIR,'') clin_data_struct[filebase] = { 'web_path':web_path, 'full_file_path':full_file_path } #Making the full file path a relative web path #pdf_path_reports.append(path_data_struct) return clin_data_struct def find_path_reports ( tumor_type ): """this will walk the directories and find pdf files that are path reports """ pdf_path_reports = [] path_data_struct = {} """Path reports seem to be in more than one base directory depending on if intgen or nationwides curated them""" for PATH_REPORT_ROOT in PATH_REPORT_ROOT_DIRS: path_base_dir = TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT #print path_base_dir for dpath, dnames, fnames in os.walk( TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT, followlinks=True): for file in fnames: if '.pdf' in file: filebase = file.rstrip('.pdf') full_file_path = dpath+'/'+filebase #full_file_path = 'temp' web_path = full_file_path.replace(TCGA_LOCAL_ROOT_DIR,'') path_data_struct[filebase] = { 'web_path':web_path, 'full_file_path':full_file_path } #Making the full file path a relative web path #pdf_path_reports.append(path_data_struct) return path_data_struct def find_tcga_clinical_files ( tumor_type ): """this will walk the directories and find pdf files that are path reports """ pdf_path_reports 
= [] path_data_struct = {} path_base_dir = TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT #print path_base_dir for dpath, dnames, fnames in os.walk( TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT, followlinks=True): for file in fnames: if '.pdf' in file: filebase = file.rstrip('.pdf') #full_file_path = dpath+'/'+filebase full_file_path = 'temp' web_path = full_file_path.replace(TCGA_LOCAL_ROOT_DIR,'') path_data_struct[filebase] = { 'web_path':web_path, 'full_file_path':full_file_path } #Making the full file path a relative web path #pdf_path_reports.append(path_data_struct) return path_data_struct def find_ndpi_image_list( ndpi_root_path ): """project_name is passed along with the potentially more than one root image path for ndpi files""" found_ndpi_files = [] ndpi_root_path = ndpi_root_path.rstrip('/') for dpath, dnames, fnames in os.walk( ndpi_root_path, followlinks=True): for file in fnames: if '.ndpi' in file: #filebase = file.rstrip('.ndpi') #print dpath found_ndpi_files.append(dpath +'/'+file) print len(found_ndpi_files),"NDPI files were located" return found_ndpi_files def find_svs_image_list( project_name, svs_root_path_list ): """project_name is passed along with the potentially more than one root image path for ndpi files""" found_svs_files = [] svs_files_found = 0 for svs_root_path in svs_root_path_list: print svs_root_path for dpath, dnames, fnames in os.walk( svs_root_path+project_name, followlinks=True): for file in fnames: if '.svs' in file: filebase = file.rstrip('.svs') full_filename = dpath+'/'+file #check_image_status_in_db(full_filename,'svs') # change this to add corrupt files and bytes file found # found_svs_files.append(filebase) found_svs_files.append(full_filename) svs_files_found += 1 output = "Processed: %d svsfiles " % \ (svs_files_found ) #corrupt_svs_count, total_gigapixels, total_bytes, old_batch_svs) LinePrinter(output) return(found_svs_files) def find_pyramid_images( project_name, pyramid_root_dirs): ## first find the available resolutions... pyramid_images = [] pyramids_found = 0 ### I am going to add or scan for a 20X, 5X or 40X instead... and use that for pyramid_root in pyramid_root_dirs: if os.path.isdir(pyramid_root+project_name): for dpath, dnames, fnames in os.walk( pyramid_root+project_name, followlinks=True): for file in fnames: if '.dzi.tif' in file.lower(): full_filename = dpath+'/'+file pyramids_found += 1 if verbose: print file,dpath #check_image_status_in_db(full_filename,'pyramid') # change this to add corrupt files and bytes file found output = "Processed: %d pyramids" % pyramids_found LinePrinter(output) pyramid_images.append(full_filename) return(pyramid_images) def get_tcga_stain_type( string_to_check): """ this function pulls out the stain and tissue type from the TCGA path file names """ m = parse_tcga_tissue_and_stain_type.search(string_to_check) if m: return (m.group(1),m.group(2) ) else: return (None,None) class Table: def __init__(self, db, name): self.db = db self.name = name self.dbc = self.db.cursor() def __getitem__(self, item): self.dbc.execute("select * from %s limit %s, 1" %(self.name, item)) return self.dbc.fetchone() def __len__(self): self.dbc.execute("select count(*) as count from %s" % (self.name)) count_info = self.dbc.fetchone() l = int( count_info['count'] ) return l """ Acronyyms and abbreivations used as well as syntax info wsi = whole slide image -8 specifies bigtiff output and the -c sets the compression pick the level to get which should be 0-- i.e. 
what layer am i trying to convert """ def check_for_valid_ADRC_ID( string_to_check): """a file should start with ideally ADRC##-#### or OS or osmething similar Valid filename should be ADRCXX-XXXX_<Section>_<STAIN>_<NOtes> """ m = adrc_pat_one.match(string_to_check) m_second_pat = adrc_pat_two.match(string_to_check) m_third_pat = adrc_pat_three.match(string_to_check) if m: patient_id = m.group(1) section_id = m.group(2) stain = m.group(3) # print patient_id,section_id,stain return(True) elif m_second_pat: patient_id = m_second_pat.group(1) section_id = m_second_pat.group(2) stain = m_second_pat.group(3) # print patient_id,section_id,stain return(True) elif m_third_pat: patient_id = m_third_pat.group(1) section_id = m_third_pat.group(2) stain = m_third_pat.group(3) else: print "no match",string_to_check return(False) def parse_slide_info_for_ADRC_ID( string_to_check): """a file should start with ideally ADRC##-#### or OS or osmething similar Valid filename should be ADRCXX-XXXX_<Section>_<STAIN>_<NOtes> """ stain_tag_normalization_dict = { "AB" : "Abeta", "ABETA" : "ABeta", "US_tau": "Tau", "US_pTDP" : "pTDP", "TAU" : "Tau" , "TAU" : "tau", "US_AB" : "ABeta", "US_aSYN-4B12" : "aSyn-4B12", "BIEL" : "Biel"} m = adrc_dzi_pat_one.match(string_to_check) m_second_pat = adrc_dzi_pat_two.match(string_to_check) m_third_pat = adrc_dzi_pat_three.match(string_to_check) if m: patient_id = m.group(1) section_id = m.group(2) stain = m.group(3) if stain in stain_tag_normalization_dict.keys(): stain = stain_tag_normalization_dict[stain] print patient_id,section_id,stain return(True,patient_id,section_id,stain) elif m_second_pat: patient_id = m_second_pat.group(1) section_id = m_second_pat.group(2) stain = m_second_pat.group(3) if stain in stain_tag_normalization_dict.keys(): stain = stain_tag_normalization_dict[stain] print patient_id,section_id,stain return(True,patient_id,section_id,stain) elif m_third_pat: patient_id = m_third_pat.group(1) section_id = m_third_pat.group(2) stain = m_third_pat.group(3) if stain in stain_tag_normalization_dict.keys(): stain = stain_tag_normalization_dict[stain] print patient_id,section_id,stain return(True,patient_id,section_id,stain) else: print "no match",string_to_check return(False,None,None,None) def get_tcga_id( string_to_check , get_full_tcga_id): """ will either return the TCGA-12-3456 or the entire TCGA sample ID which is much much longer... TCGA-12-3456-12-23-232-32""" if(get_full_tcga_id): m = parse_full_TCGA_ID.match(string_to_check) if m: TCGA_FULL_ID = m.group(1)+'-'+m.group(2)+m.group(3)+'-'+m.group(4) return (m.group(1),TCGA_FULL_ID) else: return None,None m = parse_TCGA_SUBJECT_ID.match(string_to_check) if m: return (m.group(0),'None') else: return (None,None) def set_database_slide_metadata(database,table): """this will iterate and update various project related attributes that may not be set on initial parse such as stain type, tissue_type , etc... 
""" ## update stain_Type first sql_lookup = "select * from `"+ database + "`.`dzi_pyramid_info` where stain_type is NULL " metadata_dict_cursor.execute(sql_lookup) data = metadata_dict_cursor.fetchall() for row in data: # print row (found_tags, patient_id, section_id, stain) = parse_slide_info_for_ADRC_ID( row['pyramid_filename']) if found_tags: update_sql = "update `" + database + "`.`"+"dzi_pyramid_info` set stain_type='%s' where pyramid_id='%d'" % ( stain, row['pyramid_id']) print update_sql update_cursor.execute(update_sql) update_annotation_sql = "select * from `" + database + "`.`dzi_pyramid_info` where has_annotation is Null" metadata_dict_cursor.execute(update_annotation_sql) data = metadata_dict_cursor.fetchall() for row in data: print row def update_annotations(database): """will find xml annotation files and update the database """ base_path = '/var/www/adrc_js/xml_annotation_files/' # crawl looking for svs files for dirpath, dirnames, filenames in os.walk(base_path, followlinks=True, onerror=_listdir_error): for fname in filenames: # NDPI (slide) file? if 'xml' in fname: file_with_path = os.path.join(dirpath, fname) print file_with_path,dirpath,dirnames,filenames base_filename = os.path.basename(fname) base_filename = base_filename.replace('.xml','') print base_filename find_slide_sql = "select * from dzi_pyramid_info where pyramid_filename like '%s%%'" % (base_filename) print find_slide_sql metadata_dict_cursor.execute( find_slide_sql) data = metadata_dict_cursor.fetchall() for row in data: print data update_sql = "update dzi_pyramid_info set has_annotation='1' where pyramid_id='%d'" % (row['pyramid_id']) print update_sql update_cursor.execute(update_sql) def gen_ndpi_pyramid(input_file,pyramid_file): """ this is a new method that will convert an NDPI to a tiff without necessitating tiling""" v = _verbose >= 1; vv = _verbose >= 2 ndpi2tiff_command = "/bigdata3/BIG_TIFF_IMAGES/ndpi2tiff -8 -t -c lzw:2 " script_file_base_path = '/fastdata/tmp/SGE_SCRIPTS/' SSD_TEMP_SPACE = '/fastdata/tmp/' global script_id_num ### going to increment from some number...maybe ill make this random later current_command_list = '#/bin/bash \n' ### set this to null... ill only open a script file if i actually run a command delete_bigtiff_image = True ## determines if I should cleanup/delete the bigtiff i generate ## this is an intermediate file before pyramid generation print input_file,pyramid_file if not os.path.isfile(pyramid_file): ### for speed I am going to copy the input file to /fastdata/tmp.. ### I am copying the input_file from its home to a cache dir of SSD goodness ssd_cached_file = SSD_TEMP_SPACE + os.path.basename(input_file) if v: print ssd_cached_file,"cached file name" if not os.path.isfile(ssd_cached_file): current_command_list += "sleep "+str(random.randint(1,180) ) + ' \n' current_command_list += "cp "+input_file+' '+SSD_TEMP_SPACE+'\n' ## after deliberation copying from the script versus via ssh helps throttle disk copy from ## the long term image store which is slower.. 
## I decided to add a random sleep time of 0 - 180 seconds in each job ndpi2tiff_command = ndpi2tiff_command + ssd_cached_file + default_level if v: print ndpi2tiff_command output_file = ssd_cached_file+',0.tif' if not os.path.isfile(output_file): current_command_list += ndpi2tiff_command +'\n' pyramid_output_dir = os.path.dirname(pyramid_file) if not os.path.isdir(pyramid_output_dir): os.makedirs(pyramid_output_dir) #vips_pyramid_output = cur_file.replace(input_dir,pyramid_directory) +'.dzi.tif' vips_command = 'vips im_vips2tiff -v '+output_file+' '+pyramid_file+':jpeg:90,tile:256x256,pyramid,,,,8 ' print vips_command current_command_list += vips_command if v: print current_command_list ### now writing the script current_bash_script = script_file_base_path+'ndpi2tiff-'+str(script_id_num)+'.sh' f_out = open(current_bash_script,'w') f_out.write(current_command_list) if delete_bigtiff_image: f_out.write('\n rm -rf \''+output_file+'\' \n') f_out.write('rm -rf '+ssd_cached_file+' \n') ## this may be better to just not put part of the command script script_id_num += 1 f_out.close() sge_submit_cmd = "qsub -q slide_convert.q "+current_bash_script print sge_submit_cmd output = subprocess.check_output (sge_submit_cmd,stderr=subprocess.STDOUT, shell=True) print output def _listdir_error(error): print >>sys.stderr, "Could not traverse/list:", error.filename def check_files(wsi_dir=DEFAULT_WSI_DIR): """Checks for NDPI and SVS images can probably be deleted... Arguments: wsi_dir -- The base directory to (recursively) search for .ndpi images. Returns: counts of found images: (ndpi, pyramid) """ print "Parsing",wsi_dir # sanity checks if not os.path.isdir(wsi_dir): raise IOError('SVS or NDPI base path is not a directory or is unreadable: ' + str(wsi_dir)) # get rid of any trailing slashes wsi_dir = wsi_dir.rstrip('/') global ndpi_count # arg handling v = _verbose >= 1; vv = _verbose >= 2 wsi_prefix_len = len(wsi_dir) + 1 # plus 1 for leading '/' ndpi_pat = re.compile(r'.*\.ndpi$', re.IGNORECASE) # crawl looking for svs files for dirpath, dirnames, filenames in os.walk(wsi_dir, followlinks=True, onerror=_listdir_error): for fname in filenames: # NDPI (slide) file? if ndpi_pat.match(fname): ndpi_count +=1 file_with_path = os.path.join(dirpath, fname) if v: print >>sys.stderr, "Slide: ", file_with_path path_suffix = dirpath[wsi_prefix_len:] path = fname.split('/') file = path[len(path)-1] ### first check if the ndpi file is registered in our database... 
check_image_status_in_db(file_with_path,'ndpi','adrc_slide_database','ndpi_slide_info') if check_for_valid_ADRC_ID( file) or True : input_file = os.path.join(dirpath)+'/'+file pyramid_file = input_file.replace(DEFAULT_WSI_DIR,DEFAULT_PYRAMID_DIR)+'.dzi.tif' if not os.path.isfile(pyramid_file): print "Generate pyramid for",file gen_ndpi_pyramid(input_file,pyramid_file) else: check_image_status_in_db(pyramid_file,'pyramid','adrc_slide_database','dzi_pyramid_info') return ( ndpi_count) def create_ADRC_schemas(): create_adrc_pyramid_schema = """ CREATE TABLE `dzi_pyramid_info` ( `pyramid_filename` varchar(200) DEFAULT NULL, `image_width` int(10) unsigned DEFAULT NULL, `image_height` int(10) unsigned DEFAULT NULL, `resolution` int(11) DEFAULT '40', `full_file_path` varchar(255) DEFAULT NULL, `file_basename` varchar(100) DEFAULT NULL, `filesize` int(10) unsigned DEFAULT NULL, `parent_slide_filename` varchar(50) DEFAULT NULL, `parent_slide_id` int(10) unsigned DEFAULT NULL, `pyramid_folder` varchar(80) DEFAULT NULL, `main_project_name` varchar(20) DEFAULT NULL, `stain_type` varchar(30) DEFAULT NULL, `tissue_type` varchar(30) DEFAULT NULL, `pyramid_id` int(11) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`pyramid_id`), KEY `full_file_name` (`full_file_path`), KEY `full_file_path` (`full_file_path`) ) ENGINE=MyISAM ; CREATE TABLE `corrupt_or_unreadable_pyramid_files` ( `full_file_name` text, `filesize` int(10) unsigned DEFAULT NULL, `active_archive` tinyint(4) DEFAULT NULL, `pyramid_id` int(11) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`pyramid_id`) ) """ print create_adrc_pyramid_schema """def main(args=None): if args is None: args = sys.argv[1:] global _verbose; _verbose = opts.verbose currentdir = DEFAULT_WSI_DIR # for currentdir in DIRS_WITH_IMAGES: #check_files(wsi_dir=opts.wsi_dir) # (ndpi_count) = check_files(currentdir+'ADRC61-128/') ## is running on node16 (ndpi_count) = check_files(currentdir) # create_ADRC_schemas() #et_database_slide_metadata('adrc_slide_database','dzi_pyramid_info') # update_annotations('adrc_slide_databse') print "NDPI slides:", ndpi_count """ def update_md5_values(database,table_to_crawl,primary_key,db_cursor, update_cursor): #sql_lookup = "select * from `%s`.`%s` where md5sum is NULL and pyramid_folder like '%%BRCA%%' " % (database,table_to_crawl) sql_lookup = "select * from `%s`.`%s` where md5sum is NULL " % (database,table_to_crawl) db_cursor.execute(sql_lookup) data = db_cursor.fetchall() print len(data),"rows to process" for row in data: if os.path.isfile(row['full_file_path']): print row update_stmt = "update `%s`.`%s` set md5sum='%s' where %s='%s'" % (database,table_to_crawl,md5sum(row['full_file_path']),primary_key,row[primary_key]) print update_stmt update_cursor.execute(update_stmt) else: print "missing",row update_stmt = "delete from `%s`.`%s` where %s='%s'" % (database,table_to_crawl,primary_key,row[primary_key]) print update_stmt #update_cursor.execute(update_stmt) def locate_md5_collissions(database,table_to_crawl,db_cursor, update_cursor): sql_lookup = "select md5sum, count(*) as count from `%s`.`%s` group by md5sum having count>1" % (database,table_to_crawl) print sql_lookup db_cursor.execute(sql_lookup) data = db_cursor.fetchall() print len(data),"rows to process" md5_collision_list = [] for row in data: #print row md5_collision_list.append(row['md5sum']) #print md5_collision_list print len(md5_collision_list),"entries with 2 or more matching md5 values" for md5 in md5_collision_list: if md5 is not None: dup_sql = "select * from `%s`.`%s` where md5sum='%s'" % 
(database,table_to_crawl,md5) #print dup_sql db_cursor.execute(dup_sql) data = db_cursor.fetchall() #print data[0] print "------------NEXT ENTRY has %d---------------" % len(data) #print data filename = os.path.basename(data[0]['full_file_path']) #print svs_filename for row in data: print row['pyramid_filename'] if filename not in row['full_file_path']: base_tcga_id = filename.split('.')[0] if base_tcga_id not in row['full_file_path']: print "shit",filename,row['full_file_path'],base_tcga_id print row # print data[0] #print update_stmt #update_cursor.execute(update_stmt) #pyramid_filename': '/bigdata2/PYRAMIDS/CDSA/BRCA_Diagnostic/nationwidechildrens.org_BRCA.diagnostic_images.Level_1.93.0.0/TCGA-E2-A14Y-01Z-00-DX1.804A22A3-FD8D-4C8A-A766-48D28434DE22.svs.dzi.tif', 'active_tcga_slide': 0, 'resolution': 40L, 'md5sum': None, 'image_width': 113288L, 'pyramid_generated': 1, 'patient_id': 'TCGA-E2-A14Y', 'stain_type': 'BRCA', 'image_height': 84037L, 'filesize': 1971660649L, 'slide_folder': 'nationwidechildrens.org_BRCA.diagnostic_images.Level_1.93.0.0', 'slide_filename': 'TCGA-E2-A14Y-01Z-00-DX1.804A22A3-FD8D-4C8A-A766-48D28434DE22.svs', 'main_project_name': None, 'slide_id': 29602L, # 'full_file_path': '/bigdata/RAW_SLIDE_LINKS/CDSA-LOCAL/BRCA_Diagnostic/nationwidechildrens.org_BRCA.diagnostic_images.Level_1.93.0.0/TCGA-E2-A14Y-01Z-00-DX1.804A22A3-FD8D-4C8A-A766-48D28434DE22.svs', # 'tissue_type': 'diagnostic'} ### find collisions across pyramid_filenames as well.. def find_rogue_pyramid_filenames(database,db_cursor,con_two): """so this will check and see if the full file path and the pyramid_filename are... the same file... im wondering if I screwed up at some point and made the associations wrong""" rogue_sql = "select * from `%s`.`svs_slide_info`" % (database) print rogue_sql db_cursor.execute(rogue_sql) data = db_cursor.fetchall() for row in data: pyr = os.path.basename( row['pyramid_filename']) svs = os.path.basename( row['full_file_path'] ) if svs not in pyr and pyr is not '': print "SHIT, pyr=%s,svs=%s" % ( pyr,svs) print row def find_unlinked_files( db_cursor): """this will look for archive directories that do not have a corresponding link in the RAW_SLIDE_LINK dir""" select_stmt = " select * from `latest_archive_info`" print select_stmt db_cursor.execute(select_stmt) result = db_cursor.fetchall() active_slide_archive = [] for row in result: archive_name = row['ARCHIVE_NAME'] if 'slide' in archive_name or 'diagnostic' in archive_name or 'tissue' in archive_name: # print archive_name active_slide_archive.append(archive_name) print "I have found",len(active_slide_archive),"active slid archives" link_path = '/bigdata/RAW_SLIDE_LINKS/CDSA/*/' all_linked_dirs = glob.glob( link_path+'*') currently_linked_dirs = [ os.path.basename(dir) for dir in all_linked_dirs] for active_dir in active_slide_archive: if active_dir not in currently_linked_dirs: print "need to link",active_dir return(active_slide_archive) #(cur_one, cur_two) = dsa.connect_to_db('localhost','root','cancersuckz!','cdsa_js_prod') #import dsa_common_functions as dsa #(cur_one, cur_two) = dsa.connect_to_db('localhost','root','cancersuckz!','cdsa_js_prod') #active_archive_list = dsa.find_unlinked_files(cur_one) #active_archive_list #history """Now need to check if file is on the filesystem result = metadata_dict_cursor.fetchall() null_rows = 0 for row in result: full_file_path = row['full_file_path'] patient_id = get_tcga_id( os.path.basename(full_file_path) ,False) """ """ """ if __name__ == '__main__': print "Nothing to do..." 
#(con_one,con_two) = connect_to_db('localhost', 'root', 'cancersuckz!', 'cdsa_js_prod') find_unlinked_files(con_one) #update_md5_values('cdsa_js_prod','svs_slide_info','slide_id',con_one,con_two) #locate_md5_collissions('cdsa_js_prod','svs_slide_info',con_one,con_two) #locate_md5_collissions('cdsa_js_prod','dzi_pyramid_info',con_one,con_two) validate_slide_pyramid_linkage(con_one,con_two) #find_rogue_pyramid_filenames('cdsa_js_prod',con_one,con_two) #update_md5_values('cdsa_js_prod','dzi_pyramid_info','pyramid_id',con_one,con_two) generate_slide_pyramid_linkage(con_one,con_two)
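# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module above): the md5sum()
# helper at the top of this file hashes whole-slide images in 128-byte chunks
# so that multi-gigabyte files never have to be loaded into memory. The
# standalone version below shows the same iter()/sentinel idiom with a
# configurable block size; the 1 MiB default and the example path are
# assumptions for illustration, not values taken from this repository.
# ---------------------------------------------------------------------------
import hashlib
from functools import partial


def md5sum_chunked(filename, block_size=2 ** 20):
    """Return the hex MD5 digest of a file, reading it block by block."""
    digest = hashlib.md5()
    with open(filename, mode='rb') as f:
        # iter() with a sentinel keeps calling f.read(block_size) until it
        # returns b'' at end of file.
        for block in iter(partial(f.read, block_size), b''):
            digest.update(block)
    return digest.hexdigest()


# Example use:
#   md5sum_chunked('/NDPI_VAULT/ADRC/some_slide.ndpi')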
apache-2.0
2,299,405,208,769,935,400
39.347734
619
0.688587
false
2.813993
false
false
false
mosra/m.css
plugins/m/dox.py
1
8330
# # This file is part of m.css. # # Copyright © 2017, 2018, 2019, 2020 Vladimír Vondruš <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # from docutils.parsers.rst.states import Inliner from docutils import nodes, utils from docutils.parsers import rst from docutils.parsers.rst.roles import set_classes import xml.etree.ElementTree as ET import os import re import logging logger = logging.getLogger(__name__) # Modified from abbr / gh / gl / ... to add support for queries and hashes link_regexp = re.compile(r'(?P<title>.*) <(?P<link>[^?#]+)(?P<hash>[?#].+)?>') def parse_link(text): link = utils.unescape(text) m = link_regexp.match(link) if m: title, link, hash = m.group('title', 'link', 'hash') if not hash: hash = '' # it's None otherwise else: title, hash = '', '' return title, link, hash def init(tagfiles, input): global symbol_mapping, symbol_prefixes, tagfile_basenames # Pre-round to populate subclasses. Clear everything in case we init'd # before already. 
tagfile_basenames = [] symbol_mapping = {} symbol_prefixes = [''] for f in tagfiles: tagfile, path = f[:2] prefixes = f[2] if len(f) > 2 else [] css_classes = f[3] if len(f) > 3 else [] tagfile_basenames += [(os.path.splitext(os.path.basename(tagfile))[0], path, css_classes)] symbol_prefixes += prefixes tree = ET.parse(os.path.join(input, tagfile)) root = tree.getroot() for child in root: if child.tag == 'compound' and 'kind' in child.attrib: # Linking to pages if child.attrib['kind'] == 'page': link = path + child.find('filename').text + '.html' symbol_mapping[child.find('name').text] = (child.find('title').text, link, css_classes) # Linking to files if child.attrib['kind'] == 'file': file_path = child.find('path') link = path + child.find('filename').text + ".html" symbol_mapping[(file_path.text if file_path is not None else '') + child.find('name').text] = (None, link, css_classes) for member in child.findall('member'): if not 'kind' in member.attrib: continue # Preprocessor defines and macros if member.attrib['kind'] == 'define': symbol_mapping[member.find('name').text + ('()' if member.find('arglist').text else '')] = (None, link + '#' + member.find('anchor').text, css_classes) # Linking to namespaces, structs and classes if child.attrib['kind'] in ['class', 'struct', 'namespace']: name = child.find('name').text link = path + child.findtext('filename') # <filename> can be empty (cppreference tag file) symbol_mapping[name] = (None, link, css_classes) for member in child.findall('member'): if not 'kind' in member.attrib: continue # Typedefs, constants if member.attrib['kind'] == 'typedef' or member.attrib['kind'] == 'enumvalue': symbol_mapping[name + '::' + member.find('name').text] = (None, link + '#' + member.find('anchor').text, css_classes) # Functions if member.attrib['kind'] == 'function': # <filename> can be empty (cppreference tag file) symbol_mapping[name + '::' + member.find('name').text + "()"] = (None, link + '#' + member.findtext('anchor'), css_classes) # Enums with values if member.attrib['kind'] == 'enumeration': enumeration = name + '::' + member.find('name').text symbol_mapping[enumeration] = (None, link + '#' + member.find('anchor').text, css_classes) for value in member.findall('enumvalue'): symbol_mapping[enumeration + '::' + value.text] = (None, link + '#' + value.attrib['anchor'], css_classes) # Sections for section in child.findall('docanchor'): symbol_mapping[section.text] = (section.attrib.get('title', ''), link + '#' + section.text, css_classes) def dox(name, rawtext, text, lineno, inliner: Inliner, options={}, content=[]): title, target, hash = parse_link(text) # Otherwise adding classes to the options behaves globally (uh?) 
_options = dict(options) set_classes(_options) # Avoid assert on adding to undefined member later if 'classes' not in _options: _options['classes'] = [] # Try linking to the whole docs first for basename, url, css_classes in tagfile_basenames: if basename == target: if not title: # TODO: extract title from index page in the tagfile logger.warning("Link to main page `{}` requires a title".format(target)) title = target _options['classes'] += css_classes node = nodes.reference(rawtext, title, refuri=url + hash, **_options) return [node], [] for prefix in symbol_prefixes: if prefix + target in symbol_mapping: link_title, url, css_classes = symbol_mapping[prefix + target] if title: use_title = title elif link_title: use_title = link_title else: if link_title is not None: logger.warning("Doxygen anchor `{}` has no title, using its ID as link title".format(target)) use_title = target _options['classes'] += css_classes node = nodes.reference(rawtext, use_title, refuri=url + hash, **_options) return [node], [] # TODO: print file and line #msg = inliner.reporter.warning( #'Doxygen symbol %s not found' % target, line=lineno) #prb = inliner.problematic(rawtext, rawtext, msg) if title: logger.warning("Doxygen symbol `{}` not found, rendering just link title".format(target)) node = nodes.inline(rawtext, title, **_options) else: logger.warning("Doxygen symbol `{}` not found, rendering as monospace".format(target)) node = nodes.literal(rawtext, target, **_options) return [node], [] def register_mcss(mcss_settings, **kwargs): rst.roles.register_local_role('dox', dox) init(input=mcss_settings['INPUT'], tagfiles=mcss_settings.get('M_DOX_TAGFILES', [])) # Below is only Pelican-specific functionality. If Pelican is not found, these # do nothing. def _pelican_configure(pelicanobj): settings = { # For backwards compatibility, the input directory is pelican's CWD 'INPUT': os.getcwd(), } for key in ['M_DOX_TAGFILES']: if key in pelicanobj.settings: settings[key] = pelicanobj.settings[key] register_mcss(mcss_settings=settings) def register(): # for Pelican from pelican import signals signals.initialized.connect(_pelican_configure)
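# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the plugin above): init() walks a Doxygen
# tagfile with ElementTree and fills symbol_mapping from <compound>/<member>
# entries. The snippet below shows that idea in isolation on a tiny,
# hand-written stand-in for a tagfile; real tagfiles are produced by Doxygen's
# GENERATE_TAGFILE option and contain many more entry kinds than handled here.
# ---------------------------------------------------------------------------
import xml.etree.ElementTree as ET

_SAMPLE_TAGFILE = """
<tagfile>
  <compound kind="class">
    <name>Foo::Bar</name>
    <filename>classFoo_1_1Bar.html</filename>
    <member kind="function">
      <name>baz</name>
      <anchor>a1b2c3</anchor>
      <arglist>()</arglist>
    </member>
  </compound>
</tagfile>
"""

def _build_symbol_map(xml_text, url_prefix=''):
    """Map fully qualified names to URLs, mirroring a small subset of init() above."""
    mapping = {}
    root = ET.fromstring(xml_text)
    for compound in root.findall('compound'):
        if compound.get('kind') not in ('class', 'struct', 'namespace'):
            continue
        name = compound.findtext('name')
        link = url_prefix + (compound.findtext('filename') or '')
        mapping[name] = link
        for member in compound.findall('member'):
            if member.get('kind') == 'function':
                anchor = member.findtext('anchor') or ''
                mapping['{}::{}()'.format(name, member.findtext('name'))] = link + '#' + anchor
    return mapping

# Example use:
#   _build_symbol_map(_SAMPLE_TAGFILE, 'https://example.com/docs/')
#   -> {'Foo::Bar': 'https://example.com/docs/classFoo_1_1Bar.html',
#       'Foo::Bar::baz()': 'https://example.com/docs/classFoo_1_1Bar.html#a1b2c3'}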
mit
-2,459,522,413,668,937,700
42.369792
179
0.59169
false
4.108041
false
false
false
lliss/tr-55
tr55/water_quality.py
1
1621
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division

from tr55.tablelookup import lookup_load, lookup_nlcd


def get_volume_of_runoff(runoff, cell_count, cell_resolution):
    """
    Calculate the volume of runoff over the entire modeled area

    Args:
        runoff (number): Q from TR55, averaged amount of runoff
        over a number of cells.

        cell_count (integer): The number of cells included in the area

        cell_resolution (number): The size in square meters that a cell
        represents

    Returns:
        The volume of runoff in liters over the total area of interest
    """
    # Runoff is in inches, so convert to meters, which are the units of the
    # cell area, and compute the meter-cells in the group. Multiply by the
    # cell resolution to get the runoff volume in cubic meters.
    inch_to_meter = 0.0254

    runoff_m = runoff * inch_to_meter
    meter_cells = runoff_m * cell_count
    volume_cubic_meters = meter_cells * cell_resolution

    liters = volume_cubic_meters * 1000

    return liters


def get_pollutant_load(use_type, pollutant, runoff_liters):
    """
    Calculate the pollutant load over a particular land use type given an
    amount of runoff generated on that area and an event mean concentration
    of the pollutant. Returns the pollutant load in lbs.
    """
    mg_per_kg = 1000000
    lbs_per_kg = 2.205

    nlcd = lookup_nlcd(use_type)
    emc = lookup_load(nlcd, pollutant)

    load_mg_l = emc * runoff_liters

    return (load_mg_l / mg_per_kg) * lbs_per_kg
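# ---------------------------------------------------------------------------
# Illustrative worked example (not part of the module above): a quick sanity
# check of the unit conversion performed by get_volume_of_runoff(), using
# hypothetical inputs of 1 inch of runoff over 100 cells at 900 m^2 per cell
# (i.e. a 30 m grid, a common raster resolution; these numbers are assumptions
# chosen for illustration, not project test data).
# ---------------------------------------------------------------------------
def _example_volume_check():
    """Sanity-check the inch -> liter conversion with hypothetical inputs."""
    liters = get_volume_of_runoff(runoff=1.0, cell_count=100, cell_resolution=900)
    # 1 in = 0.0254 m; 0.0254 m * 100 cells * 900 m^2 * 1000 L/m^3 = 2,286,000 L
    assert abs(liters - 2286000.0) < 1e-6
    return liters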
apache-2.0
-5,661,412,742,735,621,000
29.018519
79
0.679827
false
3.562637
false
false
false
examachine/pisi
pisi/data/component.py
1
7983
# -*- coding: utf-8 -*- # # Copyright (C) 2005, TUBITAK/UEKAE # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation; either version 3 of the License, or (at your option) # any later version. # # Please read the COPYING file. # # Author: Eray Ozkural <eray at pardus.org.tr> import gettext __trans = gettext.translation('pisi', fallback=True) _ = __trans.ugettext import pisi import pisi.context as ctx import pisi.exml.xmlfile as xmlfile import pisi.exml.autoxml as autoxml from pisi.db.itembyrepo import ItemByRepoDB class Error(pisi.Error): pass __metaclass__ = autoxml.autoxml class Distribution(xmlfile.XmlFile): __metaclass__ = autoxml.autoxml tag = "PISI" t_SourceName = [autoxml.Text, autoxml.mandatory] # name of distribution (source) t_Description = [autoxml.LocalText, autoxml.mandatory] t_Version = [autoxml.Text, autoxml.optional] t_Type = [autoxml.Text, autoxml.mandatory] t_Dependencies = [ [autoxml.Text], autoxml.optional, "Dependencies/Distribution"] t_BinaryName = [autoxml.Text, autoxml.optional] # name of repository (binary distro) t_Architecture = [autoxml.Text, autoxml.optional] # architecture identifier class Component(xmlfile.XmlFile): "representation for component declarations" __metaclass__ = autoxml.autoxml tag = "PISI" t_Name = [autoxml.String, autoxml.mandatory] # fully qualified name # component name in other languages, for instance in Turkish # LocalName for system.base could be sistem.taban or "Taban Sistem", # this could be useful for GUIs t_LocalName = [autoxml.LocalText, autoxml.mandatory] # Information about the component t_Summary = [autoxml.LocalText, autoxml.mandatory] t_Description = [autoxml.LocalText, autoxml.mandatory] #t_Icon = [autoxml.Binary, autoxml.mandatory] # Dependencies to other components t_Dependencies = [ [autoxml.String], autoxml.optional, "Dependencies/Component"] # the parts of this component. # to be filled by the component database, thus it is optional. t_Packages = [ [autoxml.String], autoxml.optional, "Parts/Package"] t_Sources = [ [autoxml.String], autoxml.optional, "Parts/Source"] # TODO: this is probably not necessary since we use fully qualified # module names (like in Java) #t_PartOf = [autoxml.Text, autoxml.mandatory] #FIXME: recursive declarations do not work! 
#class ComponentTree(xmlfile.XmlFile): # "index representation for the component structure" # # __metaclass__ = autoxml.autoxml # # tag = "Component" # # t_Name = [autoxml.Text, autoxml.mandatory] # fully qualified name # #t_Icon = [autoxml.Binary, autoxml.mandatory] # t_Dependencies = [ [autoxml.Text], autoxml.optional, "Component"] # #t_Parts = [ [pisi.component.ComponentTree], autoxml.optional, "Component"] class ComponentDB(object): """a database of components""" #FIXME: we might need a database per repo in the future def __init__(self): self.d = ItemByRepoDB('component') def close(self): self.d.close() def destroy(self): self.d.destroy() def has_component(self, name, repo = pisi.db.itembyrepo.repos, txn = None): #name = shelve.LockedDBShelf.encodekey(name) name = str(name) return self.d.has_key(name, repo, txn) def get_component(self, name, repo=None, txn = None): try: return self.d.get_item(name, repo, txn=txn) except pisi.db.itembyrepo.NotfoundError, e: raise Error(_('Component %s not found') % name) def get_component_repo(self, name, repo=None, txn = None): #name = shelve.LockedDBShelf.encodekey(name) try: return self.d.get_item_repo(name, repo, txn=txn) except pisi.db.itembyrepo.NotfoundError, e: raise Error(_('Component %s not found') % name) def get_union_comp(self, name, txn = None, repo = pisi.db.itembyrepo.repos ): """get a union of all repository components packages, not just the first repo in order. get only basic repo info from the first repo""" def proc(txn): s = self.d.d.get(name, txn=txn) pkgs = set() srcs = set() for repostr in self.d.order(repo = repo): if s.has_key(repostr): pkgs |= set(s[repostr].packages) srcs |= set(s[repostr].sources) comp = self.get_component(name) comp.packages = list(pkgs) comp.sources = list(srcs) return comp return self.d.txn_proc(proc, txn) def list_components(self, repo=None): return self.d.list(repo) def update_component(self, component, repo, txn = None): def proc(txn): if self.has_component(component.name, repo, txn): # preserve the list of packages component.packages = self.d.get_item(component.name, repo, txn).packages self.d.add_item(component.name, component, repo, txn) self.d.txn_proc(proc, txn) def add_package(self, component_name, package, repo, txn = None): def proc(txn): assert component_name if self.has_component(component_name, repo, txn): component = self.get_component(component_name, repo, txn) else: component = Component( name = component_name ) if not package in component.packages: component.packages.append(package) self.d.add_item(component_name, component, repo, txn) # update self.d.txn_proc(proc, txn) def remove_package(self, component_name, package, repo = None, txn = None): def proc(txn, repo): if not self.has_component(component_name, repo, txn): raise Error(_('Information for component %s not available') % component_name) if not repo: repo = self.d.which_repo(component_name, txn=txn) # get default repo then component = self.get_component(component_name, repo, txn) if package in component.packages: component.packages.remove(package) self.d.add_item(component_name, component, repo, txn) # update ctx.txn_proc(lambda x: proc(txn, repo), txn) def add_spec(self, component_name, spec, repo, txn = None): def proc(txn): assert component_name if self.has_component(component_name, repo, txn): component = self.get_component(component_name, repo, txn) else: component = Component( name = component_name ) if not spec in component.sources: component.sources.append(spec) self.d.add_item(component_name, component, repo, txn) # 
update self.d.txn_proc(proc, txn) def remove_spec(self, component_name, spec, repo = None, txn = None): def proc(txn, repo): if not self.has_component(component_name, repo, txn): raise Error(_('Information for component %s not available') % component_name) if not repo: repo = self.d.which_repo(component_name, txn=txn) # get default repo then component = self.get_component(component_name, repo, txn) if spec in component.sources: component.sources.remove(spec) self.d.add_item(component_name, component, repo, txn) # update ctx.txn_proc(lambda x: proc(txn, repo), txn) def clear(self, txn = None): self.d.clear(txn) def remove_component(self, name, repo = None, txn = None): name = str(name) self.d.remove_item(name, repo, txn) def remove_repo(self, repo, txn = None): self.d.remove_repo(repo, txn=txn)
gpl-3.0
-2,851,246,268,599,992,300
37.196172
95
0.629463
false
3.706128
false
false
false
timmartin/ScanManager
setup.py
1
1198
from setuptools import setup from codecs import open from os import path import glob here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() setup( name="scan_manager", version="0.0.1", description="GUI app for collating images produced by a document scanner", long_description=long_description, url="https://github.com/timmartin/ScanManager", author="Tim Martin", author_email="[email protected]", license="MIT", classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: End Users/Desktop', 'Topic :: Office/Business', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', ], keywords="scanner", packages=['scan_manager'], install_requires=['PySide~=1.2.4', 'fpdf~=1.7.2'], data_files=[('images', glob.glob('images/*.svg'))], entry_points={ 'console_scripts': [ 'scan_manager=scan_manager:main' ] } )
mit
-2,133,402,301,166,609,200
21.603774
78
0.606845
false
3.815287
false
false
false
pculture/unisubs
apps/teams/workflows/teamworkflows.py
1
14051
# Amara, universalsubtitles.org # # Copyright (C) 2014 Participatory Culture Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see # http://www.gnu.org/licenses/agpl-3.0.html. """ Team Workflows ============== Team workflows are ways for teams to get their subtitling work done. Team workflows compliment the :doc:`subtitle-workflows` and add team-specific features. Team workflows are responsible for: - Providing a SubtitleWorkflow for team videos - Handling the workflow settings page - Handling the dashboard page - Creating extra tabs or the teams section .. autoclass:: TeamWorkflow :members: label, dashboard_view, workflow_settings_view, setup_team, get_subtitle_workflow, extra_pages, extra_settings_pages .. autoclass:: TeamPage .. autoclass:: teams.workflows.old.workflow.OldTeamWorkflow """ from collections import namedtuple from django.urls import reverse from django.shortcuts import render from django.template.loader import render_to_string from django.utils.safestring import mark_safe from django.utils.translation import ungettext, ugettext as _, ugettext_lazy from activity.models import ActivityRecord from videos.models import Video from subtitles.models import SubtitleLanguage from teams import experience from utils.behaviors import DONT_OVERRIDE from utils.pagination import AmaraPaginatorFuture from utils.text import fmt class TeamWorkflow(object): label = NotImplemented """Human-friendly name for this workflow. This is what appears on the team creation form. """ dashboard_view = NotImplemented member_view = NotImplemented """ view function for the dashboard page. """ user_dashboard_extra = None """ Team-specific extra data to render in user dashboard page. """ workflow_settings_view = NotImplemented """ view function for the workflow settings page. """ has_workflow_settings_page = False has_subtitle_visibility_setting = False def __init__(self, team): self.team = team def setup_team(self): """Do any additional setup for newly created teams.""" pass def get_subtitle_workflow(self, team_video): """Get the SubtitleWorkflow for a video with this workflow. """ raise NotImplementedError() def extra_pages(self, user): """Get extra team pages to handle this workflow. These pages will be listed as tabs in the team section. Workflows will typically use this for things like dashboard pages. Args: user -- user viewing the page Returns: list of :class:`TeamPage` objects """ return [] def extra_settings_pages(self, user): """Get extra team settings pages to handle this workflow. This works just like extra_pages(), but the pages will show up as tabs under the settings section. 
Args: user -- user viewing the page Returns: list of :class:`TeamPage` objects """ return [] def team_page(self, name, title, view_name): """Convenience function to create an TeamPage object This method automatically generates the URL from view_name using reverse() """ url = reverse(view_name, kwargs={'slug': self.team.slug}) return TeamPage(name, title, url) def video_page_customize(self, request, video): """Add extra content to the video page when viewing from the context of a team.""" return DONT_OVERRIDE def subtitles_page_customize(self, request, video, subtitle_language): """Add extra content to the subtitles page when viewing from the context of a team.""" return DONT_OVERRIDE def team_video_page_extra_tabs(self, request): """Add extra sub tabs to the team video page. These appear near the top of the page. """ return [] def management_page_extra_tabs(self, user, *args, **kwargs): """Add extra sub tabs to the team management page. These appear near the top of the page. """ return [] def team_video_page_default(self, request): extra_tabs = self.team_video_page_extra_tabs(request) if extra_tabs: return extra_tabs[0].url else: return reverse("teams:videos", kwargs={ 'slug': self.team.slug, }) def management_page_default(self, user): extra_tabs = self.management_page_extra_tabs(user) if extra_tabs: return extra_tabs[0].url else: return reverse("teams:manage_videos", kwargs={ 'slug': self.team.slug, }) def video_management_add_counts(self, videos): """Add the subtitle counts for the videos management page By default we add the number of completed subtitles, but other workflows may want to add other/different counts. For each video you can set the counts attribute to a list of strings. Each string should describe a count of something, like the number of completed subtitles. The number should be wrapped in a <strong> tag (and the whole thing should be wrapped in a mark_safe() call). You can also set the counts2 attribute to create a second line of counts. Args: videos -- List of Video instances. """ counts = SubtitleLanguage.count_completed_subtitles(videos) for v in videos: incomplete_count, completed_count = counts[v.id] v.counts = [] if completed_count > 0: msg = ungettext( (u'<strong>%(count)s</strong> subtitle completed'), (u'<strong>%(count)s</strong> subtitles completed'), completed_count) v.counts.append(mark_safe(fmt(msg, count=completed_count))) if incomplete_count > 0: msg = ungettext( (u'<strong>%(count)s</strong> subtitle started'), (u'<strong>%(count)s</strong> subtitles started'), incomplete_count) v.counts.append(mark_safe(fmt(msg, count=incomplete_count))) def video_management_alter_context_menu(self, video, menu): """Alter the context menu for the video management page.""" def video_management_extra_forms(self): """Add extra forms to the video management page """ return [] def activity_type_filter_options(self): """ Get possible activity type filter values This is used on the activity page to populate the type dropdown. 
""" return [ 'video-added', 'comment-added', 'version-added', 'video-url-added', 'member-joined', 'member-left', 'video-title-changed', 'video-deleted', 'video-url-edited', 'video-url-deleted', 'video-moved-from-team', 'video-moved-to-team', 'team-settings-changed', 'language-changed', ] def customize_permissions_table(self, team, form, permissions_table): """ Customize the table show on the permissions settings page """ pass # these can be used to customize the content in the project/language # manager pages def render_project_page(self, request, team, project, page_data): page_data['videos'] = (team.videos .filter(teamvideo__project=project) .order_by('-id'))[:5] return render(request, 'new-teams/project-page.html', page_data) def render_all_languages_page(self, request, team, page_data): return render(request, 'new-teams/all-languages-page.html', page_data) def render_language_page(self, request, team, language_code, page_data): qs = (self.team.videos .filter(primary_audio_language_code=language_code) .order_by('-id')) page_data['videos']= qs[:5] return render(request, 'new-teams/language-page.html', page_data) def fetch_member_history(self, user, query=None): """ Fetch the member subtitling history data for the dashboard/profile page This method should return a queryset of items to display. The items can be any django model. They will get rendered by the template in the member_history_template attribute. """ qs = (ActivityRecord.objects .for_team(self.team) .filter(user=user) .order_by('-created')) if query: qs = qs.filter(video__in=Video.objects.search(query)) return qs member_history_template = 'future/teams/member-subtitling-history.html' """ Template that can render the results from member_history. It will be passed the following variables: - team: Team the history is for - user: User the history is for - member_history: Single page from the queryset returned by fetch_member_history() """ member_history_header = ugettext_lazy('Recent activity') def get_experience_column_label(self): """ Team members page label for the experience coluumn. """ return _('Subtitles completed') def add_experience_to_members(self, page): """ Add experience attributes to a list of members We call this for the team members page to populate the experience column (usually subtitles completed). This method should: - Set the experience attribute to each member to a TeamExperience object - Optionally, set the experience_extra attribute, which is a list of extra experience to show in the expanded view. 
""" subtitles_completed = experience.get_subtitles_completed(page) for member, count in zip(page, subtitles_completed): member.experience_count = count # map type codes to subclasses _type_code_map = {} # map API codes to type codes _api_code_map = {} @classmethod def get_workflow(cls, team): """Get a TeamWorkflow subclass for a team.""" klass = cls._type_code_map[team.workflow_type] return klass(team) @classmethod def get_choices(cls): choices = [(type_code, subclass.label) for (type_code, subclass) in cls._type_code_map.items()] cls._sort_choices(choices) return choices @classmethod def get_api_choices(cls): choices = [ (type_code, api_code) for (api_code, type_code) in cls._api_code_map.items() ] cls._sort_choices(choices) return choices @classmethod def _sort_choices(cls, choices): """Sort workflow type choices We sort choices so that: - unisubs choices are first, then extensions (unisubs choices are 1-char) - after that it's sorted alphabeticaly by code """ choices.sort(key=lambda (code, _): (len(code), code)) @classmethod def register(cls, type_code, api_code=None): """Register a TeamWorkflow subclass. Calling this class method will enable it for teams whose workflow_type value is type_code Args: type_code: string code value for this workflow. Workflows in the unisubs repository should be 1 char long. Workflows on other repositories should be 2 chars with the first char being unique to the repository. api_code: API code value for this workflow. Pass in a non-None value to enable creating this workflow via the API """ TeamWorkflow._type_code_map[type_code] = cls if api_code is not None: TeamWorkflow._api_code_map[api_code] = type_code TeamPage = namedtuple('TeamPage', 'name title url') """Represents a page in the team's section Attributes: name: machine-name for this tuple. This is value to use for current in the _teams/tabs.html template title: human friendly tab title url: URL for the page """ TeamExperience = namedtuple('TeamExperience', 'label icon count') """Used to list experience counts on the members directory By default, we show subtitles completed, but other workflows might want to display different things, like assignments completed, etc. """ class TeamPermissionsRow(object): """ Used to display the checks/Xs on the permissions settings page """ def __init__(self, label, admins, managers, contributors, setting_name=None): self.label = label self.admins = admins self.managers = managers self.contributors = contributors self.setting_name = setting_name @classmethod def from_setting(cls, label, form, setting_name): value = form[setting_name].value() permissions = form[setting_name].field.widget.decompress(value) # some fields only have settings for admins/managers. Make sure to # extend permissions to 3 items in that case permissions.extend([False] * (3 - len(permissions))) return cls(label, *permissions, setting_name=setting_name)
agpl-3.0
7,985,709,267,886,165,000
34.1275
90
0.632197
false
4.395058
false
false
false
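A minimal sketch of how a concrete workflow plugs into the registry described by TeamWorkflow.register() above, assuming the module is importable as teams.workflows.teamworkflows. The subclass, the type code "X", the api code and the URL are invented for illustration; a real workflow would also have to supply the dashboard and settings views the base class leaves as NotImplemented.

from teams.workflows.teamworkflows import TeamWorkflow, TeamPage


class ExampleTeamWorkflow(TeamWorkflow):
    label = 'Example workflow'

    def get_subtitle_workflow(self, team_video):
        # A real subclass returns the SubtitleWorkflow to use for team videos.
        raise NotImplementedError()

    def extra_pages(self, user):
        # One extra tab in the team section, using a hard-coded URL here
        # instead of self.team_page()/reverse() so no view name is assumed.
        return [TeamPage('example', 'Example',
                         '/teams/%s/example/' % self.team.slug)]


# Teams whose workflow_type is "X" will now get ExampleTeamWorkflow back
# from TeamWorkflow.get_workflow(team).
ExampleTeamWorkflow.register('X', api_code='example')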
commonsense/divisi
csc/divisi/recycling_set.py
1
3241
from ordered_set import OrderedSet from priodict import priorityDictionary class RecyclingSet(OrderedSet): __slots__ = ['items', 'indices', 'index', 'indexFor', '__contains__', '__getitem__', '__len__', 'count', 'maxsize', 'drop_listeners', 'priority'] def __init__(self, maxsize, origitems=None): self.count = 0 self.maxsize = maxsize self.priority = priorityDictionary() self.drop_listeners = [] OrderedSet.__init__(self, origitems) def __getstate__(self): return (self.items, self.priority, self.maxsize, self.count) def __setstate__(self, state): items, self.priority, self.maxsize, self.count = state OrderedSet.__setstate__(self, items) def add(self, key): """ Add an item to the set (unless it's already there), returning its index. Drop an old item if necessary. ``None`` is never an element of an OrderedSet. """ if key in self.indices: self.touch(key) return self.indices[key] n = len(self.items) if n < self.maxsize: self.items.append(key) if key is not None: self.indices[key] = n self.touch(key) return n else: newindex = self.drop_oldest() self.items[newindex] = key self.indices[key] = newindex self.touch(key) return newindex append = add def __delitem__(self, n): """ Deletes an item from the RecyclingSet. """ oldkey = self.items[n] del self.indices[oldkey] self.items[n] = None self.announce_drop(n, oldkey) def drop_oldest(self): """ Drop the least recently used item, to make room for a new one. Return the number of the slot that just became free. """ slot = self.priority.smallest() oldest = self.items[slot] del self[slot] return slot def listen_for_drops(self, callback): """ If an object needs to know when a slot becomes invalid because its key gets dropped, it should register a callback with listen_for_drops. """ self.drop_listeners.append(callback) def announce_drop(self, index, key): """ Tell all registered listeners that we dropped a key. """ print "dropping key:", key for listener in self.drop_listeners: listener(index, key) def touch(self, key): """ Remember that this key is useful. """ if key not in self: raise IndexError else: self.count += 1 self.priority[self.index(key, False)] = self.count def index(self, key, touch=True): if touch: self.touch(key) return self.indices[key] indexFor = index def __contains__(self, key): return key in self.indices def __getitem__(self, key): if key < self.maxsize and key >= len(self.items): return None return self.items[key] def __len__(self): return len(self.indices) def _setup_quick_lookup_methods(self): pass
gpl-3.0
-7,574,904,923,457,729,000
29.28972
78
0.558161
false
4.187339
false
false
false
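A small Python 2 driver for the eviction behaviour documented in RecyclingSet above. It assumes recycling_set.py (with its ordered_set and priodict companions) is importable from the working directory; the keys are arbitrary.

from recycling_set import RecyclingSet


def on_drop(index, key):
    print("slot %d freed, %r was dropped" % (index, key))

rs = RecyclingSet(maxsize=3)
rs.listen_for_drops(on_drop)

for key in ['cat', 'dog', 'fish', 'bird']:
    slot = rs.add(key)        # adding 'bird' evicts 'cat', the oldest entry
    print("%s -> slot %d" % (key, slot))

rs.touch('dog')               # mark 'dog' as recently used so it survives longer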
epuzanov/ZenPacks.community.HPMon
ZenPacks/community/HPMon/modeler/plugins/community/snmp/HPFanMap.py
1
3236
################################################################################ # # This program is part of the HPMon Zenpack for Zenoss. # Copyright (C) 2008, 2009, 2010, 2011 Egor Puzanov. # # This program can be used under the GNU General Public License version 2 # You can find full information here: http://www.zenoss.com/oss # ################################################################################ __doc__="""HPFanMap HPFanMap maps the cpqHeFltTolFanTable table to fab objects $Id: HPFanMap.py,v 1.3 2011/01/02 19:01:17 egor Exp $""" __version__ = '$Revision: 1.3 $'[11:-2] from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetTableMap class HPFanMap(SnmpPlugin): """Map HP/Compaq insight manager Fans table to model.""" maptype = "HPFanMap" modname = "ZenPacks.community.HPMon.HPFan" relname = "fans" compname = "hw" snmpGetTableMaps = ( GetTableMap('cpqHeFltTolFanTable', '.1.3.6.1.4.1.232.6.2.6.7.1', { '.3': '_locale', '.4': '_present', '.5': 'type', '.9': 'status', '.12': '_rpm', } ), ) typemap = {1: 'other', 2: 'Tach Output', 3: 'Spin Detect', } localemap = {1: 'other', 2: 'unknown', 3: 'system', 4: 'systemBoard', 5: 'ioBoard', 6: 'cpu', 7: 'memory', 8: 'storage', 9: 'removableMedia', 10: 'powerSupply', 11: 'ambient', 12: 'chassis', 13: 'bridgeCard', 14: 'managementBoard', 15: 'backplane', 16: 'networkSlot', 17: 'bladeSlot', 18: 'virtual', } def process(self, device, results, log): """collect snmp information from this device""" log.info('processing %s for device %s', self.name(), device.id) getdata, tabledata = results rm = self.relMap() localecounter = {} for oid, fan in tabledata.get('cpqHeFltTolFanTable', {}).iteritems(): try: om = self.objectMap(fan) if om._present < 3: continue if not hasattr(om, '_rpm'): om.modname = "ZenPacks.community.HPMon.HPsdFan" om.snmpindex = oid.strip('.') om.type = self.typemap.get(getattr(om,'type',1),self.typemap[1]) if om._locale in localecounter: localecounter[om._locale] = localecounter[om._locale] + 1 else: localecounter[om._locale] = 1 om.id = self.prepId("%s%d" % (self.localemap.get( getattr(om, '_locale', 1), self.localemap[1]), localecounter[om._locale])) except AttributeError: continue rm.append(om) return rm
gpl-2.0
-2,512,895,411,321,758,700
33.425532
82
0.439122
false
4.127551
false
false
false
juvvadi/keystone
keystone/logic/types/auth.py
1
5621
# Copyright (c) 2010-2011 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABCMeta
from datetime import datetime
import json
from lxml import etree

import keystone.logic.types.fault as fault


class PasswordCredentials(object):
    """Credentials based on username, password, and (optional) tenant_id.

    To handle multiple tokens for a user depending on tenants,
    tenant_id is mandatory.
    """

    def __init__(self, username, password, tenant_id):
        self.username = username
        self.password = password
        self.tenant_id = tenant_id

    @staticmethod
    def from_xml(xml_str):
        try:
            dom = etree.Element("root")
            dom.append(etree.fromstring(xml_str))
            root = dom.find("{http://docs.openstack.org/idm/api/v1.0}"
                            "passwordCredentials")
            if root == None:
                raise fault.BadRequestFault("Expecting passwordCredentials")
            username = root.get("username")
            if username == None:
                raise fault.BadRequestFault("Expecting a username")
            password = root.get("password")
            if password == None:
                raise fault.BadRequestFault("Expecting a password")
            tenant_id = root.get("tenantId")
            #--for multi-token handling--
            if tenant_id == None:
                raise fault.BadRequestFault("Expecting tenant")
            # ----
            return PasswordCredentials(username, password, tenant_id)
        except etree.LxmlError as e:
            raise fault.BadRequestFault("Cannot parse password credentials",
                                        str(e))

    @staticmethod
    def from_json(json_str):
        try:
            obj = json.loads(json_str)
            if not "passwordCredentials" in obj:
                raise fault.BadRequestFault("Expecting passwordCredentials")
            cred = obj["passwordCredentials"]
            if not "username" in cred:
                raise fault.BadRequestFault("Expecting a username")
            username = cred["username"]
            if not "password" in cred:
                raise fault.BadRequestFault("Expecting a password")
            password = cred["password"]
            if "tenantId" in cred:
                tenant_id = cred["tenantId"]
            else:
                #--for multi-token handling--
                # tenantId is absent, so there is no tenant_id to check;
                # reject the request outright.
                raise fault.BadRequestFault("Expecting a tenant")
            return PasswordCredentials(username, password, tenant_id)
        except (ValueError, TypeError) as e:
            raise fault.BadRequestFault("Cannot parse password credentials",
                                        str(e))


class Token(object):
    "An auth token."

    def __init__(self, expires, token_id):
        self.expires = expires
        self.token_id = token_id


class Group(object):
    "A group, optionally belonging to a tenant."

    def __init__(self, group_id, tenant_id):
        self.tenant_id = tenant_id
        self.group_id = group_id


class Groups(object):
    "A collection of groups."

    def __init__(self, values, links):
        self.values = values
        self.links = links


class User(object):
    "A user."

    def __init__(self, username, tenant_id, groups):
        self.username = username
        self.tenant_id = tenant_id
        self.groups = groups


class AuthData(object):
    "Authentication info returned upon successful login."
def __init__(self, token, user): self.token = token self.user = user def to_xml(self): dom = etree.Element("auth", xmlns="http://docs.openstack.org/idm/api/v1.0") token = etree.Element("token", expires=self.token.expires.isoformat()) token.set("id", self.token.token_id) user = etree.Element("user", username=self.user.username, tenantId=str(self.user.tenant_id)) groups = etree.Element("groups") for group in self.user.groups.values: g = etree.Element("group", tenantId=group.tenant_id) g.set("id", group.group_id) groups.append(g) user.append(groups) dom.append(token) dom.append(user) return etree.tostring(dom) def to_json(self): token = {} token["id"] = self.token.token_id token["expires"] = self.token.expires.isoformat() user = {} user["username"] = self.user.username user["tenantId"] = self.user.tenant_id group = [] for g in self.user.groups.values: grp = {} grp["tenantId"] = g.tenant_id grp["id"] = g.group_id group.append(grp) groups = {} groups["group"] = group user["groups"] = groups auth = {} auth["token"] = token auth["user"] = user ret = {} ret["auth"] = auth return json.dumps(ret)
apache-2.0
4,376,412,600,012,934,700
32.064706
76
0.569294
false
4.412088
false
false
false
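A sketch of the request body that PasswordCredentials.from_json() accepts and of the response AuthData.to_json() produces, assuming the keystone tree above is on the Python 2 path; the credentials, token id and expiry are made up.

import json
from datetime import datetime, timedelta

from keystone.logic.types.auth import (PasswordCredentials, Token, Group,
                                       Groups, User, AuthData)

body = json.dumps({"passwordCredentials": {"username": "joeuser",
                                           "password": "secrete",
                                           "tenantId": "1234"}})
creds = PasswordCredentials.from_json(body)

token = Token(datetime.utcnow() + timedelta(hours=24), "887665443383838")
user = User(creds.username, creds.tenant_id,
            Groups([Group("ADMIN", creds.tenant_id)], []))

# Serializes to {"auth": {"token": {...}, "user": {...}}}
print(AuthData(token, user).to_json())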
nickmarton/Paxos-Distributed-Calendar
Classes/Node.py
1
24814
"""Node (User) Class for Paxos Calendar.""" import os import sys import time import thread import pickle import socket import logging from Bully import bully_algorithm from Appointment import Appointment from Calendar import Calendar from Proposer import Proposer from Acceptor import Acceptor class Node(object): """ Node class. node_id: Unique ID used for Node identification as well as for unique proposal number generation; int. calendar: Calendar object which contains Appointment objects. proposer: Proposer object used in Synod Algorithm; passed node_id so it can create unique proposal numbers. acceptor: Acceptor object used in Synod Algorithm. log: Dictionary of Calendar objects used in Paxos Algorithm; intially empty, Synod Algorithm is used to fill each entry of log where integer keys represents slots and the values being the Calendar agreed upon via conscensus. leader: The current leader elected via the bully algorithm; initially None and updated every ~6 seconds. """ _ip_filename = "./IP_translations.txt" def __init__(self, node_id): """Construct a Node object.""" if type(node_id) != int: raise TypeError("node_id must be an int") if node_id < 0: raise ValueError("node id must be a nonnegative integer") try: Node._ip_table = Node._make_ip_table() except IOError: raise IOError("Node-to-IP translation file: " + ip_filename + " not found.") self._node_id = node_id self._calendar = Calendar() self._proposer = Proposer(node_id,self._ip_table) self._acceptor = Acceptor(self._ip_table) self._log = {} self._leader = None self._terminate = False self._is_Node = True def insert(self, appointment): """Insert an Appointment into this Node's Calendar.""" #First create new Calendar with new appointment from copy import deepcopy new_calendar = deepcopy(self._calendar) new_calendar += appointment if self._log.keys(): next_log_slot = max(self._log.keys()) + 1 else: next_log_slot = 0 #Then ask leader to propose the new Calendar try: leader_IP, leader_TCP, leader_UDP = self._ip_table[self._leader] proposal_message = pickle.dumps( ("propose", Calendar.serialize(new_calendar), next_log_slot)) udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) udp_socket.sendto(proposal_message, (leader_IP, leader_UDP)) udp_socket.close() except KeyError as excinfo: print "Unable to find leader, waiting until one is selected..." while self._leader == None: pass print "Found leader, continuing...\n" self.insert(appointment) def delete(self, appointment): """Delete an Appointment in this Node's Calendar.""" #First create new Calendar without appointment from copy import deepcopy new_calendar = Calendar() for self_appointment in self._calendar: if self_appointment != appointment: new_calendar += deepcopy(self_appointment) if self._log.keys(): next_log_slot = max(self._log.keys()) + 1 else: next_log_slot = 0 #Then ask leader to propose the new Calendar try: leader_IP, leader_TCP, leader_UDP = self._ip_table[self._leader] proposal_message = pickle.dumps( ("propose", Calendar.serialize(new_calendar), next_log_slot)) udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) udp_socket.sendto(proposal_message, (leader_IP, leader_UDP)) udp_socket.close() except KeyError as excinfo: print "Unable to find leader, waiting until one is selected..." while self._leader == None: pass print "Found leader, continuing...\n" self.delete(appointment) def paxos(self): """Engage this Node in Paxos algorithm.""" def _parse_message(message): """ Parse UDP pickled tuple message. Self is available from closure. 
""" valid_message_types = [ "propose", "prepare", "promise", "accept", "ack", "commit"] message_type, message_args = message[0], message[1:] #syntactic checking if message_type not in valid_message_types: logging.error("Invalid message type") return if 3 <= len(message_args) <= 4: arg_0_is_int = type(message_args[0]) == int arg_0_is_calendar = hasattr(message_args[0], "_is_Calendar") arg_1_is_calendar = hasattr(message_args[1], "_is_Calendar") if not arg_0_is_calendar: arg_0_is_None = message_args[0] == None else: arg_0_is_None = False if not arg_1_is_calendar: arg_1_is_None = message_args[1] == None else: arg_1_is_None = False #handle prepare messages if message_type == "propose": if arg_0_is_calendar: #If in this conditional, we are the leader. #First we have to fill any empty log slots #''' log_slots = self._log.keys() proposed_slot = message[2] for i in range(proposed_slot): if i not in self._log.keys(): #dummy_message = ("propose", Calendar(), i, self._node_id) #self._proposer._command_queue.append(dummy_message) #time.sleep(.1) slot_calendar = self._acceptor._accVals[i] self._log[i] = slot_calendar #''' #Then we can add this new proposal self._proposer._command_queue.append(message) else: logging.error( "Propose message must be of form " "'propose' Calendar") #handle prepare messages elif message_type == "prepare": if arg_0_is_int: self._acceptor._command_queue.append(message) else: logging.error( "Prepare message must be of form 'prepare' int") #handle promise messages elif message_type == "promise": if (arg_0_is_int and arg_1_is_calendar) or (arg_0_is_None and arg_1_is_None): self._proposer._command_queue.append(message) else: logging.error( "Promise message must be of form " "'promise' int Calendar") #handle accept messages elif message_type == "accept": if arg_0_is_int and arg_1_is_calendar: self._acceptor._command_queue.append(message) else: print ' '.join([str(i) for i in message]) logging.error( "Accept message must be of form " "'accept' int Calendar") #handle ack messages elif message_type == "ack": if arg_0_is_int and arg_1_is_calendar: self._proposer._command_queue.append(message) else: logging.error( "Ack message must be of form " "'ack' int Calendar") #handle commit messages elif message_type == "commit": if arg_0_is_calendar: self._acceptor._command_queue.append(message) else: logging.error( "Commit message must be of form 'commit' Calendar") else: logging.error("Invalid message parameters") return def _learner(self): """Poll the Acceptor commits queue to update Node's log.""" while True: if self._acceptor._commits_queue: (log_slot, v) = self._acceptor._commits_queue.pop() self._log[log_slot] = v self._calendar = self._log[max(self._log.keys())] if self._terminate: break time.sleep(.001) def _shut_down(self): """.""" while True: if self._terminate: self._proposer._terminate = True self._acceptor._terminate = True break def _do_paxos(self): """Do Paxos algorithm for this Node.""" #Begin running the Acceptor and Proposer in the background thread.start_new_thread(self._proposer.start, ()) thread.start_new_thread(self._acceptor.start, ()) thread.start_new_thread(_learner, (self,)) thread.start_new_thread(_shut_down, (self,)) IP, UDP_PORT = '0.0.0.0', self._ip_table[self._node_id][2] sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind((IP, UDP_PORT)) while True: data, addr = sock.recvfrom(4096) # buffer size is 1024 bytes if data == "terminate": sock.close() break #Quick lookup of ID of sender from IP received sender_ID = filter( lambda row: row[1][0] == 
addr[0], self._ip_table.items())[0][0] message = pickle.loads(data) #bind sender_ID to message message = message + (sender_ID,) #construct deserailized version of message new_message = [] for field in message: if type(field) == str: try: deserialized_calendar = Calendar.deserialize(field) new_message.append(deserialized_calendar) except: new_message.append(field) else: new_message.append(field) new_message = tuple(new_message) _parse_message(new_message) thread.start_new_thread(_do_paxos, (self,)) def elect_leader(self, poll_time=6, timeout=3): """Engage this Node in leader selection.""" def _do_leader_election(self, poll_time, timeout): """Do leader election as new thread.""" IP, TCP_PORT = "0.0.0.0", self._ip_table[self._node_id][1] recv_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) recv_socket.bind((IP, TCP_PORT)) #backlog; 1 for each Node besides self recv_socket.listen(4) prev_leader = None while True: thread.start_new_thread(bully_algorithm, (self, recv_socket, timeout)) time.sleep(poll_time) if self._leader != prev_leader: logging.debug("NEW LEADER IS: " + str(self._leader)) prev_leader = self._leader if self._terminate: break recv_socket.close() thread.start_new_thread(_do_leader_election, (self, poll_time, timeout)) def terminate(self): """Initiate termination protocol; close all threads.""" #Set termination field self._terminate = True #Send special termination message to self s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) my_ip_info = self._ip_table[self._node_id] my_IP, my_UDP_PORT = my_ip_info[0], my_ip_info[2] s.sendto("terminate", (my_IP, my_UDP_PORT)) s.close() #Sleep for a second to ensure everything closes before main time.sleep(1) @staticmethod def save(Node, path="./", filename="state.pkl"): """Save this Node's log and Acceptor to stable storage.""" if not hasattr(Node, "_is_Node"): raise TypeError("Node parameter must be a Node object") if type(filename) != str or type(path) != str: raise TypeError("path and filename must be strings") if filename[-4:] != ".pkl": raise ValueError("filename must have .pkl extension") if not os.path.exists(path): raise ValueError("path provided does not exist") import pickle with open(path + filename, 'w') as f: state = (Node._node_id, Node._log, Node._acceptor) pickle.dump(state, f) @staticmethod def load(path="./", filename="state.pkl"): """ Load log and Acceptor from stable storage if path and filename exist. 
""" def _rebuild_calendar(node, log): """Rebuild the calendar of node by reconstructing it from log.""" #Get the latest entry in the log for most up-to-date Calendar node._calendar = log[max(log.keys())] if type(filename) != str or type(path) != str: raise TypeError("path and filename must be strings") if filename[-4:] != ".pkl": raise ValueError("filename must have .pkl extension") if not os.path.exists(path+filename): raise ValueError("path provided does not exist") with open(path + filename, 'r') as f: state = pickle.load(f) node_id, log, acceptor = state node = Node(node_id) node._log = log node._acceptor = acceptor _rebuild_calendar(node, log) return node @staticmethod def _make_ip_table(): """Create the ID-to-IP translation table used for socket connection.""" table = {} import re pattern = r"^\d+,\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3},\d{4},\d{5}$" with open(Node._ip_filename, "r") as f: for translation in f: match = re.match(pattern, translation.strip()) if not match: raise ValueError( "Every line in IP_translations.txt must be of " "form ID,IP") ID, IP, TCP_PORT, UDP_PORT, = translation.strip().split(',') table[int(ID)] = [IP, int(TCP_PORT), int(UDP_PORT)] return table @staticmethod def _parse_command(command, node): """Parse command provided, possibly involving provided node.""" def _do_show(argv, node): """Perform show command for debugging/user information.""" if len(argv) == 1: raise ValueError( "Invalid show argument; show needs argument " "{calendar,log,acceptor,proposer,all}") #Handle showing the calendar if argv[1] == "calendar": print node._calendar #Handle showing the log elif argv[1] == "log": print "Log:" #copy the log into a list ordered by slot number ordered_slots = sorted(node._log.items(), key=lambda x: x[0]) #if -short flag not thrown, print entire log if len(argv) == 2: for slot in ordered_slots: print "Slot " + str(slot[0]) + ' ' + str(slot[1]) #Short flag is thrown, just print names of Appointments in each #Calendar slot elif len(argv) == 3: if argv[2] == "-s": for slot in ordered_slots: log_string = "Slot " + str(slot[0]) + " Calendar: \t" log_string += ', '.join( slot[1].get_appointment_names()) print log_string print else: raise ValueError( "Invalid show arguments; Only flags \"-s\" " "permitted") #Bad number of arguments to show log else: raise ValueError( "Invalid show arguments; show log supports only a " "single optional flag argument \"-s\"") #Handle showing Node's Acceptor object elif argv[1] == "acceptor": print str(node._acceptor) + '\n' #Handle showing Node's Proposer object elif argv[1] == "proposer": print str(node._proposer) + '\n' #Handle printing entire state of Node elif argv[1] == "all": print "-" * 100 print "Node ID: " + str(node._node_id) _do_show(['show', 'calendar'], node) _do_show(['show', 'log', '-s'], node) _do_show(['show', 'acceptor'], node) _do_show(['show', 'proposer'], node) print "-" * 100 else: raise ValueError( "Invalid show argument; show needs argument " "{calendar,log,acceptor,proposer,all}") def _parse_appointment(argv): """Try to parse an Appointment object from given argv.""" generic_error_msg = "Invalid command; Schedule and cancel " + \ "commands must be of form: \n" + \ "{schedule,cancel} [Appointment name] " + \ "(user1,user2,...usern) (start_time,end_time) [day]" if len(argv) != 5: raise ValueError(generic_error_msg) name, participants, times, day = argv[1:] participants = participants[1:-1].split(",") try: participants = [int(user[4:]) for user in participants] except ValueError: raise ValueError( "Invalid command; 
participants must be of form " "(user1,user2,...,usern)") try: start, end = times[1:-1].split(',') except ValueError: raise ValueError( "Invalid command; times must be of form " "(start_time,end_time)") try: return Appointment(name, day, start, end, participants) except ValueError as excinfo: raise ValueError("Invalid command; " + excinfo.message) def _do_clear(): """Perform clear command via ASCI escape code.""" print(chr(27) + "[2J") argv = command.split() if not argv: return #If command is to clear, clear the screen if argv[0] == "clear": _do_clear() return #If command was to show something, do show if argv[0] == "show": try: _do_show(argv, node) except ValueError as excinfo: print excinfo print finally: return #If command is to schedule or cancel an Appointment, parse then #initiate Synod algorithm if argv[0] == "schedule": try: appointment = _parse_appointment(argv) for user in appointment._participants: node._ip_table[user] #determine if the Appointment the user is trying to schedule #is already in their Calendar or in conflict with some #Appointment in their Calendar conflict_cond = node._calendar._is_appointment_conflicting( appointment) in_cond = appointment in node._calendar #if it's not already in the Calendar and not in conflict with #any Appointment in it, begin Synod if not conflict_cond and not in_cond: node.insert(appointment) else: print "User scheduled appointment already in their " + \ "own Calendar or in conflict with their own " + \ "Calendar; ignoring.\n" except KeyError: print "User id is not in the IP table." except ValueError as excinfo: print excinfo print #fail-safe catch in case something fucks up and we don't know what except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info()[:] fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] print(exc_type, fname, exc_tb.tb_lineno) finally: return if argv[0] == "cancel": try: appointment = _parse_appointment(argv) if appointment in node._calendar: node.delete(appointment) else: print "User cancelled appointment not in their own " + \ "Calendar; ignoring.\n" except ValueError as excinfo: print excinfo print finally: return print "Invalid command; supported commands = {clear,show,schedule,cancel}" print def set_verbosity(verbose_level=3): """Set the level of verbosity of the Preprocessing.""" if not type(verbose_level) == int: raise TypeError("verbose_level must be an int") if verbose_level < 0 or verbose_level > 4: raise ValueError("verbose_level must be between 0 and 4") verbosity = [ logging.CRITICAL, logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG] logging.basicConfig( format='%(asctime)s:\t %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=verbosity[verbose_level]) def main(): """Quick tests.""" "schedule yaboi (user0,user1,user2,user3) (4:00pm,6:00pm) Friday" "schedule xxboi (user1,user3,user4) (1:30am,11:30am) Wednesday" "schedule beez (user0,user1,user2,user3) (4:00pm,6:00pm) Saturday" "schedule beez2 (user0,user1,user2,user3) (3:00pm,4:00pm) Saturday" "schedule zo (user1,user2,user3) (12:30pm,1:30pm) Friday" "schedule hamma (user1,user2,user3) (1:00am,1:30am) Friday" "cancel yaboi (user0,user1,user2,user3) (4:00pm,6:00pm) Friday" "cancel xxboi (user1,user3,user4) (1:30am,11:30am) Wednesday" a1 = Appointment("zo","Friday","12:30pm","1:30pm", [1, 2, 8]) a2 = Appointment("xxboi","Wednesday","1:30am","11:30am", [1, 4, 5]) a3 = Appointment("lol","saturday","11:30am","12:30pm", [1]) a4 = Appointment("yeee","MondAy","11:30am","12:30pm", [1]) a5 = 
Appointment("lolololol","Thursday","11:30am","12:30pm", [1]) c = Calendar() c1 = Calendar(a1) c2 = Calendar(a1, a2) c3 = Calendar(a1, a2, a3) c4 = Calendar(a1, a2, a3, a4) c5 = Calendar(a1, a2, a3, a4, a5) set_verbosity(4) N = Node(int(sys.argv[1])) ''' N._log[0] = c1 N._log[1] = c2 N._log[2] = c3 N._log[3] = c4 N._log[4] = c5 ''' N._calendar = c #try to load a previous state of this Node #''' try: N = Node.load() except ValueError: pass except IOError: pass #''' N.elect_leader(poll_time=6, timeout=3) N.paxos() print("@> Node Started") while True: message = raw_input('') if message == "quit": Node.save(N) N.terminate() break else: Node._parse_command(message, N) if __name__ == "__main__": main()
mit
-1,191,132,476,585,437,400
37.472868
97
0.503264
false
4.40746
false
false
false
uw-it-aca/course-dashboards
coursedashboards/migrations/0014_auto_20200911_2040.py
1
1189
# Generated by Django 2.1.15 on 2020-09-11 20:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('coursedashboards', '0013_auto_20190108_2238'), ] operations = [ migrations.CreateModel( name='CourseGradeAverage', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('curriculum', models.CharField(max_length=20)), ('course_number', models.PositiveSmallIntegerField()), ('grade', models.CharField(max_length=5, null=True)), ], options={ 'db_table': 'CourseGradeAverage', }, ), migrations.AlterField( model_name='course', name='course_title', field=models.CharField(default='', max_length=64), ), migrations.AlterField( model_name='term', name='quarter', field=models.CharField(choices=[('winter', 'Winter'), ('spring', 'Spring'), ('summer', 'Summer'), ('autumn', 'Autumn')], max_length=6), ), ]
apache-2.0
5,090,606,726,105,432,000
32.971429
147
0.544996
false
4.216312
false
false
false
kdar/rconsoft
setup.py
1
2919
#!/usr/bin/env python # Read LICENSE for licensing details. import sys import textwrap import glob import shutil import os app_name = 'rconsoft' #----------------------------- # Do some checks if sys.version_info < (2, 4, 0): sys.stderr.write(app_name+' requires Python 2.4 or newer.\n') sys.exit(-1) try: from setuptools import setup, find_packages except ImportError: from ez_setup import use_setuptools use_setuptools() from setuptools import setup, find_packages #----------------------------- # Get all of our packages #plugin_names = find_packages('plugins') #plugins = [app_name+'.plugins.'+p for p in plugin_names] #packages = find_packages(exclude=['ez_setup', 'tests', 'tests.*', 'plugins', 'plugins.*'])+[app_name+'.plugins']+plugins #package_dir = {app_name+'.plugins': 'plugins'} #for name in plugin_names: # package_dir[app_name+'.plugins.' + name] = 'plugins/' + name packages = find_packages(exclude=['ez_setup', 'tests', 'tests.*']) package_dir = {} version = '0.1' setup( # Metadata name=app_name, version=version, author='Kevin Darlington', url='', author_email='[email protected]', download_url='', description='A program to interact with HL servers.', install_requires=[ 'configobj', 'twisted', 'mechanize' ], #install_requires=[ # "Routes>=1.10.1", "WebHelpers>=0.6.3", "Beaker>=1.1.3", # "Paste>=1.7.2", "PasteDeploy>=1.3.2", "PasteScript>=1.7.3", # "FormEncode>=1.2.1", "simplejson>=2.0.6", "decorator>=2.3.2", # "nose>=0.10.4", "Mako>=0.2.4", "WebOb>=0.9.5", "WebError>=0.10.1", # "WebTest>=1.1", "Tempita>=0.2", # ], # dependency_links=[ # "http://www.pylonshq.com/download/0.9.7" # ], # classifiers=[ # "Development Status :: 5 - Production/Stable", # "Intended Audience :: Developers", # "License :: OSI Approved :: BSD License", # "Framework :: Pylons", # "Programming Language :: Python", # "Topic :: Internet :: WWW/HTTP", # "Topic :: Internet :: WWW/HTTP :: Dynamic Content", # "Topic :: Internet :: WWW/HTTP :: WSGI", # "Topic :: Software Development :: Libraries :: Python Modules", # ], # extras_require = { # 'cheetah': ["Cheetah>=1.0", "TurboCheetah>=0.9.5"], # 'myghty': ["Myghty>=1.1"], # 'kid': ["kid>=0.9", "TurboKid>=0.9.1"], # 'genshi': ["Genshi>=0.4.4"], # 'jinja2': ['Jinja2'], # 'full': [ # "docutils>=0.4", "elementtree>=1.2.6", # "Pygments>=0.7", "Cheetah>=1.0", # "TurboCheetah>=0.9.5", "kid>=0.9", "TurboKid>=0.9.1", # 'Genshi>=0.4.4', # ], # }, # Installation data packages=packages, package_dir=package_dir, include_package_data=True, #scripts=['scripts/'+app_name], entry_points = { 'console_scripts': [ '%s = %s.app:main_func' % (app_name, app_name) ] } )
mit
1,058,570,883,171,243,500
28.484848
121
0.564919
false
2.92485
false
false
false
lmazuel/azure-sdk-for-python
azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/virtual_machine_scale_set_update_os_disk.py
1
2121
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class VirtualMachineScaleSetUpdateOSDisk(Model): """Describes virtual machine scale set operating system disk Update Object. This should be used for Updating VMSS OS Disk. :param caching: The caching type. Possible values include: 'None', 'ReadOnly', 'ReadWrite' :type caching: str or ~azure.mgmt.compute.v2017_03_30.models.CachingTypes :param image: The Source User Image VirtualHardDisk. This VirtualHardDisk will be copied before using it to attach to the Virtual Machine. If SourceImage is provided, the destination VirtualHardDisk should not exist. :type image: ~azure.mgmt.compute.v2017_03_30.models.VirtualHardDisk :param vhd_containers: The list of virtual hard disk container uris. :type vhd_containers: list[str] :param managed_disk: The managed disk parameters. :type managed_disk: ~azure.mgmt.compute.v2017_03_30.models.VirtualMachineScaleSetManagedDiskParameters """ _attribute_map = { 'caching': {'key': 'caching', 'type': 'CachingTypes'}, 'image': {'key': 'image', 'type': 'VirtualHardDisk'}, 'vhd_containers': {'key': 'vhdContainers', 'type': '[str]'}, 'managed_disk': {'key': 'managedDisk', 'type': 'VirtualMachineScaleSetManagedDiskParameters'}, } def __init__(self, **kwargs): super(VirtualMachineScaleSetUpdateOSDisk, self).__init__(**kwargs) self.caching = kwargs.get('caching', None) self.image = kwargs.get('image', None) self.vhd_containers = kwargs.get('vhd_containers', None) self.managed_disk = kwargs.get('managed_disk', None)
mit
-2,492,063,701,815,970,000
46.133333
102
0.654408
false
4.2251
false
false
false
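An illustrative construction of the model above, assuming the matching azure-mgmt-compute release is installed; the URIs and caching mode are placeholders rather than values from any real deployment.

from azure.mgmt.compute.v2017_03_30.models import (
    VirtualHardDisk, VirtualMachineScaleSetUpdateOSDisk)

os_disk = VirtualMachineScaleSetUpdateOSDisk(
    caching='ReadWrite',
    image=VirtualHardDisk(uri='https://example.blob.core.windows.net/vhds/src.vhd'),
    vhd_containers=['https://example.blob.core.windows.net/vhds'])

# Attributes mirror the keyword arguments; unspecified ones default to None.
print(os_disk.caching)        # 'ReadWrite'
print(os_disk.managed_disk)   # None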
mjsauvinen/P4UL
pyRaster/tif2NumpyTile.py
1
1956
#!/usr/bin/env python3 import sys import argparse import numpy as np from mapTools import * from utilities import filesFromList, writeLog from plotTools import addImagePlot import matplotlib.pyplot as plt ''' Author: Mikko Auvinen [email protected] University of Helsinki & Finnish Meteorological Institute ''' #==========================================================# parser = argparse.ArgumentParser(prog='tif2NumpyTile.py') parser.add_argument("-f", "--filename",type=str, help="Input tif-image file name.") parser.add_argument("-fo", "--fileout",type=str, help="Output npz file name.") parser.add_argument("-r", "--reso",type=float, help="Resolution of the tif-image.") parser.add_argument("-xo", "--xorig",type=float, nargs=2,default=[0.,0.],\ help="Coords [N,E] of the tif-images top-left corner. Default=[0,0]") parser.add_argument("-p", "--printOn", help="Print the numpy array data.",\ action="store_true", default=False) parser.add_argument("-pp", "--printOnly", help="Only print the numpy array data. Don't save.",\ action="store_true", default=False) parser.add_argument("-s", "--scale",type=float, default=1.,\ help="Scale factor for the output. Default=1.") args = parser.parse_args() writeLog( parser, args, args.printOnly ) #==========================================================# # Renaming, nothing more. filename = args.filename fileout = args.fileout reso = args.reso ROrig = args.xorig printOn = args.printOn printOnly = args.printOnly sc = args.scale R = openTifAsNumpy(filename) dPx = np.array([sc*reso, sc*reso]) Rdict = {'R' : R, 'GlobOrig' : ROrig, 'gridRot' : 0., 'dPx' : dPx} if( not printOnly ): print(' Writing file {} ... '.format(fileout) ) saveTileAsNumpyZ( fileout, Rdict) print(' ... done! ') if( printOn or printOnly ): pfig = plt.figure(num=1, figsize=(10.,10.)) pfig = addImagePlot( pfig, R, fileout, gridOn=True ) plt.show()
mit
4,073,113,538,880,150,500
34.563636
95
0.641616
false
3.165049
false
false
false
Kronopt/pipUpdateAll
pipUpdateAll.py
1
3682
#!python2 # coding: utf-8 """ PIP UPDATE ALL Updates outdated python modules using pip Checks outdated modules using "pip list --outdated --format columns", parses that column to only show relevant information (name, current version, new version) and then updates all detected modules using "pip install -U" followed by each module's name DEPENDENCIES: - Python 2.7 - pip HOW TO RUN: - Directly, by double clicking the script. """ import subprocess import sys from time import sleep __author__ = 'Pedro HC David, https://github.com/Kronopt' __credits__ = ['Pedro HC David'] __version__ = '1.0' __date__ = '02:40h, 16/12/2016' __status__ = 'Finished' def pip_list_columns_parser(pip_list_columns_format_output): """ Parses the output of "pip list --outdated --format columns" into a dictionary PARAMETERS: pip_list_columns_format_output : str output of "pip list --outdated --format columns" RETURNS: {{module_name : (current_version, new_version)} Module_name associated with its current_version and new_version """ # Column format: # # Package Version Latest Type # ------------- --------- --------- ---- # module_1_name version_1 version_2 type # module_2_name version_1 version_2 type final_dictionary = {} # removes "Package", "Version", etc and "----" modules_and_versions = pip_list_columns_format_output.split()[8:] number_of_modules = len(modules_and_versions)/4 # parses list for module_number in xrange(number_of_modules): list_position = module_number*4 final_dictionary[modules_and_versions[list_position]] = (modules_and_versions[list_position+1], modules_and_versions[list_position+2]) return final_dictionary if __name__ == '__main__': # location of python executable, avoids dependency on windows PATH python_executable = sys.executable # checking if pip is installed try: pip_version_output = subprocess.check_output([python_executable, "-m", "pip", "--version"]) pip_version = pip_version_output.split()[1] except subprocess.CalledProcessError: print "Python cannot locate pip..." sys.exit() print "Modules to be updated using pip version", pip_version + ":" # Get modules out of date modules_to_update_columns = subprocess.check_output( [python_executable, "-m", "pip", "list", "--outdated", "--format", "columns"]) # dictionary in the format {module_name : (current_version, new_version)} modules_to_update = pip_list_columns_parser(modules_to_update_columns) if len(modules_to_update) > 0: module_names = [] # shows modules out of date and each respective current versions and new versions for module_name, (current_version, new_version) in sorted(modules_to_update.iteritems()): print module_name + ":", current_version, "->", new_version module_names.append(module_name) print no_correct_answer_given_yet = True while no_correct_answer_given_yet: answer = raw_input("Do you wish to continue (y/n)? ") if answer == "y": # call "pip install -U" with every outdated module name as parameters subprocess.call([python_executable, "-m", "pip", "install", "--upgrade"] + module_names) no_correct_answer_given_yet = False elif answer == "n": print "Update canceled" no_correct_answer_given_yet = False else: print "All modules are up to date" sleep(2)
mit
5,579,989,667,137,161,000
33.735849
118
0.625204
false
3.942184
false
false
false
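Because pip_list_columns_parser() above is a pure function, it can be exercised with a canned "pip list --outdated --format columns" output; the module names and versions are invented, and the snippet assumes it runs under Python 2 next to the script.

from pipUpdateAll import pip_list_columns_parser

sample = """\
Package    Version Latest Type
---------- ------- ------ -----
requests   2.18.4  2.20.0 wheel
six        1.10.0  1.12.0 wheel
"""

parsed = pip_list_columns_parser(sample)
print(parsed)
# {'requests': ('2.18.4', '2.20.0'), 'six': ('1.10.0', '1.12.0')} (order may vary)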
leebird/legonlp
utils/runner.py
1
1644
import sys import os import codecs class Runner(object): runnerName = None def __init__(self): ''' read input files and process run directly on input files run directly on input dir process output ''' pass def run(self, args): ''' inputs: a list of (dir, suffix) pairs outputs: a list of (dir, suffix) pairs Note that dir should be an absolute path ''' raise NotImplementedError def read_file(self, filepath): if not os.path.isfile(filepath): print >> sys.stderr, 'file not found: ' + filepath return None f = codecs.open(filepath, 'r', 'utf-8') text = f.read().strip() f.close() return text def write_file(self, content, filepath): f = codecs.open(filepath, 'w', 'utf-8') f.write(content) f.close() def get_files(self, dirname, sux, docList): ''' get a list of path for the docList ''' return [os.path.join(dirname, doc + sux) for doc in docList] def get_io_files(self, dirsux, docList): """ get a zipped list of paths for all the dirs and the docList :param dirsux: a list of (dir, suffix) pairs :type dirsux: list :param docList: a list of doc name :type docList: list :return: a zipped list of dir+file+suffix tuples :rtype: list """ res = [] for ds in dirsux: dirname, sux = ds[:2] res.append(self.get_files(dirname, sux, docList)) return zip(*res)
gpl-2.0
7,072,275,004,045,526,000
23.909091
68
0.544404
false
3.98063
false
false
false
crossbario/crossbar-fabric-cli
cbsh/idl/loader.py
1
16965
##################################################################################### # # Copyright (c) Crossbar.io Technologies GmbH # # Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g. # you have purchased a commercial license), the license terms below apply. # # Should you enter into a separate license agreement after having received a copy of # this software, then the terms of such license agreement replace the terms below at # the time at which such license agreement becomes effective. # # In case a separate license agreement ends, and such agreement ends without being # replaced by another separate license agreement, the license terms below apply # from the time at which said agreement ends. # # LICENSE TERMS # # This program is free software: you can redistribute it and/or modify it under the # terms of the GNU General Public License, version 3, as published by the # Free Software Foundation. This program is distributed in the hope that it will be # useful, but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see <https://www.gnu.org/licenses/gpl-3.0.en.html>. # ##################################################################################### import os import json import argparse import hashlib import pprint from typing import Dict, Any # noqa import six import click from cbsh.util import hl from cbsh.reflection import Schema import txaio txaio.use_asyncio() def extract_attributes(item, allowed_attributes=None): num_attrs = item.AttributesLength() attrs = [item.Attributes(i) for i in range(num_attrs)] attrs_dict = { x.Key().decode('utf8'): x.Value().decode('utf8') if x.Value().decode('utf8') not in ['0'] else None for x in attrs } if allowed_attributes: for attr in attrs_dict: if attr not in allowed_attributes: raise Exception( 'invalid XBR attribute "{}" - must be one of {}'.format( attr, allowed_attributes)) return attrs_dict def extract_docs(item): num_docs = item.DocumentationLength() item_docs = [ item.Documentation(i).decode('utf8').strip() for i in range(num_docs) ] return item_docs INTERFACE_ATTRS = ['type', 'uuid'] INTERFACE_MEMBER_ATTRS = ['type', 'stream'] INTERFACE_MEMBER_TYPES = ['procedure', 'topic'] INTERFACE_MEMBER_STREAM_VALUES = [None, 'in', 'out', 'inout'] EXTRACT_ATTRS_RAW = False _BASETYPE_ID2NAME = { None: 'Unknown', 0: 'none', 1: 'utype', 2: 'bool', 3: 'int8', 4: 'uint8', 5: 'int16', 6: 'uint16', 7: 'int32', 8: 'uint32', 9: 'int64', 10: 'uint64', 11: 'float', 12: 'double', 13: 'string', 14: 'vector', 15: 'object', 16: 'union', } def read_reflection_schema(buf, log=None): """ Read a binary FlatBuffers buffer that is typed according to the FlatBuffers reflection schema. The function returns extracted information in a plain, JSON serializable dict. 
""" if not log: log = txaio.make_logger() _schema = Schema.GetRootAsSchema(buf, 0) _root = _schema.RootTable() if _root: root_name = _root.Name().decode('utf8').strip() else: root_name = None _file_ident = _schema.FileIdent().decode('utf8').strip() if _file_ident == '': _file_ident = None _file_ext = _schema.FileExt().decode('utf8').strip() if _file_ext == '': _file_ext = None m = hashlib.sha256() m.update(buf) schema_meta = { 'bfbs_size': len(buf), 'bfbs_sha256': m.hexdigest(), 'file_ident': _file_ident, 'file_ext': _file_ext, 'root': root_name, } schema = None # type: dict schema = { 'meta': schema_meta, 'tables': [], 'enums': [], 'services': [], } schema_by_uri = None # type: dict schema_by_uri = { 'meta': schema_meta, 'types': {}, } enums = [] objects = [] services = [] fqn2type = dict() # type: Dict[str, Any] enum_cnt = 0 object_cnt = 0 service_cnt = 0 typerefs_cnt = 0 typerefs_error_cnt = 0 for i in range(_schema.EnumsLength()): item = _schema.Enums(i) name = item.Name().decode('utf8') if name in fqn2type: raise Exception('duplicate name "{}"'.format(name)) enum_cnt += 1 for i in range(_schema.ObjectsLength()): item = _schema.Objects(i) name = item.Name().decode('utf8') if name in fqn2type: raise Exception('duplicate name "{}"'.format(name)) object_cnt += 1 for i in range(_schema.ServicesLength()): item = _schema.Services(i) name = item.Name().decode('utf8') if name in fqn2type: raise Exception('duplicate name "{}"'.format(name)) service_cnt += 1 log.info('Processing schema with {} enums, {} objects and {} services ...'. format(enum_cnt, object_cnt, service_cnt)) # enums # num_enums = _schema.EnumsLength() for i in range(num_enums): # extract enum base information # _enum = _schema.Enums(i) enum_name = _enum.Name().decode('utf8') log.debug('processing enum {} ("{}")'.format(i, enum_name)) enum = { # '_index': i, 'type': 'enum', 'name': enum_name, 'docs': extract_docs(_enum), } if EXTRACT_ATTRS_RAW: enum['attr'] = extract_attributes(_enum) # extract enum values # enum_values_dict = dict() # type: Dict[str, Any] for j in range(_enum.ValuesLength()): _enum_value = _enum.Values(j) enum_value_name = _enum_value.Name().decode('utf8') enum_value = { 'docs': extract_docs(_enum_value), # enum values cannot have attributes } if enum_value_name in enum_values_dict: raise Exception( 'duplicate enum value "{}"'.format(enum_value_name)) enum_values_dict[enum_value_name] = enum_value enum['values'] = enum_values_dict if enum_name in schema_by_uri['types']: raise Exception( 'unexpected duplicate definition for qualified name "{}"'. 
format(enum_name)) enums.append(enum) schema_by_uri['types'][enum_name] = enum # objects (tables/structs) # for i in range(_schema.ObjectsLength()): _obj = _schema.Objects(i) obj_name = _obj.Name().decode('utf8') object_type = 'struct' if _obj.IsStruct() else 'table' obj = { # '_index': i, 'type': object_type, 'name': obj_name, 'docs': extract_docs(_obj), } if EXTRACT_ATTRS_RAW: obj['attr'] = extract_attributes(_obj) # extract fields num_fields = _obj.FieldsLength() fields = [] fields_by_name = {} for j in range(num_fields): _field = _obj.Fields(j) field_name = _field.Name().decode('utf8') log.debug('processing field {} ("{}")'.format(i, field_name)) _field_type = _field.Type() _field_index = int(_field_type.Index()) _field_base_type = _BASETYPE_ID2NAME.get(_field_type.BaseType(), None) _field_element = _BASETYPE_ID2NAME.get(_field_type.Element(), None) if _field_element == 'none': _field_element = None # FIXME # if _field_element == 'object': # el = _schema.Objects(_field_type.Element()) # if isinstance(el, reflection.Type) and hasattr(el, 'IsStruct'): # _field_element = 'struct' if el.Element().IsStruct( # ) else 'table' field = { # '_index': j, 'name': field_name, 'id': int(_field.Id()), 'offset': int(_field.Offset()), 'base_type': _field_base_type, } if _field_element: # vector field['element_type'] = _field_element if _field_index != -1: # field['field_index'] = _field_index if _field_base_type in [ 'object', 'struct' ] or _field_element in ['object', 'struct']: # obj/struct if _field_index < _schema.ObjectsLength(): l_obj = _schema.Objects(_field_index) l_obj_ref = _obj.Name().decode('utf8') field['ref_category'] = 'struct' if l_obj.IsStruct( ) else 'table' field['ref_type'] = l_obj_ref typerefs_cnt += 1 else: log.info( 'WARNING - referenced table/struct for index {} ("{}.{}") not found'. format(_field_index, obj_name, field_name)) field['ref_category'] = 'object' field['ref_type'] = None typerefs_error_cnt += 1 elif _field_base_type in [ 'utype', 'bool', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', 'int64', 'uint64', 'float', 'double', 'string' ]: # enum field['ref_category'] = 'enum' if _field_index < _schema.EnumsLength(): _enum_ref = _schema.Enums(_field_index).Name().decode( 'utf8') field['ref_type'] = _enum_ref typerefs_cnt += 1 else: log.info('WARNING - referenced enum not found') field['ref_type'] = None typerefs_error_cnt += 1 else: raise Exception('unhandled field type: {} {} {} {}'.format( field_name, _field_base_type, _field_element, _field_index)) field_docs = extract_docs(_field) if field_docs: field['docs'] = field_docs if EXTRACT_ATTRS_RAW: _field_attrs = extract_attributes(_field) if _field_attrs: field['attr'] = _field_attrs fields.append(field) fields_by_name[field_name] = field obj['fields'] = fields_by_name if obj['name'] in schema_by_uri['types']: raise Exception( 'unexpected duplicate definition for qualified name "{}"'. format(field['name'])) # always append the object here, so we can dereference indexes # correctly objects.append(obj) # skip our "void marker" if False and obj_name in ['Void']: pass else: schema_by_uri['types'][obj['name']] = obj # iterate over services # num_services = _schema.ServicesLength() for i in range(num_services): _service = _schema.Services(i) service_name = _service.Name().decode('utf8') service_attrs_dict = extract_attributes(_service, INTERFACE_ATTRS) service_type = service_attrs_dict.get('type', None) if service_type != 'interface': raise Exception( 'invalid value "{}" for attribute "type" in XBR interface'. 
format(service_type)) service = { # '_index': i, 'type': service_type, 'name': service_name, 'docs': extract_docs(_service), } if EXTRACT_ATTRS_RAW: service['attrs'] = service_attrs_dict else: service['uuid'] = service_attrs_dict.get('uuid', None) num_calls = _service.CallsLength() calls = [] calls_by_name = {} for j in range(num_calls): _call = _service.Calls(j) _call_name = _call.Name().decode('utf8') call_attrs_dict = extract_attributes(_call) call_type = call_attrs_dict.get('type', None) if call_type not in INTERFACE_MEMBER_TYPES: raise Exception( 'invalid XBR interface member type "{}" - must be one of {}'. format(call_type, INTERFACE_MEMBER_TYPES)) call_stream = call_attrs_dict.get('stream', None) if call_stream in ['none', 'None', 'null', 'Null']: call_stream = None if call_stream not in INTERFACE_MEMBER_STREAM_VALUES: raise Exception( 'invalid XBR interface member stream modifier "{}" - must be one of {}'. format(call_stream, INTERFACE_MEMBER_STREAM_VALUES)) def _decode_type(x): res = x.Name().decode('utf8') if res in ['Void', 'wamp.Void']: res = None return res call = { 'type': call_type, 'name': _call_name, 'in': _decode_type(_call.Request()), 'out': _decode_type(_call.Response()), 'stream': call_stream, # 'id': int(_call.Id()), # 'offset': int(_call.Offset()), } # call['attrs'] = call_attrs_dict call['docs'] = extract_docs(_call) calls.append(call) calls_by_name[_call_name] = call # service['calls'] = sorted(calls, key=lambda field: field['id']) service['slots'] = calls_by_name services.append(service) if service_name in schema_by_uri['types']: raise Exception( 'unexpected duplicate definition for qualified name "{}"'. format(service_name)) else: schema_by_uri['types'][service_name] = service if typerefs_error_cnt: raise Exception( '{} unresolved type references encountered in schema'.format( typerefs_error_cnt)) schema['enums'] = sorted(enums, key=lambda enum: enum['name']) schema['tables'] = sorted(objects, key=lambda obj: obj['name']) schema['services'] = sorted(services, key=lambda service: service['name']) return schema_by_uri if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument( 'infile', help='FlatBuffers binary schema input file (.bfbs)') parser.add_argument( '-o', '--outfile', help='FlatBuffers JSON schema output (.json)') parser.add_argument( '-v', '--verbose', action='store_true', help='Enable verbose processing output.') parser.add_argument( '-d', '--debug', action='store_true', help='Enable debug output.') options = parser.parse_args() log = txaio.make_logger() txaio.start_logging(level='debug' if options.debug else 'info') infile_path = os.path.abspath(options.infile) with open(infile_path, 'rb') as f: buf = f.read() log.info('Loading FlatBuffers binary schema ({} bytes) ...'.format( len(buf))) try: schema = read_reflection_schema(buf, log=log) except Exception as e: log.error(e) if True: schema['meta']['file_name'] = os.path.basename(options.infile) schema['meta']['file_path'] = infile_path with open(options.outfile, 'wb') as f: outdata = json.dumps( schema, ensure_ascii=False, sort_keys=False, indent=4, separators=(', ', ': ')).encode('utf8') f.write(outdata) cnt_bytes = len(outdata) cnt_defs = len(schema['types'].keys()) log.info( 'FlatBuffers JSON schema data written ({} bytes, {} defs).'.format( cnt_bytes, cnt_defs)) if options.verbose: log.info('Schema metadata:') schema_meta_str = pprint.pformat(schema['meta']) # log.info(schema_meta_str) # log.info('{}'.format(schema_meta_str)) print(schema_meta_str) for o in 
schema['types'].values(): if o['type'] == 'interface': log.info('interface: {}'.format(hl(o['name'], bold=True))) for s in o['slots'].values(): log.info('{:>12}: {}'.format(s['type'], hl(s['name'])))
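# ---------------------------------------------------------------------------
# Illustrative sketch of the dict returned by read_reflection_schema(),
# reconstructed from the code above (keys are real, example values are
# placeholders):
#
#     {
#         'meta': {'bfbs_size': 1234, 'bfbs_sha256': '<hex digest>',
#                  'file_ident': None, 'file_ext': None, 'root': 'ns.Root'},
#         'types': {
#             'ns.Color':  {'type': 'enum', 'name': 'ns.Color',
#                           'docs': [...], 'values': {...}},
#             'ns.Thing':  {'type': 'table' or 'struct', 'name': 'ns.Thing',
#                           'docs': [...], 'fields': {...}},
#             'ns.IThing': {'type': 'interface', 'name': 'ns.IThing',
#                           'uuid': '<uuid>', 'docs': [...], 'slots': {...}},
#         },
#     }
#
# When run as a script, 'file_name' and 'file_path' are also added to 'meta'.
# ---------------------------------------------------------------------------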
mit
3,829,342,878,639,213,000
31.009434
97
0.519776
false
4.0625
false
false
false
omargammoh/rpislave
website/processing.py
1
6269
from bson import json_util import multiprocessing from website.models import Conf from time import time, sleep import inspect import subprocess import json try: import signal except: print "signal cannot be imported" def execute(cmd, daemon=False): if daemon: _ = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return None else: return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read() def fix_corrupt_db(): conf = get_conf() write_json_file(conf, "/home/pi/data/conf") execute('sudo rm /home/pi/rpislave/db.sqlite3') execute('sudo reboot') return None def read_json_file(fp): try: f = file(fp, "r") s = f.read() f.close() js = json.loads(s) except: js = None return js def write_json_file(js, fp): f = file(fp, "w") f.write(json.dumps(js)) f.close() class Timeout: def __init__(self, seconds=1, error_message='Timeout'): self.seconds = seconds self.error_message = error_message def handle_timeout(self, signum, frame): raise BaseException(self.error_message) def __enter__(self): signal.signal(signal.SIGALRM, self.handle_timeout) signal.setitimer(signal.ITIMER_REAL, self.seconds) def __exit__(self, type, value, traceback): signal.alarm(0) def filter_kwargs(func, kwargs_input): """ creates the kwargs of func from kwargs_input func: function to inspect """ argnames,_,_,defaults = inspect.getargspec(func) if defaults is None: defaults=[] required_args = set(argnames[:len(argnames)-len(defaults)]) optional_args = set(argnames[len(argnames)-len(defaults):]) kwargs_needed = {k:v for (k,v) in kwargs_input.iteritems() if k in required_args.union(optional_args) } return kwargs_needed def get_pid(command): """ gets the pid of the process using the command column in the ps aux table """ s = subprocess.Popen("ps aux", shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read() lines = [line.split(None, 10) for line in s.split("\n") if line.lstrip() != ""] matches = [line for line in lines if line[-1] == command] if len(matches)==0: print "no maches found" return None elif len(matches)>1: print "multiple matches found" return None else: pid = matches[0][1] return pid def get_conf(): for ob in Conf.objects.all(): try: js = json_util.loads(ob.data) if not ("label" in js): raise BaseException('no label in conf') return js except: print "!!was not able to parse and get label of a configuration row, skipping" pass return None def fix_malformed_db(): try: #get conf print 'fix_malformed_db >> getting conf' conf_x = get_conf() #save it on a text file print 'fix_malformed_db >> saving conf as text' f = file('/home/pi/rpislave/conf.json', 'w') f.write(json_util.dumps(conf_x)) f.close() #remove db import os print 'fix_malformed_db >> deleting db' os.remove('/home/pi/rpislave/db.sqlite3') #keep a note as a file print 'fix_malformed_db >> saving log as text' from datetime import datetime now = datetime.utcnow() f = file('/home/pi/data/dbdelete-' + now.strftime('%Y%m%d%H%M%S'),'w') f.write('we have taken a copy of conf, saved it on disk, deleted the database and restarted. 
%s' %str(now)) f.close() #restart print 'fix_malformed_db >> rebooting' os.system('sudo reboot') except: print "error while trying to fix malformed db" class MP(): def __init__(self, name, target, request, cmd=None): self.t1 = time() self.name = name self.target = target self.request = request self.cmd = cmd if cmd else request.GET.get("cmd", None) self.dic = {} def start(self): app_conf = get_conf()['apps'][self.name] p = multiprocessing.Process(name=self.name, target=self.target, kwargs=filter_kwargs(func=self.target, kwargs_input=app_conf)) p.start() def ison(self): ac = [m for m in multiprocessing.active_children() if m.name == self.name ] if len(ac) == 0: return False else: #return ac[0].is_alive() #this line does not work when switching to uwsgi and gives the error: can only test a child process, this is due to the fact that uwsgi has many workers return True def stop(self): ac = [m for m in multiprocessing.active_children() if self.name == m.name][0] if ac: if ac.pid: kill_command = "sudo kill -INT %s" % ac.pid print "stopping process in the good way: %s" % kill_command s = subprocess.Popen(kill_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout.read() else: print "stopping process in in the hard way" ac.terminate() sleep(0.5) return True else: return False def process_command(self): lis = [] print "%s" %(self.name) ison_at_start = self.ison() if self.cmd is None: lis.append('no cmd has provided') elif self.cmd == 'start': if ison_at_start: lis.append('process was already running') else: self.start() lis.append('process has been started') elif self.cmd == 'stop': if self.stop(): lis.append('terminated process') else: lis.append('process was not running') elif self.cmd == 'status': self.dic["%s" %self.name] = get_conf()['apps'][self.name] else: lis.append("we didnt understand your cmd") #respond with some info self.dic['log'] = lis self.dic['ison'] = self.ison() self.dic['took'] = "%s seconds" %(time()-self.t1)
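# ---------------------------------------------------------------------------
# Illustrative sketch (function and values invented): filter_kwargs() keeps
# only the keyword arguments that the target callable declares, which is how
# MP maps an app's configuration dict onto its target:
#
#     def start_pump(port, interval=60):
#         pass
#
#     filter_kwargs(start_pump, {'port': 8080, 'interval': 10, 'label': 'x'})
#     # -> {'port': 8080, 'interval': 10}      ('label' is dropped)
#
# MP(name, target, request).start() then spawns target(**filtered_conf) in a
# multiprocessing.Process named after the app.
# ---------------------------------------------------------------------------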
gpl-2.0
3,806,615,012,987,262,500
31.148718
189
0.584304
false
3.751646
false
false
false
ilastikdev/ilastik
ilastik/applets/thresholdTwoLevels/_OpObjectsSegment.py
1
10889
############################################################################### # ilastik: interactive learning and segmentation toolkit # # Copyright (C) 2011-2014, the ilastik developers # <[email protected]> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # In addition, as a special exception, the copyright holders of # ilastik give you permission to combine ilastik with applets, # workflows and plugins which are not covered under the GNU # General Public License. # # See the LICENSE file for details. License information is also available # on the ilastik web site at: # http://ilastik.org/license.html ############################################################################## # basic python modules import functools import logging logger = logging.getLogger(__name__) from threading import Lock as ThreadLock # required numerical modules import numpy as np import vigra import opengm # basic lazyflow types from lazyflow.operator import Operator from lazyflow.slot import InputSlot, OutputSlot from lazyflow.rtype import SubRegion from lazyflow.stype import Opaque from lazyflow.request import Request, RequestPool # required lazyflow operators from lazyflow.operators.opLabelVolume import OpLabelVolume from lazyflow.operators.valueProviders import OpArrayCache from lazyflow.operators.opCompressedCache import OpCompressedCache from lazyflow.operators.opReorderAxes import OpReorderAxes from _OpGraphCut import segmentGC, OpGraphCut ## segment predictions with pre-thresholding # # This operator segments an image into foreground and background and makes use # of a preceding thresholding step. After thresholding, connected components # are computed and are then considered to be "cores" of objects to be segmented. # The Graph Cut optimization (see _OpGraphCut.OpGraphCut) is then applied to # the bounding boxes of the object "cores, enlarged by a user-specified margin. # The pre-thresholding operation allows to apply Graph Cut segmentation on # large data volumes, in case the segmented foreground consists of sparse objects # of limited size and the probability map of the unaries is of high recall, but # possibly low precision. One particular application for this setup is # segmentation of synapses in anisotropic 3D Electron Microscopy image stacks. # # # The slot CachedOutput guarantees consistent results, the slot Output computes # the roi on demand. # # The operator inherits from OpGraphCut because they share some details: # * output meta # * dirtiness propagation # * input slots # class OpObjectsSegment(OpGraphCut): name = "OpObjectsSegment" # thresholded predictions, or otherwise obtained ROI indicators # (a value of 0 is assumed to be background and ignored) LabelImage = InputSlot() # margin around each object (always xyz!) 
Margin = InputSlot(value=np.asarray((20, 20, 20))) # bounding boxes of the labeled objects # this slot returns an array of dicts with shape (t, c) BoundingBoxes = OutputSlot(stype=Opaque) ### slots from OpGraphCut ### ## prediction maps #Prediction = InputSlot() ## graph cut parameter #Beta = InputSlot(value=.2) ## labeled segmentation image #Output = OutputSlot() #CachedOutput = OutputSlot() def __init__(self, *args, **kwargs): super(OpObjectsSegment, self).__init__(*args, **kwargs) def setupOutputs(self): super(OpObjectsSegment, self).setupOutputs() # sanity checks shape = self.LabelImage.meta.shape assert len(shape) == 5,\ "Prediction maps must be a full 5d volume (txyzc)" tags = self.LabelImage.meta.getAxisKeys() tags = "".join(tags) assert tags == 'txyzc',\ "Label image has wrong axes order"\ "(expected: txyzc, got: {})".format(tags) # bounding boxes are just one element arrays of type object, but we # want to request boxes from a specific region, therefore BoundingBoxes # needs a shape shape = self.Prediction.meta.shape self.BoundingBoxes.meta.shape = shape self.BoundingBoxes.meta.dtype = np.object self.BoundingBoxes.meta.axistags = vigra.defaultAxistags('txyzc') def execute(self, slot, subindex, roi, result): # check the axes - cannot do this in setupOutputs because we could be # in some invalid intermediate state where the dimensions do not agree shape = self.LabelImage.meta.shape agree = [i == j for i, j in zip(self.Prediction.meta.shape, shape)] assert all(agree),\ "shape mismatch: {} vs. {}".format(self.Prediction.meta.shape, shape) if slot == self.BoundingBoxes: return self._execute_bbox(roi, result) elif slot == self.Output: self._execute_graphcut(roi, result) else: raise NotImplementedError( "execute() is not implemented for slot {}".format(str(slot))) def _execute_bbox(self, roi, result): cc = self.LabelImage.get(roi).wait() cc = vigra.taggedView(cc, axistags=self.LabelImage.meta.axistags) cc = cc.withAxes(*'xyz') logger.debug("computing bboxes...") feats = vigra.analysis.extractRegionFeatures( cc.astype(np.float32), cc.astype(np.uint32), features=["Count", "Coord<Minimum>", "Coord<Maximum>"]) feats_dict = {} feats_dict["Coord<Minimum>"] = feats["Coord<Minimum>"] feats_dict["Coord<Maximum>"] = feats["Coord<Maximum>"] feats_dict["Count"] = feats["Count"] return feats_dict def _execute_graphcut(self, roi, result): for i in (0, 4): assert roi.stop[i] - roi.start[i] == 1,\ "Invalid roi for graph-cut: {}".format(str(roi)) t = roi.start[0] c = roi.start[4] margin = self.Margin.value beta = self.Beta.value MAXBOXSIZE = 10000000 # FIXME justification?? 
## request the bounding box coordinates ## # the trailing index brackets give us the dictionary (instead of an # array of size 1) feats = self.BoundingBoxes.get(roi).wait() mins = feats["Coord<Minimum>"] maxs = feats["Coord<Maximum>"] nobj = mins.shape[0] # these are indices, so they should have an index datatype mins = mins.astype(np.uint32) maxs = maxs.astype(np.uint32) ## request the prediction image ## pred = self.Prediction.get(roi).wait() pred = vigra.taggedView(pred, axistags=self.Prediction.meta.axistags) pred = pred.withAxes(*'xyz') ## request the connected components image ## cc = self.LabelImage.get(roi).wait() cc = vigra.taggedView(cc, axistags=self.LabelImage.meta.axistags) cc = cc.withAxes(*'xyz') # provide xyz view for the output (just need 8bit for segmentation resultXYZ = vigra.taggedView(np.zeros(cc.shape, dtype=np.uint8), axistags='xyz') def processSingleObject(i): logger.debug("processing object {}".format(i)) # maxs are inclusive, so we need to add 1 xmin = max(mins[i][0]-margin[0], 0) ymin = max(mins[i][1]-margin[1], 0) zmin = max(mins[i][2]-margin[2], 0) xmax = min(maxs[i][0]+margin[0]+1, cc.shape[0]) ymax = min(maxs[i][1]+margin[1]+1, cc.shape[1]) zmax = min(maxs[i][2]+margin[2]+1, cc.shape[2]) ccbox = cc[xmin:xmax, ymin:ymax, zmin:zmax] resbox = resultXYZ[xmin:xmax, ymin:ymax, zmin:zmax] nVoxels = ccbox.size if nVoxels > MAXBOXSIZE: #problem too large to run graph cut, assign to seed logger.warn("Object {} too large for graph cut.".format(i)) resbox[ccbox == i] = 1 return probbox = pred[xmin:xmax, ymin:ymax, zmin:zmax] gcsegm = segmentGC(probbox, beta) gcsegm = vigra.taggedView(gcsegm, axistags='xyz') ccsegm = vigra.analysis.labelVolumeWithBackground( gcsegm.astype(np.uint8)) # Extended bboxes of different objects might overlap. # To avoid conflicting segmentations, we find all connected # components in the results and only take the one, which # overlaps with the object "core" or "seed", defined by the # pre-thresholding seed = ccbox == i filtered = seed*ccsegm passed = np.unique(filtered) assert len(passed.shape) == 1 if passed.size > 2: logger.warn("ambiguous label assignment for region {}".format( (xmin, xmax, ymin, ymax, zmin, zmax))) resbox[ccbox == i] = 1 elif passed.size <= 1: logger.warn( "box {} segmented out with beta {}".format(i, beta)) else: # assign to the overlap region label = passed[1] # 0 is background resbox[ccsegm == label] = 1 pool = RequestPool() #FIXME make sure that the parallel computations fit into memory for i in range(1, nobj): req = Request(functools.partial(processSingleObject, i)) pool.add(req) logger.info("Processing {} objects ...".format(nobj-1)) pool.wait() pool.clean() logger.info("object loop done") # prepare result resView = vigra.taggedView(result, axistags=self.Output.meta.axistags) resView = resView.withAxes(*'xyz') # some labels could have been removed => relabel vigra.analysis.labelVolumeWithBackground(resultXYZ, out=resView) def propagateDirty(self, slot, subindex, roi): super(OpObjectsSegment, self).propagateDirty(slot, subindex, roi) if slot == self.LabelImage: # time-channel slices are pairwise independent # determine t, c from input volume t_ind = 0 c_ind = 4 t = (roi.start[t_ind], roi.stop[t_ind]) c = (roi.start[c_ind], roi.stop[c_ind]) # set output dirty start = t[0:1] + (0,)*3 + c[0:1] stop = t[1:2] + self.Output.meta.shape[1:4] + c[1:2] roi = SubRegion(self.Output, start=start, stop=stop) self.Output.setDirty(roi) elif slot == self.Margin: # margin affects the whole volume self.Output.setDirty(slice(None))
gpl-3.0
-4,906,129,953,672,029,000
38.740876
81
0.619616
false
3.953885
false
false
false
asweigart/pygcurse
examples/shadowtest.py
1
1621
# Simplified BSD License, Copyright 2011 Al Sweigart import sys import os sys.path.append(os.path.abspath('..')) import pygcurse, pygame from pygame.locals import * win = pygcurse.PygcurseWindow(40, 25) win.autoblit = False xoffset = 1 yoffset = 1 mousex = mousey = 0 while True: for event in pygame.event.get(): # the event loop if event.type == QUIT or event.type == KEYDOWN and event.key == K_ESCAPE: pygame.quit() sys.exit() if event.type == KEYDOWN: if event.key == K_UP: yoffset -= 1 elif event.key == K_DOWN: yoffset += 1 elif event.key == K_LEFT: xoffset -= 1 elif event.key == K_RIGHT: xoffset += 1 elif event.key == K_p: win.fullscreen = not win.fullscreen elif event.key == K_d: win._debugchars() elif event.type == MOUSEMOTION: mousex, mousey = win.getcoordinatesatpixel(event.pos, onscreen=False) win.setscreencolors('white', 'blue', clear=True) win.fill(bgcolor='red', region=(15, 10, 5, 5)) win.addshadow(51, (15, 10, 5, 5), xoffset=xoffset, yoffset=yoffset) #win.drawline((6,6), (mousex, mousey), bgcolor='red') win.drawline((6,6), (mousex, mousey), char='+', fgcolor='yellow', bgcolor='green') win.cursor = 0, win.height-3 win.write('Use mouse to move line, arrow keys to move shadow, p to switch to fullscreen.') win.cursor = 0, win.height-1 win.putchars('xoffset=%s, yoffset=%s ' % (xoffset, yoffset)) win.blittowindow()
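# ---------------------------------------------------------------------------
# Descriptive note (summary of the demo above): a red 5x5 box is drawn with a
# drop shadow whose x/y offset is moved with the arrow keys, and a line of
# '+' characters on a green background is drawn from cell (6, 6) to the
# current mouse cell.  'p' toggles fullscreen, 'd' dumps debug characters,
# ESC quits.  Run directly with "python shadowtest.py"; pygame and pygcurse
# must be importable (the sys.path tweak above adds the parent directory).
# ---------------------------------------------------------------------------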
bsd-3-clause
-364,671,775,039,068,860
33.489362
94
0.584207
false
3.349174
false
false
false
koreiklein/fantasia
ui/render/text/colors.py
1
1104
# Copyright (C) 2013 Korei Klein <[email protected]> genericColor = None variableColor = None symbolColor = None andColor = None orColor = None callColor = None quantifierDividerColor = None notColor = None alwaysBackgroundColor = None maybeBackgroundColor = None relationColor = None iffColor = None applyColor = None hiddenColor = None symbolVariablePairBorderColor = None injectionSymbolBackgroundColor = None injectionVariableBackgroundColor = None projectionSymbolBackgroundColor = None projectionVariableBackgroundColor = None callSymbolBackgroundColor = None callVariableBackgroundColor = None _colorPairs = [ (None ,None) , (None ,None) , (None ,None) ] def productPairsColor(i): return _colorPairs[i % len(_colorPairs)] symbolBackgroundColor = None symbolForegroundColor = None def exponentialColor(isAlways): if isAlways: return alwaysBackgroundColor else: return maybeBackgroundColor projectDotColor = None injectDotColor = None trueColor = None falseColor = None
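# ---------------------------------------------------------------------------
# Illustrative note: in this text renderer every colour constant is a
# placeholder (None); a graphical renderer would bind real values.
# productPairsColor(i) cycles through _colorPairs, e.g. productPairsColor(4)
# returns _colorPairs[1] because 4 % len(_colorPairs) == 1.
# ---------------------------------------------------------------------------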
gpl-2.0
6,253,670,935,183,310,000
16.806452
57
0.724638
false
3.820069
false
false
false
hiviah/perspectives-observatory
utilities/cert_client.py
1
3009
# This file is part of the Perspectives Notary Server # # Copyright (C) 2011 Dan Wendlandt # Copyright (C) 2011 Ondrej Mikle, CZ.NIC Labs # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # Testing script for get_certs feature. Little copy-pasta from simple_client. import sys import traceback import base64 import urllib import struct from M2Crypto import BIO, RSA, EVP from xml.dom.minidom import parseString def fetch_certs_xml(notary_server, notary_port, service_id): host, port = service_id.split(":") url = "http://%s:%s/get_certs?host=%s&port=%s" % (notary_server, notary_port, host,port) url_file = urllib.urlopen(url) xml_text = url_file.read() code = url_file.getcode() return (code,xml_text) def verify_certs_signature(service_id, xml_text, notary_pub_key_text): doc = parseString(xml_text) root = doc.documentElement sig_to_verify = base64.standard_b64decode(root.getAttribute("sig")) to_verify = service_id cert_elements = root.getElementsByTagName("certificate") for cert_elem in cert_elements: cert = base64.standard_b64decode(cert_elem.getAttribute("body")) to_verify += cert start_ts = int(cert_elem.getAttribute("start")) end_ts = int(cert_elem.getAttribute("end")) to_verify += struct.pack("!2I", start_ts, end_ts) bio = BIO.MemoryBuffer(notary_pub_key_text) rsa_pub = RSA.load_pub_key_bio(bio) pubkey = EVP.PKey() pubkey.assign_rsa(rsa_pub) pubkey.reset_context(md='sha256') pubkey.verify_init() pubkey.verify_update(to_verify) return pubkey.verify_final(sig_to_verify) if len(sys.argv) not in [4,5]: print "usage: %s <service-id> <notary-server> <notary-port> [notary-pubkey]" % sys.argv[0] exit(1) notary_pub_key = None if len(sys.argv) == 5: notary_pub_key_file = sys.argv[4] notary_pub_key = open(notary_pub_key_file,'r').read() try: code, xml_text = fetch_certs_xml(sys.argv[2],int(sys.argv[3]), sys.argv[1]) if code == 404: print "Notary has no results" elif code != 200: print "Notary server returned error code: %s" % code except Exception, e: print "Exception contacting notary server:" traceback.print_exc(e) exit(1) print 50 * "-" print "XML Response:" print xml_text print 50 * "-" if notary_pub_key: if not verify_certs_signature(sys.argv[1].lower(), xml_text, notary_pub_key): print "Signature verify failed. Results are not valid" exit(1) else: print "Warning: no public key specified, not verifying notary signature"
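# ---------------------------------------------------------------------------
# Illustrative invocation (host names, port and key file are invented):
#
#     python cert_client.py www.example.com:443 notary.example.org 8080 notary.pub
#
# The script fetches
# http://notary.example.org:8080/get_certs?host=www.example.com&port=443,
# prints the XML response and, if a notary public key was supplied, verifies
# the RSA/SHA-256 signature computed over service-id + certificate bodies +
# packed start/end timestamps.
# ---------------------------------------------------------------------------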
gpl-3.0
7,373,507,836,183,155,000
30.020619
91
0.713526
false
2.979208
false
false
false
frostyfrog/mark2
mk2/events/server.py
1
2945
import re from . import Event, get_timestamp # input/output output_exp = re.compile( r'(\d{4}-\d{2}-\d{2} |)(\d{2}:\d{2}:\d{2}) \[([A-Z]+)\] (?:%s)?(.*)' % '|'.join((re.escape(x) for x in ( '[Minecraft] ', '[Minecraft-Server] ' )))) class ServerInput(Event): """Send data to the server's stdin. In plugins, a shortcut is available: self.send("say hello")""" line = Event.Arg(required=True) class ServerOutput(Event): """Issued when the server gives us a line on stdout. Note that to handle this, you must specify both the 'level' (e.g. INFO or SEVERE) and a regex pattern to match""" line = Event.Arg(required=True) time = Event.Arg() level = Event.Arg() data = Event.Arg() def setup(self): m = output_exp.match(self.line) if m: g = m.groups() self.time = g[0]+g[1] self.level= g[2] self.data = g[3] else: self.level= "???" self.data = self.line.strip() self.time = get_timestamp(self.time) def prefilter(self, pattern, level=None): if level and level != self.level: return False m = re.match(pattern, self.data) if not m: return False self.match = m return True # start class ServerStart(Event): """Issue this event to start the server""" pass class ServerStarting(Event): """Issued by the ServerStart handler to alert listening plugins that the server process has started""" pid = Event.Arg() class ServerStarted(Event): """Issued when we see the "Done! (1.23s)" line from the server This event has a helper method in plugins - just overwrite the server_started method. """ time = Event.Arg() #stop class ServerStop(Event): """Issue this event to stop the server.""" reason = Event.Arg(required=True) respawn = Event.Arg(required=True) kill = Event.Arg(default=False) announce = Event.Arg(default=True) dispatch_once = True class ServerStopping(Event): """Issued by the ServerStop handler to alert listening plugins that the server is going for a shutdown This event has a helper method in plugins - just overwrite the server_started method.""" reason = Event.Arg(required=True) respawn = Event.Arg(required=True) kill = Event.Arg(default=False) class ServerStopped(Event): """When the server process finally dies, this event is raised""" pass class ServerEvent(Event): """Tell plugins about something happening to the server""" cause = Event.Arg(required=True) friendly = Event.Arg() data = Event.Arg(required=True) priority = Event.Arg(default=0) def setup(self): if not self.friendly: self.friendly = self.cause
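# ---------------------------------------------------------------------------
# Illustrative sketch (sample console line invented): given the line
#
#     2013-01-01 12:00:00 [INFO] [Minecraft-Server] Done (3.141s)!
#
# output_exp lets ServerOutput.setup() split out
#     time  = "2013-01-01 12:00:00"   (then normalised via get_timestamp)
#     level = "INFO"
#     data  = "Done (3.141s)!"
# and a handler is expected to match on both 'level' and a regex over 'data'
# (see prefilter()).
# ---------------------------------------------------------------------------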
mit
5,955,427,389,456,440,000
23.541667
108
0.591171
false
3.742058
false
false
false
amitjamadagni/sympy
sympy/functions/special/error_functions.py
2
32620
""" This module contains various functions that are special cases of incomplete gamma functions. It should probably be renamed. """ from sympy.core import Add, S, C, sympify, cacheit, pi, I from sympy.core.function import Function, ArgumentIndexError from sympy.functions.elementary.miscellaneous import sqrt, root from sympy.functions.elementary.complexes import polar_lift from sympy.functions.special.hyper import hyper, meijerg # TODO series expansions # TODO see the "Note:" in Ei ############################################################################### ################################ ERROR FUNCTION ############################### ############################################################################### class erf(Function): """ The Gauss error function. This function is defined as: :math:`\\mathrm{erf}(x)=\\frac{2}{\\sqrt{\\pi}} \\int_0^x e^{-t^2} \\, \\mathrm{d}x` Or, in ASCII:: x / | | 2 | -t 2* | e dt | / 0 ------------- ____ \/ pi Examples ======== >>> from sympy import I, oo, erf >>> from sympy.abc import z Several special values are known: >>> erf(0) 0 >>> erf(oo) 1 >>> erf(-oo) -1 >>> erf(I*oo) oo*I >>> erf(-I*oo) -oo*I In general one can pull out factors of -1 and I from the argument: >>> erf(-z) -erf(z) The error function obeys the mirror symmetry: >>> from sympy import conjugate >>> conjugate(erf(z)) erf(conjugate(z)) Differentiation with respect to z is supported: >>> from sympy import diff >>> diff(erf(z), z) 2*exp(-z**2)/sqrt(pi) We can numerically evaluate the error function to arbitrary precision on the whole complex plane: >>> erf(4).evalf(30) 0.999999984582742099719981147840 >>> erf(-4*I).evalf(30) -1296959.73071763923152794095062*I References ========== .. [1] http://en.wikipedia.org/wiki/Error_function .. [2] http://dlmf.nist.gov/7 .. [3] http://mathworld.wolfram.com/Erf.html .. [4] http://functions.wolfram.com/GammaBetaErf/Erf """ nargs = 1 unbranched = True def fdiff(self, argindex=1): if argindex == 1: return 2*C.exp(-self.args[0]**2)/sqrt(S.Pi) else: raise ArgumentIndexError(self, argindex) @classmethod def eval(cls, arg): if arg.is_Number: if arg is S.NaN: return S.NaN elif arg is S.Infinity: return S.One elif arg is S.NegativeInfinity: return S.NegativeOne elif arg is S.Zero: return S.Zero t = arg.extract_multiplicatively(S.ImaginaryUnit) if t == S.Infinity or t == S.NegativeInfinity: return arg if arg.could_extract_minus_sign(): return -cls(-arg) @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0 or n % 2 == 0: return S.Zero else: x = sympify(x) k = C.floor((n - 1)/S(2)) if len(previous_terms) > 2: return -previous_terms[-2] * x**2 * (n - 2)/(n*k) else: return 2*(-1)**k * x**n/(n*C.factorial(k)*sqrt(S.Pi)) def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def _eval_is_real(self): return self.args[0].is_real def _eval_rewrite_as_uppergamma(self, z): return sqrt(z**2)/z*(S.One - C.uppergamma(S.Half, z**2)/sqrt(S.Pi)) def _eval_rewrite_as_tractable(self, z): return S.One - _erfs(z)*C.exp(-z**2) def _eval_as_leading_term(self, x): arg = self.args[0].as_leading_term(x) if x in arg.free_symbols and C.Order(1, x).contains(arg): return 2*x/sqrt(pi) else: return self.func(arg) ############################################################################### #################### EXPONENTIAL INTEGRALS #################################### ############################################################################### class Ei(Function): r""" The classical exponential integral. 
For the use in SymPy, this function is defined as .. math:: \operatorname{Ei}(x) = \sum_{n=1}^\infty \frac{x^n}{n\, n!} + \log(x) + \gamma, where :math:`\gamma` is the Euler-Mascheroni constant. If :math:`x` is a polar number, this defines an analytic function on the riemann surface of the logarithm. Otherwise this defines an analytic function in the cut plane :math:`\mathbb{C} \setminus (-\infty, 0]`. **Background** The name 'exponential integral' comes from the following statement: .. math:: \operatorname{Ei}(x) = \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t If the integral is interpreted as a Cauchy principal value, this statement holds for :math:`x > 0` and :math:`\operatorname{Ei}(x)` as defined above. Note that we carefully avoided defining :math:`\operatorname{Ei}(x)` for negative real x. This is because above integral formula does not hold for any polar lift of such :math:`x`, indeed all branches of :math:`\operatorname{Ei}(x)` above the negative reals are imaginary. However, the following statement holds for all :math:`x \in \mathbb{R}^*`: .. math:: \int_{-\infty}^x \frac{e^t}{t} \mathrm{d}t = \frac{\operatorname{Ei}\left(|x|e^{i \arg(x)}\right) + \operatorname{Ei}\left(|x|e^{- i \arg(x)}\right)}{2}, where the integral is again understood to be a principal value if :math:`x > 0`, and :math:`|x|e^{i \arg(x)}`, :math:`|x|e^{- i \arg(x)}` denote two conjugate polar lifts of :math:`x`. See Also ======== expint, sympy.functions.special.gamma_functions.uppergamma References ========== - Abramowitz & Stegun, section 5: http://www.math.sfu.ca/~cbm/aands/page_228.htm - http://en.wikipedia.org/wiki/Exponential_integral Examples ======== >>> from sympy import Ei, polar_lift, exp_polar, I, pi >>> from sympy.abc import x The exponential integral in SymPy is strictly undefined for negative values of the argument. For convenience, exponential integrals with negative arguments are immediately converted into an expression that agrees with the classical integral definition: >>> Ei(-1) -I*pi + Ei(exp_polar(I*pi)) This yields a real value: >>> Ei(-1).n(chop=True) -0.219383934395520 On the other hand the analytic continuation is not real: >>> Ei(polar_lift(-1)).n(chop=True) -0.21938393439552 + 3.14159265358979*I The exponential integral has a logarithmic branch point at the origin: >>> Ei(x*exp_polar(2*I*pi)) Ei(x) + 2*I*pi Differentiation is supported: >>> Ei(x).diff(x) exp(x)/x The exponential integral is related to many other special functions. For example: >>> from sympy import uppergamma, expint, Shi >>> Ei(x).rewrite(expint) -expint(1, x*exp_polar(I*pi)) - I*pi >>> Ei(x).rewrite(Shi) Chi(x) + Shi(x) """ nargs = 1 @classmethod def eval(cls, z): if not z.is_polar and z.is_negative: # Note: is this a good idea? 
return Ei(polar_lift(z)) - pi*I nz, n = z.extract_branch_factor() if n: return Ei(nz) + 2*I*pi*n def fdiff(self, argindex=1): from sympy import unpolarify arg = unpolarify(self.args[0]) if argindex == 1: return C.exp(arg)/arg else: raise ArgumentIndexError(self, argindex) def _eval_evalf(self, prec): if (self.args[0]/polar_lift(-1)).is_positive: return Function._eval_evalf(self, prec) + (I*pi)._eval_evalf(prec) return Function._eval_evalf(self, prec) def _eval_rewrite_as_uppergamma(self, z): from sympy import uppergamma # XXX this does not currently work usefully because uppergamma # immediately turns into expint return -uppergamma(0, polar_lift(-1)*z) - I*pi def _eval_rewrite_as_expint(self, z): return -expint(1, polar_lift(-1)*z) - I*pi def _eval_rewrite_as_Si(self, z): return Shi(z) + Chi(z) _eval_rewrite_as_Ci = _eval_rewrite_as_Si _eval_rewrite_as_Chi = _eval_rewrite_as_Si _eval_rewrite_as_Shi = _eval_rewrite_as_Si class expint(Function): r""" Generalized exponential integral. This function is defined as .. math:: \operatorname{E}_\nu(z) = z^{\nu - 1} \Gamma(1 - \nu, z), where `\Gamma(1 - \nu, z)` is the upper incomplete gamma function (``uppergamma``). Hence for :math:`z` with positive real part we have .. math:: \operatorname{E}_\nu(z) = \int_1^\infty \frac{e^{-zt}}{z^\nu} \mathrm{d}t, which explains the name. The representation as an incomplete gamma function provides an analytic continuation for :math:`\operatorname{E}_\nu(z)`. If :math:`\nu` is a non-positive integer the exponential integral is thus an unbranched function of :math:`z`, otherwise there is a branch point at the origin. Refer to the incomplete gamma function documentation for details of the branching behavior. See Also ======== E1: The classical case, returns expint(1, z). Ei: Another related function called exponential integral. sympy.functions.special.gamma_functions.uppergamma References ========== - http://dlmf.nist.gov/8.19 - http://functions.wolfram.com/GammaBetaErf/ExpIntegralE/ - http://en.wikipedia.org/wiki/Exponential_integral Examples ======== >>> from sympy import expint, S >>> from sympy.abc import nu, z Differentiation is supported. Differentiation with respect to z explains further the name: for integral orders, the exponential integral is an iterated integral of the exponential function. >>> expint(nu, z).diff(z) -expint(nu - 1, z) Differentiation with respect to nu has no classical expression: >>> expint(nu, z).diff(nu) -z**(nu - 1)*meijerg(((), (1, 1)), ((0, 0, -nu + 1), ()), z) At non-postive integer orders, the exponential integral reduces to the exponential function: >>> expint(0, z) exp(-z)/z >>> expint(-1, z) exp(-z)/z + exp(-z)/z**2 At half-integers it reduces to error functions: >>> expint(S(1)/2, z) -sqrt(pi)*erf(sqrt(z))/sqrt(z) + sqrt(pi)/sqrt(z) At positive integer orders it can be rewritten in terms of exponentials and expint(1, z). 
Use expand_func() to do this: >>> from sympy import expand_func >>> expand_func(expint(5, z)) z**4*expint(1, z)/24 + (-z**3 + z**2 - 2*z + 6)*exp(-z)/24 The generalised exponential integral is essentially equivalent to the incomplete gamma function: >>> from sympy import uppergamma >>> expint(nu, z).rewrite(uppergamma) z**(nu - 1)*uppergamma(-nu + 1, z) As such it is branched at the origin: >>> from sympy import exp_polar, pi, I >>> expint(4, z*exp_polar(2*pi*I)) I*pi*z**3/3 + expint(4, z) >>> expint(nu, z*exp_polar(2*pi*I)) z**(nu - 1)*(exp(2*I*pi*nu) - 1)*gamma(-nu + 1) + expint(nu, z) """ nargs = 2 @classmethod def eval(cls, nu, z): from sympy import (unpolarify, expand_mul, uppergamma, exp, gamma, factorial) nu2 = unpolarify(nu) if nu != nu2: return expint(nu2, z) if nu.is_Integer and nu <= 0 or (not nu.is_Integer and (2*nu).is_Integer): return unpolarify(expand_mul(z**(nu - 1)*uppergamma(1 - nu, z))) # Extract branching information. This can be deduced from what is # explained in lowergamma.eval(). z, n = z.extract_branch_factor() if n == 0: return if nu.is_integer: if (nu > 0) is not True: return return expint(nu, z) \ - 2*pi*I*n*(-1)**(nu - 1)/factorial(nu - 1)*unpolarify(z)**(nu - 1) else: return (exp(2*I*pi*nu*n) - 1)*z**(nu - 1)*gamma(1 - nu) + expint(nu, z) def fdiff(self, argindex): from sympy import meijerg nu, z = self.args if argindex == 1: return -z**(nu - 1)*meijerg([], [1, 1], [0, 0, 1 - nu], [], z) elif argindex == 2: return -expint(nu - 1, z) else: raise ArgumentIndexError(self, argindex) def _eval_rewrite_as_uppergamma(self, nu, z): from sympy import uppergamma return z**(nu - 1)*uppergamma(1 - nu, z) def _eval_rewrite_as_Ei(self, nu, z): from sympy import exp_polar, unpolarify, exp, factorial if nu == 1: return -Ei(z*exp_polar(-I*pi)) - I*pi elif nu.is_Integer and nu > 1: # DLMF, 8.19.7 x = -unpolarify(z) return x**(nu - 1)/factorial(nu - 1)*E1(z).rewrite(Ei) + \ exp(x)/factorial(nu - 1) * \ Add(*[factorial(nu - k - 2)*x**k for k in range(nu - 1)]) else: return self def _eval_expand_func(self, **hints): return self.rewrite(Ei).rewrite(expint, **hints) def _eval_rewrite_as_Si(self, nu, z): if nu != 1: return self return Shi(z) - Chi(z) _eval_rewrite_as_Ci = _eval_rewrite_as_Si _eval_rewrite_as_Chi = _eval_rewrite_as_Si _eval_rewrite_as_Shi = _eval_rewrite_as_Si def E1(z): """ Classical case of the generalized exponential integral. This is equivalent to ``expint(1, z)``. """ return expint(1, z) ############################################################################### #################### TRIGONOMETRIC INTEGRALS ################################## ############################################################################### class TrigonometricIntegral(Function): """ Base class for trigonometric integrals. 
""" nargs = 1 @classmethod def eval(cls, z): if z == 0: return cls._atzero elif z is S.Infinity: return cls._atinf elif z is S.NegativeInfinity: return cls._atneginf nz = z.extract_multiplicatively(polar_lift(I)) if nz is None and cls._trigfunc(0) == 0: nz = z.extract_multiplicatively(I) if nz is not None: return cls._Ifactor(nz, 1) nz = z.extract_multiplicatively(polar_lift(-I)) if nz is not None: return cls._Ifactor(nz, -1) nz = z.extract_multiplicatively(polar_lift(-1)) if nz is None and cls._trigfunc(0) == 0: nz = z.extract_multiplicatively(-1) if nz is not None: return cls._minusfactor(nz) nz, n = z.extract_branch_factor() if n == 0 and nz == z: return return 2*pi*I*n*cls._trigfunc(0) + cls(nz) def fdiff(self, argindex=1): from sympy import unpolarify arg = unpolarify(self.args[0]) if argindex == 1: return self._trigfunc(arg)/arg def _eval_rewrite_as_Ei(self, z): return self._eval_rewrite_as_expint(z).rewrite(Ei) def _eval_rewrite_as_uppergamma(self, z): from sympy import uppergamma return self._eval_rewrite_as_expint(z).rewrite(uppergamma) def _eval_nseries(self, x, n, logx): # NOTE this is fairly inefficient from sympy import log, EulerGamma, Pow n += 1 if self.args[0].subs(x, 0) != 0: return super(TrigonometricIntegral, self)._eval_nseries(x, n, logx) baseseries = self._trigfunc(x)._eval_nseries(x, n, logx) if self._trigfunc(0) != 0: baseseries -= 1 baseseries = baseseries.replace(Pow, lambda t, n: t**n/n) if self._trigfunc(0) != 0: baseseries += EulerGamma + log(x) return baseseries.subs(x, self.args[0])._eval_nseries(x, n, logx) class Si(TrigonometricIntegral): r""" Sine integral. This function is defined by .. math:: \operatorname{Si}(z) = \int_0^z \frac{\sin{t}}{t} \mathrm{d}t. It is an entire function. See Also ======== Ci: Cosine integral. Shi: Sinh integral. Chi: Cosh integral. expint: The generalised exponential integral. References ========== - http://en.wikipedia.org/wiki/Trigonometric_integral Examples ======== >>> from sympy import Si >>> from sympy.abc import z The sine integral is an antiderivative of sin(z)/z: >>> Si(z).diff(z) sin(z)/z It is unbranched: >>> from sympy import exp_polar, I, pi >>> Si(z*exp_polar(2*I*pi)) Si(z) Sine integral behaves much like ordinary sine under multiplication by I: >>> Si(I*z) I*Shi(z) >>> Si(-z) -Si(z) It can also be expressed in terms of exponential integrals, but beware that the latter is branched: >>> from sympy import expint >>> Si(z).rewrite(expint) -I*(-expint(1, z*exp_polar(-I*pi/2))/2 + expint(1, z*exp_polar(I*pi/2))/2) + pi/2 """ _trigfunc = C.sin _atzero = S(0) _atinf = pi*S.Half _atneginf = -pi*S.Half @classmethod def _minusfactor(cls, z): return -Si(z) @classmethod def _Ifactor(cls, z, sign): return I*Shi(z)*sign def _eval_rewrite_as_expint(self, z): # XXX should we polarify z? return pi/2 + (E1(polar_lift(I)*z) - E1(polar_lift(-I)*z))/2/I class Ci(TrigonometricIntegral): r""" Cosine integral. This function is defined for positive :math:`x` by .. math:: \operatorname{Ci}(x) = \gamma + \log{x} + \int_0^x \frac{\cos{t} - 1}{t} \mathrm{d}t = -\int_x^\infty \frac{\cos{t}}{t} \mathrm{d}t, where :math:`\gamma` is the Euler-Mascheroni constant. We have .. math:: \operatorname{Ci}(z) = -\frac{\operatorname{E}_1\left(e^{i\pi/2} z\right) + \operatorname{E}_1\left(e^{-i \pi/2} z\right)}{2} which holds for all polar :math:`z` and thus provides an analytic continuation to the Riemann surface of the logarithm. The formula also holds as stated for :math:`z \in \mathbb{C}` with :math:`Re(z) > 0`. 
By lifting to the principal branch we obtain an analytic function on the cut complex plane. See Also ======== Si: Sine integral. Shi: Sinh integral. Chi: Cosh integral. expint: The generalised exponential integral. References ========== - http://en.wikipedia.org/wiki/Trigonometric_integral Examples ======== >>> from sympy import Ci >>> from sympy.abc import z The cosine integral is a primitive of cos(z)/z: >>> Ci(z).diff(z) cos(z)/z It has a logarithmic branch point at the origin: >>> from sympy import exp_polar, I, pi >>> Ci(z*exp_polar(2*I*pi)) Ci(z) + 2*I*pi Cosine integral behaves somewhat like ordinary cos under multiplication by I: >>> from sympy import polar_lift >>> Ci(polar_lift(I)*z) Chi(z) + I*pi/2 >>> Ci(polar_lift(-1)*z) Ci(z) + I*pi It can also be expressed in terms of exponential integrals: >>> from sympy import expint >>> Ci(z).rewrite(expint) -expint(1, z*exp_polar(-I*pi/2))/2 - expint(1, z*exp_polar(I*pi/2))/2 """ _trigfunc = C.cos _atzero = S.ComplexInfinity _atinf = S.Zero _atneginf = I*pi @classmethod def _minusfactor(cls, z): return Ci(z) + I*pi @classmethod def _Ifactor(cls, z, sign): return Chi(z) + I*pi/2*sign def _eval_rewrite_as_expint(self, z): return -(E1(polar_lift(I)*z) + E1(polar_lift(-I)*z))/2 class Shi(TrigonometricIntegral): r""" Sinh integral. This function is defined by .. math:: \operatorname{Shi}(z) = \int_0^z \frac{\sinh{t}}{t} \mathrm{d}t. It is an entire function. See Also ======== Si: Sine integral. Ci: Cosine integral. Chi: Cosh integral. expint: The generalised exponential integral. References ========== - http://en.wikipedia.org/wiki/Trigonometric_integral Examples ======== >>> from sympy import Shi >>> from sympy.abc import z The Sinh integral is a primitive of sinh(z)/z: >>> Shi(z).diff(z) sinh(z)/z It is unbranched: >>> from sympy import exp_polar, I, pi >>> Shi(z*exp_polar(2*I*pi)) Shi(z) Sinh integral behaves much like ordinary sinh under multiplication by I: >>> Shi(I*z) I*Si(z) >>> Shi(-z) -Shi(z) It can also be expressed in terms of exponential integrals, but beware that the latter is branched: >>> from sympy import expint >>> Shi(z).rewrite(expint) expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2 """ _trigfunc = C.sinh _atzero = S(0) _atinf = S.Infinity _atneginf = S.NegativeInfinity @classmethod def _minusfactor(cls, z): return -Shi(z) @classmethod def _Ifactor(cls, z, sign): return I*Si(z)*sign def _eval_rewrite_as_expint(self, z): from sympy import exp_polar # XXX should we polarify z? return (E1(z) - E1(exp_polar(I*pi)*z))/2 - I*pi/2 class Chi(TrigonometricIntegral): r""" Cosh integral. This function is defined for positive :math:`x` by .. math:: \operatorname{Chi}(x) = \gamma + \log{x} + \int_0^x \frac{\cosh{t} - 1}{t} \mathrm{d}t, where :math:`\gamma` is the Euler-Mascheroni constant. We have .. math:: \operatorname{Chi}(z) = \operatorname{Ci}\left(e^{i \pi/2}z\right) - i\frac{\pi}{2}, which holds for all polar :math:`z` and thus provides an analytic continuation to the Riemann surface of the logarithm. By lifting to the principal branch we obtain an analytic function on the cut complex plane. See Also ======== Si: Sine integral. Ci: Cosine integral. Shi: Sinh integral. expint: The generalised exponential integral. 
References ========== - http://en.wikipedia.org/wiki/Trigonometric_integral Examples ======== >>> from sympy import Chi >>> from sympy.abc import z The cosh integral is a primitive of cosh(z)/z: >>> Chi(z).diff(z) cosh(z)/z It has a logarithmic branch point at the origin: >>> from sympy import exp_polar, I, pi >>> Chi(z*exp_polar(2*I*pi)) Chi(z) + 2*I*pi Cosh integral behaves somewhat like ordinary cosh under multiplication by I: >>> from sympy import polar_lift >>> Chi(polar_lift(I)*z) Ci(z) + I*pi/2 >>> Chi(polar_lift(-1)*z) Chi(z) + I*pi It can also be expressed in terms of exponential integrals: >>> from sympy import expint >>> Chi(z).rewrite(expint) -expint(1, z)/2 - expint(1, z*exp_polar(I*pi))/2 - I*pi/2 """ _trigfunc = C.cosh _atzero = S.ComplexInfinity _atinf = S.Infinity _atneginf = S.Infinity @classmethod def _minusfactor(cls, z): return Chi(z) + I*pi @classmethod def _Ifactor(cls, z, sign): return Ci(z) + I*pi/2*sign def _eval_rewrite_as_expint(self, z): from sympy import exp_polar return -I*pi/2 - (E1(z) + E1(exp_polar(I*pi)*z))/2 ############################################################################### #################### FRESNEL INTEGRALS ######################################## ############################################################################### class FresnelIntegral(Function): """ Base class for the Fresnel integrals.""" nargs = 1 unbranched = True @classmethod def eval(cls, z): # Value at zero if z is S.Zero: return S(0) # Try to pull out factors of -1 and I prefact = S.One newarg = z changed = False nz = newarg.extract_multiplicatively(-1) if nz is not None: prefact = -prefact newarg = nz changed = True nz = newarg.extract_multiplicatively(I) if nz is not None: prefact = cls._sign*I*prefact newarg = nz changed = True if changed: return prefact*cls(newarg) # Values at positive infinities signs # if any were extracted automatically if z is S.Infinity: return S.Half elif z is I*S.Infinity: return cls._sign*I*S.Half def fdiff(self, argindex=1): if argindex == 1: return self._trigfunc(S.Half*pi*self.args[0]**2) else: raise ArgumentIndexError(self, argindex) def _eval_is_real(self): return self.args[0].is_real def _eval_conjugate(self): return self.func(self.args[0].conjugate()) def _as_real_imag(self, deep=True, **hints): if self.args[0].is_real: if deep: hints['complex'] = False return (self.expand(deep, **hints), S.Zero) else: return (self, S.Zero) if deep: re, im = self.args[0].expand(deep, **hints).as_real_imag() else: re, im = self.args[0].as_real_imag() return (re, im) def as_real_imag(self, deep=True, **hints): # Fresnel S # http://functions.wolfram.com/06.32.19.0003.01 # http://functions.wolfram.com/06.32.19.0006.01 # Fresnel C # http://functions.wolfram.com/06.33.19.0003.01 # http://functions.wolfram.com/06.33.19.0006.01 x, y = self._as_real_imag(deep=deep, **hints) sq = -y**2/x**2 re = S.Half*(self.func(x + x*sqrt(sq)) + self.func(x - x*sqrt(sq))) im = x/(2*y) * sqrt(sq) * (self.func(x - x*sqrt(sq)) - self.func(x + x*sqrt(sq))) return (re, im) class fresnels(FresnelIntegral): r""" Fresnel integral S. This function is defined by .. math:: \operatorname{S}(z) = \int_0^z \sin{\frac{\pi}{2} t^2} \mathrm{d}t. It is an entire function. 
Examples ======== >>> from sympy import I, oo, fresnels >>> from sympy.abc import z Several special values are known: >>> fresnels(0) 0 >>> fresnels(oo) 1/2 >>> fresnels(-oo) -1/2 >>> fresnels(I*oo) -I/2 >>> fresnels(-I*oo) I/2 In general one can pull out factors of -1 and I from the argument: >>> fresnels(-z) -fresnels(z) >>> fresnels(I*z) -I*fresnels(z) The Fresnel S integral obeys the mirror symmetry: >>> from sympy import conjugate >>> conjugate(fresnels(z)) fresnels(conjugate(z)) Differentiation with respect to z is supported: >>> from sympy import diff >>> diff(fresnels(z), z) sin(pi*z**2/2) Defining the Fresnel functions via an integral >>> from sympy import integrate, pi, sin, gamma, expand_func >>> integrate(sin(pi*z**2/2), z) 3*fresnels(z)*gamma(3/4)/(4*gamma(7/4)) >>> expand_func(integrate(sin(pi*z**2/2), z)) fresnels(z) We can numerically evaluate the Fresnel integral to arbitrary precision on the whole complex plane: >>> fresnels(2).evalf(30) 0.343415678363698242195300815958 >>> fresnels(-2*I).evalf(30) 0.343415678363698242195300815958*I See Also ======== fresnelc References ========== .. [1] http://en.wikipedia.org/wiki/Fresnel_integral .. [2] http://dlmf.nist.gov/7 .. [3] http://mathworld.wolfram.com/FresnelIntegrals.html .. [4] http://functions.wolfram.com/GammaBetaErf/FresnelS """ _trigfunc = C.sin _sign = -S.One @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0: return S.Zero else: x = sympify(x) if len(previous_terms) > 1: p = previous_terms[-1] return (-pi**2*x**4*(4*n - 1)/(8*n*(2*n + 1)*(4*n + 3))) * p else: return x**3 * (-x**4)**n * (S(2)**(-2*n - 1)*pi**(2*n + 1)) / ((4*n + 3)*C.factorial(2*n + 1)) def _eval_rewrite_as_erf(self, z): return (S.One + I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) - I*erf((S.One - I)/2*sqrt(pi)*z)) def _eval_rewrite_as_hyper(self, z): return pi*z**3/6 * hyper([S(3)/4], [S(3)/2, S(7)/4], -pi**2*z**4/16) def _eval_rewrite_as_meijerg(self, z): return (pi*z**(S(9)/4) / (sqrt(2)*(z**2)**(S(3)/4)*(-z)**(S(3)/4)) * meijerg([], [1], [S(3)/4], [S(1)/4, 0], -pi**2*z**4/16)) class fresnelc(FresnelIntegral): r""" Fresnel integral C. This function is defined by .. math:: \operatorname{C}(z) = \int_0^z \cos{\frac{\pi}{2} t^2} \mathrm{d}t. It is an entire function. Examples ======== >>> from sympy import I, oo, fresnelc >>> from sympy.abc import z Several special values are known: >>> fresnelc(0) 0 >>> fresnelc(oo) 1/2 >>> fresnelc(-oo) -1/2 >>> fresnelc(I*oo) I/2 >>> fresnelc(-I*oo) -I/2 In general one can pull out factors of -1 and I from the argument: >>> fresnelc(-z) -fresnelc(z) >>> fresnelc(I*z) I*fresnelc(z) The Fresnel C integral obeys the mirror symmetry: >>> from sympy import conjugate >>> conjugate(fresnelc(z)) fresnelc(conjugate(z)) Differentiation with respect to z is supported: >>> from sympy import diff >>> diff(fresnelc(z), z) cos(pi*z**2/2) Defining the Fresnel functions via an integral >>> from sympy import integrate, pi, cos, gamma, expand_func >>> integrate(cos(pi*z**2/2), z) fresnelc(z)*gamma(1/4)/(4*gamma(5/4)) >>> expand_func(integrate(cos(pi*z**2/2), z)) fresnelc(z) We can numerically evaluate the Fresnel integral to arbitrary precision on the whole complex plane: >>> fresnelc(2).evalf(30) 0.488253406075340754500223503357 >>> fresnelc(-2*I).evalf(30) -0.488253406075340754500223503357*I See Also ======== fresnels References ========== .. [1] http://en.wikipedia.org/wiki/Fresnel_integral .. [2] http://dlmf.nist.gov/7 .. [3] http://mathworld.wolfram.com/FresnelIntegrals.html .. 
[4] http://functions.wolfram.com/GammaBetaErf/FresnelC """ _trigfunc = C.cos _sign = S.One @staticmethod @cacheit def taylor_term(n, x, *previous_terms): if n < 0: return S.Zero else: x = sympify(x) if len(previous_terms) > 1: p = previous_terms[-1] return (-pi**2*x**4*(4*n - 3)/(8*n*(2*n - 1)*(4*n + 1))) * p else: return x * (-x**4)**n * (S(2)**(-2*n)*pi**(2*n)) / ((4*n + 1)*C.factorial(2*n)) def _eval_rewrite_as_erf(self, z): return (S.One - I)/4 * (erf((S.One + I)/2*sqrt(pi)*z) + I*erf((S.One - I)/2*sqrt(pi)*z)) def _eval_rewrite_as_hyper(self, z): return z * hyper([S.One/4], [S.One/2, S(5)/4], -pi**2*z**4/16) def _eval_rewrite_as_meijerg(self, z): return (pi*z**(S(3)/4) / (sqrt(2)*root(z**2, 4)*root(-z, 4)) * meijerg([], [1], [S(1)/4], [S(3)/4, 0], -pi**2*z**4/16)) ############################################################################### #################### HELPER FUNCTIONS ######################################### ############################################################################### class _erfs(Function): """ Helper function to make the :math:`erf(z)` function tractable for the Gruntz algorithm. """ nargs = 1 def _eval_aseries(self, n, args0, x, logx): if args0[0] != S.Infinity: return super(_erfs, self)._eval_aseries(n, args0, x, logx) z = self.args[0] l = [ 1/sqrt(S.Pi) * C.factorial(2*k)*(-S( 4))**(-k)/C.factorial(k) * (1/z)**(2*k + 1) for k in xrange(0, n) ] o = C.Order(1/z**(2*n + 1), x) # It is very inefficient to first add the order and then do the nseries return (Add(*l))._eval_nseries(x, n, logx) + o def fdiff(self, argindex=1): if argindex == 1: z = self.args[0] return -2/sqrt(S.Pi) + 2*z*_erfs(z) else: raise ArgumentIndexError(self, argindex) def _eval_rewrite_as_intractable(self, z): return (S.One - erf(z))*C.exp(z**2)
bsd-3-clause
4,165,216,953,519,735,300
26.644068
110
0.549203
false
3.245771
false
false
false
pshchelo/heat
heat/tests/openstack/test_volume.py
1
42764
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import json from cinderclient import exceptions as cinder_exp import six from heat.common import exception from heat.common import template_format from heat.engine.clients.os import cinder from heat.engine.clients.os import glance from heat.engine import rsrc_defn from heat.engine import scheduler from heat.objects import resource_data as resource_data_object from heat.tests.nova import fakes as fakes_nova from heat.tests import test_volume_utils as vt_base from heat.tests import utils cinder_volume_template = ''' heat_template_version: 2013-05-23 description: Cinder volumes and attachments. resources: volume: type: OS::Cinder::Volume properties: availability_zone: nova size: 1 name: test_name description: test_description metadata: key: value volume2: type: OS::Cinder::Volume properties: availability_zone: nova size: 2 volume3: type: OS::Cinder::Volume properties: availability_zone: nova size: 1 name: test_name scheduler_hints: {"hint1": "good_advice"} attachment: type: OS::Cinder::VolumeAttachment properties: instance_uuid: WikiDatabase volume_id: { get_resource: volume } mountpoint: /dev/vdc ''' single_cinder_volume_template = ''' heat_template_version: 2013-05-23 description: Cinder volume resources: volume: type: OS::Cinder::Volume properties: size: 1 name: test_name description: test_description ''' class CinderVolumeTest(vt_base.BaseVolumeTest): def setUp(self): super(CinderVolumeTest, self).setUp() self.t = template_format.parse(cinder_volume_template) self.use_cinder = True def _mock_create_volume(self, fv, stack_name, size=1, final_status='available'): cinder.CinderClientPlugin._create().MultipleTimes().AndReturn( self.cinder_fc) self.cinder_fc.volumes.create( size=size, availability_zone='nova', description='test_description', name='test_name', metadata={'key': 'value'}).AndReturn(fv) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) fv_ready = vt_base.FakeVolume(final_status, id=fv.id) self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready) return fv_ready def test_cinder_volume_size_constraint(self): self.t['resources']['volume']['properties']['size'] = 0 stack = utils.parse_stack(self.t) error = self.assertRaises(exception.StackValidationFailed, self.create_volume, self.t, stack, 'volume') self.assertEqual( "Property error : resources.volume.properties.size: " "0 is out of range (min: 1, max: None)", six.text_type(error)) def test_cinder_create(self): fv = vt_base.FakeVolume('creating') stack_name = 'test_cvolume_stack' self.stub_SnapshotConstraint_validate() self.stub_VolumeConstraint_validate() self.stub_VolumeTypeConstraint_validate() cinder.CinderClientPlugin._create().AndReturn( self.cinder_fc) self.cinder_fc.volumes.create( size=1, availability_zone='nova', description='test_description', name='test_name', metadata={'key': 'value'}, volume_type='lvm').AndReturn(fv) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) fv_ready = vt_base.FakeVolume('available', id=fv.id) 
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready) self.m.ReplayAll() self.t['resources']['volume']['properties'].update({ 'volume_type': 'lvm', }) stack = utils.parse_stack(self.t, stack_name=stack_name) self.create_volume(self.t, stack, 'volume') self.m.VerifyAll() def test_cinder_create_from_image(self): fv = vt_base.FakeVolume('downloading') stack_name = 'test_cvolume_create_from_img_stack' image_id = '46988116-6703-4623-9dbc-2bc6d284021b' cinder.CinderClientPlugin._create().AndReturn( self.cinder_fc) self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id') glance.GlanceClientPlugin.get_image_id( image_id).MultipleTimes().AndReturn(image_id) self.cinder_fc.volumes.create( size=1, availability_zone='nova', description='ImageVolumeDescription', name='ImageVolume', imageRef=image_id).AndReturn(fv) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) fv_ready = vt_base.FakeVolume('available', id=fv.id) self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready) self.m.ReplayAll() self.t['resources']['volume']['properties'] = { 'size': '1', 'name': 'ImageVolume', 'description': 'ImageVolumeDescription', 'availability_zone': 'nova', 'image': image_id, } stack = utils.parse_stack(self.t, stack_name=stack_name) self.create_volume(self.t, stack, 'volume') self.m.VerifyAll() def test_cinder_create_with_read_only(self): fv = vt_base.FakeVolume('with_read_only_access_mode') stack_name = 'test_create_with_read_only' cinder.CinderClientPlugin._create().AndReturn( self.cinder_fc) self.cinder_fc.volumes.create( size=1, availability_zone='nova', description='ImageVolumeDescription', name='ImageVolume').AndReturn(fv) update_readonly_mock = self.patchobject(self.cinder_fc.volumes, 'update_readonly_flag') update_readonly_mock(fv.id, False).return_value(None) fv_ready = vt_base.FakeVolume('available', id=fv.id) self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready) self.m.ReplayAll() self.t['resources']['volume']['properties'] = { 'size': '1', 'name': 'ImageVolume', 'description': 'ImageVolumeDescription', 'availability_zone': 'nova', 'read_only': False, } stack = utils.parse_stack(self.t, stack_name=stack_name) self.create_volume(self.t, stack, 'volume') self.m.VerifyAll() def test_cinder_default(self): fv = vt_base.FakeVolume('creating') stack_name = 'test_cvolume_default_stack' cinder.CinderClientPlugin._create().AndReturn( self.cinder_fc) vol_name = utils.PhysName(stack_name, 'volume') self.cinder_fc.volumes.create( size=1, availability_zone='nova', description=None, name=vol_name).AndReturn(fv) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) fv_ready = vt_base.FakeVolume('available', id=fv.id) self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready) self.m.ReplayAll() self.t['resources']['volume']['properties'] = { 'size': '1', 'availability_zone': 'nova', } stack = utils.parse_stack(self.t, stack_name=stack_name) self.create_volume(self.t, stack, 'volume') self.m.VerifyAll() def test_cinder_fn_getatt(self): stack_name = 'test_cvolume_fngetatt_stack' self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name) fv = vt_base.FakeVolume( 'available', availability_zone='zone1', size=1, snapshot_id='snap-123', name='name', description='desc', volume_type='lvm', metadata={'key': 'value'}, source_volid=None, bootable=False, created_at='2013-02-25T02:40:21.000000', encrypted=False, attachments=[]) self.cinder_fc.volumes.get('vol-123').MultipleTimes().AndReturn(fv) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume') 
self.assertEqual(u'zone1', rsrc.FnGetAtt('availability_zone')) self.assertEqual(u'1', rsrc.FnGetAtt('size')) self.assertEqual(u'snap-123', rsrc.FnGetAtt('snapshot_id')) self.assertEqual(u'name', rsrc.FnGetAtt('display_name')) self.assertEqual(u'desc', rsrc.FnGetAtt('display_description')) self.assertEqual(u'lvm', rsrc.FnGetAtt('volume_type')) self.assertEqual(json.dumps({'key': 'value'}), rsrc.FnGetAtt('metadata')) self.assertEqual({'key': 'value'}, rsrc.FnGetAtt('metadata_values')) self.assertEqual(u'None', rsrc.FnGetAtt('source_volid')) self.assertEqual(u'available', rsrc.FnGetAtt('status')) self.assertEqual(u'2013-02-25T02:40:21.000000', rsrc.FnGetAtt('created_at')) self.assertEqual(u'False', rsrc.FnGetAtt('bootable')) self.assertEqual(u'False', rsrc.FnGetAtt('encrypted')) self.assertEqual(u'[]', rsrc.FnGetAtt('attachments')) error = self.assertRaises(exception.InvalidTemplateAttribute, rsrc.FnGetAtt, 'unknown') self.assertEqual( 'The Referenced Attribute (volume unknown) is incorrect.', six.text_type(error)) self.m.VerifyAll() def test_cinder_attachment(self): stack_name = 'test_cvolume_attach_stack' self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name) self._mock_create_server_volume_script(vt_base.FakeVolume('attaching')) self.stub_VolumeConstraint_validate() # delete script fva = vt_base.FakeVolume('in-use') self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fva) self.cinder_fc.volumes.get(fva.id).AndReturn(fva) self.fc.volumes.delete_server_volume( 'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None) self.cinder_fc.volumes.get(fva.id).AndReturn( vt_base.FakeVolume('available')) self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fva) self.fc.volumes.get_server_volume( u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception()) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) self.create_volume(self.t, stack, 'volume') rsrc = self.create_attachment(self.t, stack, 'attachment') scheduler.TaskRunner(rsrc.delete)() self.m.VerifyAll() def test_cinder_volume_shrink_fails(self): stack_name = 'test_cvolume_shrink_fail_stack' # create script self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name, size=2) # update script fv = vt_base.FakeVolume('available', size=2) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) self.m.ReplayAll() self.t['resources']['volume']['properties']['size'] = 2 stack = utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume') props = copy.deepcopy(rsrc.properties.data) props['size'] = 1 after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) update_task = scheduler.TaskRunner(rsrc.update, after) ex = self.assertRaises(exception.ResourceFailure, update_task) self.assertEqual('NotSupported: Shrinking volume is not supported.', six.text_type(ex)) self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state) self.m.VerifyAll() def test_cinder_volume_extend_detached(self): stack_name = 'test_cvolume_extend_det_stack' # create script self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name) # update script fv = vt_base.FakeVolume('available', size=1, attachments=[]) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) self.cinder_fc.volumes.extend(fv.id, 2) self.cinder_fc.volumes.get(fv.id).AndReturn( vt_base.FakeVolume('extending')) self.cinder_fc.volumes.get(fv.id).AndReturn( vt_base.FakeVolume('extending')) self.cinder_fc.volumes.get(fv.id).AndReturn( vt_base.FakeVolume('available')) self.m.ReplayAll() stack = 
utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume') props = copy.deepcopy(rsrc.properties.data) props['size'] = 2 after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) update_task = scheduler.TaskRunner(rsrc.update, after) self.assertIsNone(update_task()) self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_cinder_volume_extend_fails_to_start(self): stack_name = 'test_cvolume_extend_fail_start_stack' # create script self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name) # update script fv = vt_base.FakeVolume('available', size=1, attachments=[]) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) self.cinder_fc.volumes.extend(fv.id, 2).AndRaise( cinder_exp.OverLimit(413)) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume') props = copy.deepcopy(rsrc.properties.data) props['size'] = 2 after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) update_task = scheduler.TaskRunner(rsrc.update, after) ex = self.assertRaises(exception.ResourceFailure, update_task) self.assertIn('Over limit', six.text_type(ex)) self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state) self.m.VerifyAll() def test_cinder_volume_extend_fails_to_complete(self): stack_name = 'test_cvolume_extend_fail_compl_stack' # create script self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name) # update script fv = vt_base.FakeVolume('available', size=1, attachments=[]) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) self.cinder_fc.volumes.extend(fv.id, 2) self.cinder_fc.volumes.get(fv.id).AndReturn( vt_base.FakeVolume('extending')) self.cinder_fc.volumes.get(fv.id).AndReturn( vt_base.FakeVolume('extending')) self.cinder_fc.volumes.get(fv.id).AndReturn( vt_base.FakeVolume('error_extending')) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume') props = copy.deepcopy(rsrc.properties.data) props['size'] = 2 after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) update_task = scheduler.TaskRunner(rsrc.update, after) ex = self.assertRaises(exception.ResourceFailure, update_task) self.assertIn("Volume resize failed - Unknown status error_extending", six.text_type(ex)) self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state) self.m.VerifyAll() def test_cinder_volume_extend_attached(self): stack_name = 'test_cvolume_extend_att_stack' # create script self.stub_VolumeConstraint_validate() self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name) self._mock_create_server_volume_script(vt_base.FakeVolume('attaching')) # update script attachments = [{'id': 'vol-123', 'device': '/dev/vdc', 'server_id': u'WikiDatabase'}] fv2 = vt_base.FakeVolume('in-use', attachments=attachments, size=1) self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2) # detach script fvd = vt_base.FakeVolume('in-use') self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fvd) self.cinder_fc.volumes.get(fvd.id).AndReturn(fvd) self.fc.volumes.delete_server_volume('WikiDatabase', 'vol-123') self.cinder_fc.volumes.get(fvd.id).AndReturn( vt_base.FakeVolume('available')) self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fvd) self.fc.volumes.get_server_volume( u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception()) # resize script self.cinder_fc.volumes.extend(fvd.id, 2) self.cinder_fc.volumes.get(fvd.id).AndReturn( 
vt_base.FakeVolume('extending')) self.cinder_fc.volumes.get(fvd.id).AndReturn( vt_base.FakeVolume('extending')) self.cinder_fc.volumes.get(fvd.id).AndReturn( vt_base.FakeVolume('available')) # attach script self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'), update=True) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume') self.create_attachment(self.t, stack, 'attachment') props = copy.deepcopy(rsrc.properties.data) props['size'] = 2 after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) update_task = scheduler.TaskRunner(rsrc.update, after) self.assertIsNone(update_task()) self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_cinder_volume_extend_created_from_backup_with_same_size(self): stack_name = 'test_cvolume_extend_snapsht_stack' # create script fvbr = vt_base.FakeBackupRestore('vol-123') cinder.CinderClientPlugin._create().MultipleTimes().AndReturn( self.cinder_fc) self.m.StubOutWithMock(self.cinder_fc.restores, 'restore') self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr) self.cinder_fc.volumes.get('vol-123').AndReturn( vt_base.FakeVolume('restoring-backup')) vol_name = utils.PhysName(stack_name, 'volume') self.cinder_fc.volumes.update('vol-123', description=None, name=vol_name).AndReturn(None) self.cinder_fc.volumes.get('vol-123').AndReturn( vt_base.FakeVolume('available')) # update script fv = vt_base.FakeVolume('available', size=2) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) self.m.ReplayAll() self.t['resources']['volume']['properties'] = { 'availability_zone': 'nova', 'backup_id': 'backup-123' } stack = utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume') self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state) self.assertEqual('available', fv.status) props = copy.deepcopy(rsrc.properties.data) props['size'] = 2 after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) update_task = scheduler.TaskRunner(rsrc.update, after) self.assertIsNone(update_task()) self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_cinder_volume_retype(self): fv = vt_base.FakeVolume('available', size=1, name='my_vol', description='test') stack_name = 'test_cvolume_retype' new_vol_type = 'new_type' self.patchobject(cinder.CinderClientPlugin, '_create', return_value=self.cinder_fc) self.patchobject(self.cinder_fc.volumes, 'create', return_value=fv) self.patchobject(self.cinder_fc.volumes, 'get', return_value=fv) stack = utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume2') props = copy.deepcopy(rsrc.properties.data) props['volume_type'] = new_vol_type after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) self.patchobject(cinder.CinderClientPlugin, 'get_volume_type', return_value=new_vol_type) self.patchobject(self.cinder_fc.volumes, 'retype') scheduler.TaskRunner(rsrc.update, after)() self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state) self.assertEqual(1, self.cinder_fc.volumes.retype.call_count) self.cinder_fc.volume_api_version = 1 new_vol_type_1 = 'new_type_1' props = copy.deepcopy(rsrc.properties.data) props['volume_type'] = new_vol_type_1 after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) # if the volume api is v1, not support to retype update_task = scheduler.TaskRunner(rsrc.update, after) ex = self.assertRaises(exception.ResourceFailure, update_task) 
self.assertEqual('NotSupported: Using Cinder API V1, ' 'volume_type update is not supported.', six.text_type(ex)) self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state) self.assertEqual(1, self.cinder_fc.volumes.retype.call_count) def test_cinder_volume_update_name_and_metadata(self): # update the name, description and metadata fv = vt_base.FakeVolume('creating', size=1, name='my_vol', description='test') stack_name = 'test_cvolume_updname_stack' update_name = 'update_name' meta = {'Key': 'New Value'} update_description = 'update_description' kwargs = { 'name': update_name, 'description': update_description } fv = self._mock_create_volume(fv, stack_name) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) self.cinder_fc.volumes.update(fv, **kwargs).AndReturn(None) self.cinder_fc.volumes.update_all_metadata(fv, meta).AndReturn(None) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume') props = copy.deepcopy(rsrc.properties.data) props['name'] = update_name props['description'] = update_description props['metadata'] = meta after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) scheduler.TaskRunner(rsrc.update, after)() self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state) def test_cinder_volume_update_read_only(self): # update read only access mode fv = vt_base.FakeVolume('update_read_only_access_mode') stack_name = 'test_update_read_only' cinder.CinderClientPlugin._create().AndReturn( self.cinder_fc) self.cinder_fc.volumes.create( size=1, availability_zone='nova', description='test_description', name='test_name', metadata={u'key': u'value'}).AndReturn(fv) update_readonly_mock = self.patchobject(self.cinder_fc.volumes, 'update_readonly_flag') update_readonly_mock(fv.id, True).return_value(None) fv_ready = vt_base.FakeVolume('available', id=fv.id) self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) rsrc = self.create_volume(self.t, stack, 'volume') props = copy.deepcopy(rsrc.properties.data) props['read_only'] = True after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) scheduler.TaskRunner(rsrc.update, after)() self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state) def test_cinder_snapshot(self): stack_name = 'test_cvolume_snpsht_stack' cinder.CinderClientPlugin._create().MultipleTimes().AndReturn( self.cinder_fc) self.cinder_fc.volumes.create( size=1, availability_zone=None, description='test_description', name='test_name' ).AndReturn(vt_base.FakeVolume('creating')) fv = vt_base.FakeVolume('available') self.cinder_fc.volumes.get(fv.id).AndReturn(fv) fb = vt_base.FakeBackup('creating') self.m.StubOutWithMock(self.cinder_fc.backups, 'create') self.cinder_fc.backups.create(fv.id).AndReturn(fb) self.m.StubOutWithMock(self.cinder_fc.backups, 'get') self.cinder_fc.backups.get(fb.id).AndReturn( vt_base.FakeBackup('available')) self.m.ReplayAll() t = template_format.parse(single_cinder_volume_template) stack = utils.parse_stack(t, stack_name=stack_name) rsrc = stack['volume'] scheduler.TaskRunner(rsrc.create)() scheduler.TaskRunner(rsrc.snapshot)() self.assertEqual((rsrc.SNAPSHOT, rsrc.COMPLETE), rsrc.state) self.assertEqual({'backup_id': 'backup-123'}, resource_data_object.ResourceData.get_all(rsrc)) self.m.VerifyAll() def test_cinder_snapshot_error(self): stack_name = 'test_cvolume_snpsht_err_stack' cinder.CinderClientPlugin._create().MultipleTimes().AndReturn( self.cinder_fc) self.cinder_fc.volumes.create( size=1, 
availability_zone=None, description='test_description', name='test_name' ).AndReturn(vt_base.FakeVolume('creating')) fv = vt_base.FakeVolume('available') self.cinder_fc.volumes.get(fv.id).AndReturn(fv) fb = vt_base.FakeBackup('creating') self.m.StubOutWithMock(self.cinder_fc.backups, 'create') self.cinder_fc.backups.create(fv.id).AndReturn(fb) self.m.StubOutWithMock(self.cinder_fc.backups, 'get') fail_reason = 'Could not determine which Swift endpoint to use' self.cinder_fc.backups.get(fb.id).AndReturn( vt_base.FakeBackup('error', fail_reason=fail_reason)) self.m.ReplayAll() t = template_format.parse(single_cinder_volume_template) stack = utils.parse_stack(t, stack_name=stack_name) rsrc = stack['volume'] scheduler.TaskRunner(rsrc.create)() self.assertRaises(exception.ResourceFailure, scheduler.TaskRunner(rsrc.snapshot)) self.assertEqual((rsrc.SNAPSHOT, rsrc.FAILED), rsrc.state) self.assertIn(fail_reason, rsrc.status_reason) self.assertEqual({u'backup_id': u'backup-123'}, resource_data_object.ResourceData.get_all(rsrc)) self.m.VerifyAll() def test_cinder_volume_attachment_update_device(self): stack_name = 'test_cvolume_attach_udev_stack' self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name) self._mock_create_server_volume_script( vt_base.FakeVolume('attaching')) self.stub_VolumeConstraint_validate() # delete script fva = vt_base.FakeVolume('in-use') self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fva) self.cinder_fc.volumes.get(fva.id).AndReturn(fva) self.fc.volumes.delete_server_volume( 'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None) self.cinder_fc.volumes.get(fva.id).AndReturn( vt_base.FakeVolume('available')) self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fva) self.fc.volumes.get_server_volume( u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception()) # attach script self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'), device=u'/dev/vdd', update=True) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) self.create_volume(self.t, stack, 'volume') rsrc = self.create_attachment(self.t, stack, 'attachment') props = copy.deepcopy(rsrc.properties.data) props['mountpoint'] = '/dev/vdd' props['volume_id'] = 'vol-123' after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) scheduler.TaskRunner(rsrc.update, after)() self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_cinder_volume_attachment_update_volume(self): stack_name = 'test_cvolume_attach_uvol_stack' self.stub_VolumeConstraint_validate() self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name) fv2 = vt_base.FakeVolume('creating', id='vol-456') vol2_name = utils.PhysName(stack_name, 'volume2') self.cinder_fc.volumes.create( size=2, availability_zone='nova', description=None, name=vol2_name).AndReturn(fv2) self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2) fv2 = vt_base.FakeVolume('available', id=fv2.id) self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2) self._mock_create_server_volume_script(vt_base.FakeVolume('attaching')) # delete script fva = vt_base.FakeVolume('in-use') self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fva) self.cinder_fc.volumes.get(fva.id).AndReturn(fva) self.fc.volumes.delete_server_volume( 'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None) self.cinder_fc.volumes.get(fva.id).AndReturn( vt_base.FakeVolume('available')) self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fva) 
self.fc.volumes.get_server_volume( u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception()) # attach script fv2a = vt_base.FakeVolume('attaching', id='vol-456') self._mock_create_server_volume_script(fv2a, volume='vol-456', update=True) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) self.create_volume(self.t, stack, 'volume') self.create_volume(self.t, stack, 'volume2') rsrc = self.create_attachment(self.t, stack, 'attachment') self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state) props = copy.deepcopy(rsrc.properties.data) props['volume_id'] = 'vol-456' after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) scheduler.TaskRunner(rsrc.update, after)() self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state) self.assertEqual(fv2a.id, rsrc.resource_id) self.m.VerifyAll() def test_cinder_volume_attachment_update_server(self): stack_name = 'test_cvolume_attach_usrv_stack' self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name) self._mock_create_server_volume_script( vt_base.FakeVolume('attaching')) self.stub_VolumeConstraint_validate() # delete script fva = vt_base.FakeVolume('in-use') self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fva) self.cinder_fc.volumes.get(fva.id).AndReturn(fva) self.fc.volumes.delete_server_volume( 'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None) self.cinder_fc.volumes.get(fva.id).AndReturn( vt_base.FakeVolume('available')) self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123').AndReturn(fva) self.fc.volumes.get_server_volume( u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception()) # attach script self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'), server=u'AnotherServer', update=True) self.m.ReplayAll() stack = utils.parse_stack(self.t, stack_name=stack_name) self.create_volume(self.t, stack, 'volume') rsrc = self.create_attachment(self.t, stack, 'attachment') self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state) props = copy.deepcopy(rsrc.properties.data) props['instance_uuid'] = 'AnotherServer' props['volume_id'] = 'vol-123' after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props) scheduler.TaskRunner(rsrc.update, after)() self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state) self.m.VerifyAll() def test_cinder_create_with_scheduler_hints(self): fv = vt_base.FakeVolume('creating') cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc) self.cinder_fc.volumes.create( size=1, name='test_name', description=None, availability_zone='nova', scheduler_hints={'hint1': 'good_advice'}).AndReturn(fv) self.cinder_fc.volumes.get(fv.id).AndReturn(fv) fv_ready = vt_base.FakeVolume('available', id=fv.id) self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready) self.m.ReplayAll() stack_name = 'test_cvolume_scheduler_hints_stack' stack = utils.parse_stack(self.t, stack_name=stack_name) self.create_volume(self.t, stack, 'volume3') self.m.VerifyAll() def test_cinder_create_with_scheduler_hints_and_cinder_api_v1(self): cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc) self.cinder_fc.volume_api_version = 1 self.m.ReplayAll() stack_name = 'test_cvolume_scheduler_hints_api_v1_stack' stack = utils.parse_stack(self.t, stack_name=stack_name) ex = self.assertRaises(exception.StackValidationFailed, self.create_volume, self.t, stack, 'volume3') self.assertIn('Scheduler hints are not supported by the current ' 'volume API.', six.text_type(ex)) self.m.VerifyAll() def _test_cinder_create_invalid_property_combinations( self, 
stack_name, combinations, err_msg, exc): stack = utils.parse_stack(self.t, stack_name=stack_name) vp = stack.t['Resources']['volume2']['Properties'] vp.pop('size') vp.update(combinations) rsrc = stack['volume2'] ex = self.assertRaises(exc, rsrc.validate) self.assertEqual(err_msg, six.text_type(ex)) def test_cinder_create_with_image_and_imageRef(self): stack_name = 'test_create_with_image_and_imageRef' combinations = {'imageRef': 'image-456', 'image': 'image-123'} err_msg = ("Cannot define the following properties at the same " "time: image, imageRef.") self.stub_ImageConstraint_validate() self._test_cinder_create_invalid_property_combinations( stack_name, combinations, err_msg, exception.ResourcePropertyConflict) def test_cinder_create_with_size_snapshot_and_image(self): stack_name = 'test_create_with_size_snapshot_and_image' combinations = { 'size': 1, 'image': 'image-123', 'snapshot_id': 'snapshot-123'} self.stub_ImageConstraint_validate() self.stub_SnapshotConstraint_validate() err_msg = ('If "size" is provided, only one of "image", "imageRef", ' '"source_volid", "snapshot_id" can be specified, but ' 'currently specified options: ' '[\'snapshot_id\', \'image\'].') self._test_cinder_create_invalid_property_combinations( stack_name, combinations, err_msg, exception.StackValidationFailed) def test_cinder_create_with_size_snapshot_and_imageRef(self): stack_name = 'test_create_with_size_snapshot_and_imageRef' combinations = { 'size': 1, 'imageRef': 'image-123', 'snapshot_id': 'snapshot-123'} self.stub_ImageConstraint_validate() self.stub_SnapshotConstraint_validate() err_msg = ('If "size" is provided, only one of "image", "imageRef", ' '"source_volid", "snapshot_id" can be specified, but ' 'currently specified options: ' '[\'snapshot_id\', \'imageRef\'].') self._test_cinder_create_invalid_property_combinations( stack_name, combinations, err_msg, exception.StackValidationFailed) def test_cinder_create_with_size_snapshot_and_sourcevol(self): stack_name = 'test_create_with_size_snapshot_and_sourcevol' combinations = { 'size': 1, 'source_volid': 'volume-123', 'snapshot_id': 'snapshot-123'} self.stub_VolumeConstraint_validate() self.stub_SnapshotConstraint_validate() err_msg = ('If "size" is provided, only one of "image", "imageRef", ' '"source_volid", "snapshot_id" can be specified, but ' 'currently specified options: ' '[\'snapshot_id\', \'source_volid\'].') self._test_cinder_create_invalid_property_combinations( stack_name, combinations, err_msg, exception.StackValidationFailed) def test_cinder_create_with_snapshot_and_source_volume(self): stack_name = 'test_create_with_snapshot_and_source_volume' combinations = { 'source_volid': 'source_volume-123', 'snapshot_id': 'snapshot-123'} err_msg = ('If neither "backup_id" nor "size" is provided, one and ' 'only one of "image", "imageRef", "source_volid", ' '"snapshot_id" must be specified, but currently ' 'specified options: [\'snapshot_id\', \'source_volid\'].') self.stub_VolumeConstraint_validate() self.stub_SnapshotConstraint_validate() self._test_cinder_create_invalid_property_combinations( stack_name, combinations, err_msg, exception.StackValidationFailed) def test_cinder_create_with_image_and_source_volume(self): stack_name = 'test_create_with_image_and_source_volume' combinations = { 'source_volid': 'source_volume-123', 'image': 'image-123'} err_msg = ('If neither "backup_id" nor "size" is provided, one and ' 'only one of "image", "imageRef", "source_volid", ' '"snapshot_id" must be specified, but currently ' 'specified options: 
[\'source_volid\', \'image\'].') self.stub_VolumeConstraint_validate() self.stub_ImageConstraint_validate() self._test_cinder_create_invalid_property_combinations( stack_name, combinations, err_msg, exception.StackValidationFailed) def test_cinder_create_no_size_no_combinations(self): stack_name = 'test_create_no_size_no_options' combinations = {} err_msg = ('If neither "backup_id" nor "size" is provided, one and ' 'only one of "image", "imageRef", "source_volid", ' '"snapshot_id" must be specified, but currently ' 'specified options: [].') self._test_cinder_create_invalid_property_combinations( stack_name, combinations, err_msg, exception.StackValidationFailed) def test_volume_restore(self): stack_name = 'test_cvolume_restore_stack' # create script cinder.CinderClientPlugin._create().MultipleTimes().AndReturn( self.cinder_fc) self.cinder_fc.volumes.create( size=1, availability_zone=None, description='test_description', name='test_name' ).AndReturn(vt_base.FakeVolume('creating')) fv = vt_base.FakeVolume('available') self.cinder_fc.volumes.get(fv.id).AndReturn(fv) # snapshot script fb = vt_base.FakeBackup('creating') self.m.StubOutWithMock(self.cinder_fc.backups, 'create') self.cinder_fc.backups.create(fv.id).AndReturn(fb) self.m.StubOutWithMock(self.cinder_fc.backups, 'get') self.cinder_fc.backups.get(fb.id).AndReturn( vt_base.FakeBackup('available')) # restore script fvbr = vt_base.FakeBackupRestore('vol-123') self.m.StubOutWithMock(self.cinder_fc.restores, 'restore') self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr) self.cinder_fc.volumes.get('vol-123').AndReturn(fv) self.cinder_fc.volumes.update('vol-123', description='test_description', name='test_name') self.cinder_fc.volumes.get('vol-123').AndReturn(fv) self.m.ReplayAll() t = template_format.parse(single_cinder_volume_template) stack = utils.parse_stack(t, stack_name=stack_name) scheduler.TaskRunner(stack.create)() self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state) scheduler.TaskRunner(stack.snapshot)() self.assertEqual((stack.SNAPSHOT, stack.COMPLETE), stack.state) data = stack.prepare_abandon() fake_snapshot = collections.namedtuple( 'Snapshot', ('data', 'stack_id'))(data, stack.id) stack.restore(fake_snapshot) self.assertEqual((stack.RESTORE, stack.COMPLETE), stack.state) self.m.VerifyAll()
apache-2.0
-4,979,401,692,813,748,000
39.883365
79
0.607637
false
3.770411
true
false
false
arruda/rmr
rmr/apps/books/migrations/0006_books_to_userbooks.py
1
8533
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import DataMigration from django.db import models class Migration(DataMigration): def forwards(self, orm): "Write your forwards methods here." # Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..." Book = orm['books.Book'] UserBook = orm['books.UserBook'] for book in Book.objects.all(): user = book.user userBook = UserBook(user=user,book=book) userBook.desired = book.desired userBook.purchase_store = book.purchase_store userBook.purchased = book.purchased userBook.purchase_value = book.purchase_value userBook.purchase_date = book.purchase_date userBook.save() def backwards(self, orm): "Write your backwards methods here." models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'authors.author': { 'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Author'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'books.book': { 'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Book'}, 'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'books'", 'to': "orm['authors.Author']"}), 
'desired': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}), 'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['books.Genre']", 'symmetrical': 'False'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'publisher': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'books'", 'to': "orm['publishers.Publisher']"}), 'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'purchase_store': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'books_old'", 'null': 'True', 'to': "orm['stores.Store']"}), 'purchase_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'purchased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True', 'blank': 'True'}), 'synopsis': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'books.genre': { 'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Genre'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'books.userbook': { 'Meta': {'object_name': 'UserBook'}, 'book': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'users'", 'to': "orm['books.Book']"}), 'desired': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'purchase_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'purchase_store': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'books'", 'null': 'True', 'to': "orm['stores.Store']"}), 'purchase_value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}), 'purchased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'books'", 'to': "orm['auth.User']"}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'publishers.publisher': { 'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Publisher'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'stores.store': { 'Meta': {'ordering': "['name']", 'unique_together': "(('user', 'name'),)", 'object_name': 'Store'}, 'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) } } complete_apps = ['books'] symmetrical = True
mit
-4,910,646,489,504,296,000
70.108333
182
0.545881
false
3.759031
false
false
false
az0/entity-metadata
code/etl_openlibrary.py
1
2492
#!/usr/bin/python3
#
# Copyright (C) 2019 by Compassion International. All rights reserved.
# License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>.
# This is free software: you are free to change and redistribute it.
# There is NO WARRANTY, to the extent permitted by law.

"""
This program ETLs the Open Library authors dump file. The input is a
tab-delimited file with JSON in one column. The output is a simpler file,
which is a CSV with basic biographical information plus unique identifiers.

Get the dump from here
https://openlibrary.org/developers/dumps

Do not decompress the dump file.
"""

import csv
import sys
import gzip
import json

csv.field_size_limit(sys.maxsize)

# The value id_wikidata (not nested under remote_ids) is defined
# exactly once out of 6.9M records, and in that case it's redundant
# to the value nested under remote_ids. It seems to be a mistake,
# so we'll ignore it.
retain_keys = ['key', 'id_wikidata', 'entity_type', 'name', 'fuller_name',
               'personal_name', 'alternate_names', 'birth_date', 'death_date']


def process_json(j, writer):
    author = json.loads(j)
    author_retain = {}
    for retain_key in retain_keys:
        if retain_key in author:
            author_retain[retain_key] = author[retain_key]
    if 'remote_ids' in author and 'wikidata' in author['remote_ids']:
        # extract nested value
        author_retain['id_wikidata'] = author['remote_ids']['wikidata']
    if 'alternate_names' in author:
        # reformat multiple items from JSON list to pipe delimited
        author_retain['alternate_names'] = '|'.join(author['alternate_names'])
    writer.writerow(author_retain)


def go():
    if len(sys.argv) != 3:
        print(
            'Usage: %s (path to OpenLibrary authors .txt.gz) (path to output .csv)'
            % sys.argv[0])
        sys.exit(1)
    txt_gz_fn = sys.argv[1]
    csv_out_fn = sys.argv[2]
    with gzip.open(txt_gz_fn, 'rt') as inf:  # inf= IN File
        reader = csv.reader(inf, delimiter='\t')
        with open(csv_out_fn, 'w') as outf:
            writer = csv.DictWriter(outf, fieldnames=retain_keys)
            writer.writeheader()
            print('Processing...')
            count = 0
            for row in reader:
                process_json(row[4], writer)
                count += 1
                if (count % 10000) == 0:
                    # progress indicator
                    print('.', end='', flush=True)
    print('\nDone.')


go()
gpl-3.0
-2,435,090,541,281,703,000
30.15
110
0.630016
false
3.529745
false
false
false
18F/github-issue-lifecycle
app/models.py
1
15856
import itertools import os from collections import OrderedDict from datetime import date, datetime, timedelta import requests from requests.auth import HTTPBasicAuth from . import db from .app import app from .utils import to_py_datetime GH_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' BEGINNING_OF_TIME = '1970-01-01T00:00:00Z' BEGINNING_DATETIME = datetime.strptime(BEGINNING_OF_TIME, GH_DATE_FORMAT) def authorization(): try: auth = HTTPBasicAuth(os.environ['GITHUB_USER'], os.environ['GITHUB_AUTH']) return auth except KeyError: app.logger.warning( 'Environment variables GITHUB_USER and GITHUB_AUTH not set') app.logger.warning('Skipping authentication...') return None class Repo(db.Model): id = db.Column(db.Integer, primary_key=True) owner = db.Column(db.Text, nullable=False) name = db.Column(db.Text, nullable=False) synched_at = db.Column(db.DateTime(), nullable=False, default=BEGINNING_DATETIME) issues = db.relationship('Issue', cascade='all, delete-orphan', order_by='Issue.created_at', backref='repo') ISSUES_PAGE_SIZE = 100 @classmethod def get_fresh(cls, owner_name, repo_name, refresh_threshhold_seconds=None): """For a repo ``repo_name`` owned by ``owner_name``: 1. Fetches or creates the Repo model instance 2. Refreshes the data from Github if necessary""" if refresh_threshhold_seconds is None: refresh_threshhold_seconds = app.config[ 'REFRESH_THRESHHOLD_SECONDS'] (owner_name, repo_name) = (owner_name.lower(), repo_name.lower()) repo = (cls.query.filter_by(owner=owner_name, name=repo_name).first() or cls(owner=owner_name, name=repo_name, synched_at=BEGINNING_DATETIME)) if (datetime.now() - repo.synched_at) > timedelta( seconds=int(refresh_threshhold_seconds)): repo.fetch_issues() db.session.add(repo) db.session.commit() repo.set_milestone_color_map() return repo def url(self): return 'https://api.github.com/repos/{}/{}/'.format(self.owner, self.name) @classmethod def _latest_update(cls, items, field_name='updated_at'): "Returns latest `field_name` in `items`" updates = [datetime.strptime( i.get(field_name, BEGINNING_OF_TIME), GH_DATE_FORMAT) for i in items] return max(updates).strftime(GH_DATE_FORMAT) def raw_issue_data(self): params = { 'since': self.synched_at.strftime(GH_DATE_FORMAT), 'per_page': self.ISSUES_PAGE_SIZE, 'sort': 'updated', 'direction': 'asc', 'state': 'all' # include closed issues } auth = authorization() issues = requests.get(self.url() + 'issues', params=params, auth=auth) if issues.ok: result = {} new_issues = [i for i in issues.json() if i['number'] not in result] while new_issues: result.update({i['number']: i for i in new_issues}) # Github seems to be ignoring `sort` parameter, have to # check all results, alas params['since'] = self._latest_update(new_issues) issues = requests.get(self.url() + 'issues', params=params, auth=authorization()) new_issues = [i for i in issues.json() if i['number'] not in result] return result.values() else: err_msg = 'Could not fetch issues for repo {}/{}: {}'.format( self.owner, self.name, issues.text) if not auth: err_msg += '\nNOTE: Environment variables GITHUB_USER and GITHUB_AUTH not set' raise FileNotFoundError(err_msg) def fetch_issues(self): """Refresh the database's store of issues for this repo from github.""" for issue_data in self.raw_issue_data(): issue = Issue.query.filter_by( number=issue_data.get('number')).first() if issue: db.session.delete(issue) db.session.commit() issue = Issue.from_raw(issue_data) issue.repo = self issue.fetch_events() self.synched_at = datetime.now() db.session.commit() def json_summary(self): result = 
dict(name=self.name, owner=self.owner, issues=[iss.json_summary() for iss in self.issues]) return result def json_summary_flattened(self): spans = list(self.spans()) result = dict(spans=spans, stones=(self.stones()), colors=[self.milestone_colors[s['span']['milestones'][ -1]] for s in spans], ) return result def spans(self): for (idx, iss) in enumerate(self.issues): lifecycle = iss.lifecycle() for span in lifecycle['spans']: yield {'issue': iss, 'index': idx, 'span': span, 'final': lifecycle['final']} def stones(self): for (idx, iss) in enumerate(self.issues): lifecycle = iss.lifecycle() for stone in lifecycle['points']: yield {'issue': iss, 'index': idx, 'stone': stone} def milestones(self): "List of milestones in all issues, in rough order of first appearance" nested = [[e.milestone for e in i.events] for i in self.issues] all_milestones = list(OrderedDict.fromkeys( itertools.chain.from_iterable(nested))) if None in all_milestones: all_milestones.remove(None) return all_milestones _PALLETTE = ('greenyellow', 'cornflowerblue', 'hotpink', 'indigo', 'fuschia', 'green', 'lightskyblue', 'firebrick', 'gray', 'lightcoral', 'darkslategray', 'darkorange', 'darkolivegreen', 'cyan', 'chocolate', 'blueviolet', 'burlywood', 'aquamarine', ) def set_milestone_color_map(self): "Decide a color to correspond to each type of milestone used in the repo" colors = itertools.cycle(self._PALLETTE ) # reuse colors if too many milestones self.milestone_colors = {} for milestone in self.milestones(): self.milestone_colors[milestone] = colors.__next__() self.milestone_colors.update({'opened': 'gold', 'reopened': 'gold', 'closed': 'black'}) labels_issues = db.Table( 'labels_issues', db.Column('label_id', db.Integer, db.ForeignKey('label.id')), db.Column('issue_id', db.Integer, db.ForeignKey('issue.id'))) class Issue(db.Model): id = db.Column(db.Integer, primary_key=True) repo_id = db.Column(db.Integer(), db.ForeignKey(Repo.id)) number = db.Column(db.Integer) title = db.Column(db.String()) body = db.Column(db.String()) state = db.Column(db.String()) creator_login = db.Column(db.String(), db.ForeignKey('person.login'), nullable=False) assignee_login = db.Column(db.String(), db.ForeignKey('person.login'), nullable=True) comments = db.Column(db.String()) locked = db.Column(db.Boolean) url = db.Column(db.String(), nullable=True) events_url = db.Column(db.String(), nullable=True) labels_url = db.Column(db.String(), nullable=True) comments_url = db.Column(db.String(), nullable=True) html_url = db.Column(db.String(), nullable=True) created_at = db.Column(db.DateTime(), default=date.today) updated_at = db.Column(db.DateTime(), default=date.today) closed_at = db.Column(db.DateTime(), nullable=True) labels = db.relationship('Label', secondary=labels_issues, backref=db.backref('issues', lazy='dynamic')) events = db.relationship('Event', cascade='all, delete-orphan', order_by='Event.created_at', backref='issue') @classmethod def from_raw(cls, issue_data): insertable = { 'id': issue_data.get('id'), 'number': issue_data.get('number'), 'title': issue_data.get('title'), 'state': issue_data.get('state'), 'body': issue_data.get('body'), 'locked': issue_data.get('locked'), 'url': issue_data.get('url'), 'labels_url': issue_data.get('labels_url'), 'html_url': issue_data.get('html_url'), 'events_url': issue_data.get('events_url'), 'updated_at': to_py_datetime(issue_data['updated_at']), 'created_at': to_py_datetime(issue_data['created_at']), 'closed_at': to_py_datetime(issue_data['closed_at']), } creator = 
Person.from_raw(issue_data['user']) insertable['creator_login'] = creator.login if issue_data.get('assignee'): assignee = Person.from_raw(issue_data['assignee']) insertable['assignee_login'] = assignee.login issue = cls(**insertable) for label_data in issue_data['labels']: issue.labels.append(Label.get_or_create(label_data)) db.session.add(issue) return issue def fetch_events(self): response = requests.get('{}?per_page=100'.format(self.events_url), auth=authorization()) if self.number in (4, 17): from pprint import pprint with open('events{}.json'.format(self.number), 'w') as outfile: pprint(response.json(), outfile) # todo: if > 100 events? if response.ok: for raw_event in response.json(): self.events.append(Event.from_raw(raw_event)) def json_summary(self): lifecycle = self.lifecycle() return { 'number': self.number, 'title': self.title, 'html_url': self.html_url, 'created_at': self.created_at, 'updated_at': self.updated_at, 'closed_at': self.closed_at, 'spans': lifecycle['spans'], 'points': lifecycle['points'], } def lifecycle(self): """Description of the events of this issue's lifecycle. Returns dict with: final: Last milestone marked points: (name, date) of milestones and open/close events spans: ([statuses], start date, end date) describing each time period in the issue's lifecycle. [statuses] is the list of milestones in effect. The last in the list will generally be the one of interest. """ statuses = ['opened', ] result = {'spans': [], 'final': 'opened', 'points': []} start_date = self.created_at for event in self.events: if event.event in ('milestoned', 'demilestoned', 'closed', 'reopened'): if event.milestone and event.milestone in statuses: continue result['spans'].append({'milestones': statuses[:], 'start': start_date, 'end': event.created_at}) if event.event == 'demilestoned': try: statuses.remove(event.milestone) except ValueError: pass # sometimes they demilestone a nonexistent milestone! 
elif event.event == 'milestoned': statuses.append(event.milestone) elif event.event in ('closed', 'reopened'): statuses.append(event.event) result['points'].append({'status': statuses[-1], 'at': event.created_at}) start_date = event.created_at if self.closed_at: if statuses[-1] != 'closed': if self.closed_at > start_date: result['spans'].append({'milestones': statuses[:], 'start': start_date, 'end': self.closed_at}) result['points'].append({'status': 'closed', 'at': self.closed_at}) else: result['spans'].append({'milestones': statuses[:], 'start': start_date, 'end': datetime.now()}) result['final'] = [s for s in statuses if s not in ('closed', 'reopened')][-1] return result class Person(db.Model): login = db.Column(db.String(), primary_key=True) url = db.Column(db.String(), nullable=True) created = db.relationship('Issue', foreign_keys=[Issue.creator_login, ], backref='author') assigned = db.relationship('Issue', foreign_keys=[Issue.assignee_login, ], backref='assignee') @classmethod def from_raw(cls, raw_data): person = cls.query.filter_by(login=raw_data['login']).first() if person: person.url = raw_data.get('url') else: person = cls(login=raw_data['login'], url=raw_data.get('url')) db.session.add(person) db.session.flush() # TODO: ugh, all this flushing return person class Label(db.Model): id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String()) url = db.Column(db.String()) color = db.Column(db.String(), nullable=True) @classmethod def get_or_create(cls, label_data): label = cls.query.filter_by(name=label_data['name']).first() \ or cls(**label_data) return label class Event(db.Model): id = db.Column(db.Integer, primary_key=True) commit_id = db.Column(db.String()) url = db.Column(db.String()) actor = db.Column(db.String()) event = db.Column(db.String()) milestone = db.Column(db.String()) created_at = db.Column(db.DateTime()) issue_id = db.Column(db.Integer, db.ForeignKey('issue.id')) @classmethod def from_raw(cls, event_data): "Given dict of event data fetched from GitHub API, return instance" insertable = dict( id=event_data['id'], commit_id=event_data['commit_id'], url=event_data['url'], actor=event_data['actor'].get('login') if event_data[ 'actor'] else None, milestone=event_data.get('milestone') and event_data['milestone'][ 'title'], event=event_data['event'], created_at=to_py_datetime(event_data.get('created_at')), ) return cls(**insertable)
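# A minimal, self-contained sketch (not part of the module above) of how the span
# structure produced by Issue.lifecycle() can be consumed, e.g. to total the time
# spent under each milestone. The sample spans below are hypothetical; real spans
# come from Issue.lifecycle()['spans'] and carry datetime objects plus the list of
# milestones in effect, where the last list element is the one of interest.
from collections import defaultdict
from datetime import datetime, timedelta

sample_spans = [
    {'milestones': ['opened'], 'start': datetime(2015, 1, 1), 'end': datetime(2015, 1, 3)},
    {'milestones': ['opened', 'triaged'], 'start': datetime(2015, 1, 3), 'end': datetime(2015, 1, 10)},
    {'milestones': ['opened', 'triaged', 'closed'], 'start': datetime(2015, 1, 10), 'end': datetime(2015, 1, 11)},
]

time_per_milestone = defaultdict(timedelta)
for span in sample_spans:
    # The last milestone in effect determines how the span is attributed.
    time_per_milestone[span['milestones'][-1]] += span['end'] - span['start']

for milestone, total in sorted(time_per_milestone.items()):
    print(milestone, total)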
cc0-1.0
6,412,924,456,398,388,000
38.739348
94
0.521443
false
4.206951
false
false
false
cscutcher/naruto-aufs-layers
naruto/cli.py
1
8374
# -*- coding: utf-8 -*- """ Main group for naruto cli """ import io import logging import os import pathlib import shutil import click from naruto import NarutoLayer, LayerNotFound DEV_LOGGER = logging.getLogger(__name__) DEFAULT_NARUTO_HOME = pathlib.Path(os.path.expanduser('~/.naruto')) DEFAULT_LOG_LEVEL = logging.INFO class CLIContext(object): ''' Context for CLI ''' def __init__(self): self.naruto_home = DEFAULT_NARUTO_HOME cli_context = click.make_pass_decorator(CLIContext, ensure=True) @click.group() @click.option( '--naruto-home', default=str(DEFAULT_NARUTO_HOME), type=click.Path( file_okay=False, dir_okay=True, writable=True, readable=True, resolve_path=True, exists=False), help=( 'Set default config directory used to store and retrieve layers. Default: {}'.format( DEFAULT_NARUTO_HOME))) @click.option( '--verbosity', '-V', help='Set verbosity level explicitly (int or CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET)', default=DEFAULT_LOG_LEVEL, type=str) @cli_context def naruto_cli(ctx, naruto_home, verbosity): ''' CLI for naruto ''' try: verbosity = int(verbosity) except ValueError: #Ints and strings are ok pass logging.basicConfig(level=verbosity) DEV_LOGGER.debug('Set log level to %s', verbosity) ctx.naruto_home = pathlib.Path(naruto_home) DEV_LOGGER.debug('Home path is %r', ctx.naruto_home) class _LayerLookup(click.ParamType): ''' Type which loads naruto dir ''' name = 'NarutoDir' def __init__(self, allow_discovery=True): self._allow_discovery = allow_discovery def convert(self, value, param, local_context): ''' Parse Naruto argument ''' DEV_LOGGER.debug('Trying to find root layer for value %r', value) root_spec, _, layer_spec = value.partition(':') cli_context = local_context.ensure_object(CLIContext) if not root_spec and self._allow_discovery: try: layer = NarutoLayer.find_layer_mounted_at_dest(pathlib.Path(os.getcwd())) except LayerNotFound: self.fail( 'Couldn\'t auto-discover layer. 
' 'You must in a directory which is a mounted layer for auto-discovery to work') else: if os.sep in root_spec: naruto_root = pathlib.Path(root_spec) else: naruto_root = cli_context.naruto_home / root_spec try: naruto_root, = tuple(naruto_root.iterdir()) except FileNotFoundError: self.fail('Directory {} does not exist'.format(naruto_root)) except ValueError: self.fail('Unexpected number of folders in {}'.format(naruto_root)) try: layer = NarutoLayer(naruto_root) except LayerNotFound: self.fail('{!s} is not a layer.'.format(naruto_root)) if layer_spec: layer = layer.find_layer(layer_spec) DEV_LOGGER.debug('Parsed layer at %r from cli', layer) return layer @naruto_cli.command() @click.argument('name_or_path') @click.option('--description', help='Add description to new naruto layer') @cli_context def create(ctx, name_or_path, description): ''' Create new NarutoLayer ''' if os.sep in name_or_path: path = pathlib.Path(name_or_path) DEV_LOGGER.info('Creating at raw path %r', path) else: home_naruto_dir = ctx.naruto_home if not home_naruto_dir.is_dir(): home_naruto_dir.mkdir() home_naruto_dir = home_naruto_dir.resolve() path = home_naruto_dir / name_or_path if not path.is_dir(): path.mkdir() # Check nothing nasty from user assert path.parent == home_naruto_dir DEV_LOGGER.info('Creating %r in naruto home %r', home_naruto_dir, name_or_path) if len(tuple(path.iterdir())) != 0: raise Exception('Expected create directory {!s} to be empty'.format(path)) NarutoLayer.create(path, description=description) @naruto_cli.command() @cli_context def list_home_layers(ctx): ''' List layers stored in home directory ''' for path in ctx.naruto_home.iterdir(): click.echo(str(path)) ################################################################################################# ## Commands that modify or inspect existing layers ################################################################################################# def _modification_command(fn): ''' Add common options for modification ''' fn = naruto_cli.command()(fn) layer_lookup_help = ( 'This specifies the layer you want to act upon. ' 'If not specified we will try and discover the layer you have currently mounted.') fn = click.option('-l', '--layer', type=_LayerLookup(), default='', help=layer_lookup_help)(fn) return fn class InfoNodeAdapter(object): ''' Adapt NarutoLayer for info printout ''' def __init__(self, layer): self._layer = layer def output(self, io_stream, level, highlight): io_stream.write('{indent}+-- {highlight}{layer!s}{highlight}\n'.format( indent=' ' * level, layer=self._layer, highlight='!!!!' 
if self._layer in highlight else '')) for child in self._layer: self.__class__(child).output(io_stream, level + 1, highlight) @_modification_command def info(layer): ''' Get info about a layer ''' io_stream = io.StringIO() InfoNodeAdapter(layer.get_root()).output(io_stream, 0, highlight=(layer,)) click.echo(io_stream.getvalue()) @_modification_command @click.argument('mount_dest') def mount(layer, mount_dest): ''' Mount a layer ''' layer.mount(mount_dest) @_modification_command @click.argument('mount_dest') @click.option('--description', help='Add description to new naruto layer') def branch_and_mount(layer, mount_dest, description): ''' Branch a layer and mount at new dest ''' layer.create_child(description=description).mount(mount_dest) @_modification_command def unmount_all(layer): ''' Unmount all uses of this layer ''' layer.unmount_all() @_modification_command def find_mounts(layer): ''' Find where layer is mounted ''' for branch in layer.find_mounted_branches_iter(): click.echo('{branch.path}={branch.permission} at {branch.mount_point}'.format( branch=branch)) @_modification_command @click.option('--no-prompt', default=False, is_flag=True) def delete(layer, no_prompt): ''' Delete a layer ''' if no_prompt: confirm = click.echo else: confirm = lambda message: click.confirm(message, abort=True) if layer.has_children: click.secho( 'WARNING: This layer has {} direct children and a further {} descendants.'.format( len(layer.children), len(layer.descendants)), fg='red') if layer.mounted: confirm( '{} is currently mounted. Must unmount first. Continue?'.format(layer)) layer.unmount_all() confirm( click.style( 'This will irreversible delete {} and all {} descendants. Continue?'.format( layer, len(layer.descendants)), fg='red')) shutil.rmtree(str(layer.path.resolve())) @_modification_command @click.argument('description', default='') def description(layer, description): ''' Get set layer description ''' if description: layer.description = description else: click.echo(layer.description) @_modification_command @click.argument('tags', nargs=-1) def tags(layer, tags): ''' Get set tags ''' if tags: layer.tags = tags else: click.echo(', '.join(layer.tags)) @_modification_command @click.argument('tags', nargs=-1) def add_tags(layer, tags): ''' Add tag to layer''' layer.tags = layer.tags.union(tags) @_modification_command @click.argument('tags', nargs=-1) def remove_tags(layer, tags): ''' Remove tag from layer''' layer.tags = layer.tags.difference(tags)
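# A runnable sketch (separate from the CLI above) of how a click.ParamType such as
# _LayerLookup receives the raw "ROOT:LAYER" argument. The NarutoLayer lookup itself
# is omitted; this only demonstrates the root_spec/layer_spec split and click's
# convert()/fail() hooks, assuming the click package is installed.
import click
from click.testing import CliRunner

class LayerSpec(click.ParamType):
    name = 'LayerSpec'

    def convert(self, value, param, ctx):
        root_spec, _, layer_spec = value.partition(':')
        if not root_spec:
            self.fail('empty root spec; the real CLI would attempt auto-discovery here')
        return (root_spec, layer_spec or None)

@click.command()
@click.argument('layer', type=LayerSpec())
def show(layer):
    click.echo('root=%s layer=%s' % layer)

if __name__ == '__main__':
    # Drive the command in-process instead of from a shell.
    print(CliRunner().invoke(show, ['myroot:some/branch']).output)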
gpl-3.0
-2,532,855,138,760,252,400
26.455738
99
0.597683
false
3.790856
false
false
false
pas256/troposphere
troposphere/servicecatalog.py
1
4652
# Copyright (c) 2012-2018, Mark Peek <[email protected]> # All rights reserved. # # See LICENSE file for full license. from . import AWSObject, AWSProperty, Tags from .validators import boolean class AcceptedPortfolioShare(AWSObject): resource_type = "AWS::ServiceCatalog::AcceptedPortfolioShare" props = { 'AcceptLanguage': (basestring, False), 'PortfolioId': (basestring, True), } class ProvisioningArtifactProperties(AWSProperty): props = { 'Description': (basestring, False), 'Info': (dict, True), 'Name': (basestring, False), } class CloudFormationProduct(AWSObject): resource_type = "AWS::ServiceCatalog::CloudFormationProduct" props = { 'AcceptLanguage': (basestring, False), 'Description': (basestring, False), 'Distributor': (basestring, False), 'Name': (basestring, True), 'Owner': (basestring, True), 'ProvisioningArtifactParameters': ([ProvisioningArtifactProperties], True), 'SupportDescription': (basestring, False), 'SupportEmail': (basestring, False), 'SupportUrl': (basestring, False), 'Tags': (Tags, False), } class ProvisioningParameter(AWSProperty): props = { 'Key': (basestring, False), 'Value': (basestring, False), } class CloudFormationProvisionedProduct(AWSObject): resource_type = "AWS::ServiceCatalog::CloudFormationProvisionedProduct" props = { 'AcceptLanguage': (basestring, False), 'NotificationArns': ([basestring], False), 'PathId': (basestring, False), 'ProductId': (basestring, False), 'ProductName': (basestring, False), 'ProvisionedProductName': (basestring, False), 'ProvisioningArtifactId': (basestring, False), 'ProvisioningArtifactName': (basestring, False), 'ProvisioningParameters': ([ProvisioningParameter], False), 'Tags': (Tags, False), } class LaunchNotificationConstraint(AWSObject): resource_type = "AWS::ServiceCatalog::LaunchNotificationConstraint" props = { 'AcceptLanguage': (basestring, False), 'Description': (basestring, False), 'NotificationArns': ([basestring], True), 'PortfolioId': (basestring, True), 'ProductId': (basestring, True), } class LaunchRoleConstraint(AWSObject): resource_type = "AWS::ServiceCatalog::LaunchRoleConstraint" props = { 'AcceptLanguage': (basestring, False), 'Description': (basestring, False), 'PortfolioId': (basestring, True), 'ProductId': (basestring, True), 'RoleArn': (basestring, True), } class LaunchTemplateConstraint(AWSObject): resource_type = "AWS::ServiceCatalog::LaunchTemplateConstraint" props = { 'AcceptLanguage': (basestring, False), 'Description': (basestring, False), 'PortfolioId': (basestring, True), 'ProductId': (basestring, True), 'Rules': (basestring, True), } class Portfolio(AWSObject): resource_type = "AWS::ServiceCatalog::Portfolio" props = { 'AcceptLanguage': (basestring, False), 'Description': (basestring, False), 'DisplayName': (basestring, True), 'ProviderName': (basestring, True), 'Tags': (Tags, False), } class PortfolioPrincipalAssociation(AWSObject): resource_type = "AWS::ServiceCatalog::PortfolioPrincipalAssociation" props = { 'AcceptLanguage': (basestring, False), 'PortfolioId': (basestring, True), 'PrincipalARN': (basestring, True), 'PrincipalType': (basestring, True), } class PortfolioProductAssociation(AWSObject): resource_type = "AWS::ServiceCatalog::PortfolioProductAssociation" props = { 'AcceptLanguage': (basestring, False), 'PortfolioId': (basestring, True), 'ProductId': (basestring, True), 'SourcePortfolioId': (basestring, False), } class PortfolioShare(AWSObject): resource_type = "AWS::ServiceCatalog::PortfolioShare" props = { 'AcceptLanguage': (basestring, False), 'AccountId': (basestring, True), 
'PortfolioId': (basestring, True), } class TagOption(AWSObject): resource_type = "AWS::ServiceCatalog::TagOption" props = { 'Active': (boolean, False), 'Key': (basestring, True), 'Value': (basestring, True), } class TagOptionAssociation(AWSObject): resource_type = "AWS::ServiceCatalog::TagOptionAssociation" props = { 'ResourceId': (basestring, True), 'TagOptionId': (basestring, True), }
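# A brief usage sketch (not part of the module above) showing how these resource
# classes compose into a CloudFormation template, assuming the troposphere package
# is installed and this module is importable as troposphere.servicecatalog. The
# template URL and logical names are placeholders.
from troposphere import Ref, Template
from troposphere.servicecatalog import (
    CloudFormationProduct, Portfolio, PortfolioProductAssociation,
    ProvisioningArtifactProperties)

t = Template()

portfolio = t.add_resource(Portfolio(
    'DevPortfolio',
    DisplayName='dev-portfolio',
    ProviderName='platform-team'))

product = t.add_resource(CloudFormationProduct(
    'BaseVpcProduct',
    Name='base-vpc',
    Owner='platform-team',
    ProvisioningArtifactParameters=[ProvisioningArtifactProperties(
        Info={'LoadTemplateFromURL': 'https://example.com/base-vpc.template'})]))

t.add_resource(PortfolioProductAssociation(
    'BaseVpcInDevPortfolio',
    PortfolioId=Ref(portfolio),
    ProductId=Ref(product)))

print(t.to_json())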
bsd-2-clause
-6,446,356,054,065,016,000
27.193939
75
0.634136
false
4.161002
false
false
false
robertmattmueller/sdac-compiler
tests/test_normalization.py
1
1628
try: # Python 2 from StringIO import StringIO except ImportError: # Python 3 from io import StringIO import pddl from pddl_to_prolog import Rule, PrologProgram def test_normalization(): prog = PrologProgram() prog.add_fact(pddl.Atom("at", ["foo", "bar"])) prog.add_fact(pddl.Atom("truck", ["bollerwagen"])) prog.add_fact(pddl.Atom("truck", ["segway"])) prog.add_rule(Rule([pddl.Atom("truck", ["?X"])], pddl.Atom("at", ["?X", "?Y"]))) prog.add_rule(Rule([pddl.Atom("truck", ["X"]), pddl.Atom("location", ["?Y"])], pddl.Atom("at", ["?X", "?Y"]))) prog.add_rule(Rule([pddl.Atom("truck", ["?X"]), pddl.Atom("location", ["?Y"])], pddl.Atom("at", ["?X", "?X"]))) prog.add_rule(Rule([pddl.Atom("p", ["?Y", "?Z", "?Y", "?Z"])], pddl.Atom("q", ["?Y", "?Y"]))) prog.add_rule(Rule([], pddl.Atom("foo", []))) prog.add_rule(Rule([], pddl.Atom("bar", ["X"]))) prog.normalize() output = StringIO() prog.dump(file=output) sorted_output = "\n".join(sorted(output.getvalue().splitlines())) assert sorted_output == """\ Atom @object(bar). Atom @object(bollerwagen). Atom @object(foo). Atom @object(segway). Atom at(foo, bar). Atom bar(X). Atom foo(). Atom truck(bollerwagen). Atom truck(segway). none Atom at(?X, ?X@0) :- Atom truck(?X), Atom location(?Y), Atom =(?X, ?X@0). none Atom at(?X, ?Y) :- Atom truck(?X), Atom @object(?Y). none Atom at(?X, ?Y) :- Atom truck(X), Atom location(?Y), Atom @object(?X). none Atom q(?Y, ?Y@0) :- Atom p(?Y, ?Z, ?Y, ?Z), Atom =(?Y, ?Y@0), Atom =(?Y, ?Y@1), Atom =(?Z, ?Z@2)."""
gpl-3.0
-3,731,900,976,421,474,300
37.761905
105
0.556511
false
2.754653
false
false
false
jacobajit/ion
intranet/middleware/ldap_db.py
1
1702
# -*- coding: utf-8 -*- import logging from django.contrib import messages from ..db.ldap_db import LDAPConnection logger = logging.getLogger(__name__) class CheckLDAPBindMiddleware: def process_response(self, request, response): if not hasattr(request, "user") or "_auth_user_backend" not in request.session or not request.user.is_authenticated(): # Nothing to check if user isn't already logged in return response auth_backend = request.session["_auth_user_backend"] kerberos_backend = "KerberosAuthenticationBackend" if LDAPConnection().did_use_simple_bind() and auth_backend.startswith(kerberos_backend): # if request.user.is_eighth_admin: # logger.info("Simple bind being used: staying logged in because eighth admin.") # return response logger.info("LDAP simple bind being used for {}".format(request.user if request.user else None)) messages.error(request, "Access to directory information may be limited: LDAP issue. Try logging out and back in.") """ logger.info("Simple bind being used: Destroying kerberos cache and logging out") try: kerberos_cache = request.session["KRB5CCNAME"] os.system("/usr/bin/kdestroy -c " + kerberos_cache) except KeyError: pass logout(request) response = redirect("login") url = response["Location"] response["Location"] = urls.add_get_parameters( url, {"next": request.path}, percent_encode=False) return response """ return response
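# A configuration sketch (an assumption, not taken from the project's actual
# settings): a middleware class with a bare process_response() hook like the one
# above is activated by listing its dotted path in MIDDLEWARE_CLASSES (pre-Django
# 1.10 style), after the session, auth and messages middleware so that
# request.user, request.session and messages.error() are all available.
MIDDLEWARE_CLASSES = [
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'intranet.middleware.ldap_db.CheckLDAPBindMiddleware',
]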
gpl-2.0
-8,821,804,357,833,348,000
38.581395
127
0.619271
false
4.478947
false
false
false
amarfurt/arr
remote.py
1
1112
""" Starts the remote control worker. """ import os import logging import argparse from workers.controller import Controller def parse_args(): parser = argparse.ArgumentParser(description='Starts the remote control worker.') parser.add_argument('--logpath', default=os.path.expanduser('~/logs/arr.log'), help='Path to logfile.') parser.add_argument('--loglevel', default='INFO', help='Logging level.') return parser.parse_args() def main(args): # configure logging logformat = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s' loglevel = logging.getLevelName(args.loglevel) logging.basicConfig(filename=args.logpath, format=logformat, level=loglevel) logging.getLogger('pika').setLevel(logging.WARNING) log = logging.getLogger('main') log.info('Starting system...') # start control worker log.info('Starting control worker...') c = Controller('localhost', 'control') c.start() c.add_cpu() log.info('System started') c.join() log.info('System stopped') if __name__ == '__main__': main(parse_args())
mit
-6,781,648,225,161,846,000
28.263158
85
0.66277
false
3.915493
false
false
false
migasfree/migasfree
migasfree/catalog/migrations/0003_4_14_packages_by_project.py
1
2349
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import migasfree.server.models.common class Migration(migrations.Migration): dependencies = [ ('server', '0022_4_14_computers'), ('catalog', '0002_4_14_versions'), ] operations = [ migrations.CreateModel( name='PackagesByProject', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('packages_to_install', models.TextField(blank=True, verbose_name='packages to install')), ], options={ 'verbose_name': 'Packages by Project', 'verbose_name_plural': 'Packages by Projects', 'permissions': (('can_save_packagesbyproject', 'Can save packages by project'),), }, bases=(models.Model, migasfree.server.models.common.MigasLink), ), migrations.AlterField( model_name='application', name='name', field=models.CharField(max_length=50, unique=True, verbose_name='name'), ), migrations.AlterUniqueTogether( name='application', unique_together=set([]), ), migrations.AddField( model_name='packagesbyproject', name='application', field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to='catalog.Application', verbose_name='application', related_name='packages_by_project' ), ), migrations.AddField( model_name='packagesbyproject', name='project', field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to='server.Project', verbose_name='project' ), ), migrations.RemoveField( model_name='application', name='packages_to_install', ), migrations.RemoveField( model_name='application', name='project', ), migrations.AlterUniqueTogether( name='packagesbyproject', unique_together={('application', 'project')}, ), ]
gpl-3.0
-6,319,951,951,391,928,000
33.043478
114
0.553427
false
4.651485
false
false
false
robertostling/bnas
bnas/model.py
1
50808
"""Network models and submodels. The :class:`Model` class is used to encapsulate a set of Theano shared variables (model parameters), and can create symbolic expressions for model outputs and loss functions. This module also contains subclasses, such as :class:`Linear`, that function as building blocks for more complex networks. """ from collections import OrderedDict import pickle import sys import numpy as np import theano from theano.ifelse import ifelse from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams from theano import tensor as T from . import init from . import search from .fun import train_mode, function from .utils import expand_to_batch, softmax_masked, softmax_3d, softmax_4d class Model: """Base class for neural network models. Attributes ---------- name : str Name of the model. params : OrderedDict of str -> :class:`theano.compile.sharedvalue.SharedVariable` Mapping from parameter names to Theano shared variables. Note that submodel parameters are not included, so this should normally not be accessed directly, rather use `self.parameters()`. regularization : list of Theano symbolic expressions These expressions should all be added to the loss function when optimizing. Use `self.regularize()` to modify. """ def __init__(self, name): """Initialize an empty model. Parameters ---------- name : str Name of the model. """ self.name = name self.params = OrderedDict() self.regularization = [] self.submodels = OrderedDict() def loss(self): """Part of the loss function that is independent of inputs.""" terms = [submodel.loss() for submodel in self.submodels.values()] \ + self.regularization return sum(terms, T.as_tensor_variable(0.0)) def parameters(self, include_submodels=True): """Iterate over the parameters of this model and its submodels. Each value produced by the iterator is a tuple (name, value), where the name is a tuple of strings describing the hierarchy of submodels, e.g. ('hidden', 'b'), and the value is a Theano shared variable. Parameters ---------- include_submodels : bool If ``True`` (default), also iterate over submodel parameters. """ for name, p in self.params.items(): yield ((name,), p) if include_submodels: for submodel in self.submodels.values(): for name, p in submodel.parameters(): yield ((submodel.name,) + name, p) def summarize(self, grads, f=sys.stdout): def tensor_stats(m): return ', '.join([ 'norm = %g' % np.sqrt((m*m).sum()), 'maxabs = %g' % np.abs(m).max(), 'minabs = %g' % np.abs(m).min()]) def summarize_parameter(name, p, g): p_stats = tensor_stats(p) g_stats = tensor_stats(g) print('%s\n parameter %s\n gradient %s' % ( name, p_stats, g_stats), file=f) params = list(self.parameters()) assert len(grads) == len(params) for (name, p), grad in zip(params, grads): summarize_parameter('.'.join(name), p.get_value(), grad) f.flush() def parameters_list(self, include_submodels=True): """Return a list with parameters, without their names.""" return list(p for name, p in self.parameters(include_submodels=include_submodels)) def parameter(self, name): """Return the parameter with the given name. Parameters ---------- name : tuple of str Path to variable, e.g. ('hidden', 'b') to find the parameter 'b' in the submodel 'hidden'. 
Returns ------- value : :class:`theano.compile.sharedvalue.SharedVariable` """ if not isinstance(name, tuple): raise TypeError('Expected tuple, got %s' % type(name)) if len(name) == 1: return self.params[name[0]] elif len(name) >= 2: return self.submodels[name[0]].parameter(name[1:]) else: raise ValueError('Name tuple must not be empty!') def parameter_count(self): """Return the total number of parameters of the model.""" return sum(p.get_value(borrow=True).size for _,p in self.parameters()) def param(self, name, dims, init_f=None, value=None, dtype=theano.config.floatX): """Create a new parameter, or share an existing one. Parameters ---------- name : str Name of parameter, this will be used directly in `self.params` and used to create `self._name`. dims : tuple Shape of the parameter vector. value : :class:`theano.compile.sharedvalue.SharedVariable`, optional If this parameter should be shared, a SharedVariable instance can be passed here. init_f : (tuple => numpy.ndarray) Function used to initialize the parameter vector. dtype : str or numpy.dtype Data type (default is `theano.config.floatX`) Returns ------- p : :class:`theano.compile.sharedvalue.SharedVariable` """ if name in self.params: if not value is None: raise ValueError('Trying to add a shared parameter (%s), ' 'but a parameter with the same name already ' 'exists in %s!' % (name, self.name)) return self.params[name] if value is None: if init_f is None: raise ValueError('Creating new parameter, but no ' 'initialization specified!') p = theano.shared(init_f(dims, dtype=dtype), name=name) self.params[name] = p else: p = value setattr(self, '_'+name, p) return p def regularize(self, p, regularizer): """Add regularization to a parameter. Parameters ---------- p : :class:`theano.compile.sharedvalue.SharedVariable` Parameter to apply regularization regularizer : function Regularization function, which should return a symbolic expression. """ if not regularizer is None: self.regularization.append(regularizer(p)) def add(self, submodel): """Import parameters from a submodel. If a submodel named "hidden" has a parameter "b", it will be imported as "hidden_b", also accessible as `self._hidden_b`. Parameters ---------- submodel : :class:`.Model` Returns ------- submodel : :class:`.Model` Equal to the parameter, for convenience. """ if submodel.name in self.submodels: raise ValueError('Submodel with name %s already exists in %s!' % ( submodel.name, self.name)) self.submodels[submodel.name] = submodel setattr(self, submodel.name, submodel) return submodel def save(self, f, include_submodels=True): """Save the parameter values of this model to a file object. Parameters ---------- f : file File object to write to, assumed to be opened in 'wb' mode. include_submodels : bool If ``True`` (default), also save submodel parameters. """ pickle.dump({name: p.get_value(borrow=True) for name, p in self.parameters( include_submodels=include_submodels)}, f, -1) def load(self, f, allow_incomplete=False, allow_unused=False): """Load (some) weights of this model from a file object. Parameters ---------- f : file File object to read from, assumeb to be opened in 'rb' mode. allow_incomplete : bool If ``False``, throw a `ValueError` if some model parameters are missing in the file. allow_unused : bool If ``False``, throw a `ValueError` if the file contains model parameters that are not used in this model. 
""" data = pickle.load(f) parameters = dict(self.parameters()) names = frozenset(data.keys()) & frozenset(parameters.keys()) if not allow_incomplete and len(names) < len(parameters): diff = sorted(frozenset(parameters.keys()) - names) raise ValueError( 'The following parameters are missing: %s' % ', '.join( '.'.join(t) for t in diff)) if not allow_unused and len(names) < len(data): diff = sorted(frozenset(data.keys()) - names) raise ValueError( 'The following parameters are unused: %s' % ', '.join( '.'.join(t) for t in diff)) for name in names: value = data[name] old_value = parameters[name].get_value(borrow=True) if value.shape != old_value.shape: raise ValueError( 'Loaded shape is %s but %s expected' % ( value.shape, old_value.shape)) parameters[name].set_value(value) def compile(self, *args): return function(list(args), self(*args)) class Linear(Model): """Fully connected linear layer. This layer creates one shared parameter, `w` of shape `(input_dims, output_dims)` if `use_bias` is ``False``, otherwise it also creates `name_b` of shape `output_dims` for biases. Parameters ---------- name : str Name of layer. input_dims : int Number of inputs. output_dims : int Number of outputs. w : :class:`theano.compile.sharedvalue.SharedVariable` Weight vector to use, or pass ``None`` (default) to create a new one. w_init : :class:`.init.InitializationFunction` Initialization for weight vector, in case `w` is ``None``. w_regularizer : :class:`.regularize.Regularizer`, optional Regularization for weight matrix. b : :class:`theano.compile.sharedvalue.SharedVariable` Bias vector to use, or pass ``None`` (default) to create a new one. b_init : :class:`.init.InitializationFunction` Initialization for bias vector, in case `b` is ``None``. b_regularizer : :class:`.regularize.Regularizer`, optional Regularization for biases. use_bias : bool If ``False``, no bias is used and the `b` and `b_init` parameters are ignored. dropout : float Dropout factor (the default value of 0 means dropout is not used). layernorm : bool If ``True``, layer normalization is used on the activations. """ def __init__(self, name, input_dims, output_dims, w=None, w_init=None, w_regularizer=None, b=None, b_init=None, b_regularizer=None, use_bias=True, dropout=0, layernorm=False): super().__init__(name) self.input_dims = input_dims self.output_dims = output_dims self.use_bias = use_bias self.dropout = dropout self.layernorm = layernorm if w_init is None: w_init = init.Gaussian(fan_in=input_dims) if b_init is None: b_init = init.Constant(0.0) self.param('w', (input_dims, output_dims), init_f=w_init, value=w) self.regularize(self._w, w_regularizer) if use_bias: self.param('b', (output_dims,), init_f=b_init, value=b) self.regularize(self._b, b_regularizer) if dropout: self.add(Dropout('dropout', dropout)) if layernorm: self.add(LayerNormalization('ln', (None, output_dims))) def __call__(self, inputs): outputs = T.dot(inputs, self._w) if self.layernorm: outputs = self.ln(outputs) if self.use_bias: outputs = outputs + self._b if self.dropout: outputs = self.dropout(outputs) return outputs class Embeddings(Model): """Embeddings layer. This layer creates one shared parameter, `w` of shape `(alphabet_size, embedding_dims)`. Parameters ---------- name : str Name of layer. alphabet_size : int Size of symbol alphabet. embedding_dims : int Dimensionality of embeddings. w : :class:`theano.compile.sharedvalue.SharedVariable` Weight vector to use, or pass ``None`` (default) to create a new one. 
w_init : :class:`.init.InitializationFunction` Initialization for weight vector, in case `w` is ``None``. w_regularizer : :class:`.regularize.Regularizer`, optional Regularization for weight matrix. dropout : float Dropout factor (the default value of 0 means dropout is not used). """ def __init__(self, name, alphabet_size, embedding_dims, w=None, w_init=None, w_regularizer=None, dropout=0): super().__init__(name) self.embedding_dims = embedding_dims self.alphabet_size = alphabet_size self.dropout = dropout if w_init is None: w_init = init.Gaussian(fan_in=embedding_dims) self.param('w', (alphabet_size, embedding_dims), init_f=w_init, value=w) self.regularize(self._w, w_regularizer) if dropout: self.add(Dropout('dropout', dropout, sequence=True)) def __call__(self, inputs): outputs = self._w[inputs] if self.dropout: outputs = self.dropout(outputs) return outputs class Conv1D(Model): """1D convolution layer with linear activations. The input shape is assumed to be (batch_size, length, dims). """ def __init__(self, name, input_dims, output_dims, filter_dims=3, stride=1, f=None, f_init=None, f_regularizer=None, b=None, b_init=None, b_regularizer=None): super().__init__(name) if f_init is None: f_init = init.Gaussian(fan_in=filter_dims*input_dims) if b_init is None: b_init = init.Constant(0.0) self.stride = stride self.input_dims = input_dims self.f_shape = (output_dims, input_dims, filter_dims, 1) self.param('f', self.f_shape, init_f=f_init) self.param('b', (output_dims,), init_f=b_init) def __call__(self, inputs, inputs_mask): x = T.nnet.conv2d( (inputs * inputs_mask.dimshuffle(0,1,'x') ).dimshuffle(0,2,1,'x'), self._f, input_shape=(None, self.input_dims, None, 1), filter_shape=self.f_shape, border_mode='half', subsample=(self.stride, 1), filter_flip=True) batch_size = inputs.shape[0] length = inputs.shape[1] dims = inputs.shape[2] x = x.reshape((batch_size, dims, length)).dimshuffle(0,2,1) return x + self._b.dimshuffle('x','x',0) class LSTM(Model): """Long Short-Term Memory. name : str Name of layer. input_dims : int Length of each vector in the input sequence. state_dims : int Size of internal states. An LSTM contains two states, each of the will be of size state_dims. attention_dims : int If specified, use attention and let this be the size of the hidden attention state. attented_dims : int Dimensionality of the sequence to have attention on. layernorm : str One of `'ba1'` (eq 20--22 of Ba et al.), `'ba2'` (eq 29--31) or `False` (no layer normalization). 
""" def __init__(self, name, input_dims, state_dims, w=None, w_init=None, w_regularizer=None, u=None, u_init=None, u_regularizer=None, b=None, b_init=None, b_regularizer=None, attention_dims=None, attended_dims=None, layernorm=False, contextgate=False): super().__init__(name) assert layernorm in (False, 'ba1', 'ba2') assert (attention_dims is None) == (attended_dims is None) assert not (contextgate and (attention_dims is None)) self.n_states = 2 if attended_dims is not None: if not contextgate: input_dims += attended_dims self.input_dims = input_dims self.state_dims = state_dims self.layernorm = layernorm self.attention_dims = attention_dims self.attended_dims = attended_dims self.use_attention = attention_dims is not None self.use_contextgate = contextgate if w_init is None: w_init = init.Gaussian(fan_in=input_dims) if u_init is None: u_init = init.Concatenated( [init.Orthogonal()]*4, axis=1) if b_init is None: b_init = init.Concatenated( [init.Constant(x) for x in [0.0, 1.0, 0.0, 0.0]]) if self.use_contextgate: self.param('wzg', (input_dims, state_dims*2), init_f=init.Gaussian(fan_in=input_dims)) self.param('uzg', (state_dims, state_dims*2), init_f=init.Concatenated([init.Orthogonal()]*2, axis=1)) self.param('bzg', (state_dims*2,), init_f=init.Constant(0.0)) self.param('czs', (attended_dims, state_dims*2), init_f=init.Gaussian(fan_in=attended_dims)) self.param('bs', (state_dims,), init_f=init.Constant(0.0)) self.param('w', (state_dims, state_dims*4), init_f=w_init, value=w) self.param('u', (state_dims, state_dims*4), init_f=u_init, value=u) self.param('b', (state_dims*4,), init_f=b_init, value=b) else: self.param('w', (input_dims, state_dims*4), init_f=w_init, value=w) self.param('u', (state_dims, state_dims*4), init_f=u_init, value=u) self.param('b', (state_dims*4,), init_f=b_init, value=b) if self.use_attention: self.add(Linear('attention_u', attended_dims, attention_dims)) self.param('attention_w', (state_dims, attention_dims), init_f=init.Gaussian(fan_in=state_dims)) self.param('attention_v', (attention_dims,), init_f=init.Gaussian(fan_in=attention_dims)) self.regularize(self._attention_w, w_regularizer) if layernorm == 'ba1': self.add(LayerNormalization('ln_a', (None, attention_dims))) self.regularize(self._w, w_regularizer) self.regularize(self._u, u_regularizer) self.regularize(self._b, b_regularizer) if layernorm == 'ba1': self.add(LayerNormalization('ln_1', (None, state_dims*4))) self.add(LayerNormalization('ln_2', (None, state_dims*4))) if layernorm: self.add(LayerNormalization('ln_h', (None, state_dims))) def __call__(self, inputs, h_tm1, c_tm1, attended=None, attended_dot_u=None, attention_mask=None): if self.use_attention: # Non-precomputed part of the attention vector for this time step # _ x batch_size x attention_dims h_dot_w = T.dot(h_tm1, self._attention_w) if self.layernorm == 'ba1': h_dot_w = self.ln_a(h_dot_w) h_dot_w = h_dot_w.dimshuffle('x',0,1) # Attention vector, with distributions over the positions in # attended. Elements that fall outside the sentence in each batch # are set to zero. 
# sequence_length x batch_size # Note that attention.T is returned attention = softmax_masked( T.dot( T.tanh(attended_dot_u + h_dot_w), self._attention_v).T, attention_mask.T).T # Compressed attended vector, weighted by the attention vector # batch_size x attended_dims compressed = (attended * attention.dimshuffle(0,1,'x')).sum(axis=0) # Append the compressed vector to the inputs and continue as usual if not self.use_contextgate: inputs = T.concatenate([inputs, compressed], axis=1) else: zg = (T.dot(inputs, self._wzg) + T.dot(h_tm1, self._uzg) + self._bzg.dimshuffle('x', 0)) zs = T.dot(compressed, self._czs) def part(m,i): return m[:, i*self.state_dims:(i+1)*self.state_dims] z = T.nnet.sigmoid(part(zg,0) + part(zs,0)) g = part(zg,1) s = part(zs,1) + self._bs.dimshuffle('x', 0) inputs = z*s + (1-z)*g if self.layernorm == 'ba1': x = (self.ln_1(T.dot(inputs, self._w)) + self.ln_2(T.dot(h_tm1, self._u))) else: x = T.dot(inputs, self._w) + T.dot(h_tm1, self._u) x = x + self._b.dimshuffle('x', 0) def x_part(i): return x[:, i*self.state_dims:(i+1)*self.state_dims] i = T.nnet.sigmoid(x_part(0)) f = T.nnet.sigmoid(x_part(1)) o = T.nnet.sigmoid(x_part(2)) c = T.tanh( x_part(3)) c_t = f*c_tm1 + i*c h_t = o*T.tanh(self.ln_h(c_t) if self.layernorm else c_t) if self.use_attention: return h_t, c_t, attention.T else: return h_t, c_t class LSTMSequence(Model): def __init__(self, name, backwards, *args, dropout=0, trainable_initial=False, offset=0, **kwargs): super().__init__(name) self.backwards = backwards self.trainable_initial = trainable_initial self.offset = offset self._step_fun = None self._attention_u_fun = None self.add(Dropout('dropout', dropout)) self.add(LSTM('gate', *args, **kwargs)) if self.trainable_initial: self.param('h_0', (self.gate.state_dims,), init_f=init.Gaussian(fan_in=self.gate.state_dims)) self.param('c_0', (self.gate.state_dims,), init_f=init.Gaussian(fan_in=self.gate.state_dims)) def step(self, inputs, inputs_mask, h_tm1, c_tm1, h_mask, *non_sequences): if self.gate.use_attention: # attended is the # src_sequence_length x batch_size x attention_dims # matrix which we have attention on. # # attended_dot_u is the h_t-independent part of the final # attention vectors, which is precomputed for efficiency. # # attention_mask is a binary mask over the valid elements of # attended, which in practice is the same as the mask passed to # the encoder that created attended. 
Size # src_sequence_length x batch_size h_t, c_t, attention = self.gate( inputs, h_tm1 * h_mask.astype(theano.config.floatX), c_tm1, attended=non_sequences[0], attended_dot_u=non_sequences[1], attention_mask=non_sequences[2]) return (T.switch(inputs_mask.dimshuffle(0, 'x'), h_t, h_tm1), T.switch(inputs_mask.dimshuffle(0, 'x'), c_t, c_tm1), attention) else: h_t, c_t = self.gate( inputs, h_tm1 * h_mask.astype(theano.config.floatX), c_tm1) return (T.switch(inputs_mask.dimshuffle(0, 'x'), h_t, h_tm1), T.switch(inputs_mask.dimshuffle(0, 'x'), c_t, c_tm1)) def step_fun(self): if self._step_fun is None: inputs = T.matrix('inputs') h_tm1 = T.matrix('h_tm1') c_tm1 = T.matrix('c_tm1') if self.gate.use_attention: attended=T.tensor3('attended') attended_dot_u=T.tensor3('attended_dot_u') attention_mask=T.matrix('attention_mask') self._step_fun = function( [inputs, h_tm1, c_tm1, attended, attended_dot_u, attention_mask], self.step(inputs, T.ones(inputs.shape[:-1]), h_tm1, c_tm1, T.ones_like(h_tm1), attended, attended_dot_u, attention_mask), name='%s_step_fun'%self.name) else: self._step_fun = function( [inputs, h_tm1, c_tm1], self.step(inputs, T.ones(inputs.shape[:-1]), h_tm1, c_tm1, T.ones_like(h_tm1)), name='%s_step_fun'%self.name) return self._step_fun def attention_u_fun(self): assert self.gate.use_attention if self._attention_u_fun is None: attended = T.tensor3('attended') self._attention_u_fun = function( [attended], self.gate.attention_u(attended), name='%s_attention_u_fun'%self.name) return self._attention_u_fun def search(self, predict_fun, embeddings, start_symbol, stop_symbol, max_length, h_0=None, c_0=None, attended=None, attention_mask=None, beam_size=4): if self.gate.use_attention: attended_dot_u = self.attention_u_fun()(attended) if self.trainable_initial: if h_0 is None: h_0 = self._h_0.get_value()[None,:] if c_0 is None: c_0 = self._c_0.get_value()[None,:] def step(i, states, outputs, outputs_mask): if self.gate.use_attention: result = self.step_fun()( embeddings[outputs[-1]], states[0], states[1], attended, attended_dot_u, attention_mask) else: result = self.step_fun()( embeddings[outputs[-1]], states[0], states[1]) h_t, c_t = result[:2] return [h_t, c_t], predict_fun(h_t) return search.beam( step, [h_0, c_0], h_0.shape[0], start_symbol, stop_symbol, max_length, beam_size=beam_size) def __call__(self, inputs, inputs_mask, h_0=None, c_0=None, attended=None, attention_mask=None): if self.trainable_initial: batch_size = inputs.shape[1] if h_0 is None: h_0 = expand_to_batch(self._h_0, batch_size) if c_0 is None: c_0 = expand_to_batch(self._c_0, batch_size) attention_info = [] if self.gate.use_attention: attention_info = [attended, self.gate.attention_u(attended), attention_mask] dropout_masks = [self.dropout.mask(h_0.shape)] seqs, _ = theano.scan( fn=self.step, go_backwards=self.backwards, sequences=[{'input': inputs, 'taps': [self.offset]}, {'input': inputs_mask, 'taps': [self.offset]}], outputs_info=[h_0, c_0] + \ [None]*(1 if self.gate.use_attention else 0), non_sequences=dropout_masks + attention_info + \ self.gate.parameters_list()) if self.backwards: return tuple(seq[::-1] for seq in seqs) else: return seqs class Sequence(Model): def __init__(self, name, gate_type, backwards, *args, dropout=0, trainable_initial=False, offset=0, **kwargs): super().__init__(name) self.backwards = backwards self.trainable_initial = trainable_initial self.offset = offset self._step_fun = None self._attention_u_fun = None self.add(Dropout('dropout', dropout)) self.add(gate_type('gate', *args, **kwargs)) 
if self.trainable_initial: for state in range(self.gate.n_states): self.param('state_%d_0' % state, (self.gate.state_dims,), init_f=init.Gaussian(fan_in=self.gate.state_dims)) def step(self, inputs, inputs_mask, *args): states_tm1 = args[:self.gate.n_states] h_mask = args[self.gate.n_states] non_sequences = args[self.gate.n_states+1:] # TODO: currently assume that dropout is applied only to states[0] # through h_mask (which is passed through non_sequences and # constant at each time step) if self.gate.use_attention: # attended is the # src_sequence_length x batch_size x attention_dims # matrix which we have attention on. # # attended_dot_u is the h_t-independent part of the final # attention vectors, which is precomputed for efficiency. # # attention_mask is a binary mask over the valid elements of # attended, which in practice is the same as the mask passed to # the encoder that created attended. Size # src_sequence_length x batch_size states_attention = self.gate( inputs, *((states_tm1[0] * h_mask.astype(theano.config.floatX),) + states_tm1[1:]), attended=non_sequences[0], attended_dot_u=non_sequences[1], attention_mask=non_sequences[2]) states_t = states_attention[:-1] attention = states_attention[-1] return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1) for s_t, s_tm1 in zip(states_t, states_tm1) ) + (attention,) else: states_t = self.gate( inputs, *((states_tm1[0] * h_mask.astype(theano.config.floatX),) + states_tm1[1:])) return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1) for s_t, s_tm1 in zip(states_t, states_tm1)) def step_fun(self): if self._step_fun is None: inputs = T.matrix('inputs') states_tm1 = [T.matrix('state_%d_tm1' % state) for state in range(self.gate.n_states)] if self.gate.use_attention: attended=T.tensor3('attended') attended_dot_u=T.tensor3('attended_dot_u') attention_mask=T.matrix('attention_mask') self._step_fun = function( [inputs] + states_tm1 + [ attended, attended_dot_u, attention_mask], self.step(*([inputs, T.ones(inputs.shape[:-1])] + states_tm1 + [T.ones_like(states_tm1[0]), attended, attended_dot_u, attention_mask])), name='%s_step_fun'%self.name) else: self._step_fun = function( [inputs] + states_tm1, self.step(*([inputs, T.ones(inputs.shape[:-1])] + states_tm1 + [T.ones_like(states_tm1[0])])), name='%s_step_fun'%self.name) return self._step_fun def attention_u_fun(self): assert self.gate.use_attention if self._attention_u_fun is None: attended = T.tensor3('attended') self._attention_u_fun = function( [attended], self.gate.attention_u(attended), name='%s_attention_u_fun'%self.name) return self._attention_u_fun def search(self, predict_fun, embeddings, start_symbol, stop_symbol, max_length, states_0=None, attended=None, attention_mask=None, fixed=None, beam_size=4): if self.gate.use_attention: attended_dot_u = self.attention_u_fun()(attended) if self.trainable_initial: if states_0 is None: states_0 = [ getattr(self, '_state_%d_0' % state).get_value()[None,:] for state in range(self.gate.n_states)] def step(i, states, outputs, outputs_mask): inputs = embeddings[outputs[-1]] # TODO: is this the best way to add extra arguments? 
if fixed is not None: inputs = np.concatenate( [inputs, fixed[None,:].repeat(0, axis=-1)], axis=-1) if self.gate.use_attention: result = self.step_fun()( *([inputs] + states + [ attended, attended_dot_u, attention_mask])) else: result = self.step_fun()( *([inputs] + states)) states = result[:self.gate.n_states] # NOTE: state[0] hard-coded return states, predict_fun(states[0]) return search.beam( step, states_0, states_0[0].shape[0], start_symbol, stop_symbol, max_length, beam_size=beam_size) def __call__(self, inputs, inputs_mask, states_0=None, attended=None, attention_mask=None): if self.trainable_initial: batch_size = inputs.shape[1] if states_0 is None: states_0 = [ expand_to_batch(getattr(self, '_state_%d_0' % state), batch_size) for state in range(self.gate.n_states)] attention_info = [] if self.gate.use_attention: attention_info = [attended, self.gate.attention_u(attended), attention_mask] dropout_masks = [self.dropout.mask(states_0[0].shape)] seqs, _ = theano.scan( fn=self.step, go_backwards=self.backwards, sequences=[{'input': inputs, 'taps': [self.offset]}, {'input': inputs_mask, 'taps': [self.offset]}], outputs_info=list(states_0) + \ [None]*(1 if self.gate.use_attention else 0), non_sequences=dropout_masks + attention_info + \ self.gate.parameters_list()) if self.backwards: return tuple(seq[::-1] for seq in seqs) else: return seqs # TODO: need to re-think how to handle attention in stacked models class StackedSequence(Model): def __init__(self, name, gate_type, backwards, n_layers, input_dims, state_dims, *args, dropout=0, trainable_initial=False, offset=0, use_attention=False, layer_fixed_size=None, **kwargs): super().__init__(name) self.backwards = backwards self.trainable_initial = trainable_initial self.offset = offset self.n_layers = n_layers self.layer_fixed_size = layer_fixed_size self._step_fun = None self._attention_u_fun = None self.add(Dropout('dropout', dropout)) self.gates = [] for layer in range(n_layers): total_input_dims = state_dims if layer == 0: total_input_dims += input_dims if layer_fixed_size is not None: total_input_dims += layer_fixed_size[layer] gate = gate_type( 'gate%d' % layer, total_input_dims, state_dims, *args, **kwargs) self.add(gate) self.gates.append(gate) if self.trainable_initial: for state in range(self.gate0.n_states): self.param('state_%d_%d_0' % (layer, state), (self.gate0.state_dims,), init_f=init.Gaussian( fan_in=self.gate0.state_dims)) def step(self, inputs, inputs_mask, *args): total_states = self.gate0.n_states*self.n_layers layer_states_tm1 = [ args[layer*self.gate0.n_states:(layer+1)*self.gate0.n_states] for layer in range(self.n_layers)] n = total_states h_mask = args[n] n += 1 layer_fixed = None if self.layer_fixed_size is not None: layer_fixed = args[n:n+self.n_layers+1] n += self.n_layers+1 non_sequences = args[n:] layer_states_t = [] #states_tm1 = args[:self.gate.n_states] #h_mask = args[self.gate.n_states] #non_sequences = args[self.gate.n_states+1:] # TODO: currently assume that dropout is applied only to states[0] # through h_mask (which is passed through non_sequences and # constant at each time step) if self.gates[-1].use_attention: raise NotImplementedError('Stacked RNN with attention') # attended is the # src_sequence_length x batch_size x attention_dims # matrix which we have attention on. # # attended_dot_u is the h_t-independent part of the final # attention vectors, which is precomputed for efficiency. 
# # attention_mask is a binary mask over the valid elements of # attended, which in practice is the same as the mask passed to # the encoder that created attended. Size # src_sequence_length x batch_size states_attention = self.gate( inputs, *((states_tm1[0] * h_mask.astype(theano.config.floatX),) + states_tm1[1:]), attended=non_sequences[0], attended_dot_u=non_sequences[1], attention_mask=non_sequences[2]) states_t = states_attention[:-1] attention = states_attention[-1] return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1) for s_t, s_tm1 in zip(states_t, states_tm1) ) + (attention,) else: for layer in range(self.n_layers): states_tm1 = layer_states_tm1[layer] total_inputs = inputs if layer == 0 else layer_states_t[-1][0] if layer_fixed is not None: total_inputs = T.concatenate( [total_inputs, layer_fixed[layer].repeat( inputs.shape[0], axis=0)], axis=-1) states_t = getattr(self, 'gate%d' % layer)( total_inputs, *((states_tm1[0] * h_mask.astype(theano.config.floatX),) + states_tm1[1:])) layer_states_t.append(states_t) return tuple( T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1) for states_t, states_tm1 in zip( layer_states_t, layer_states_tm1) for s_t, s_tm1 in zip(states_t, states_tm1)) #states_t = self.gate( # inputs, # *((states_tm1[0] * h_mask.astype(theano.config.floatX),) + # states_tm1[1:])) #return tuple(T.switch(inputs_mask.dimshuffle(0, 'x'), s_t, s_tm1) # for s_t, s_tm1 in zip(states_t, states_tm1)) def step_fun(self): if self._step_fun is None: inputs = T.matrix('inputs') states_tm1 = [T.matrix('state_%d_%d_tm1' % (layer, state)) for layer in range(self.n_layers) for state in range(self.gate0.n_states)] if self.gates[-1].use_attention: raise NotImplementedError('Stacked RNN with attention') attended=T.tensor3('attended') attended_dot_u=T.tensor3('attended_dot_u') attention_mask=T.matrix('attention_mask') self._step_fun = function( [inputs] + states_tm1 + [ attended, attended_dot_u, attention_mask], self.step(*([inputs, T.ones(inputs.shape[:-1])] + states_tm1 + [T.ones_like(states_tm1[0]), attended, attended_dot_u, attention_mask])), name='%s_step_fun'%self.name) else: self._step_fun = function( [inputs] + states_tm1, self.step(*([inputs, T.ones(inputs.shape[:-1])] + states_tm1 + [T.ones_like(states_tm1[0])])), name='%s_step_fun'%self.name) return self._step_fun def attention_u_fun(self): assert self.gates[-1].use_attention if self._attention_u_fun is None: attended = T.tensor3('attended') self._attention_u_fun = function( [attended], self.gates[-1].attention_u(attended), name='%s_attention_u_fun'%self.name) return self._attention_u_fun def search(self, predict_fun, embeddings, start_symbol, stop_symbol, max_length, layer_states_0=None, attended=None, attention_mask=None, layer_fixed=None, beam_size=4): if self.gates[-1].use_attention: attended_dot_u = self.attention_u_fun()(attended) if self.trainable_initial: if layer_states_0 is None: layer_states_0 = [ getattr(self, '_state_%d_%d_0' % state).get_value()[None,:] for layer in range(self.n_layers) for state in range(self.gate0.n_states)] def step(i, states, outputs, outputs_mask): inputs = embeddings[outputs[-1]] # TODO: need to give sizes of fixed arguments ... # TODO: is this the best way to add extra arguments? if layer_fixed is not None and layer_fixed[0] is not None: # TODO: wasn't this buggy anyway? Why repeat(0, ...) ? 
inputs = np.concatenate( [inputs, layer_fixed[0][None,:]], axis=-1) if self.gates[-1].use_attention: raise NotImplementedError('Stacked RNN with attention') result = self.step_fun()( *([inputs] + states + [ attended, attended_dot_u, attention_mask])) else: result = self.step_fun()( *([inputs] + states)) states = result[:self.n_layers*self.gate0.n_states] # NOTE: state[0] of the last layer hard-coded return states, predict_fun( states[(self.n_layers-1)*self.gate0.n_states]) return search.beam( step, layer_states_0, layer_states_0[0][0].shape[0], start_symbol, stop_symbol, max_length, beam_size=beam_size) def __call__(self, inputs, inputs_mask, layer_states_0=None, attended=None, attention_mask=None): if self.trainable_initial: batch_size = inputs.shape[1] if layer_states_0 is None: layer_states_0 = [ expand_to_batch(getattr(self, '_state_%d_%d_0' % ( layer, state)), batch_size) for layer in range(self.n_layers) for state in range(self.gate0.n_states)] attention_info = [] if self.gates[-1].use_attention: attention_info = [attended, self.gates[-1].attention_u(attended), attention_mask] dropout_masks = [self.dropout.mask(layer_states_0[0].shape)] seqs, _ = theano.scan( fn=self.step, go_backwards=self.backwards, sequences=[{'input': inputs, 'taps': [self.offset]}, {'input': inputs_mask, 'taps': [self.offset]}], outputs_info=list(layer_states_0) + \ [None]*(1 if self.gate0.use_attention else 0), non_sequences=dropout_masks + attention_info + \ sum([gate.parameters_list() for gate in self.gates], [])) if self.backwards: return tuple(seq[::-1] for seq in seqs) else: return seqs class Dropout(Model): """Dropout layer. name : str Name of layer. dropout : float Dropout factor (equivalent to 1 - retention probability) sequence : bool If True, dropout is not performed on the last dimension. This is useful for e.g. embedded symbol sequences, where either a symbol is kept intact or it is completely zeroed out. """ def __init__(self, name, dropout, sequence=False): super().__init__(name) self.p = 1.0 - dropout self.rng = RandomStreams() self.sequence = sequence def mask(self, shape): """Return a scaled mask for a (symbolic) shape. This can be used for dropout in recurrent layers, where a fixed mask is passed through the non_sequences argument to theano.scan(). 
""" if self.p == 1: return T.ones(shape) if self.sequence: m = T.shape_padright(self.rng.binomial(shape[:-1], p=self.p) ).astype(theano.config.floatX) else: m = self.rng.binomial(shape, p=self.p).astype(theano.config.floatX) return m / self.p def __call__(self, inputs): if self.p == 1: return inputs m = self.mask(inputs.shape) return ifelse(train_mode, inputs * m, inputs) class LayerNormalization(Model): """Layer Normalization (Ba, Kiros and Hinton 2016).""" def __init__(self, name, inputs_shape, g_init=None, axis=-1, epsilon=1e-6): super().__init__(name) self.inputs_shape = inputs_shape self.axis = axis self.epsilon = epsilon if g_init is None: g_init = init.Constant(1.0) self.param('g', (inputs_shape[self.axis],), init_f=g_init) def __call__(self, inputs): broadcast = ['x']*len(self.inputs_shape) broadcast[self.axis] = 0 mean = inputs.mean(axis=self.axis, keepdims=True).astype( theano.config.floatX) std = inputs.std(axis=self.axis, keepdims=True).astype( theano.config.floatX) normed = (inputs - mean) / (std + self.epsilon) return normed * self._g.dimshuffle(*broadcast) class LinearSelection(Model): def __init__(self, name, input_dims, output_dims, selector_dims, parallel_dims, w=None, w_init=None, w_regularizer=None, b=None, b_init=None, b_regularizer=None, sw=None, sw_init=None, sb=None, sb_init=None, input_select=False, use_bias=True, dropout=0, layernorm=False): super().__init__(name) self.input_dims = input_dims self.output_dims = output_dims self.selector_dims = selector_dims self.parallel_dims = parallel_dims self.use_bias = use_bias self.dropout = dropout self.layernorm = layernorm self.input_select = input_select s_dims = selector_dims + (input_dims if input_select else 0) if w_init is None: w_init = init.Gaussian(fan_in=input_dims) if b_init is None: b_init = init.Constant(0.0) if sw_init is None: sw_init = init.Gaussian(fan_in=s_dims) if sb_init is None: sb_init = init.Constant(0.0) self.param('w', (input_dims, output_dims*parallel_dims), init_f=w_init, value=w) self.regularize(self._w, w_regularizer) if use_bias: self.param('b', (output_dims*parallel_dims,), init_f=b_init, value=b) self.regularize(self._b, b_regularizer) self.param('sw', (s_dims, output_dims*parallel_dims), init_f=sw_init) self.param('sb', (output_dims*parallel_dims,), init_f=sb_init) if dropout: self.add(Dropout('dropout', dropout)) if layernorm: self.add(LayerNormalization('ln', (None, output_dims))) def __call__(self, inputs, selector, sequence=False): par = T.dot(inputs, self._w) if self.use_bias: par = par + self._b if sequence: par = par.reshape((par.shape[0], par.shape[1], self.output_dims, self.parallel_dims)) else: par = par.reshape((par.shape[0], self.output_dims, self.parallel_dims)) # Note that par might be a 3D or 4D tensor, while sel is always 3D if self.input_select and sequence: # ...except if we condition on the input selector = T.concatenate([ inputs, T.repeat(selector.dimshuffle('x',0,1), inputs.shape[0], axis=0)], axis=-1) sel = T.dot(selector, self._sw) + self._sb sel = sel.reshape( (sel.shape[0], sel.shape[1], self.output_dims, self.parallel_dims)) sel = softmax_4d(sel) outputs = (par * sel).sum(axis=-1) else: if self.input_select: selector = T.concatenate([inputs, selector], axis=-1) sel = T.dot(selector, self._sw) + self._sb sel = sel.reshape( (sel.shape[0], self.output_dims, self.parallel_dims)) sel = softmax_3d(sel) if sequence: outputs = (par * sel.dimshuffle('x',0,1,2)).sum(axis=-1) else: outputs = (par * sel).sum(axis=-1) if self.layernorm: outputs = self.ln(outputs) if 
self.dropout: outputs = self.dropout(outputs) return outputs
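The LinearSelection layer above computes parallel_dims candidate outputs per unit and mixes them with a softmax taken over a projection of the selector vector. A minimal NumPy sketch of that arithmetic for the non-sequence case follows; it is an illustration only, with invented shapes and variable names, and is not code from the repository.

import numpy as np

def linear_selection_forward(x, sel, W, b, SW, SB, output_dims, parallel_dims):
    # Candidate outputs: (batch, output_dims, parallel_dims)
    par = (x @ W + b).reshape(x.shape[0], output_dims, parallel_dims)
    # Selection logits from the selector vector, reshaped to match the candidates
    logits = (sel @ SW + SB).reshape(x.shape[0], output_dims, parallel_dims)
    # Softmax over the parallel dimension
    e = np.exp(logits - logits.max(axis=-1, keepdims=True))
    weights = e / e.sum(axis=-1, keepdims=True)
    # Convex combination of the parallel candidates
    return (par * weights).sum(axis=-1)

rng = np.random.default_rng(0)
batch, input_dims, selector_dims, output_dims, parallel_dims = 4, 8, 5, 3, 2
x = rng.normal(size=(batch, input_dims))
sel = rng.normal(size=(batch, selector_dims))
W = rng.normal(size=(input_dims, output_dims * parallel_dims))
b = np.zeros(output_dims * parallel_dims)
SW = rng.normal(size=(selector_dims, output_dims * parallel_dims))
SB = np.zeros(output_dims * parallel_dims)
print(linear_selection_forward(x, sel, W, b, SW, SB, output_dims, parallel_dims).shape)  # (4, 3)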
gpl-3.0
-8,664,384,897,296,081,000
40.040388
85
0.531078
false
3.942578
false
false
false
cyliustack/sofa
bin/sofa_analyze.py
1
50661
import argparse import matplotlib matplotlib.use('agg') import csv import json import multiprocessing as mp import os import random import re import sys from functools import partial from operator import attrgetter, itemgetter import networkx as nx import numpy as np import pandas as pd import time from sofa_aisi import * from sofa_common import * from sofa_config import * from sofa_print import * from matplotlib import pyplot as plt import grpc import potato_pb2 import potato_pb2_grpc import socket import random import subprocess from sofa_ml import hsg_v2 def random_generate_color(): rand = lambda: random.randint(0, 255) return '#%02X%02X%02X' % (64, rand(), rand()) def get_top_k_events(cfg, df, topk): topk_events=[] gby = df.groupby(['name']) df_agg = gby.aggregate(np.sum) df_agg_sorted = df_agg.sort_values(by=['duration'],ascending=False) #memcpy = ['copyKind_1_','copyKind_2_','copyKind_8_'] if cfg.verbose: print("Top %d Events: "%topk) print(df_agg_sorted[['duration']][0:topk]) eventName = df_agg_sorted[df_agg_sorted.columns[0:0]].head(topk).index.values.tolist() return eventName # input: pfv(performance feature vector), Pandas.DataFrame # output: hint, docker_image def get_hint(potato_server, features): if len(features) > 0: pfv = potato_pb2.PerformanceFeatureVector() for i in range(len(features)): name = features.iloc[i]['name'] value = features.iloc[i]['value'] #print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20))) pfv.name.append(name) pfv.value.append(value) #print('Wait for response from POTATO server...') myhostname = socket.gethostname() channel = grpc.insecure_channel(potato_server) stub = potato_pb2_grpc.HintStub(channel) request = potato_pb2.HintRequest( hostname = myhostname, pfv = pfv) response = stub.Hint(request) hint = response.hint docker_image = response.docker_image else: hint = 'There is no pfv to get hints.' 
docker_image = 'NA' return hint, docker_image def concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features): if cfg.verbose: print_title('Concurrency Breakdown Analysis') total_elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0} elapsed_time_ratio = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0} total_interval_vector = [] total_performace_vector = [] if len(df_mpstat) == 0: print_warning(cfg, 'no mpstat and perf traces!') return features t_begin = df_mpstat.iloc[0]['timestamp'] t_end = df_mpstat.iloc[-1]['timestamp'] t = t_begin sample_time = (1 / float(cfg.sys_mon_rate)) while t < t_end: t = t + sample_time if cfg.roi_end > 0 and (t < cfg.roi_begin or t > cfg.roi_end): continue window_begin = t - sample_time window_end = t if len(df_cpu) > 0: if df_cpu.iloc[0].timestamp > window_end: continue cond1 = (df_cpu['timestamp'] > window_begin) cond2 = (df_cpu['timestamp'] <= window_end) df_cpu_interval = df_cpu[ cond1 & cond2 ] num_gpus = len(list(set(df_nvsmi['deviceId']))) cond1 = (df_nvsmi['timestamp'] > window_begin) cond2 = (df_nvsmi['timestamp'] <= window_end) sm = df_nvsmi['event'] == int(0) df_nvsmi_interval = df_nvsmi[ cond1 & cond2 & sm ] cond1 = (df_mpstat['timestamp'] > window_begin) cond2 = (df_mpstat['timestamp'] <= window_end) df_mpstat_interval = df_mpstat[ cond1 & cond2 ] cond1 = (df_bandwidth['timestamp'] > window_begin) cond2 = (df_bandwidth['timestamp'] <= window_end) tx = df_bandwidth['event'] == float(0) rx = df_bandwidth['event'] == float(1) df_tx_interval = df_bandwidth[ cond1 & cond2 & tx ] df_rx_interval = df_bandwidth[ cond1 & cond2 & rx ] mp_usr = [] mp_sys = [] mp_idl = [] mp_iow = [] usr = [] sys = [] irq = [] cpu_max = 0 cpu_min = 100 for i in range(len(df_mpstat_interval)): ratios = df_mpstat_interval.iloc[i]['name'].split(':')[1].split('|') #print(ratios) mp_usr.append(sample_time*int(ratios[1])/100.0) mp_sys.append(sample_time*int(ratios[2])/100.0) mp_idl.append(sample_time*int(ratios[3])/100.0) mp_iow.append(sample_time*int(ratios[4])/100.0) usr.append(int(ratios[1])) sys.append(int(ratios[2])) irq.append(int(ratios[5])) cpu_tmp = int(ratios[1]) + int(ratios[2]) + int(ratios[5]) if cpu_tmp > cpu_max: cpu_max = cpu_tmp if cpu_tmp < cpu_min: cpu_min = cpu_tmp mp_usr = np.asarray(mp_usr) mp_sys = np.asarray(mp_sys) mp_idl = np.asarray(mp_idl) mp_iow = np.asarray(mp_iow) usr = np.asarray(usr) sys = np.asarray(sys) irq = np.asarray(irq) elapsed_time = {'usr':0, 'sys':0, 'gpu':0, 'iow':0, 'idl':0} if len(df_mpstat_interval) > 0: elapsed_time['usr'] = mp_usr.max() elapsed_time['sys'] = mp_sys.max() elapsed_time['gpu'] = df_nvsmi_interval['duration'].max() * 0.01 * sample_time elapsed_time['iow'] = mp_iow.max() #print('gput,usrt = ', elapsed_time['gpu'], elapsed_time['usr']) dominator = max(elapsed_time, key=elapsed_time.get) #if elapsed_time['gpu'] > 0.1 : # dominator = 'gpu' if elapsed_time[dominator] > sample_time * int(cfg.is_idle_threshold)/100: total_elapsed_time[dominator] = total_elapsed_time[dominator] + sample_time else: total_elapsed_time['idl'] += sample_time if num_gpus > 0: time_gpu_avg = df_nvsmi_interval['duration'].sum() * 0.01 * sample_time / num_gpus else: time_gpu_avg = 0 interval_vector = [mp_usr.max(), mp_sys.max(), mp_iow.max(), mp_idl.max(), time_gpu_avg, df_tx_interval['bandwidth'].sum(), df_rx_interval['bandwidth'].sum()] total_interval_vector.append(tuple(interval_vector)) if num_gpus > 0: sm_avg = df_nvsmi_interval['duration'].sum() / int(len(list(set(df_nvsmi_interval['deviceId'])))) else: 
sm_avg = 0 performace_vector = [window_end, df_nvsmi_interval['duration'].max(), sm_avg, df_nvsmi_interval['duration'].min(), round((usr.mean() + sys.mean() + irq.mean()), 0), cpu_max, cpu_min] total_performace_vector.append(tuple(performace_vector)) total_all_elapsed_time = sum(total_elapsed_time.values()) if total_all_elapsed_time > 0 : elapsed_time_ratio['usr'] = 100 * total_elapsed_time['usr'] / total_all_elapsed_time elapsed_time_ratio['sys'] = 100 * total_elapsed_time['sys'] / total_all_elapsed_time elapsed_time_ratio['gpu'] = 100 * total_elapsed_time['gpu'] / total_all_elapsed_time elapsed_time_ratio['idl'] = 100 * total_elapsed_time['idl'] / total_all_elapsed_time elapsed_time_ratio['iow'] = 100 * total_elapsed_time['iow'] / total_all_elapsed_time if cfg.verbose: print('Elapsed Time = %.1lf ' % total_all_elapsed_time) print('USR = %.1lf %%' % elapsed_time_ratio['usr']) print('SYS = %.1lf %%' % elapsed_time_ratio['sys']) if num_gpus > 0: print('GPU = %.1lf %%' % elapsed_time_ratio['gpu']) print('IDL = %.1lf %%' % elapsed_time_ratio['idl']) print('IOW = %.1lf %%' % elapsed_time_ratio['iow']) if cfg.spotlight_gpu: elapsed_hotspot_time = cfg.roi_end - cfg.roi_begin else: elapsed_hotspot_time = 0 df = pd.DataFrame({ 'name':['elapsed_usr_time_ratio', 'elapsed_sys_time_ratio', 'elapsed_gpu_time_ratio', 'elapsed_iow_time_ratio', 'elapsed_hotspot_time'], 'value':[elapsed_time_ratio['usr'], elapsed_time_ratio['sys'], elapsed_time_ratio['gpu'], elapsed_time_ratio['iow'], elapsed_hotspot_time ] }, columns=['name','value']) features = pd.concat([features, df]) if len(total_performace_vector) > 0: performance_table = pd.DataFrame(total_performace_vector, columns = ['time', 'max_gpu_util', 'avg_gpu_util', 'min_gpu_util', 'cpu_util', 'cpu_max', 'cpu_min']) performance_table.to_csv('%s/performance.csv' % logdir) vector_table = pd.DataFrame(total_interval_vector, columns = ['usr' , 'sys', 'iow', 'idl','gpu', 'net_tx', 'net_rx']) pearson = vector_table.corr(method ='pearson').round(2) if cfg.verbose: print('Correlation Table :') print(pearson) df = pd.DataFrame({ 'name':['corr_gpu_usr', 'corr_gpu_sys', 'corr_gpu_iow', 'corr_gpu_ntx', 'corr_gpu_nrx'], 'value':[pearson['gpu'].usr, pearson['gpu'].sys, pearson['gpu'].iow, pearson['gpu'].net_tx, pearson['gpu'].net_rx]}, columns=['name','value']) features = pd.concat([features, df]) return features def payload_sum(df): print((len(df))) class Event: def __init__(self, name, ttype, timestamp, duration): self.name = name self.ttype = ttype # 0 for begin, 1 for end self.timestamp = timestamp self.duration = duration def __repr__(self): return repr((self.name, self.ttype, self.timestamp, self.duration)) def nvsmi_profile(logdir, cfg, df_nvsmi, features): if not cfg.cluster_ip and cfg.verbose: print_title('SM & MEM & ENCODE/DECODE Profiling') if cfg.spotlight_gpu: if cfg.roi_end == 0 : print_warning(cfg, 'spotlight_gpu has no effects.') else: cond1 = (df_nvsmi['timestamp'] > cfg.roi_begin) cond2 = (df_nvsmi['timestamp'] <= cfg.roi_end) df_nvsmi = df_nvsmi[ cond1 & cond2 ] sm_start = df_nvsmi.iloc[0].timestamp sm_end = df_nvsmi.iloc[-1].timestamp SM_time = sm_end - sm_start result = df_nvsmi.groupby(['deviceId','event'])['duration'].mean() result = result.astype(int) gpu_sm_util = df_nvsmi.groupby(['event'])['duration'].mean()[0] gpu_mem_util = df_nvsmi.groupby(['event'])['duration'].mean()[1] if cfg.nvsmi_data: gpu_enc_util = df_nvsmi.groupby(['event'])['duration'].mean()[2] gpu_dec_util = df_nvsmi.groupby(['event'])['duration'].mean()[3] else: gpu_enc_util = 0 
gpu_dec_util = 0 sm = df_nvsmi['event'] == int(0) mem = df_nvsmi['event'] == int(1) enc = df_nvsmi['event'] == int(2) dec = df_nvsmi['event'] == int(3) gpunum = list(set(df_nvsmi['deviceId'])) res = pd.DataFrame([], columns=['sm', 'mem', 'enc', 'dec']) sm_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg']) mem_q = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg']) for i in gpunum: gpuid = df_nvsmi['deviceId'] == int(i) gpudata = [round(df_nvsmi[sm & gpuid]['duration'].mean(), 2), round(df_nvsmi[mem & gpuid]['duration'].mean(), 2), round(df_nvsmi[enc & gpuid]['duration'].mean(), 2), round(df_nvsmi[dec & gpuid]['duration'].mean(), 2)] smdata = [round(df_nvsmi[sm & gpuid]['duration'].quantile(0.25), 2), round(df_nvsmi[sm & gpuid]['duration'].quantile(0.5), 2), round(df_nvsmi[sm & gpuid]['duration'].quantile(0.75), 2), round(df_nvsmi[sm & gpuid]['duration'].mean(), 2)] memdata = [round(df_nvsmi[mem & gpuid]['duration'].quantile(0.25), 2), round(df_nvsmi[mem & gpuid]['duration'].quantile(0.5), 2), round(df_nvsmi[mem & gpuid]['duration'].quantile(0.75), 2), round(df_nvsmi[mem & gpuid]['duration'].mean(), 2)] gpu_tmp = pd.DataFrame([gpudata], columns=['sm', 'mem', 'enc', 'dec'], index=[i]) sm_tmp = pd.DataFrame([smdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i]) mem_tmp = pd.DataFrame([memdata], columns=['Q1', 'Q2', 'Q3', 'Avg'], index=[i]) res = pd.concat([res, gpu_tmp]) sm_q = pd.concat([sm_q, sm_tmp]) mem_q = pd.concat([mem_q, mem_tmp]) res.index.name = 'gpu_id' sm_q.index.name = 'gpu_id' mem_q.index.name = 'gpu_id' if not cfg.cluster_ip and cfg.verbose: print('GPU Utilization (%):') print(res) print('\nGPU SM Quartile (%):') print(sm_q) print('\nGPU MEM Quartile (%):') print(mem_q) print('Overall Average SM Utilization (%): ', int(gpu_sm_util)) print('Overall Average MEM Utilization (%): ', int(gpu_mem_util)) print('Overall Average ENC Utilization (%): ', int(gpu_enc_util)) print('Overall Average DEC Utilization (%): ', int(gpu_dec_util)) print('Overall Active GPU Time (s): %.3lf' % (SM_time * gpu_sm_util/100.0)) df = pd.DataFrame({'name':['gpu_sm_util_q2', 'gpu_sm_util_q3', 'gpu_sm_util', 'gpu_mem_util_q2', 'gpu_mem_util_q3', 'gpu_mem_util'], 'value':[df_nvsmi[sm & gpuid]['duration'].quantile(0.5), df_nvsmi[sm & gpuid]['duration'].quantile(0.75), int(gpu_sm_util), df_nvsmi[mem & gpuid]['duration'].quantile(0.5), df_nvsmi[mem & gpuid]['duration'].quantile(0.75), int(gpu_mem_util), ]}, columns=['name','value']) features = pd.concat([features, df]) return features def gpu_profile(logdir, cfg, df_gpu, features): if cfg.verbose: print_title('GPU Profiling') print('Per-GPU time (s):') groups = df_gpu.groupby("deviceId")["duration"] gpu_time = 0 for key, item in groups: gpuid = int(float(key)) per_gpu_time = groups.get_group(key).sum() if cfg.verbose: print("[%d]: %lf" % (gpuid, per_gpu_time)) gpu_time = gpu_time + per_gpu_time num_gpus = len(groups) kernel_time = 0 grouped_df = df_gpu.groupby("copyKind")["duration"] for key, item in grouped_df: if key == 0: kernel_time = grouped_df.get_group(key).sum() nccl_time = 0 grouped_df = df_gpu.groupby("name")["duration"] for key, item in grouped_df: #print("[%s]: %lf" % (key, grouped_df.get_group(key).sum())) if key.find("nccl") != -1: nccl_time = nccl_time + grouped_df.get_group(key).sum() features = comm_profile(logdir, cfg, df_gpu, features) get_top_k_events(cfg, df_gpu, 10) df = pd.DataFrame({'name':['gpu_time', 'num_gpus', 'kernel_time', 'nccl_time'], 'value':[gpu_time, num_gpus, kernel_time, nccl_time] }, columns=['name','value']) features 
= pd.concat([features, df]) return features def strace_profile(logdir, cfg, df, features): print_title('STRACE Profiling:') return features def net_profile(logdir, cfg, df, features): if not cfg.cluster_ip: print_title("Network Profiling:") grouped_df = df.groupby("name")["duration"] net_time = 0 n_packets = 0 for key, item in grouped_df: #print("[%s]: %lf" % (key, grouped_df.get_group(key).sum())) if key.find("network:tcp:") != -1: net_time = net_time + grouped_df.get_group(key).sum() n_packets = n_packets + 1 #print(("total network time (s) = %.3lf" % net_time)) #print(("total amount of network packets = %d" % n_packets)) # total network packet packet_num_matrix = df.groupby(['pkt_src','pkt_dst','payload']).size().unstack(level=1, fill_value=0) # total network traffic packet_sum_matrix = df.groupby(['pkt_src','pkt_dst'])["payload"].sum().unstack(level=1, fill_value=0) # ================ change pandas table columns and index name ==== rename_index = packet_sum_matrix.index.tolist() rename_index2 = packet_num_matrix.index.tolist() rename_columns = packet_sum_matrix.columns.tolist() rename_columns2 = packet_num_matrix.columns.tolist() def zero(s): if s[0:2] == '00': s = s[2] elif (s[0] == '0') and (s[1] != '0'): s = s[1:3] return(s) def check_str(rename_list): rename_list_new = [] for j in rename_list: j = str(int(j)) a = j[-9:-6] b = j[-6:-3] c = j[-3:] j = j[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c) rename_list_new.append(j) return(rename_list_new) def check_str2(rename_list): rename_columns_2 = [] for i in rename_list: i = str(int(i[0])) a = i[-9:-6] b = i[-6:-3] c = i[-3:] i = i[:-9] + '.' + zero(a) + '.' + zero(b) + '.' + zero(c) rename_columns_2.append(i) return(rename_columns_2) rename_index_new = check_str(rename_index) rename_index_new = dict(zip(rename_index, rename_index_new)) rename_index2_new = check_str2(rename_index2) rename_index2_final = list(set(rename_index2_new)) rename_index2_final.sort(key=rename_index2_new.index) rename_columns_new = check_str(rename_columns) rename_columns_new = dict(zip(rename_columns, rename_columns_new)) rename_columns2_new = check_str(rename_columns2) rename_columns2_new = dict(zip(rename_columns2, rename_columns2_new)) # rename here packet_sum_matrix = packet_sum_matrix.rename(columns=rename_columns_new) packet_num_matrix = packet_num_matrix.rename(columns=rename_columns2_new) packet_sum_matrix = packet_sum_matrix.rename(index=rename_index_new) packet_num_matrix.index.set_levels(rename_index2_final , level = 0, inplace = True) if cfg.verbose: print("total amount of network traffic : ", convertbyte(df['payload'].sum()), '\n', packet_sum_matrix.to_string(), "\n") print("total amount of network packets = %d\n" % packet_num_matrix.sum().sum() ,packet_num_matrix.to_string(), "\n") network_value = [] src = [] dst = [] final = [] for index in packet_sum_matrix.index: for column in packet_sum_matrix.columns: src.append(index) dst.append(column) network_value.append(packet_sum_matrix[column][index]) record = list(zip(src, dst, network_value)) record.sort(key=lambda tup:tup[2], reverse=True) for src, dst, value in record: if value == 0: pass else: item = [src, dst, convertbyte(value), round(value / df['payload'].sum(), 2)] final.append(item) summary = pd.DataFrame(final, columns=['Source', 'Destination', 'Amount', 'Percentage of a Node']) summary.to_csv(logdir + 'netrank.csv', mode='w', header=True, index=False) df = pd.DataFrame({'name':['net_time'], 'value':[net_time] }, columns=['name','value']) features = pd.concat([features, df]) return 
features def convertbyte(B): B = int(B) KB = float(1024) MB = float(KB ** 2) # 1,048,576 GB = float(KB ** 3) # 1,073,741,824 TB = float(KB ** 4) # 1,099,511,627,776 if B < KB: return '{} Bytes'.format(B) elif KB <= B < MB: return '{0:.2f} KB'.format(B/KB) elif MB <= B < GB: return '{0:.2f} MB'.format(B/MB) elif GB <= B < TB: return '{0:.2f} GB'.format(B/GB) elif TB <= B: return '{0:.2f} TB'.format(B/TB) def convertbytes(B): B = float(B) KB = float(1024) MB = float(KB ** 2) # 1,048,576 GB = float(KB ** 3) # 1,073,741,824 TB = float(KB ** 4) # 1,099,511,627,776 if B < KB: return '{0:.2f} B/s'.format(B) elif KB <= B < MB: return '{0:.2f} KB/s'.format(B/KB) elif MB <= B < GB: return '{0:.2f} MB/s'.format(B/MB) elif GB <= B < TB: return '{0:.2f} GB/s'.format(B/GB) elif TB <= B: return '{0:.2f} TB/s'.format(B/TB) def netbandwidth_profile(logdir, cfg, df, features): if not cfg.cluster_ip and cfg.verbose: print_title('Network Bandwidth Profiling:') tx = df['event'] == float(0) rx = df['event'] == float(1) bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25) bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5) bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75) bw_tx_mean = int(df[tx]['bandwidth'].mean()) bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25) bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5) bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75) bw_rx_mean = int(df[rx]['bandwidth'].mean()) with open('%s/netstat.txt' % logdir) as f: lines = f.readlines() first_line = lines[0] last_line = lines[-1] tx_begin = first_line.split(',')[1] rx_begin = first_line.split(',')[2] tx_end = last_line.split(',')[1] rx_end = last_line.split(',')[2] tx_amount = int(last_line.split(',')[1]) - int(first_line.split(',')[1]) rx_amount = int(last_line.split(',')[2]) - int(first_line.split(',')[2]) if not cfg.cluster_ip: bw_tx_q1 = df[tx]['bandwidth'].quantile(0.25) bw_tx_q2 = df[tx]['bandwidth'].quantile(0.5) bw_tx_q3 = df[tx]['bandwidth'].quantile(0.75) bw_tx_mean = int(df[tx]['bandwidth'].mean()) bw_rx_q1 = df[rx]['bandwidth'].quantile(0.25) bw_rx_q2 = df[rx]['bandwidth'].quantile(0.5) bw_rx_q3 = df[rx]['bandwidth'].quantile(0.75) bw_rx_mean = int(df[rx]['bandwidth'].mean()) if cfg.verbose: print('Amount of Network Traffic : %s' % (convertbyte(tx_amount + rx_amount))) print('Amount of tx : %s' % convertbyte(tx_amount)) print('Amount of rx : %s' % convertbyte(rx_amount)) print('Bandwidth Quartile :') print('Q1 tx : %s, rx : %s' % ( convertbytes(bw_tx_q1), convertbytes(bw_rx_q1))) print('Q2 tx : %s, rx : %s' % ( convertbytes(bw_tx_q2), convertbytes(bw_rx_q2))) print('Q3 tx : %s, rx : %s' % ( convertbytes(bw_tx_q3), convertbytes(bw_rx_q3))) print('Avg tx : %s, rx : %s'% ( convertbytes(bw_tx_mean), convertbytes(bw_rx_mean))) #network chart part all_time = df[tx]['timestamp'].tolist() all_tx = df[tx]['bandwidth'].tolist() all_rx = df[rx]['bandwidth'].tolist() fig = plt.figure(dpi=128, figsize=(16, 14)) plt.plot(all_time, all_tx, c='red', alpha=0.5, label='tx') plt.plot(all_time, all_rx, c='blue', alpha=0.5, label='rx') plt.legend(loc='upper right') plt.title("Network Report", fontsize=18) plt.xlabel('Timestamp (s)', fontsize=16) plt.ylabel("Bandwidth (bytes)", fontsize=16) fig.savefig("%s/network_report.pdf" % logdir, bbox_inches='tight') if not cfg.cluster_ip and cfg.verbose: print('Network Bandwidth Chart is saved at %s/network_report.pdf' %logdir) df_feature = pd.DataFrame({ 'name':['bw_tx_q2', 'bw_tx_q3', 'bw_rx_q2', 'bw_rx_q3'], 'value':[bw_tx_q2, bw_tx_q3, bw_rx_q2, bw_rx_q3] }, columns=['name','value']) features = pd.concat([features, 
df_feature]) return features def blktrace_latency_profile(logdir, cfg, df, features): with open('%s/btt.txt' % logdir) as f: lines = f.readlines() for i, line in enumerate(lines): if '==================== All Devices ====================' in line: start = i if '==================== Device Merge Information ====================' in line: end = i break bttoutput_result = lines[start:end] df_offset = pd.read_table('%s/offset_all.txt' % logdir, delim_whitespace=True, names=('time', 'start', 'end')) time = df_offset['time'].tolist() start_b = df_offset['start'].tolist() end_b = df_offset['end'].tolist() fig = plt.figure(dpi=128, figsize=(16, 14)) plt.plot(time, start_b, c='red', marker='o', alpha=0.3, label='Start block') plt.legend(loc='upper right') plt.title("Block Offset Report", fontsize=18) plt.xlabel('Timestamp (s)', fontsize=16) plt.ylabel("Block Number", fontsize=16) fig.savefig("%s/offset_of_device_report.pdf" % logdir, bbox_inches='tight') print('Offset of Device Report is saved at %s/offset_of_device_report.pdf' %logdir) if cfg.verbose: print_title('Storage Profiling:') print('Blktracae Latency (s):') for btt in bttoutput_result: print(btt[:-1]) blktrace_latency = df['event'] == 'C' blktrace_latency_q1 = df[blktrace_latency]['duration'].quantile(0.25) blktrace_latency_q2 = df[blktrace_latency]['duration'].quantile(0.5) blktrace_latency_q3 = df[blktrace_latency]['duration'].quantile(0.75) blktrace_latency_mean = df[blktrace_latency]['duration'].mean() df_feature = pd.DataFrame({ 'name':['blktrace_latency_q1','blktrace_latency_q2','blktrace_latency_q3'], 'value': [blktrace_latency_q1, blktrace_latency_q2, blktrace_latency_q3] }, columns=['name','value']) features = pd.concat([features, df_feature]) return features def diskstat_profile(logdir, cfg, df, features): #diskstat_dev = list(set(df['dev'])) diskstat_r_q1 = df.groupby('dev')['d_read'].quantile(0.25) diskstat_w_q1 = df.groupby('dev')['d_write'].quantile(0.25) diskstat_q1 = df.groupby('dev')['d_disk_total'].quantile(0.25) diskstat_r_q2 = df.groupby('dev')['d_read'].quantile(0.5) diskstat_w_q2 = df.groupby('dev')['d_write'].quantile(0.5) diskstat_q2 = df.groupby('dev')['d_disk_total'].quantile(0.5) diskstat_r_q3 = df.groupby('dev')['d_read'].quantile(0.75) diskstat_w_q3 = df.groupby('dev')['d_write'].quantile(0.75) diskstat_q3 = df.groupby('dev')['d_disk_total'].quantile(0.75) diskstat_r_avg = df.groupby('dev')['d_read'].mean() diskstat_w_avg = df.groupby('dev')['d_write'].mean() diskstat_avg = df.groupby('dev')['d_disk_total'].mean() diskstat_r_iops = df.groupby('dev')['r_iops'].mean() diskstat_w_iops = df.groupby('dev')['w_iops'].mean() diskstat_iops = df.groupby('dev')['iops'].mean() diskstat_wait = df.groupby('dev')['await_time'].mean() diskstat_table = pd.concat([diskstat_r_q1, diskstat_r_q2, diskstat_r_q3, diskstat_r_avg, diskstat_w_q1, diskstat_w_q2, diskstat_w_q3, diskstat_w_avg, diskstat_q1, diskstat_q2, diskstat_q3, diskstat_avg, diskstat_r_iops, diskstat_w_iops, diskstat_iops, diskstat_wait], axis=1, sort=False) diskstat_columns = ['Q1 throughput(Read)', 'Q2 throughput(Read)', 'Q3 throughput(Read)', 'Avg throughput(Read)', 'Q1 throughput(Write)', 'Q2 throughput(Write)', 'Q3 throughput(Write)', 'Avg throughput(Write)', 'Q1 throughput(R+W)', 'Q2 throughput(R+W)', 'Q3 throughput(R+W)', 'Avg throughput(R+W)', 'Avg IOPS(Read)', 'Avg IOPS(Write)', 'Avg IOPS(R+W)', 'Avg Await time(ms)'] diskstat_table.columns = diskstat_columns diskstat_dev = diskstat_table.index.format() final_table = pd.DataFrame(columns=diskstat_columns) 
for j, dev in enumerate(diskstat_dev): tmp_list = [] for i in diskstat_columns[:-4]: tmp_list.append(convertbytes(diskstat_table.iloc[j][i])) for i in diskstat_columns[-4:-1]: tmp_list.append('%d' % int(diskstat_table.iloc[j][i])) tmp_list.append('%.3lf ms' % diskstat_table.iloc[j][-1]) tmp_table = pd.DataFrame([tuple(tmp_list)], columns=diskstat_columns, index=[dev]) final_table = pd.concat([final_table, tmp_table]) if cfg.verbose: print_title('DISKSTAT Profiling:') print('Disk Throughput Quartile :') print(final_table.T) df_feature = pd.DataFrame({ 'name':['diskstat_q1','diskstat_q2','diskstat_q3'], 'value': [diskstat_q1.mean(), diskstat_q2.mean(), diskstat_q3.mean()] }, columns=['name','value']) features = pd.concat([features, df_feature]) return features def cpu_profile(logdir, cfg, df): if cfg.verbose: print_title('CPU Profiling:') print('elapsed_time (s) = %.6lf' % cfg.elapsed_time) grouped_df = df.groupby("deviceId")["duration"] total_exec_time = 0 for key, item in grouped_df: print(("[%d]: %lf" % (key, grouped_df.get_group(key).sum()))) total_exec_time = total_exec_time + grouped_df.get_group(key).sum() print("total execution time (s) = %.3lf" % total_exec_time) cpu_detail_profile_df = df[['timestamp','duration','name']] cpu_detail_profile_df = cpu_detail_profile_df.sort_values(by=['duration'], ascending=False) cpu_detail_profile_df['ratio(%)'] = cpu_detail_profile_df['duration']/total_exec_time * 100 cpu_detail_profile_df = cpu_detail_profile_df[['timestamp','ratio(%)','duration','name']] print(cpu_detail_profile_df[:20].to_string(index=False)) def vmstat_profile(logdir, cfg, df, features): _,_,_,_,_,_,df['si'],df['so'],df['bi'],df['bo'],df['in'],df['cs'],_,_,_,_,_=df['name'].str.split('|').str for col_name in ('si','so','bi','bo','in','cs'): df[col_name] = df[col_name].str[3:] vmstat_traces = df[['si','so','bi','bo','in','cs']].astype(float) vm_bi = vmstat_traces['bi'].mean() vm_bo = vmstat_traces['bo'].mean() vm_cs = vmstat_traces['cs'].mean() vm_in = vmstat_traces['in'].mean() if cfg.verbose: print_title('VMSTAT Profiling:') print('average bi/s: %d' % int(vm_cs)) print('average bo/s: %d' % int(vm_in)) print('average cs/s: %d' % int(vm_bi)) print('average in/s: %d' % int(vm_bo)) df_feature = pd.DataFrame({ 'name':['vm_bi', 'vm_bo', 'vm_cs', 'vm_in' ], 'value':[vm_bi, vm_bo, vm_cs, vm_in] }, columns=['name','value']) features = pd.concat([features, df_feature]) return features def mpstat_profile(logdir, cfg, df, features): if not cfg.cluster_ip and cfg.verbose: print_title('MPSTAT Profiling:') num_cores = int(df['deviceId'].max() + 1) df_summary = pd.DataFrame( np.zeros((num_cores,5)), columns=['USR','SYS','IDL','IOW','IRQ']) _,_,_,_,_,df['USR'],df['SYS'],df['IDL'],df['IOW'],df['IRQ'],_ = df["name"].str.split('|').str df[['USR','SYS','IDL','IOW','IRQ']] = df[['USR','SYS','IDL','IOW','IRQ']].astype(float) df["dt_all"] = np.where(df["IDL"]==100, 0.1, df["duration"]/((100-df["IDL"])/100.0)) df["t_USR"] = df['dt_all'] * df['USR']/100.0 df["t_SYS"] = df['dt_all'] * df['SYS']/100.0 df["t_IDL"] = df['dt_all'] * df['IDL']/100.0 df["t_IOW"] = df['dt_all'] * df['IOW']/100.0 df["t_IRQ"] = df['dt_all'] * df['IRQ']/100.0 dfs=[] for i in range(num_cores): dfs.append(df.loc[df['deviceId'] == float(i)]) for index,dff in enumerate(dfs): df_summary.iloc[index]['USR'] = dff['t_USR'].sum() df_summary.iloc[index]['SYS'] = dff['t_SYS'].sum() df_summary.iloc[index]['IDL'] = dff['t_IDL'].sum() df_summary.iloc[index]['IRQ'] = dff['t_IRQ'].sum() df_summary.iloc[index]['IOW'] = dff['t_IOW'].sum() if not 
cfg.cluster_ip and cfg.verbose: print('CPU Utilization (%):') print('core\tUSR\tSYS\tIDL\tIOW\tIRQ') for i in range(len(df_summary)): t_sum = df_summary.iloc[i].sum() if not cfg.cluster_ip and cfg.verbose: print('%3d\t%3d\t%3d\t%3d\t%3d\t%3d'%(i,int(100.0*df_summary.iloc[i]['USR']/t_sum), int(100.0*df_summary.iloc[i]['SYS']/t_sum), int(100.0*df_summary.iloc[i]['IDL']/t_sum), int(100.0*df_summary.iloc[i]['IOW']/t_sum), int(100.0*df_summary.iloc[i]['IRQ']/t_sum) )) if not cfg.cluster_ip and cfg.verbose: print('CPU Time (s):') print('core\tUSR\tSYS\tIDL\tIOW\tIRQ') for i in range(len(df_summary)): t_sum = df_summary.iloc[i].sum() if not cfg.cluster_ip and cfg.verbose: print('%3d\t%.2lf\t%.2lf\t%.2lf\t%.2lf\t%.2lf'%(i, df_summary.iloc[i]['USR'], df_summary.iloc[i]['SYS'], df_summary.iloc[i]['IDL'], df_summary.iloc[i]['IOW'], df_summary.iloc[i]['IRQ'] )) total_cpu_time = df_summary[['USR','SYS','IRQ']].sum().sum() cpu_util = int(100*total_cpu_time / (num_cores*cfg.elapsed_time)) if not cfg.cluster_ip and cfg.verbose: print('Active CPU Time (s): %.3lf' % total_cpu_time) print('Active CPU ratio (%%): %3d' % cpu_util) df_feature = pd.DataFrame({ 'name':['num_cores', 'cpu_util'], 'value':[num_cores, cpu_util] }, columns=['name','value']) features = pd.concat([features, df_feature]) return features def sofa_analyze(cfg): print_main_progress('SOFA analyzing...') filein = [] df_cpu = pd.DataFrame([], columns=cfg.columns) df_gpu = pd.DataFrame([], columns=cfg.columns) df_net = pd.DataFrame([], columns=cfg.columns) df_mpstat = pd.DataFrame([], columns=cfg.columns) df_vmstat = pd.DataFrame([], columns=cfg.columns) df_bandwidth = pd.DataFrame([], columns=cfg.columns) df_blktrace = pd.DataFrame([], columns=cfg.columns) df_diskstat = pd.DataFrame([], columns=cfg.columns) df_nvsmi = pd.DataFrame([], columns=cfg.columns) iter_summary = None logdir = cfg.logdir with open(logdir+'/misc.txt') as f: lines = f.readlines() elapsed_time = float(lines[0].split()[1]) vcores = int(lines[2].split()[1]) cfg.elapsed_time = float(lines[0].split()[1]) filein_gpu = logdir + "gputrace.csv" filein_cpu = logdir + "cputrace.csv" filein_net = logdir + "nettrace.csv" filein_vmstat = logdir + "vmstat.csv" filein_mpstat = logdir + "mpstat.csv" filein_strace = logdir + "strace.csv" filein_nvsmi = logdir + "nvsmi_trace.csv" filein_bandwidth = logdir + "netstat.csv" filein_blktrace = logdir + "blktrace.csv" filein_diskstat = logdir + "diskstat_vector.csv" if os.path.isfile('%s/nvlink_topo.txt' % logdir): with open(logdir + 'nvlink_topo.txt') as f: lines = f.readlines() if len(lines) > 0: title = lines[0] num_gpus = 1 for word in title.split(): if re.match(r'GPU', word) != None : num_gpus = num_gpus + 1 print_info(cfg,'# of GPUs: ' + str(num_gpus) ) edges = [] if len(lines) >= num_gpus+1: for i in range(num_gpus): connections = lines[1+i].split() for j in range(len(connections)): if connections[j] == 'NV1' or connections[j] == 'NV2': edges.append((i,j-1)) #print('%d connects to %d' % (i, j-1)) ring_found = False G = nx.DiGraph(edges) # Try to find ring with its length of num_gpus for cycle in nx.simple_cycles(G): if len(cycle) == num_gpus: if cfg.verbose: print('One of the recommended ring having length of %d' % len(cycle)) ring_found = True os.system("mkdir -p sofalog/sofa_hints/") xring_order = ','.join(map(str, cycle)) with open("sofalog/sofa_hints/xring_order.txt", "w") as f: f.write('export CUDA_VISIBLE_DEVICES=' + xring_order) break # Try to find ring with its length of num_gpus/2 if not ring_found: for cycle in 
nx.simple_cycles(G): if len(cycle) == num_gpus/2: print(("One of the recommended ring having length of %d" % len(cycle) )) ring_found = True os.system("mkdir -p sofalog/sofa_hints/") xring_order = ','.join(map(str, cycle)) with open("sofalog/sofa_hints/xring_order.txt", "w") as f: f.write('export CUDA_VISIBLE_DEVICES=' + xring_order) break # Construct Performance Features features = pd.DataFrame({'name':['elapsed_time'], 'value':[cfg.elapsed_time]}, columns=['name','value']) try: df_nvsmi = pd.read_csv(filein_nvsmi) if not df_nvsmi.empty and cfg.spotlight_gpu: state = 0 sm_high = 0 trigger = 10 for i in range(len(df_nvsmi)): if df_nvsmi.iloc[i].event == 0 and df_nvsmi.iloc[i].deviceId == 0 : if df_nvsmi.iloc[i].duration >= 50: sm_high = min(trigger, sm_high + 1) if df_nvsmi.iloc[i].duration < 10: sm_high = max(0, sm_high - 1) if state == 0 and sm_high == trigger: state = 1 cfg.roi_begin = df_nvsmi.iloc[i].timestamp elif state == 1 and sm_high == 0: state = 0 cfg.roi_end = df_nvsmi.iloc[i].timestamp #print('sm_high=%d state=%d' % (sm_high, state)) if cfg.roi_end - cfg.roi_begin < 0: cfg.roi_end = 0 cfg.roi_begin = 0 except IOError: print_warning(cfg, "nvsmi_trace.csv is not found") try: df_cpu = pd.read_csv(filein_cpu) if not df_cpu.empty: if cfg.verbose: cpu_profile(logdir, cfg, df_cpu) if cfg.enable_swarms and len(df_cpu) > cfg.num_swarms: df_cpu, swarms = hsg_v2(cfg, df_cpu) except IOError as e: df_cpu = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_cpu) try: df_strace = pd.read_csv(filein_strace) if not df_strace.empty: features = strace_profile(logdir, cfg, df_strace, features) except IOError as e: df_strace = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_strace) try: df_net = pd.read_csv(filein_net) if not df_net.empty: features = net_profile(logdir, cfg, df_net, features) except IOError as e: df_net = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_net) try: df_bandwidth = pd.read_csv(filein_bandwidth) if not df_bandwidth.empty: features = netbandwidth_profile(logdir, cfg, df_bandwidth, features) except IOError as e: df_bandwidth = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_bandwidth) try: df_blktrace = pd.read_csv(filein_blktrace) if not df_blktrace.empty: features = blktrace_latency_profile(logdir, cfg, df_blktrace, features) except IOError as e: df_blktrace = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_blktrace) try: df_diskstat = pd.read_csv(filein_diskstat) if not df_diskstat.empty: features = diskstat_profile(logdir, cfg, df_diskstat, features) except IOError as e: df_diskstat = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_diskstat) try: df_vmstat = pd.read_csv(filein_vmstat) if not df_vmstat.empty: features = vmstat_profile(logdir, cfg, df_vmstat, features) except IOError as e: df_vmstat = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_vmstat) try: df_mpstat = pd.read_csv(filein_mpstat) if not df_mpstat.empty: features = mpstat_profile(logdir, cfg, df_mpstat, features) except IOError as e: df_mpstat = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_mpstat) try: df_nvsmi = pd.read_csv(filein_nvsmi) features = nvsmi_profile(logdir, cfg, df_nvsmi, features) except IOError: print_warning(cfg, "nvsmi_trace.csv is not found") try: df_gpu = pd.read_csv(filein_gpu) if not df_gpu.empty: 
features = gpu_profile(logdir, cfg, df_gpu, features) except IOError: df_gpu = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found. If there is no need to profile GPU, just ignore it." % filein_gpu) try: if len(df_mpstat)>0: df_nvsmi.append(df_mpstat.iloc[0]) features = concurrency_breakdown(logdir, cfg, df_mpstat, df_cpu, df_gpu, df_nvsmi, df_bandwidth, features) except IOError as e: print_warning(cfg, "Some files are not found, which are needed for concurrency_breakdown analysis") if cfg.enable_aisi: selected_pattern, iter_summary, features = sofa_aisi(logdir, cfg, df_cpu, df_gpu, df_strace, df_mpstat, features) if 'IS_SOFA_ON_HAIHUB' not in os.environ or os.environ['IS_SOFA_ON_HAIHUB'] == 'no': print_title('Final Performance Features') print('%s%s%s%s' % ('ID'.ljust(10),'Feature'.ljust(30),'Value'.ljust(20),'Unit'.ljust(20)) ) for i in range(len(features)): name = features.iloc[i]['name'] value = features.iloc[i]['value'] print('%s%s%s' % (str(i).ljust(10), name.ljust(30), ('%.3lf'%value).ljust(20))) if cfg.spotlight_gpu: try: print('Elapsed hotspot time: %.3lf' % features[features.name=='elapsed_hotspot_time'].value) except: print_warning(cfg, 'elpased_hostspot_time is not defined.') if cfg.potato_server: if cfg.potato_server.find(':') == -1: cfg.potato_server = cfg.potato_server + ':50051' hint, docker_image = get_hint(cfg.potato_server, features) df_report = pd.read_json(hint, orient='table') file_potato_report = cfg.logdir + 'potato_report.html' # Export report to HTML file. df_report.to_html(file_potato_report ) with open(file_potato_report, 'a') as f: f.write('<head><link rel=stylesheet type="text/css" href="potato_report.css"></head>') print_title('POTATO Feedback') print('%s%s%s%s' % ('ID'.ljust(5), 'Metric'.ljust(20), 'Value'.ljust(10), 'Reference-Value'.ljust(30) ) ) for i in range(len(df_report)): metric = df_report.iloc[i]['Metric'] if metric != 'hybrid_suggestion': value = df_report.iloc[i]['Value'] ref_value = df_report.iloc[i]['ReferenceValue'] print('%s%s%s%s' % (str(i).ljust(5), metric.ljust(20), ('%.3lf'%value).ljust(20), str(ref_value).ljust(30))) print('\n') print_hint('General Suggestions:') for i in range(len(df_report)): metric = df_report.iloc[i]['Metric'] if metric != 'hybrid_suggestion': suggestion = df_report.iloc[i]['Suggestion'] print('%d. %s' % (i, suggestion)) print('\n') print_hint('Framework-specific Optimization Suggestions:') for i in range(len(df_report)): metric = df_report.iloc[i]['Metric'] if metric == 'hybrid_suggestion': suggestion = df_report.iloc[i]['Suggestion'] print('%d. 
%s' % (i, suggestion)) #print(df_report[['Metric', 'Value', 'Reference Value']]) #print(df_report[['Suggestion']]) #print('Tag of optimal image recommended from POTATO: ' + highlight(docker_image)) print('\n') print_hint('Please re-launch KubeFlow Jupyter-notebook to have suggested images or resources if necessary.') sofa_home = os.path.dirname(os.path.realpath(__file__)) subprocess.Popen( ['bash', '-c', 'cp %s/../sofaboard/* %s;' % (sofa_home, cfg.logdir)]) subprocess.Popen(['sleep', '2']) print('\n\n') print('Complete!!') def cluster_analyze(cfg): if cfg.verbose: print_title('Cluster Network Profiling :') cluster = cfg.cluster_ip.split(',') summary_net = pd.DataFrame([], columns=['Source', 'Destination', 'Amount', 'Percentage of a Node']) summary_compute = pd.DataFrame([], columns=['gpu_sm_util','gpu_mem_util','cpu_util']) summary_band = pd.DataFrame([], columns=['Q1', 'Q2', 'Q3', 'Avg']) all = [] for i, ip in enumerate(cluster): features = pd.DataFrame({'name':['elapsed_time'], 'value':[cfg.elapsed_time]}, columns=['name','value']) node = 'node ' + str(i) if cfg.verbose: print('node ' + str(i) + ' is ' + ip) logdir = tmp_dir[0:-1] + '-' + ip + '/' filein_net = logdir + "nettrace.csv" filein_mpstat = logdir + "mpstat.csv" filein_nvsmi = logdir + "nvsmi_trace.csv" filein_bandwidth = logdir + "netstat.csv" with open(logdir+'/misc.txt') as f: lines = f.readlines() elapsed_time = float(lines[0].split()[1]) vcores = int(lines[2].split()[1]) cfg.elapsed_time = float(lines[0].split()[1]) try: df_net = pd.read_csv(filein_net) features = net_profile(logdir, cfg, df_net, features) except IOError as e: df_net = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_net) try: df_mpstat = pd.read_csv(filein_mpstat) features = mpstat_profile(logdir, cfg, df_mpstat, features) except IOError as e: df_mpstat = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_mpstat) try: df_nvsmi = pd.read_csv(filein_nvsmi) features = nvsmi_profile(logdir, cfg, df_nvsmi, features) except IOError: print_warning(cfg, "nvsmi_trace.csv is not found") try: df_bandwidth = pd.read_csv(filein_bandwidth) features = netbandwidth_profile(logdir, cfg, df_bandwidth, features) except IOError as e: df_bandwidth = pd.DataFrame([], columns=cfg.columns) print_warning(cfg, "%s is not found" % filein_bandwidth) sm = int(features[features['name'] == 'gpu_sm_util']['value']) mem = int(features[features['name'] == 'gpu_mem_util']['value']) cpu = int(features[features['name'] == 'cpu_util']['value']) sm_mem_cpu = [sm, mem, cpu] compute_tmp = pd.DataFrame([sm_mem_cpu], columns = ['gpu_sm_util', 'gpu_mem_util', 'cpu_util']) summary_compute = pd.concat([summary_compute, pd.concat([compute_tmp], keys=[node])]) net_tmp = pd.read_csv(logdir + "netrank.csv") summary_net = pd.concat([summary_net, pd.concat([net_tmp], keys=[node])]) # for bandwidth report tx = df_bandwidth['event'] == float(0) rx = df_bandwidth['event'] == float(1) tx_tmp = [convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.25)), convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.5)), convertbytes(df_bandwidth[tx]['bandwidth'].quantile(0.75)), convertbytes(df_bandwidth[tx]['bandwidth'].mean())] rx_tmp = [convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.25)), convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.5)), convertbytes(df_bandwidth[rx]['bandwidth'].quantile(0.75)), convertbytes(df_bandwidth[rx]['bandwidth'].mean())] band_tmp = pd.DataFrame([tx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['tx']) 
rx_pd = pd.DataFrame([rx_tmp], columns = ['Q1', 'Q2', 'Q3', 'Avg'], index = ['rx']) band_tmp = pd.concat([band_tmp, rx_pd]) summary_band = pd.concat([summary_band, pd.concat([band_tmp], keys=[node])]) if cfg.verbose: with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also print('Ranked Network Traffic : \n', summary_net, '\n') print('Cluster Bandwidth Quartile: \n', summary_band) print_title('Cluster Computation Profiling:') print(summary_compute)
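The concurrency_breakdown() function above collects one utilization vector per sampling interval and derives features such as corr_gpu_usr from a Pearson correlation table. The following standalone sketch reproduces that last step on synthetic data; the numbers are invented, and only the column and feature names follow the code above.

import numpy as np
import pandas as pd

rng = np.random.default_rng(1)
n = 50
gpu = rng.uniform(0.0, 1.0, n)
vector_table = pd.DataFrame({
    'usr': gpu * 0.5 + rng.uniform(0.0, 0.2, n),  # user CPU time loosely tracks GPU activity
    'sys': rng.uniform(0.0, 0.3, n),
    'iow': rng.uniform(0.0, 0.1, n),
    'idl': 1.0 - gpu,
    'gpu': gpu,
    'net_tx': rng.uniform(0.0, 1e6, n),
    'net_rx': rng.uniform(0.0, 1e6, n),
})
pearson = vector_table.corr(method='pearson').round(2)
features = pd.DataFrame({
    'name': ['corr_gpu_usr', 'corr_gpu_sys', 'corr_gpu_iow', 'corr_gpu_ntx', 'corr_gpu_nrx'],
    'value': [pearson['gpu'].usr, pearson['gpu'].sys, pearson['gpu'].iow,
              pearson['gpu'].net_tx, pearson['gpu'].net_rx],
}, columns=['name', 'value'])
print(features)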
apache-2.0
536,236,055,129,862,100
43.556728
259
0.541718
false
3.342857
false
false
false
kenmcc/mypywws
src/pywws/Process.py
1
29489
#!/usr/bin/env python # -*- coding: utf-8 -*- # pywws - Python software for USB Wireless Weather Stations # http://github.com/jim-easterbrook/pywws # Copyright (C) 2008-14 Jim Easterbrook [email protected] # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. """Generate hourly, daily & monthly summaries of raw weather station data :: %s This module takes raw weather station data (typically sampled every five or ten minutes) and generates hourly, daily and monthly summary data, which is useful when creating tables and graphs. Before computing the data summaries, raw data is "calibrated" using a user-programmable function. See :doc:`pywws.calib` for details. The hourly data is derived from all the records in one hour, e.g. from 18:00:00 to 18:59:59, and is given the index of the last complete record in that hour. The daily data summarises the weather over a 24 hour period typically ending at 2100 or 0900 hours, local (non DST) time, though midnight is another popular convention. It is also indexed by the last complete record in the period. Daytime and nightime, as used when computing maximum and minimum temperatures, are assumed to start at 0900 and 2100 local time, or 1000 and 2200 when DST is in effect, regardless of the meteorological day. To adjust the meteorological day to your preference, or that used by your local official weather station, edit the "day end hour" line in your ``weather.ini`` file, then run Reprocess.py to regenerate the summaries. Monthly summary data is computed from the daily summary data. If the meteorological day does not end at midnight, then each month may begin and end up to 12 hours before or after midnight. Wind speed data is averaged over the hour (or day) and the maximum gust speed during the hour (or day) is recorded. The predominant wind direction is calculated using vector arithmetic. Rainfall is converted from the raw "total since last reset" figure to a more useful total in the last hour, day or month. """ from __future__ import absolute_import __docformat__ = "restructuredtext en" __usage__ = """ usage: python -m pywws.Process [options] data_dir options are: -h or --help display this help -v or --verbose increase number of informative messages data_dir is the root directory of the weather data """ __doc__ %= __usage__ __usage__ = __doc__.split('\n')[0] + __usage__ from collections import deque from datetime import date, datetime, timedelta import getopt import logging import math import os import sys from .calib import Calib from . 
import DataStore from .Logger import ApplicationLogger from .TimeZone import STDOFFSET, HOUR SECOND = timedelta(seconds=1) TIME_ERR = timedelta(seconds=45) MINUTEx5 = timedelta(minutes=5) HOURx3 = timedelta(hours=3) DAY = timedelta(hours=24) WEEK = timedelta(days=7) class Average(object): """Compute average of multiple data values.""" def __init__(self): self.acc = 0.0 self.count = 0 def add(self, value): if value is None: return self.acc += value self.count += 1 def result(self): if self.count == 0: return None return self.acc / float(self.count) class Minimum(object): """Compute minimum value and timestamp of multiple data values.""" def __init__(self): self.value = None self.time = None def add(self, value, time): if not self.time or value <= self.value: self.value = value self.time = time def result(self): if self.time: return self.value, self.time return None, None class Maximum(object): """Compute maximum value and timestamp of multiple data values.""" def __init__(self): self.value = None self.time = None def add(self, value, time): if not self.time or value > self.value: self.value = value self.time = time def result(self): if self.time: return self.value, self.time return None, None sin_LUT = map( lambda x: math.sin(math.radians(float(x * 360) / 16.0)), range(16)) cos_LUT = map( lambda x: math.cos(math.radians(float(x * 360) / 16.0)), range(16)) class WindFilter(object): """Compute average wind speed and direction. The wind speed and direction of each data item is converted to a vector before averaging, so the result reflects the dominant wind direction during the time period covered by the data. Setting the ``decay`` parameter converts the filter from a simple averager to one where the most recent sample carries the highest weight, and earlier samples have a lower weight according to how long ago they were. This process is an approximation of "exponential smoothing". See `Wikipedia <http://en.wikipedia.org/wiki/Exponential_smoothing>`_ for a detailed discussion. The parameter ``decay`` corresponds to the value ``(1 - alpha)`` in the Wikipedia description. Because the weather data being smoothed may not be at regular intervals this parameter is the decay over 5 minutes. Weather data at other intervals will have its weight scaled accordingly. The return value is a (speed, direction) tuple. :param decay: filter coefficient decay rate. 
:type decay: float :rtype: (float, float) """ def __init__(self, decay=1.0): self.decay = decay self.Ve = 0.0 self.Vn = 0.0 self.total = 0.0 self.weight = 1.0 self.total_weight = 0.0 self.last_idx = None def add(self, data): direction = data['wind_dir'] speed = data['wind_ave'] if direction is None or speed is None: return if self.last_idx and self.decay != 1.0: interval = data['idx'] - self.last_idx assert interval.days == 0 decay = self.decay if interval != MINUTEx5: decay = decay ** (float(interval.seconds) / float(MINUTEx5.seconds)) self.weight = self.weight / decay self.last_idx = data['idx'] speed = speed * self.weight if isinstance(direction, int): self.Ve -= speed * sin_LUT[direction] self.Vn -= speed * cos_LUT[direction] else: direction = math.radians(float(direction) * 22.5) self.Ve -= speed * math.sin(direction) self.Vn -= speed * math.cos(direction) self.total += speed self.total_weight += self.weight def result(self): if self.total_weight == 0.0: return (None, None) return (self.total / self.total_weight, (math.degrees(math.atan2(self.Ve, self.Vn)) + 180.0) / 22.5) class HourAcc(object): """'Accumulate' raw weather data to produce hourly summary. Compute average wind speed and maximum wind gust, find dominant wind direction and compute total rainfall. """ def __init__(self, last_rain): self.logger = logging.getLogger('pywws.Process.HourAcc') self.last_rain = last_rain self.copy_keys = ['idx', 'hum_in', 'temp_in', 'hum_out', 'temp_out', 'abs_pressure', 'rel_pressure', 'temp_bedroom','temp_kitchen', "temp_bed2"] self.reset() def reset(self): self.wind_fil = WindFilter() self.wind_gust = (-2.0, None) self.rain = 0.0 self.retval = {'idx' : None, 'temp_out' : None} def add_raw(self, data): idx = data['idx'] self.wind_fil.add(data) wind_gust = data['wind_gust'] if wind_gust is not None and wind_gust > self.wind_gust[0]: self.wind_gust = (wind_gust, idx) rain = data['rain'] if rain is not None: if self.last_rain is not None: diff = rain - self.last_rain if diff < -0.001: self.logger.warning( '%s rain reset %.1f -> %.1f', str(idx), self.last_rain, rain) elif diff > float(data['delay'] * 5): # rain exceeds 5mm / minute, assume corrupt data and ignore it self.logger.warning( '%s rain jump %.1f -> %.1f', str(idx), self.last_rain, rain) else: self.rain += max(0.0, diff) self.last_rain = rain # copy some current readings if 'illuminance' in data and not 'illuminance' in self.copy_keys: self.copy_keys.append('illuminance') self.copy_keys.append('uv') # if near the end of the hour, ignore 'lost contact' readings if (data['idx'].minute < 45 or data['temp_out'] is not None or self.retval['temp_out'] is None): for key in self.copy_keys: if key in data: self.retval[key] = data[key] def result(self): if not self.retval['idx']: return None self.retval['wind_ave'], self.retval['wind_dir'] = self.wind_fil.result() if self.wind_gust[1]: self.retval['wind_gust'] = self.wind_gust[0] else: self.retval['wind_gust'] = None self.retval['rain'] = self.rain return self.retval class DayAcc(object): """'Accumulate' weather data to produce daily summary. Compute average wind speed, maximum wind gust and daytime max & nighttime min temperatures, find dominant wind direction and compute total rainfall. Daytime is assumed to be 0900-2100 and nighttime to be 2100-0900, local time (1000-2200 and 2200-1000 during DST), regardless of the "day end hour" setting. 
""" def __init__(self): self.logger = logging.getLogger('pywws.Process.DayAcc') self.has_illuminance = False self.ave = {} self.max = {} self.min = {} self.reset() def reset(self): self.wind_fil = WindFilter() self.wind_gust = (-1.0, None) self.rain = 0.0 for i in ('temp_in', 'temp_out', 'hum_in', 'hum_out', 'abs_pressure', 'rel_pressure', 'temp_bedroom', 'temp_kitchen', "temp_bed2"): self.ave[i] = Average() self.max[i] = Maximum() self.min[i] = Minimum() for i in ('illuminance', 'uv'): self.ave[i] = Average() self.max[i] = Maximum() self.retval = dict() def add_raw(self, data): idx = data['idx'] local_hour = (idx + STDOFFSET).hour wind_gust = data['wind_gust'] if wind_gust is not None and wind_gust > self.wind_gust[0]: self.wind_gust = (wind_gust, idx) for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"): #if i in data: try: temp = data[i] except: temp = 0 if temp is not None: self.ave[i].add(temp) if local_hour >= 9 and local_hour < 21: # daytime max temperature self.max[i].add(temp, idx) else: # nighttime min temperature self.min[i].add(temp, idx) for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): if i in data: value = data[i] if value is not None: self.ave[i].add(value) self.max[i].add(value, idx) self.min[i].add(value, idx) if 'illuminance' in data: self.has_illuminance = True for i in ('illuminance', 'uv'): if i in data: value = data[i] if value is not None: self.ave[i].add(value) self.max[i].add(value, idx) def add_hourly(self, data): self.wind_fil.add(data) rain = data['rain'] if rain is not None: self.rain += rain self.retval['idx'] = data['idx'] def result(self): if not self.retval: return None self.retval['wind_ave'], self.retval['wind_dir'] = self.wind_fil.result() if self.wind_gust[1]: self.retval['wind_gust'] = self.wind_gust[0] else: self.retval['wind_gust'] = None self.retval['wind_gust_t'] = self.wind_gust[1] self.retval['rain'] = self.rain for i in ('temp_in', 'temp_out', 'hum_in', 'hum_out', 'abs_pressure', 'rel_pressure', 'temp_bedroom', 'temp_kitchen', "temp_bed2"): self.retval['%s_ave' % i] = self.ave[i].result() (self.retval['%s_max' % i], self.retval['%s_max_t' % i]) = self.max[i].result() (self.retval['%s_min' % i], self.retval['%s_min_t' % i]) = self.min[i].result() if self.has_illuminance: for i in ('illuminance', 'uv'): self.retval['%s_ave' % i] = self.ave[i].result() (self.retval['%s_max' % i], self.retval['%s_max_t' % i]) = self.max[i].result() return self.retval class MonthAcc(object): """'Accumulate' daily weather data to produce monthly summary. Compute daytime max & nighttime min temperatures. 
""" def __init__(self, rain_day_threshold): self.rain_day_threshold = rain_day_threshold self.has_illuminance = False self.ave = {} self.min = {} self.max = {} self.min_lo = {} self.min_hi = {} self.min_ave = {} self.max_lo = {} self.max_hi = {} self.max_ave = {} self.reset() def reset(self): for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"): self.ave[i] = Average() self.min_lo[i] = Minimum() self.min_hi[i] = Maximum() self.min_ave[i] = Average() self.max_lo[i] = Minimum() self.max_hi[i] = Maximum() self.max_ave[i] = Average() for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): self.ave[i] = Average() self.max[i] = Maximum() self.min[i] = Minimum() for i in ('illuminance', 'uv'): self.ave[i] = Average() self.max_lo[i] = Minimum() self.max_hi[i] = Maximum() self.max_ave[i] = Average() self.wind_fil = WindFilter() self.wind_gust = (-1.0, None) self.rain = 0.0 self.rain_days = 0 self.valid = False def add_daily(self, data): self.idx = data['idx'] for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"): try: temp = data['%s_ave' % i] except: temp = 0 if temp is not None: self.ave[i].add(temp) try: temp = data['%s_min' % i] except: temp = 0 if temp is not None: try: self.min_lo[i].add(temp, data['%s_min_t' % i]) except: self.min_lo[i].add(temp, 0) try: self.min_hi[i].add(temp, data['%s_min_t' % i]) except: self.min_hi[i].add(temp, 0) self.min_ave[i].add(temp) try: temp = data['%s_max' % i] except: temp = 0 if temp is not None: try: self.max_lo[i].add(temp, data['%s_max_t' % i]) except: self.max_lo[i].add(temp, 0) try: self.max_hi[i].add(temp, data['%s_max_t' % i]) except: self.max_hi[i].add(temp, 0) self.max_ave[i].add(temp) for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): value = data['%s_ave' % i] if value is not None: self.ave[i].add(value) value = data['%s_min' % i] if value is not None: self.min[i].add(value, data['%s_min_t' % i]) value = data['%s_max' % i] if value is not None: self.max[i].add(value, data['%s_max_t' % i]) self.wind_fil.add(data) wind_gust = data['wind_gust'] if wind_gust is not None and wind_gust > self.wind_gust[0]: self.wind_gust = (wind_gust, data['wind_gust_t']) if 'illuminance_ave' in data: self.has_illuminance = True for i in ('illuminance', 'uv'): value = data['%s_ave' % i] if value is not None: self.ave[i].add(value) value = data['%s_max' % i] if value is not None: self.max_lo[i].add(value, data['%s_max_t' % i]) self.max_hi[i].add(value, data['%s_max_t' % i]) self.max_ave[i].add(value) self.rain += data['rain'] if data['rain'] >= self.rain_day_threshold: self.rain_days += 1 self.valid = True def result(self): if not self.valid: return None result = {} result['idx'] = self.idx result['rain'] = self.rain result['rain_days'] = self.rain_days for i in ('temp_in', 'temp_out', 'temp_bedroom', 'temp_kitchen', "temp_bed2"): result['%s_ave' % i] = self.ave[i].result() result['%s_min_ave' % i] = self.min_ave[i].result() (result['%s_min_lo' % i], result['%s_min_lo_t' % i]) = self.min_lo[i].result() (result['%s_min_hi' % i], result['%s_min_hi_t' % i]) = self.min_hi[i].result() result['%s_max_ave' % i] = self.max_ave[i].result() (result['%s_max_lo' % i], result['%s_max_lo_t' % i]) = self.max_lo[i].result() (result['%s_max_hi' % i], result['%s_max_hi_t' % i]) = self.max_hi[i].result() for i in ('hum_in', 'hum_out', 'abs_pressure', 'rel_pressure'): result['%s_ave' % i] = self.ave[i].result() (result['%s_max' % i], result['%s_max_t' % i]) = self.max[i].result() (result['%s_min' % i], result['%s_min_t' % 
i]) = self.min[i].result() result['wind_ave'], result['wind_dir'] = self.wind_fil.result() if self.wind_gust[1]: result['wind_gust'] = self.wind_gust[0] else: result['wind_gust'] = None result['wind_gust_t'] = self.wind_gust[1] if self.has_illuminance: for i in ('illuminance', 'uv'): result['%s_ave' % i] = self.ave[i].result() result['%s_max_ave' % i] = self.max_ave[i].result() (result['%s_max_lo' % i], result['%s_max_lo_t' % i]) = self.max_lo[i].result() (result['%s_max_hi' % i], result['%s_max_hi_t' % i]) = self.max_hi[i].result() return result def calibrate_data(logger, params, raw_data, calib_data): """'Calibrate' raw data, using a user-supplied function.""" start = calib_data.before(datetime.max) if start is None: start = datetime.min before = raw_data.before(start) start = raw_data.after(start)# + SECOND) if start is None and before is None: return start else: start = before del calib_data[start:] calibrator = Calib(params, raw_data) count = 0 for data in raw_data[start:]: idx = data['idx'] count += 1 if count % 10000 == 0: logger.info("calib: %s", idx.isoformat(' ')) elif count % 500 == 0: logger.debug("calib: %s", idx.isoformat(' ')) calib_data[idx] = calibrator.calib(data) return start def generate_hourly(logger, calib_data, hourly_data, process_from): """Generate hourly summaries from calibrated data.""" start = hourly_data.before(datetime.max) if start is None: start = datetime.min start = calib_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # set start of hour in local time (not all time offsets are integer hours) start += STDOFFSET + timedelta(minutes=5) start = start.replace(minute=0, second=0) start -= STDOFFSET #del hourly_data[start:] # preload pressure history, and find last valid rain prev = None pressure_history = deque() last_rain = None for data in calib_data[start - HOURx3:start]: if data['rel_pressure']: pressure_history.append((data['idx'], data['rel_pressure'])) if data['rain'] is not None: last_rain = data['rain'] prev = data # iterate over data in one hour chunks stop = calib_data.before(datetime.max) hour_start = start acc = HourAcc(last_rain) count = 0 while hour_start <= stop: count += 1 if count % 1008 == 0: logger.info("hourly: %s", hour_start.isoformat(' ')) elif count % 24 == 0: logger.debug("hourly: %s", hour_start.isoformat(' ')) hour_end = hour_start + HOUR acc.reset() for data in calib_data[hour_start:hour_end]: if data['rel_pressure']: pressure_history.append((data['idx'], data['rel_pressure'])) if prev: err = data['idx'] - prev['idx'] #if abs(err - timedelta(minutes=data['delay'])) > TIME_ERR: # logger.info('unexpected data interval %s %s', # data['idx'].isoformat(' '), str(err)) acc.add_raw(data) prev = data new_data = acc.result() if new_data and new_data['idx'].minute >= 1: # was 9 # compute pressure trend new_data['pressure_trend'] = None if new_data['rel_pressure']: target = new_data['idx'] - HOURx3 while (len(pressure_history) >= 2 and abs(pressure_history[0][0] - target) > abs(pressure_history[1][0] - target)): pressure_history.popleft() if (pressure_history and abs(pressure_history[0][0] - target) < HOUR): new_data['pressure_trend'] = ( new_data['rel_pressure'] - pressure_history[0][1]) # store new hourly data t = new_data['idx']# + timedelta(minutes=5) # round up to the next hour t = t +timedelta(minutes=60) t = t.replace(minute=0, second=0) print "INDEX:", t new_data['idx'] = t hourly_data[t] = new_data hour_start = hour_end return start def 
generate_daily(logger, day_end_hour, calib_data, hourly_data, daily_data, process_from): """Generate daily summaries from calibrated and hourly data.""" start = daily_data.before(datetime.max) if start is None: start = datetime.min start = calib_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # round to start of this day, in local time start += STDOFFSET if start.hour < day_end_hour: start = start - DAY start = start.replace(hour=day_end_hour, minute=0, second=0) start -= STDOFFSET del daily_data[start:] stop = calib_data.before(datetime.max) day_start = start acc = DayAcc() count = 0 while day_start <= stop: count += 1 if count % 30 == 0: logger.info("daily: %s", day_start.isoformat(' ')) else: logger.debug("daily: %s", day_start.isoformat(' ')) day_end = day_start + DAY acc.reset() for data in calib_data[day_start:day_end]: acc.add_raw(data) for data in hourly_data[day_start:day_end]: acc.add_hourly(data) new_data = acc.result() if new_data: new_data['start'] = day_start daily_data[new_data['idx']] = new_data day_start = day_end return start def generate_monthly(logger, rain_day_threshold, day_end_hour, daily_data, monthly_data, process_from): """Generate monthly summaries from daily data.""" start = monthly_data.before(datetime.max) if start is None: start = datetime.min start = daily_data.after(start + SECOND) if process_from: if start: start = min(start, process_from) else: start = process_from if start is None: return start # set start to start of first day of month (local time) start += STDOFFSET start = start.replace(day=1, hour=day_end_hour, minute=0, second=0) if day_end_hour >= 12: # month actually starts on the last day of previous month start -= DAY start -= STDOFFSET del monthly_data[start:] stop = daily_data.before(datetime.max) month_start = start acc = MonthAcc(rain_day_threshold) count = 0 while month_start <= stop: count += 1 if count % 12 == 0: logger.info("monthly: %s", month_start.isoformat(' ')) else: logger.debug("monthly: %s", month_start.isoformat(' ')) month_end = month_start + WEEK if month_end.month < 12: month_end = month_end.replace(month=month_end.month+1) else: month_end = month_end.replace(month=1, year=month_end.year+1) month_end = month_end - WEEK acc.reset() for data in daily_data[month_start:month_end]: acc.add_daily(data) new_data = acc.result() if new_data: new_data['start'] = month_start monthly_data[new_data['idx']] = new_data month_start = month_end return start def Process(params, raw_data, calib_data, hourly_data, daily_data, monthly_data): """Generate summaries from raw weather station data. The meteorological day end (typically 2100 or 0900 local time) is set in the preferences file ``weather.ini``. The default value is 2100 (2200 during DST), following the historical convention for weather station readings. """ logger = logging.getLogger('pywws.Process') logger.info('Generating summary data') # get time of last record last_raw = raw_data.before(datetime.max) print "LAST RAW is ", last_raw if last_raw is None: raise IOError('No data found. 
Check data directory parameter.') # get daytime end hour (in local time) day_end_hour = eval(params.get('config', 'day end hour', '21')) % 24 # get other config rain_day_threshold = eval(params.get('config', 'rain day threshold', '0.2')) # calibrate raw data start = calibrate_data(logger, params, raw_data, calib_data) # generate hourly data print "Generating hourly data from ", start start = generate_hourly(logger, calib_data, hourly_data, start) # generate daily data start = generate_daily(logger, day_end_hour, calib_data, hourly_data, daily_data, start) # generate monthly data generate_monthly(logger, rain_day_threshold, day_end_hour, daily_data, monthly_data, start) return 0 def main(argv=None): if argv is None: argv = sys.argv try: opts, args = getopt.getopt(argv[1:], "hv", ['help', 'verbose']) except getopt.error, msg: print >>sys.stderr, 'Error: %s\n' % msg print >>sys.stderr, __usage__.strip() return 1 # process options verbose = 0 for o, a in opts: if o in ('-h', '--help'): print __usage__.strip() return 0 elif o in ('-v', '--verbose'): verbose += 1 # check arguments if len(args) != 1: print >>sys.stderr, 'Error: 1 argument required\n' print >>sys.stderr, __usage__.strip() return 2 logger = ApplicationLogger(verbose) data_dir = args[0] return Process(DataStore.params(data_dir), DataStore.data_store(data_dir), DataStore.calib_store(data_dir), DataStore.hourly_store(data_dir), DataStore.daily_store(data_dir), DataStore.monthly_store(data_dir)) if __name__ == "__main__": sys.exit(main())
gpl-2.0
-5,708,486,361,000,318,000
35.953634
101
0.555733
false
3.624954
false
false
false
xolox/python-linux-utils
linux_utils/tabfile.py
1
2307
# linux-utils: Linux system administration tools for Python.
#
# Author: Peter Odding <[email protected]>
# Last Change: February 9, 2020
# URL: https://linux-utils.readthedocs.io

"""Generic parsing of Linux configuration files like ``/etc/fstab`` and ``/etc/crypttab``."""

# Standard library modules.
import re

# External dependencies.
from property_manager import PropertyManager, mutable_property

# Modules included in our package.
from linux_utils import coerce_context

# Public identifiers that require documentation.
__all__ = (
    'TabFileEntry',
    'parse_tab_file',
)


def parse_tab_file(filename, context=None, encoding='UTF-8'):
    """
    Parse a Linux configuration file like ``/etc/fstab`` or ``/etc/crypttab``.

    :param filename: The absolute pathname of the file to parse (a string).
    :param context: See :func:`.coerce_context()` for details.
    :param encoding: The name of the text encoding of the file (a string).
    :returns: A generator of :class:`TabFileEntry` objects.

    This function strips comments (the character ``#`` until the end of the
    line) and splits each line into tokens separated by whitespace.
    """
    context = coerce_context(context)
    contents = context.read_file(filename).decode(encoding)
    for line_number, line in enumerate(contents.splitlines(), start=1):
        # Strip comments.
        line = re.sub('#.*', '', line)
        # Tokenize input.
        tokens = line.split()
        if tokens:
            yield TabFileEntry(
                context=context,
                configuration_file=filename,
                line_number=line_number,
                tokens=tokens,
            )


class TabFileEntry(PropertyManager):
    """Container for the results of :func:`parse_tab_file()`."""

    @mutable_property
    def context(self):
        """The execution context from which the configuration file was retrieved."""

    @mutable_property
    def configuration_file(self):
        """The name of the configuration file from which this entry was parsed (a string)."""

    @mutable_property
    def line_number(self):
        """The line number from which this entry was parsed (an integer)."""

    @mutable_property
    def tokens(self):
        """The tokens split on whitespace (a nonempty list of strings)."""
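
# A minimal usage sketch of parse_tab_file(), assuming a conventional
# six-field /etc/fstab layout (device, mount point, filesystem type,
# options, dump, pass); the unpacking of entry.tokens below is illustrative
# only and not part of this module:
#
#     from linux_utils.tabfile import parse_tab_file
#
#     for entry in parse_tab_file('/etc/fstab'):
#         device, mount_point, fs_type = entry.tokens[:3]
#         print(entry.line_number, device, mount_point, fs_type)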
mit
3,480,510,046,944,304,600
31.492958
93
0.662765
false
4.202186
true
false
false
sassoftware/rmake3
rmake/worker/resolvesource.py
1
30654
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import itertools from conary.deps import deps from conary.local import deptable from conary.conaryclient import resolve from conary.repository import trovesource from rmake.lib import flavorutil class TroveSourceMesh(trovesource.SearchableTroveSource): def __init__(self, extraSource, mainSource, repos): trovesource.SearchableTroveSource.__init__(self) self.extraSource = extraSource self.mainSource = mainSource self.repos = repos trovesource.SearchableTroveSource.__init__(self) self.searchAsRepository() for source in self.mainSource, self.repos, self.extraSource: if not source: continue self._allowNoLabel = source._allowNoLabel self._bestFlavor = source._bestFlavor self._getLeavesOnly = source._getLeavesOnly self._flavorCheck = source._flavorCheck break self.sources = [ self.extraSource] if self.mainSource: self.sources.append(self.mainSource) if self.repos: self.sources.append(self.repos) def __getattr__(self, key): if self.repos: return getattr(self.repos, key) return getattr(self.mainSource, key) def getFileVersions(self, *args, **kw): if self.repos: return self.repos.getFileVersions(*args, **kw) return self.mainSource.getFileVersions(*args, **kw) def close(self): pass def hasTroves(self, troveList): if self.repos: results = self.repos.hasTroves(troveList) if isinstance(results, dict): results = [ results[x] for x in troveList ] else: results = [ False for x in troveList ] if self.extraSource: hasTroves = self.extraSource.hasTroves(troveList) results = [ x[0] or x[1] for x in itertools.izip(results, hasTroves) ] if self.mainSource: hasTroves = self.mainSource.hasTroves(troveList) results = [ x[0] or x[1] for x in itertools.izip(results, hasTroves) ] return dict(itertools.izip(troveList, results)) def trovesByName(self, name): if self.mainSource: return list(set(self.mainSource.trovesByName(name)) | set(self.extraSource.trovesByName(name))) else: return self.extraSource.trovesByName(name) def getTroves(self, troveList, *args, **kw): if self.repos: return self.repos.getTroves(troveList, *args, **kw) else: return self.mainSource.getTroves(troveList, *args, **kw) def _mergeTroveQuery(self, resultD, response): if isinstance(resultD, dict): for troveName, troveVersions in response.iteritems(): if not resultD.has_key(troveName): resultD[troveName] = {} versionDict = resultD[troveName] for version, flavors in troveVersions.iteritems(): if version not in versionDict: versionDict[version] = [] resultD[troveName][version].extend(flavors) else: if not resultD: for resultList in response: resultD.append(list(resultList)) else: for idx, resultList in enumerate(response): resultD[idx].extend(resultList) return resultD def _mergeListTroveQuery(self, resultList, result2, altFlavors, altFlavors2, map, query): newMap = [] newQuery = [] for idx, items in enumerate(result2): if not items: newMap.append(map[idx]) newQuery.append(query[idx]) if altFlavors2: altFlavors[map[idx]].extend(altFlavors2[idx]) else: 
resultList[map[idx]].extend(items) altFlavors[map[idx]] = [] return newMap, newQuery def _call(self, fn, query, *args, **kw): if not isinstance(query, dict): query = list(query) result, altFlavors = getattr(self.extraSource, fn)(query, *args, **kw) map = [] newQuery = [] for idx, item in enumerate(result): if not item: map.append(idx) newQuery.append(query[idx]) if self.mainSource: result2, altFlavors2 = getattr(self.mainSource, fn)(newQuery, *args, **kw) newQuery, map = self._mergeListTroveQuery(result, result2, altFlavors, altFlavors2, map, newQuery) if self.repos: result3, altFlavors3 = getattr(self.repos, fn)(newQuery, *args, **kw) newQuery, map = self._mergeListTroveQuery(result, result3, altFlavors, altFlavors3, map, newQuery) result = result, altFlavors else: query = dict(query) d1 = getattr(self.extraSource, fn)(query, *args, **kw) result = {} self._mergeTroveQuery(result, d1) for name in result: query.pop(name) if self.mainSource: d2 = getattr(self.mainSource, fn)(query, *args, **kw) self._mergeTroveQuery(result, d2) if self.repos: d3 = getattr(self.repos, fn)(query, *args, **kw) self._mergeTroveQuery(result, d3) return result def _addLabelsToQuery(self, query): if isinstance(query, dict): newQuery = query.copy() names = query for name in query: labels = set(x[1].trailingLabel() for x in self.extraSource.trovesByName(name)) #asserts there is only one flavorList flavorList, = set(x and tuple(x) for x in query[name].values()) for label in labels: if label not in query[name]: newQuery[name][label] = flavorList map = None else: map = {} newQuery = list(query) names = [(x[0], x[1][0], x[1][2]) for x in enumerate(query)] for idx, name, flavor in names: labels = set(x[1].trailingLabel() for x in self.extraSource.trovesByName(name)) for label in labels: map[len(newQuery)] = idx newQuery.append((name, label, flavor)) return newQuery, map def _compressResults(self, results, map): if map is None: return results results, altFlavors = results finalResults = [] for idx, result in enumerate(results): if idx in map: if result: finalResults[map[idx]].extend(result) altFlavors[map[idx]] = [] else: altFlavors[map[idx]].extend(altFlavors) else: finalResults.append(result) return finalResults, altFlavors def getTroveLatestByLabel(self, query, *args, **kw): map = None if self.expandLabelQueries: query, map = self._addLabelsToQuery(query) results = self._call('getTroveLatestByLabel', query, *args, **kw) return self._compressResults(results, map) def getTroveLeavesByLabel(self, query, *args, **kw): map = None if self.expandLabelQueries: query, map = self._addLabelsToQuery(query) results = self._call('getTroveLeavesByLabel', query, *args, **kw) return self._compressResults(results, map) def getTroveVersionsByLabel(self, query, *args, **kw): map = None if self.expandLabelQueries: query, map = self._addLabelsToQuery(query) results = self._call('getTroveVersionsByLabel', query, *args, **kw) return self._compressResults(results, map) def getTroveLeavesByBranch(self, query, *args, **kw): return self._call('getTroveLeavesByBranch', query, *args, **kw) def getTroveVersionsByBranch(self, query, *args, **kw): return self._call('getTroveVersionsByBranch', query, *args, **kw) def getTroveVersionFlavors(self, query, *args, **kw): return self._call('getTroveVersionFlavors', query, *args, **kw) def findTroves(self, labelPath, troveSpecs, defaultFlavor=None, acrossLabels=False, acrossFlavors=False, affinityDatabase=None, allowMissing=False, bestFlavor=None, getLeaves=None, troveTypes=trovesource.TROVE_QUERY_PRESENT, 
exactFlavors=False, **kw): if self.mainSource is None: return trovesource.SearchableTroveSource.findTroves(self, labelPath, troveSpecs, defaultFlavor=defaultFlavor, acrossLabels=acrossLabels, acrossFlavors=acrossFlavors, affinityDatabase=affinityDatabase, troveTypes=troveTypes, exactFlavors=exactFlavors, allowMissing=True, **kw) results = {} if bestFlavor is not None: kw.update(bestFlavor=bestFlavor) if getLeaves is not None: kw.update(getLeaves=getLeaves) for source in self.sources: if source == self.repos: # we need the labelPath for repos, otherwise # we allow other algorithms to determine which # version of a particular trove to use - the same ones # used during dep resolution. Sometimes this will not # be a package on the ILP. searchLabelPath = labelPath else: searchLabelPath = None foundTroves = source.findTroves(searchLabelPath, troveSpecs, defaultFlavor=defaultFlavor, acrossLabels=acrossLabels, acrossFlavors=acrossFlavors, affinityDatabase=affinityDatabase, troveTypes=troveTypes, exactFlavors=exactFlavors, allowMissing=True, **kw) for troveSpec, troveTups in foundTroves.iteritems(): results.setdefault(troveSpec, []).extend(troveTups) if not allowMissing: for troveSpec in troveSpecs: assert(troveSpec in results) return results def resolveDependencies(self, label, depList, *args, **kw): sugg = self.extraSource.resolveDependencies(label, depList, *args, **kw) sugg2 = self.repos.resolveDependencies(label, depList, *args, **kw) for depSet, trovesByDep in sugg.iteritems(): for idx, troveList in enumerate(trovesByDep): if not troveList: troveList.extend(sugg2[depSet][idx]) return sugg def resolveDependenciesByGroups(self, troveList, depList): sugg = self.extraSource.resolveDependencies(None, depList) sugg2 = self.repos.resolveDependenciesByGroups(troveList, depList) for depSet, trovesByDep in sugg.iteritems(): for idx, troveList in enumerate(trovesByDep): if not troveList: troveList.extend(sugg2[depSet][idx]) return sugg class DepHandlerSource(TroveSourceMesh): def __init__(self, builtTroveSource, troveListList, repos=None, useInstallLabelPath=True, expandLabelQueries=False): if repos: flavorPrefs = repos._flavorPreferences else: flavorPrefs = [] stack = trovesource.TroveSourceStack() stack.searchWithFlavor() stack.setFlavorPreferenceList(flavorPrefs) self.setFlavorPreferenceList(flavorPrefs) self.expandLabelQueries = expandLabelQueries self.resolveTroveSource = None if isinstance(troveListList, trovesource.SimpleTroveSource): troveListList.setFlavorPreferenceList(flavorPrefs) self.stack.addSource(troveListList) self.resolveTroveSource = troveListList else: if troveListList: for troveList in troveListList: allTroves = [ x.getNameVersionFlavor() for x in troveList ] childTroves = itertools.chain(* (x.iterTroveList(weakRefs=True, strongRefs=True) for x in troveList)) allTroves.extend(childTroves) source = trovesource.SimpleTroveSource(allTroves) source.searchWithFlavor() source.setFlavorPreferenceList(flavorPrefs) stack.addSource(source) self.resolveTroveSource = stack if not useInstallLabelPath: repos = None if not stack.sources: stack = None TroveSourceMesh.__init__(self, builtTroveSource, stack, repos) def __repr__(self): return 'DepHandlerSource(%r,%r,%r)' % (self.extraSource, self.mainSource, self.repos) def copy(self): inst = self.__class__(self.source, None, self.repos) inst.repos = self.repos return inst class BuiltTroveSource(trovesource.SimpleTroveSource): """ Trove source that is used for dep resolution and buildreq satisfaction only - it does not contain references to the 
changesets that are added """ def __init__(self, troves, repos): self.depDb = deptable.DependencyDatabase() trovesource.SimpleTroveSource.__init__(self) self.setFlavorPreferenceList(repos._flavorPreferences) self.idMap = [] self.idx = 0 for trove in troves: self.addTrove(trove.getNameVersionFlavor(), trove.getProvides(), trove.getRequires()) self.searchWithFlavor() def close(self): self.depDb.db.close() def __del__(self): self.depDb.db.close() def addTrove(self, troveTuple, provides, requires): self._trovesByName.setdefault(troveTuple[0],set()).add(troveTuple) self.idMap.append(troveTuple) self.depDb.add(self.idx, provides, requires) self.idx += 1 def addChangeSet(self, cs): for idx, trvCs in enumerate(cs.iterNewTroveList()): self.addTrove(trvCs.getNewNameVersionFlavor(), trvCs.getProvides(), trvCs.getRequires()) def resolveDependencies(self, label, depList, leavesOnly=False): suggMap = self.depDb.resolve(label, depList) for depSet, solListList in suggMap.iteritems(): newSolListList = [] for solList in solListList: if not self._allowNoLabel and label: newSolListList.append([ self.idMap[x] for x in solList if self.idMap[x][1].trailingLabel == label]) else: newSolListList.append([ self.idMap[x] for x in solList ]) suggMap[depSet] = newSolListList return suggMap class ResolutionMesh(resolve.BasicResolutionMethod): def __init__(self, cfg, extraMethod, mainMethod): resolve.BasicResolutionMethod.__init__(self, cfg, None) self.extraMethod = extraMethod self.mainMethod = mainMethod def prepareForResolution(self, depList): self.depList = [ x[1] for x in depList] self.extraMethod.prepareForResolution(depList) return self.mainMethod.prepareForResolution(depList) def resolveDependencies(self): suggMap = self.extraMethod.resolveDependencies() suggMap2 = self.mainMethod.resolveDependencies() for depSet in self.depList: if depSet not in suggMap: suggMap[depSet] = [[] for x in depSet.iterDeps() ] if depSet not in suggMap2: suggMap2[depSet] = [[] for x in depSet.iterDeps() ] for depSet, results in suggMap.iteritems(): mainResults = suggMap2[depSet] for troveList1, troveList2 in itertools.izip(results, mainResults): troveList2.extend(troveList1) return suggMap2 def searchLeavesOnly(self): self.extraMethod.searchLeavesOnly() self.mainMethod.searchLeavesOnly() def searchLeavesFirst(self): self.extraMethod.searchLeavesFirst() self.mainMethod.searchLeavesFirst() def searchAllVersions(self): self.extraMethod.searchAllVersions() self.mainMethod.searchAllVersions() def selectResolutionTrove(self, requiredBy, dep, depClass, troveTups, installFlavor, affFlavorDict): """ determine which of the given set of troveTups is the best choice for installing on this system. Because the repository didn't try to determine which flavors are best for our system, we have to filter the troves locally. """ #NOTE: this method should be a match exactly for the one in # conary.repository.resolvemethod for conary 1.2 and later. # when we drop support for earlier conary's we can drop this method. # we filter the troves in the following ways: # 1. prefer troves that match affinity flavor + are on the affinity # label. (And don't drop an arch) # 2. fall back to troves that match the install flavor. # If we don't match an affinity flavor + label, then use flavor # preferences and flavor scoring to select the best flavor. # We'll have to check # Within these two categories: # 1. filter via flavor preferences for each trove (this may result # in an older version for some troves) # 2. only leave the latest version for each trove # 3. 
pick the best flavor out of the remaining affinityMatches = [] affinityFlavors = [] otherMatches = [] otherFlavors = [] if installFlavor is not None and not installFlavor.isEmpty(): flavoredList = [] for troveTup in troveTups: label = troveTup[1].trailingLabel() affTroves = affFlavorDict[troveTup[0]] found = False if affTroves: for affName, affVersion, affFlavor in affTroves: if affVersion.trailingLabel() != label: continue newFlavor = deps.overrideFlavor(installFlavor, affFlavor, mergeType=deps.DEP_MERGE_TYPE_PREFS) # implement never drop an arch for dep resolution currentArch = deps.getInstructionSetFlavor(affFlavor) if not troveTup[2].stronglySatisfies(currentArch): continue if newFlavor.satisfies(troveTup[2]): affinityMatches.append((newFlavor, troveTup)) affinityFlavors.append(troveTup[2]) found = True if not found and not affinityMatches: if installFlavor.satisfies(troveTup[2]): otherMatches.append((installFlavor, troveTup)) otherFlavors.append(troveTup[2]) else: otherMatches = [ (None, x) for x in troveTups ] otherFlavors = [x[2] for x in troveTups] if affinityMatches: allFlavors = affinityFlavors flavoredList = affinityMatches else: allFlavors = otherFlavors flavoredList = otherMatches # Now filter by flavor preferences. newFlavors = [] if self.flavorPreferences: for flavor in self.flavorPreferences: for trvFlavor in allFlavors: if trvFlavor.stronglySatisfies(flavor): newFlavors.append(trvFlavor) if newFlavors: break if newFlavors: flavoredList = [ x for x in flavoredList if x[1][2] in newFlavors ] return self._selectMatchingResolutionTrove(requiredBy, dep, depClass, flavoredList) def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass, flavoredList): # this function should be an exact match of # resolvemethod._selectMatchingResolutionTrove from conary 1.2 and # later. # finally, filter by latest then score. trovesByNL = {} for installFlavor, (n,v,f) in flavoredList: l = v.trailingLabel() myTimeStamp = v.timeStamps()[-1] if installFlavor is None: myScore = 0 else: # FIXME: we should cache this scoring from before. myScore = installFlavor.score(f) if (n,l) in trovesByNL: curScore, curTimeStamp, curTup = trovesByNL[n,l] if curTimeStamp > myTimeStamp: continue if curTimeStamp == myTimeStamp: if myScore < curScore: continue trovesByNL[n,l] = (myScore, myTimeStamp, (n,v,f)) scoredList = sorted(trovesByNL.itervalues()) if not scoredList: return None else: # highest score, then latest timestamp, then name. return scoredList[-1][-1] if hasattr(resolve.BasicResolutionMethod, '_selectMatchingResolutionTrove'): selectResolutionTrove = resolve.BasicResolutionMethod.selectResolutionTrove _selectMatchingResolutionTrove = resolve.BasicResolutionMethod._selectMatchingResolutionTrove class rMakeResolveSource(ResolutionMesh): """ Resolve by trove list first and then resort back to label path. Also respects intra-trove deps. If foo:runtime requires foo:lib, it requires exactly the same version of foo:lib. 
""" def __init__(self, cfg, builtTroveSource, resolveTroveSource, troveLists, repos): self.removeFileDependencies = False self.builtTroveSource = builtTroveSource self.troveLists = troveLists self.resolveTroveSource = resolveTroveSource self.repos = repos self.cfg = cfg self.repos = repos self.flavor = cfg.flavor sources = [] builtResolveSource = resolve.BasicResolutionMethod(cfg, None) builtResolveSource.setTroveSource(builtTroveSource) sources = [] if troveLists: troveListSources = [resolve.DepResolutionByTroveList(cfg, None, x) for x in troveLists] [ x.setTroveSource(self.repos) for x in troveListSources ] sources.extend(troveListSources) mainMethod = resolve.ResolutionStack(*sources) flavorPreferences = self.repos._flavorPreferences for source in sources: source.setFlavorPreferences(flavorPreferences) ResolutionMesh.__init__(self, cfg, builtResolveSource, mainMethod) self.setFlavorPreferences(flavorPreferences) def close(self): self.builtTroveSource.close() def setLabelPath(self, labelPath): if labelPath: source = resolve.DepResolutionByLabelPath(self.cfg, None, labelPath) source.setTroveSource(self.repos) self.mainMethod.addSource(source) def prepareForResolution(self, depList): # need to get intratrove deps while we still have the full dependency # request information - including what trove the dep arises from. intraDeps = self._getIntraTroveDeps(depList) self.intraDeps = intraDeps return ResolutionMesh.prepareForResolution(self, depList) def _resolveIntraTroveDeps(self, intraDeps): trovesToGet = [] for depSet, deps in intraDeps.iteritems(): for dep, troveTups in deps.iteritems(): trovesToGet.extend(troveTups) hasTroves = self.troveSource.hasTroves(trovesToGet) if isinstance(hasTroves, list): hasTroves = dict(itertools.izip(trovesToGet, hasTroves)) results = {} for depSet, deps in intraDeps.iteritems(): d = {} results[depSet] = d for dep, troveTups in deps.iteritems(): d[dep] = [ x for x in troveTups if hasTroves[x] ] return results def resolveDependencies(self): sugg = ResolutionMesh.resolveDependencies(self) intraDepSuggs = self._resolveIntraTroveDeps(self.intraDeps) for depSet, intraDeps in self.intraDeps.iteritems(): for idx, (depClass, dep) in enumerate(depSet.iterDeps(sort=True)): if depClass.tag == deps.DEP_CLASS_TROVES: if (dep in intraDepSuggs[depSet] and intraDepSuggs[depSet][dep]): sugg[depSet][idx] = intraDepSuggs[depSet][dep] return sugg def _getIntraTroveDeps(self, depList): suggsByDep = {} intraDeps = {} for troveTup, depSet in depList: pkgName = troveTup[0].split(':', 1)[0] for dep in depSet.iterDepsByClass(deps.TroveDependencies): if (dep.name.startswith(pkgName) and dep.name.split(':', 1)[0] == pkgName): troveToGet = (dep.name, troveTup[1], troveTup[2]) l = suggsByDep.setdefault(dep, []) l.append(troveToGet) intraDeps.setdefault(depSet, {}).setdefault(dep, l) return intraDeps def filterDependencies(self, depList): if self.removeFileDependencies: depList = [(x[0], flavorutil.removeFileDeps(x[1])) for x in depList ] return [ x for x in depList if not x[1].isEmpty() ] return depList def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass, flavoredList): # if all packages are the same and only their flavor score or timestamp # is keeping one from being picked over the other, prefer the # newly built package. 
builtTroves = [] resolveTroves = [] newList = flavoredList if self.resolveTroveSource: minResolveIdx = len(self.resolveTroveSource.sources) ilp = self.cfg.installLabelPath for installFlavor, troveTup in flavoredList: if self.extraMethod.troveSource.hasTrove(*troveTup): branch = troveTup[1].branch() if branch.hasParentBranch(): label = branch.parentBranch().label() else: label = branch.label() list = builtTroves elif (self.resolveTroveSource and self.resolveTroveSource.hasTrove(*troveTup)): # if a package is both in the resolveTroves list # and found via ILP, it might be in this list even # though it was not found via resolveTroves. So we # limit results to ones found as early as possible # in the resolveTroves list for resolveIdx, source in enumerate(self.resolveTroveSource.sources): if source.hasTrove(*troveTup): if resolveIdx < minResolveIdx: resolveTroves = [] minResolveIdx = resolveIdx break if resolveIdx > minResolveIdx: continue list = resolveTroves label = troveTup[1].trailingLabel() else: continue if label in ilp: index = ilp.index(label) else: index = len(ilp) list.append((index, (installFlavor, troveTup))) if builtTroves: minIndex = sorted(builtTroves, key=lambda x: x[0])[0][0] newList = [ x[1] for x in builtTroves if x[0] == minIndex ] elif resolveTroves: minIndex = sorted(resolveTroves, key=lambda x: x[0])[0][0] newList = [ x[1] for x in resolveTroves if x[0] == minIndex ] return ResolutionMesh._selectMatchingResolutionTrove(self, requiredBy, dep, depClass, newList)
apache-2.0
-3,809,415,997,018,213,000
41.872727
119
0.560057
false
4.353025
false
false
false
songyi199111/sentry
src/sentry/event_manager.py
2
19299
""" sentry.event_manager ~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from __future__ import absolute_import, print_function import logging import math import six from datetime import datetime, timedelta from django.conf import settings from django.db import IntegrityError, transaction from django.utils import timezone from hashlib import md5 from raven.utils.encoding import to_string from uuid import uuid4 from sentry.app import buffer, tsdb from sentry.constants import ( CLIENT_RESERVED_ATTRS, LOG_LEVELS, DEFAULT_LOGGER_NAME, MAX_CULPRIT_LENGTH, MAX_TAG_VALUE_LENGTH ) from sentry.interfaces.base import get_interface from sentry.models import ( Activity, Event, EventMapping, Group, GroupHash, GroupStatus, Project, Release, UserReport ) from sentry.plugins import plugins from sentry.signals import regression_signal from sentry.utils.logging import suppress_exceptions from sentry.tasks.index import index_event from sentry.tasks.merge import merge_group from sentry.tasks.post_process import post_process_group from sentry.utils.db import get_db_engine from sentry.utils.safe import safe_execute, trim, trim_dict def count_limit(count): # TODO: could we do something like num_to_store = max(math.sqrt(100*count)+59, 200) ? # ~ 150 * ((log(n) - 1.5) ^ 2 - 0.25) for amount, sample_rate in settings.SENTRY_SAMPLE_RATES: if count <= amount: return sample_rate return settings.SENTRY_MAX_SAMPLE_RATE def time_limit(silence): # ~ 3600 per hour for amount, sample_rate in settings.SENTRY_SAMPLE_TIMES: if silence >= amount: return sample_rate return settings.SENTRY_MAX_SAMPLE_TIME def md5_from_hash(hash_bits): result = md5() for bit in hash_bits: result.update(to_string(bit)) return result.hexdigest() def get_hashes_for_event(event): interfaces = event.get_interfaces() for interface in interfaces.itervalues(): result = interface.compute_hashes(event.platform) if not result: continue return result return [[event.message]] def get_hashes_from_fingerprint(event, fingerprint): default_values = set(['{{ default }}', '{{default}}']) if any(d in fingerprint for d in default_values): default_hashes = get_hashes_for_event(event) hash_count = len(default_hashes) else: hash_count = 1 hashes = [] for idx in xrange(hash_count): result = [] for bit in fingerprint: if bit in default_values: result.extend(default_hashes[idx]) else: result.append(bit) hashes.append(result) return hashes if not settings.SENTRY_SAMPLE_DATA: def should_sample(current_datetime, last_seen, times_seen): return False else: def should_sample(current_datetime, last_seen, times_seen): silence_timedelta = current_datetime - last_seen silence = silence_timedelta.days * 86400 + silence_timedelta.seconds if times_seen % count_limit(times_seen) == 0: return False if times_seen % time_limit(silence) == 0: return False return True def plugin_is_regression(group, event): project = event.project for plugin in plugins.for_project(project): result = safe_execute(plugin.is_regression, group, event, version=1, _with_transaction=False) if result is not None: return result return True class ScoreClause(object): def __init__(self, group): self.group = group def __int__(self): # Calculate the score manually when coercing to an int. 
# This is used within create_or_update and friends return self.group.get_score() def prepare_database_save(self, unused): return self def prepare(self, evaluator, query, allow_joins): return def evaluate(self, node, qn, connection): engine = get_db_engine(getattr(connection, 'alias', 'default')) if engine.startswith('postgresql'): sql = 'log(times_seen) * 600 + last_seen::abstime::int' elif engine.startswith('mysql'): sql = 'log(times_seen) * 600 + unix_timestamp(last_seen)' else: # XXX: if we cant do it atomically let's do it the best we can sql = int(self) return (sql, []) @classmethod def calculate(self, times_seen, last_seen): return math.log(times_seen) * 600 + float(last_seen.strftime('%s')) class EventManager(object): logger = logging.getLogger('sentry.events') def __init__(self, data, version='5'): self.data = data self.version = version def normalize(self): # TODO(dcramer): store http.env.REMOTE_ADDR as user.ip # First we pull out our top-level (non-data attr) kwargs data = self.data if not isinstance(data.get('level'), (six.string_types, int)): data['level'] = logging.ERROR elif data['level'] not in LOG_LEVELS: data['level'] = logging.ERROR if not data.get('logger'): data['logger'] = DEFAULT_LOGGER_NAME else: data['logger'] = trim(data['logger'], 64) if data.get('platform'): data['platform'] = trim(data['platform'], 64) timestamp = data.get('timestamp') if not timestamp: timestamp = timezone.now() if isinstance(timestamp, datetime): # We must convert date to local time so Django doesn't mess it up # based on TIME_ZONE if settings.TIME_ZONE: if not timezone.is_aware(timestamp): timestamp = timestamp.replace(tzinfo=timezone.utc) elif timezone.is_aware(timestamp): timestamp = timestamp.replace(tzinfo=None) timestamp = float(timestamp.strftime('%s')) data['timestamp'] = timestamp if not data.get('event_id'): data['event_id'] = uuid4().hex data.setdefault('message', None) data.setdefault('culprit', None) data.setdefault('time_spent', None) data.setdefault('server_name', None) data.setdefault('site', None) data.setdefault('checksum', None) data.setdefault('fingerprint', None) data.setdefault('platform', None) data.setdefault('extra', {}) data.setdefault('errors', []) tags = data.get('tags') if not tags: tags = [] # full support for dict syntax elif isinstance(tags, dict): tags = tags.items() # prevent [tag, tag, tag] (invalid) syntax elif not all(len(t) == 2 for t in tags): tags = [] else: tags = list(tags) data['tags'] = [] for key, value in tags: key = six.text_type(key).strip() value = six.text_type(value).strip() if not (key and value): continue if len(value) > MAX_TAG_VALUE_LENGTH: continue data['tags'].append((key, value)) if not isinstance(data['extra'], dict): # throw it away data['extra'] = {} trim_dict( data['extra'], max_size=settings.SENTRY_MAX_EXTRA_VARIABLE_SIZE) # TODO(dcramer): more of validate data needs stuffed into the manager for key in data.keys(): if key in CLIENT_RESERVED_ATTRS: continue value = data.pop(key) try: interface = get_interface(key)() except ValueError: continue try: inst = interface.to_python(value) data[inst.get_path()] = inst.to_json() except Exception: pass data['version'] = self.version # TODO(dcramer): find a better place for this logic exception = data.get('sentry.interfaces.Exception') stacktrace = data.get('sentry.interfaces.Stacktrace') if exception and len(exception['values']) == 1 and stacktrace: exception['values'][0]['stacktrace'] = stacktrace del data['sentry.interfaces.Stacktrace'] if 'sentry.interfaces.Http' in data: # default the 
culprit to the url if not data['culprit']: data['culprit'] = data['sentry.interfaces.Http']['url'] if data['time_spent']: data['time_spent'] = int(data['time_spent']) if data['culprit']: data['culprit'] = trim(data['culprit'], MAX_CULPRIT_LENGTH) if data['message']: data['message'] = trim( data['message'], settings.SENTRY_MAX_MESSAGE_LENGTH) return data @suppress_exceptions def save(self, project, raw=False): # TODO: culprit should default to "most recent" frame in stacktraces when # it's not provided. project = Project.objects.get_from_cache(id=project) data = self.data.copy() # First we pull out our top-level (non-data attr) kwargs event_id = data.pop('event_id') message = data.pop('message') level = data.pop('level') culprit = data.pop('culprit', None) or '' time_spent = data.pop('time_spent', None) logger_name = data.pop('logger', None) server_name = data.pop('server_name', None) site = data.pop('site', None) checksum = data.pop('checksum', None) fingerprint = data.pop('fingerprint', None) platform = data.pop('platform', None) release = data.pop('release', None) date = datetime.fromtimestamp(data.pop('timestamp')) date = date.replace(tzinfo=timezone.utc) kwargs = { 'message': message, 'platform': platform, } event = Event( project=project, event_id=event_id, data=data, time_spent=time_spent, datetime=date, **kwargs ) tags = data.get('tags') or [] tags.append(('level', LOG_LEVELS[level])) if logger_name: tags.append(('logger', logger_name)) if server_name: tags.append(('server_name', server_name)) if site: tags.append(('site', site)) if release: # TODO(dcramer): we should ensure we create Release objects tags.append(('sentry:release', release)) for plugin in plugins.for_project(project, version=None): added_tags = safe_execute(plugin.get_tags, event, _with_transaction=False) if added_tags: tags.extend(added_tags) # XXX(dcramer): we're relying on mutation of the data object to ensure # this propagates into Event data['tags'] = tags # prioritize fingerprint over checksum as its likely the client defaulted # a checksum whereas the fingerprint was explicit if fingerprint: hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint)) elif checksum: hashes = [checksum] else: hashes = map(md5_from_hash, get_hashes_for_event(event)) group_kwargs = kwargs.copy() group_kwargs.update({ 'culprit': culprit, 'logger': logger_name, 'level': level, 'last_seen': date, 'first_seen': date, 'time_spent_total': time_spent or 0, 'time_spent_count': time_spent and 1 or 0, }) if release: release = Release.get_or_create( project=project, version=release, date_added=date, ) group_kwargs['first_release'] = release Activity.objects.create( type=Activity.RELEASE, project=project, ident=release, data={'version': release}, datetime=date, ) group, is_new, is_regression, is_sample = safe_execute( self._save_aggregate, event=event, hashes=hashes, **group_kwargs ) using = group._state.db event.group = group event.group_id = group.id # store a reference to the group id to guarantee validation of isolation event.data.bind_ref(event) try: with transaction.atomic(): EventMapping.objects.create( project=project, group=group, event_id=event_id) except IntegrityError: self.logger.info('Duplicate EventMapping found for event_id=%s', event_id) return event UserReport.objects.filter( project=project, event_id=event_id, ).update(group=group) # save the event unless its been sampled if not is_sample: try: with transaction.atomic(): event.save() except IntegrityError: self.logger.info('Duplicate Event found for 
event_id=%s', event_id) return event if is_new and release: buffer.incr(Release, {'new_groups': 1}, { 'id': release.id, }) safe_execute(Group.objects.add_tags, group, tags, _with_transaction=False) if not raw: post_process_group.delay( group=group, event=event, is_new=is_new, is_sample=is_sample, is_regression=is_regression, ) else: self.logger.info('Raw event passed; skipping post process for event_id=%s', event_id) index_event.delay(event) # TODO: move this to the queue if is_regression and not raw: regression_signal.send_robust(sender=Group, instance=group) return event def _find_hashes(self, project, hash_list): matches = [] for hash in hash_list: ghash, _ = GroupHash.objects.get_or_create( project=project, hash=hash, ) matches.append((ghash.group_id, ghash.hash)) return matches def _ensure_hashes_merged(self, group, hash_list): # TODO(dcramer): there is a race condition with selecting/updating # in that another group could take ownership of the hash bad_hashes = GroupHash.objects.filter( project=group.project, hash__in=hash_list, ).exclude( group=group, ) if not bad_hashes: return for hash in bad_hashes: merge_group.delay( from_group_id=hash.group_id, to_group_id=group.id, ) return GroupHash.objects.filter( project=group.project, hash__in=bad_hashes, ).update( group=group, ) def _save_aggregate(self, event, hashes, **kwargs): time_spent = event.time_spent project = event.project # attempt to find a matching hash all_hashes = self._find_hashes(project, hashes) try: existing_group_id = (h[0] for h in all_hashes if h[0]).next() except StopIteration: existing_group_id = None # XXX(dcramer): this has the opportunity to create duplicate groups # it should be resolved by the hash merging function later but this # should be better tested/reviewed if existing_group_id is None: kwargs['score'] = ScoreClause.calculate(1, kwargs['last_seen']) group, group_is_new = Group.objects.create( project=project, **kwargs ), True else: group = Group.objects.get(id=existing_group_id) group_is_new = False # If all hashes are brand new we treat this event as new is_new = False new_hashes = [h[1] for h in all_hashes if h[0] is None] if new_hashes: affected = GroupHash.objects.filter( project=project, hash__in=new_hashes, group__isnull=True, ).update( group=group, ) if affected != len(new_hashes): self._ensure_hashes_merged(group, new_hashes) elif group_is_new and len(new_hashes) == len(all_hashes): is_new = True # XXX(dcramer): it's important this gets called **before** the aggregate # is processed as otherwise values like last_seen will get mutated can_sample = should_sample(event.datetime, group.last_seen, group.times_seen) if not is_new: is_regression = self._process_existing_aggregate(group, event, kwargs) else: is_regression = False # Determine if we've sampled enough data to store this event if is_new or is_regression: is_sample = False else: is_sample = can_sample tsdb.incr_multi([ (tsdb.models.group, group.id), (tsdb.models.project, project.id), ]) return group, is_new, is_regression, is_sample def _process_existing_aggregate(self, group, event, data): date = max(event.datetime, group.last_seen) extra = { 'last_seen': date, 'score': ScoreClause(group), } if event.message and event.message != group.message: extra['message'] = event.message if group.level != data['level']: extra['level'] = data['level'] if group.culprit != data['culprit']: extra['culprit'] = data['culprit'] is_regression = False if group.is_resolved() and plugin_is_regression(group, event): is_regression = bool(Group.objects.filter( 
id=group.id, # ensure we cant update things if the status has been set to # muted status__in=[GroupStatus.RESOLVED, GroupStatus.UNRESOLVED], ).exclude( # add to the regression window to account for races here active_at__gte=date - timedelta(seconds=5), ).update( active_at=date, # explicitly set last_seen here as ``is_resolved()`` looks # at the value last_seen=date, status=GroupStatus.UNRESOLVED )) group.active_at = date group.status = GroupStatus.UNRESOLVED group.last_seen = extra['last_seen'] update_kwargs = { 'times_seen': 1, } if event.time_spent: update_kwargs.update({ 'time_spent_total': event.time_spent, 'time_spent_count': 1, }) buffer.incr(Group, update_kwargs, { 'id': group.id, }, extra) return is_regression
bsd-3-clause
-1,338,582,620,076,449,300
31.654822
97
0.56604
false
4.204575
false
false
false
theodoregoetz/wernher
sandbox/KRPC Testing.py
1
3144
# -*- coding: utf-8 -*- # <nbformat>3.0</nbformat> # <codecell> %run -i 'KRPC.ipynb' # <codecell> conn = krpc.connect(name='laptop', address='192.168.1.9') ksc = conn.space_center vessel = ksc.active_vessel obt = vessel.orbit ap = vessel.auto_pilot con = vessel.control vrf = vessel.reference_frame srfrf = vessel.surface_reference_frame vobtrf = vessel.orbital_reference_frame obtrf = obt.body.reference_frame obtorf = obt.body.orbital_reference_frame obtnrrf = obt.body.non_rotating_reference_frame flight = lambda rf: vessel.flight(rf) # <codecell> t = ksc.ut o = KeplerOrbit(obt) f = flight(obtorf) print(obt.time_to_apoapsis, obt.time_to_periapsis) print(f.longitude) print(o.Ω * 180/π) print(o.ν * 180/π) # <codecell> speed = conn.add_stream(getattr, flight(srfrf), 'speed') altitude = conn.add_stream(getattr, flight(obtrf), 'mean_altitude') apoapsis = conn.add_stream(getattr, obt, 'apoapsis_altitude') # <codecell> con.throttle = 0.6 ap.set_rotation(90, 90, roll=90) time.sleep(1) con.activate_next_stage() while flight(obtrf).speed < 100.: time.sleep(0.1) ap.set_rotation(80, 90, roll=90) while flight(obtrf).mean_altitude < 5000.: time.sleep(0.1) ap.disengage() ap.sas = True ap.sas_mode = ksc.SASMode.prograde while obt.apoapsis_altitude < 80000: time.sleep(0.1) ap.sas_mode = ksc.SASMode.stability_assist ap.sas = False while abs(obt.eccentricity) > 0.1: obt.apoapsis ap.set_direction(, 90, roll=90) ap.disengage() con.throttle = 0. # <codecell> ksc.SASMode.prograde # <codecell> speed.remove() altitude.remove() apoapsis.remove() # <codecell> def prelaunch(conn): ksc = conn.space_center vessel = ksc.active_vessel obtbody_rf = vessel.orbit.body.reference_frame flight = vessel.flight ap = vessel.auto_pilot cont = vessel.control vessel ut = conn.add_stream(getattr, ksc, 'ut') mean_altitude = conn.add_stream(getattr, flight(), 'mean_altitude') #position = conn.add_stream(vessel.position, obtbody_rf) timestamp = [] altitude = [] t0 = ut() alt = mean_altitude() while alt < 80000: t1 = ut() alt = mean_altitude() if abs(t1 - t0) > 0.001: timestamp.append(t1) altitude.append(alt) t0 = t1 time.sleep(1./25.) # <codecell> print(ut()) # <codecell> pyplot.plot(timestamp,altitude) # <codecell> print(vessel.name) print(vessel.met) print(vessel.mass) print(vessel.position(vessel.orbit.body.reference_frame)) # <codecell> def latlon(vessel): x,y,z = vessel.position(vessel.orbit.body.reference_frame) r = np.sqrt(x*x + y*y + z*z) lat = 90. - np.arccos(y / r) * 180. / np.pi lon = np.arctan2(z, x) * 180. / np.pi return lat,lon # <codecell> data = [] # <codecell> image = pyplot.imread('/home/goetz/kerbin.jpg') fig, ax = pyplot.subplots(figsize=(15,7)) im = ax.imshow(image) ax.set_autoscale_on(False) xmin,xmax = ax.get_xlim() ymin,ymax = ax.get_ylim() lat,lon = latlon(vessel) xmap = ((lon + 180.) / 360.) * (xmax - xmin) + xmin ymap = ((lat + 90.) / 180.) * (ymax - ymin) + ymin pt = ax.plot(xmap,ymap, marker='o', color='cyan')
gpl-3.0
4,061,962,225,214,920,700
17.690476
67
0.660828
false
2.470496
false
false
false
saltzm/yadi
yadi/datalog2sql/parse2tokens/parser_tests.py
1
6246
from .Parser import Parser p = Parser() #Tests to check syntax print(p.parsesentence("q.")) # Atom, zero arity print(p.parsesentence("q(x).")) # Atom, one var print(p.parsesentence("q('3').")) # Atom, string print(p.parsesentence("q(x,y).")) # Atom, two-arity print(p.parsesentence("q(_,x).")) # Atom, anonymous variable print(p.parsesentence("_ab(a).")) # Predicate symbol with underscore print(p.parsesentence("q2(x,z,b,'a').")) # Predicate symbol with number print(p.parsesentence("__ab_55(a,b,c).")) # Predicate symbol with number and underscore print(p.parsesentence("q(x,y) :- k(x,y).")) # Rule with one literal print(p.parsesentence("q(x,y) :- a(foo_foo).")) # Rule with one literal using constant print(p.parsesentence("q(x,y) :- k(_ab).")) # Rule with one literal with constant starting with underscore print(p.parsesentence("q(x,y) :- k(X).")) # Rule with one literal with one variable print(p.parsesentence("q(x,y) :- k(x,h), _v3(n,k).")) # Rule with two literals print(p.parsesentence("q(x,y) :- a;b.")) # Rule with disjunction of two zero-arity atoms print(p.parsesentence("q(x,y) :- a(x);b(x).")) # Rule with disjunction of two 1-arity atoms print(p.parsesentence("q(x,y) :- a division b.")) # Rule with division of two zero-arity atoms print(p.parsesentence("q(x,y) :- a(x,y) division b(x,y).")) # Rule with division of two two-arity atoms print(p.parsesentence("q(x,y,z) :- a(x),a;b.")) # Rule with one-arity atom, disjunction of two zero-arity atoms print(p.parsesentence("q(x,y) :- a(x), t>5.")) # Rule with one-arity atom, boolean comparison print(p.parsesentence("q(x,y) :- a(x), t<5.")) # Rule with one-arity atom, boolean comparison print(p.parsesentence("q(x,y) :- a(x), t>=5.")) # Rule with one-arity atom, boolean comparison print(p.parsesentence("q(x,y) :- a(x), t<=5.")) # Rule with one-arity atom, boolean comparison print(p.parsesentence("q(x,y) :- a(x), gd=5.")) # Rule with one-arity atom, boolean comparison print(p.parsesentence("q(x,y,z) :- a(x), t=4.0.")) # Rule with one-arity atom, comparison using float print(p.parsesentence("q(x,y,z) :- a(x), t=4.0E6.")) # Rule with one-arity atom, comparison using float+E print(p.parsesentence("q(x,y,z) :- a(x), t=4.0E+6.")) # Rule with one-arity atom, comparison using float+E+'+' print(p.parsesentence("q(x,y,z) :- a(x), t=4.0E-6.")) # Rule with one-arity atom, comparison using float+E+'-' print(p.parsesentence("q(x,y,z) :- a(x), t=4.0, k(x).")) # Rule with one-arity atom, comparison, atom print(p.parsesentence("q(x) :- x(g), not(a(x,y)).")) # Rule with one-arity atom, negation print(p.parsesentence("q(x,y). k(x).")) # Two facts in a line. print(p.parsesentence("q(x,y). q(x,y) :- a(b,c).")) # A fact and a rule in a line. print(p.parsesentence("q(x,y). q(x,y) :- a(b,c). a(b).")) # A fact, a rule and a fact in a line. print(p.parsesentence("q(x,y) :- a(b), X=3; Y>5.")) # Rule with one-arity atom, disjunctive comparison. print(p.parsesentence("q(x,y) :- a(b), X=3, Y>5.")) # Rule with one-arity atom, conjunctive comparison. print(p.parsesentence("q(x,y) :- a(b), X=3, Y>5, X=3; Y>5.")) # Rule with one-arity atom, two two-term comparisons. print(p.parsesentence("r(X) :- not(t(Y)), X = Y, s(Y).")) # Rule with a negation in front. print(p.parsesentence("r(x) :- r(a,X); not(q(X,b)), lj(a,b,x).")) # Rule with a left join print(p.parsesentence("q(X,Z) :- s(X,Y), not(t(X)), Y=Z.")) print(p.parsesentence("q(X,Z) :- t>5, s(X,Y), not(t(X)), Y=Z.")) print(p.parsesentence("q(X,Y):- s(X).\nq(X,Y):- s(Y).")) # Two statements broken down in two lines. 
print(p.parsesentence("q(x,y) :- a(b), X=3, 3>Y, X=3; 5>X.")) # Rule with one-arity atom, two two-term comparisons. print(p.parsesentence("q(X,Y), s(x).")) # Temporary view print(p.parsesentence("q(X,Y), not(x(t,y)).")) # Temporary view print(p.parsesentence("q(X,Y):- s(X).\nq(X,Y):- s(X).\nq(X,Y):- s(X).")) print(p.parsesentence("q(X,3) :- s(X).")) #Incorporation of all elements print(p.parsesentence("a45(x,Y,_343,a) :- x43A(k,5,x), A>=4; t=5, a(q,x);r(x,Y), a division y. q(x,y).")) #Rules (that actually make sense) print(p.parsesentence("q(X,Y):- s(X).")) print(p.parsesentence("q(X):- s(X).")) print(p.parsesentence("q(X):- s(X), not(t(U)).")) print(p.parsesentence("q(X):- s(X,U), not(t(U)).")) print(p.parsesentence("q(X):- s(X), not(t(U)), U = 2.")) print(p.parsesentence("q(X):- s(X), not(t(U)), U < 2.")) print(p.parsesentence("q(X):- s(X), not(t(U)), U = X.")) print(p.parsesentence("q(X):- s(X), Y < 3.")) print(p.parsesentence("q(X):- s(X,Y), Y < 3.")) print(p.parsesentence("q(X):- s(X), not(t(Y)), X = Y.")) print(p.parsesentence("q(X,Z):- s(X,Y), not(t(A,Z)), Z = Y.")) print(p.parsesentence("q(X):- s(X), X = 2.")) print(p.parsesentence("q(X):- s(X, Y), Y = 2.")) print(p.parsesentence("q(X):- s(X, Y, Z), Y = 2, Z = Y.")) print(p.parsesentence("q(X) :- not(s(Y)), X = 2, X = Y.")) print(p.parsesentence("q(X) :- not(s(Y)), X = Y, X = 2.")) print(p.parsesentence("q(X) :- s(X), X = Y.")) print(p.parsesentence("q(X) :- s(X), P = Y.")) print(p.parsesentence("r(X) :- s(X), 3=X, X>2.")) print(p.parsesentence("r(Y) :- s(X), Y=X, X=2, Y =4.")) print(p.parsesentence("r(X,Y,Z,_,2) :- s(X), Y=X, X=2.")) print(p.parsesentence("q(X,Y) :- s(_,Y), t(X,_), u(_), v(_,_).")) print(p.parsesentence("q(x,y).")) print(p.parsesentence("q(X,Y) :- s(_,Y), t(X,_), u(_), v(_,_).")) #Errors #print(p.parsesentence("q(x,y,,).")) #print(p.parsesentence("r(Title1,Title2,Release_date):-movie(Title1,,,Release_date),movie(Title2,,,Release_date).")) #print(p.parsesentence("r(x):-q(x),s(x,,,,,).")) #print(p.parsesentence("q(x,)."))
bsd-3-clause
-3,213,480,592,657,898,000
72.482353
127
0.553154
false
2.666951
false
true
false
blaisb/cfdemUtilities
cylinderPorosity/pythons/getParticlePositionsFOAM.py
1
2828
# This program converts OpenFOAM raw data to a text file containing information on the particles
# in the format that can be read by the porosity code
#
# position (x y z) and radius
# THIS PROGRAM REQUIRES A DIRECTORY particles in the main folder
#In the current form of the software the radius must be fixed by the user
# Author : Bruno Blais
# Last modified : 15-01-2014

#Python imports
#----------------
import os
import sys
import numpy
#----------------

#********************************
# OPTIONS AND USER PARAMETERS
#********************************
#Initial time of simulation, final time and time increment must be specified by user
t0=5
tf=115.0
dT=5
radius = 0.0007485
height=0.05
ri = 0.0064
ro = 0.0238

#====================
# READER
#====================
#This function reads an OpenFOAM raw file and extracts a table of the data
def readf(fname):
    infile = open(fname,'r')
    if (infile!=0):
        #Clear garbage lines
        for i in range(0,17):
            infile.readline()
        #Read number of cell centers
        n=int(infile.readline())
        #Pre-allocate memory
        x=numpy.zeros([n])
        y=numpy.zeros([n])
        z=numpy.zeros([n])
        #Clear garbage line "("
        infile.readline()
        #read current property "xu"
        for i in range(0,n,1):
            number_str=infile.readline()
            number2_str=number_str.split("(")
            number3_str=number2_str[1].split(")")
            number4_str=number3_str[0].split()
            x[i]=float(number4_str[0])
            y[i]=float(number4_str[1])
            z[i]=float(number4_str[2])
    else:
        print "File %s could not be opened" %fname
    infile.close();
    return n,x,y,z

#======================
# MAIN
#======================

#Name of the files to be considered
inname= ['lagrangian/particleCloud/positions']

os.chdir("./") # go to directory

nt=int((tf-t0)/dT)
t=t0

for i in range(0,nt):
    #Current case
    print "Post-processing time ", t

    #Go to the directory corresponding to the timestep
    if (t>0.99999 and t<1.0000001) :
        os.chdir("1")
    elif (t==0) :
        os.chdir("0")
    elif ((numpy.abs(numpy.mod(t,1)))<0.01):
        os.chdir(str(int(t)))
    else :os.chdir(str(t))

    #Create output file back in main folder
    outname="../particlesInfo/particlesInfo_%s" %str(i)
    outfile=open(outname,'w')

    #Read each variable to be able to dimensionalise the final array
    [n,x,y,z] = readf(inname[0])

    #Write header
    outfile.write("%i\n" %nt)
    outfile.write("%5.5e\n" %height)
    outfile.write("%5.5e\n" %ri)
    outfile.write("%5.5e\n" %ro)
    outfile.write("%i\n" %n)
    outfile.write("%5.5e\n" %t)
    outfile.write("**************************************************\n")

    for j in range(0,n):
        outfile.write("%5.5e %5.5e %5.5e %5.5e \n" %(x[j],y[j],z[j],radius))

    outfile.close()
    t += dT

    #Go back to CFD directory
    os.chdir("..") #

print "Post-processing over"
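
# Sketch of a particlesInfo_<i> file as written above, assuming 1000 particles
# at t = 5 (the numeric values below are placeholders, not real data): the
# header holds nt, height, ri, ro, n and t on the first six lines, then a
# separator line, then one "x y z radius" row per particle.
#
#   22
#   5.00000e-02
#   6.40000e-03
#   2.38000e-02
#   1000
#   5.00000e+00
#   **************************************************
#   1.23456e-02 -7.89012e-03 2.50000e-02 7.48500e-04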
lgpl-3.0
-5,991,427,169,702,565,000
22.966102
96
0.594413
false
3.050701
false
false
false
travistang/late_fyt
model.py
1
10888
from keras.models import *
from keras.layers import *
from keras.layers.advanced_activations import *
from keras.callbacks import *
from keras.optimizers import Adam
from keras.initializers import *
import tensorflow as tf
from utils import huber_loss

def guide_v1():
    S = Input(shape = (64,64,12))
    x = Convolution2D(32,8,8,subsample = (4,4),activation = 'relu')(S)
    x = BatchNormalization()(x)
    x = Convolution2D(32,4,4,subsample = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Convolution2D(64,4,4,subsample = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    # z = Dense(128,init = 'uniform',activation = 'relu',name = 'ls_1',trainable = False)(x)
    # ls = Dense(29,init = 'uniform',activation = 'relu',name = 'ls_2',trainable = False)(z)
    y = Dense(300,activation = 'relu',name = 'act_1')(x)
    Steering = Dense(1,activation = 'linear',name = 'act_2')(y)
    #Steering = Dense(1,weights = [np.random.uniform(-1e-8,1e-8,(512,1)),np.zeros((1,))], name='Steering')(lrn4)
    model = Model(S,Steering)
    adam = Adam(lr=0.00000001,decay = 1e-6)
    K.get_session().run([adam.beta_1.initializer,adam.beta_2.initializer])
    model.compile(loss='mse', optimizer=adam)
    if weight_files:
        model.load_weights(weight_files)
    return model, model.trainable_weights, S

def guide_v2():
    S = Input(shape = (64,64,4))
    x = Convolution2D(32,8,8,subsample = (4,4),activation = 'relu')(S)
    x = BatchNormalization()(x)
    x = Convolution2D(32,4,4,subsample = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Convolution2D(32,4,4,subsample = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(300,activation = 'relu')(x)
    x = Dense(8,activation = 'linear',name = 'act_2')(x)
    model = Model(S,x)
    adam = Adam(lr = 0.0001,decay = 1e-6)
    model.compile(loss = 'categorial_accuracy',optimizer = adam)
    return model

def low_guide_v1(lr = 0.0001,num_output = 9):
    S = Input(shape = (116,))
    x = Dense(300,activation = ELU())(S)
    x = Dense(600,activation = ELU())(x)
    x = Dense(num_output,activation = 'linear',init=lambda shape: normal(shape, scale=1e-4))(x)
    model = Model(S,x)
    adam = Adam(lr = lr,decay = 1e-6,clipnorm=0.5)
    model.compile(loss = huber_loss(0.5),optimizer = adam)
    return model

def low_guide_v2(num_action = 1,num_ob = 1):
    # the actor
    S = Input(shape = (1,num_ob))
    x = Flatten()(S)
    x = Dense(300,activation = 'relu')(x)
    x = Dense(600,activation = 'relu')(x)
    x = Dense(num_action,activation = 'linear')(x)
    model = Model(S,x)
    # the critic
    A = Input(shape = (num_action,))
    S = Input(shape = (1,num_ob))
    s = Flatten()(S)
    x = merge([A,s],mode = 'concat')
    x = Dense(300,activation = 'relu')(x)
    x = Dense(600,activation = 'relu')(x)
    x = Dense(1,activation = 'linear')(x)
    critic = Model([A,S],x)
    return model,critic

def img_guide_v1(num_action = 1):
    S = Input(shape = (1,64,64,3))
    x = Reshape((64,64,3))(S)
    x = Conv2D(16,(8,8),strides = (4,4),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = Flatten()(x)
    x = Dense(600,activation = 'relu')(x)
    x = Dense(300,activation = 'relu')(x)
    x = Dense(num_action,activation = 'linear')(x)
    actor = Model(S,x)
    S = Input(shape = (1,64,64,3))
    A = Input(shape = (num_action,))
    x = Reshape((64,64,3))(S)
    x = Conv2D(16,(8,8),strides = (4,4),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = Flatten()(x)
    x = merge([A,x],mode = 'concat')
    x = Dense(600,activation = 'relu')(x)
    x = Dense(300,activation = 'relu')(x)
    x = Dense(1,activation = 'linear')(x)
    critic = Model([A,S],x)
    return actor,critic

def img_guide_v2(num_action = 1,hist_len = 4):
    S = Input(shape = (1,64,64,3 * hist_len))
    x = Reshape((64,64,3 * hist_len))(S)
    x = Conv2D(32,(8,8),strides = (4,4),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = Flatten()(x)
    x = Dense(800,activation = 'relu')(x)
    x = Dense(300,activation = 'relu')(x)
    x = Dense(num_action,activation = 'linear')(x)
    actor = Model(S,x)
    S = Input(shape = (1,64,64,3 * hist_len))
    A = Input(shape = (num_action,))
    x = Reshape((64,64,3 * hist_len))(S)
    x = Conv2D(32,(8,8),strides = (4,4),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = Flatten()(x)
    x = merge([A,x],mode = 'concat')
    x = Dense(800,activation = 'relu')(x)
    x = Dense(300,activation = 'relu')(x)
    x = Dense(1,activation = 'linear')(x)
    critic = Model([A,S],x)
    return actor,critic

def img_guide_v3(num_action = 1,hist_len = 4):
    S = Input(shape = (1,hist_len,64,64,3))
    x = Reshape((hist_len,64,64,3))(S)
    x = TimeDistributed(Conv2D(32,(8,8),strides = (4,4),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = Flatten()(x)
    x = Dense(800,activation = 'relu')(x)
    x = Dense(400,activation = 'relu')(x)
    x = Dense(num_action,activation = 'linear')(x)
    actor = Model(S,x)
    S = Input(shape = (1,hist_len,64,64,3))
    A = Input(shape = (num_action,))
    x = Reshape((hist_len,64,64,3))(S)
    x = TimeDistributed(Conv2D(32,(8,8),strides = (4,4),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = Flatten()(x)
    x = merge([A,x],mode = 'concat')
    x = Dense(800,activation = 'relu')(x)
    x = Dense(400,activation = 'relu')(x)
    x = Dense(1,activation = 'linear')(x)
    critic = Model([A,S],x)
    return actor,critic

def stack_model(num_action = 1,hist_len = 4, num_filters = 16):
    S = Input(shape = (1,64,64,3 * hist_len))
    x = Reshape((64,64,3 * hist_len))(S)
    x = Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = Flatten()(x)
    x = Dense(600 if num_filters == 16 else 800,activation = 'relu')(x)
    x = Dense(300,activation = 'relu')(x)
    x = Dense(num_action,activation = 'linear')(x)
    actor = Model(S,x)
    S = Input(shape = (1,64,64,3 * hist_len))
    A = Input(shape = (num_action,))
    x = Reshape((64,64,3 * hist_len))(S)
    x = Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32,(4,4),strides = (2,2),activation = 'relu')(x)
    x = Flatten()(x)
    x = merge([A,x],mode = 'concat')
    x = Dense(600 if num_filters == 16 else 800,activation = 'relu')(x)
    x = Dense(300,activation = 'relu')(x)
    x = Dense(1,activation = 'linear')(x)
    critic = Model([A,S],x)
    return actor,critic

def fork_model(num_action = 1,hist_len = 4, num_filters = 16):
    S = Input(shape = (1,hist_len,64,64,3))
    x = Reshape((hist_len,64,64,3))(S)
    x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = Flatten()(x)
    x = Dense(600 if num_filters == 16 else 800,activation = 'relu')(x)
    x = Dense(400,activation = 'relu')(x)
    x = Dense(num_action,activation = 'linear')(x)
    actor = Model(S,x)
    S = Input(shape = (1,hist_len,64,64,3))
    A = Input(shape = (num_action,))
    x = Reshape((hist_len,64,64,3))(S)
    x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = Flatten()(x)
    x = merge([A,x],mode = 'concat')
    x = Dense(800 if num_filters == 16 else 1200,activation = 'relu')(x)
    x = Dense(400,activation = 'relu')(x)
    x = Dense(1,activation = 'linear')(x)
    critic = Model([A,S],x)
    return actor,critic

def LSTM_model(num_action = 1,hist_len = 4, num_filters = 16):
    S = Input(shape = (1,hist_len,64,64,3))
    x = Reshape((hist_len,64,64,3))(S)
    x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = TimeDistributed(Flatten())(x)
    x = LSTM(100 if num_filters == 16 else 200,activation = 'relu')(x)
    x = Dense(1,activation = 'linear')(x)
    actor = Model(S,x)
    S = Input(shape = (1,hist_len,64,64,3))
    A = Input(shape = (num_action,))
    x = Reshape((hist_len,64,64,3))(S)
    x = TimeDistributed(Conv2D(num_filters,(8,8),strides = (4,4),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = TimeDistributed(BatchNormalization())(x)
    x = TimeDistributed(Conv2D(32,(4,4),strides = (2,2),activation = 'relu'))(x)
    x = TimeDistributed(Flatten())(x)
    x = LSTM(100 if num_filters == 16 else 200,activation = 'relu')(x)
    x = merge([A,x],mode = 'concat')
    x = Dense(50,activation = 'relu')(x)
    x = Dense(1,activation = 'linear')(x)
    critic = Model([A,S],x)
    return actor,critic
mit
4,400,644,651,622,553,000
40.090566
116
0.591293
false
2.791795
false
false
false
wummel/linkchecker-gui
linkcheck_gui/syntax.py
1
3578
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2011-2016 Bastian Kleineidam
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from PyQt4 import QtCore, QtGui


def format (color, style=''):
    """Return a QTextCharFormat with the given attributes."""
    format = QtGui.QTextCharFormat()
    format.setForeground(getattr(QtCore.Qt, color))
    if 'bold' in style:
        format.setFontWeight(QtGui.QFont.Bold)
    if 'italic' in style:
        format.setFontItalic(True)
    return format


class Highlighter (QtGui.QSyntaxHighlighter):
    """Base class for all highlighters."""

    def __init__ (self, document):
        """Initialize rules and styles."""
        super(Highlighter, self).__init__(document)
        self.rules = []
        self.styles = {}

    def highlightBlock(self, text):
        """Highlight a text block."""
        for expression, format in self.rules:
            # get first match
            index = expression.indexIn(text)
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, format)
                # jump to next match
                index = expression.indexIn(text, index + length)
        self.setCurrentBlockState(0)

    def addRule (self, pattern, style):
        """Add a rule pattern with given style."""
        self.rules.append((QtCore.QRegExp(pattern), self.styles[style]))


class XmlHighlighter (Highlighter):
    """XML syntax highlighter."""

    def __init__(self, document):
        """Set XML syntax rules."""
        super(XmlHighlighter, self).__init__(document)
        self.styles.update({
            'keyword': format('darkBlue'),
            'attribute': format('darkGreen'),
            'comment': format('darkYellow'),
            'string': format('darkMagenta'),
        })
        # keywords
        for reg in ('/>', '>', '<!?[a-zA-Z0-9_]+'):
            self.addRule(reg, 'keyword')
        # attributes
        self.addRule(r"\b[A-Za-z0-9_]+(?=\s*\=)", 'attribute')
        # double-quoted string, possibly containing escape sequences
        self.addRule(r'"[^"\\]*(\\.[^"\\]*)*"', 'string')
        # single-quoted string, possibly containing escape sequences
        self.addRule(r"'[^'\\]*(\\.[^'\\]*)*'", 'string')
        # comments
        self.addRule(r"<!--[^>]*-->", 'comment')


# Treat HTML as XML
HtmlHighlighter = XmlHighlighter


class IniHighlighter (Highlighter):
    """INI syntax highlighter."""

    def __init__(self, document):
        """Set INI syntax rules."""
        super(IniHighlighter, self).__init__(document)
        self.styles.update({
            'section': format('darkBlue'),
            'property': format('darkGreen'),
            'comment': format('darkYellow'),
        })
        self.addRule(r'\b\[[a-zA-Z0-9_]+\]\b', 'section')
        self.addRule(r'\b[a-zA-Z0-9_]+\](?=\s*\=)', 'property')
        self.addRule(r'#[^\n]*', 'comment')
gpl-3.0
4,945,731,341,848,713,000
35.510204
73
0.604807
false
3.893362
false
false
false
boisde/Greed_Island
business_logic/order_collector/transwarp/validate.py
1
2044
#!/usr/bin/env python
# coding:utf-8
import logging

RECORD_NORMAL = 0
RECORD_DELETED = 1
RECORD_CHOICE = (
    (RECORD_NORMAL, u'正常'),
    (RECORD_DELETED, u'已删除'),
)


def is_valid_kw(obj, is_update=False, **kw):
    mappings = obj.__mappings__
    if is_update and kw.get('deleted', None) == RECORD_DELETED:
        raise ValueError("Illegal operation: Try to mark %s as deleted with update api." % obj.__name__)
    elif is_update:
        pass
    # 检查是否要求存在的参数都存在
    else:
        args = set(kw.keys())
        required = {key_name for key_name, orm_val in mappings.iteritems() if orm_val.nullable is False and orm_val.primary_key is False}
        required -= {'deleted', 'create_time', 'update_time'}
        if not required.issubset(args):
            raise ValueError("Not providing required args: %s." % list(required-args))
    # 检查参数类型
    for key_name, kv in kw.iteritems():
        if key_name in mappings:
            orm_val = mappings[key_name]
            if orm_val.ddl.find('int') != -1:
                try:
                    int(kv)
                except ValueError:
                    raise ValueError("[%s]:[%s][%s] should be type of [%s]." % (key_name, unicode(kv), type(kv), orm_val.ddl))
            elif orm_val.ddl.find('char') != -1:
                char_len = int(orm_val.ddl[orm_val.ddl.find('(') + 1:orm_val.ddl.find(')')])
                if (not kv) and orm_val.nullable is True:
                    # 参数值设置可以为空且传入参数就是空
                    continue
                elif not isinstance(kv, unicode) and not isinstance(kv, str):
                    raise ValueError("[%s]:[%s][%s] should be type of str." % (key_name, unicode(kv), type(kv)))
                elif kv and len(kv) > char_len:
                    raise ValueError("[%s]:[%s] should be str of length[%s]." % (key_name, unicode(kv), char_len))
        else:
            logging.warning("[%s]:[%s] won't be passed since [%s] is not valid." % (key_name, unicode(kv), key_name))
mit
302,318,775,476,008,200
43.568182
137
0.555612
false
3.294118
false
false
false
Rbeuque74/brie-aurore
Brie/brie/websetup.py
1
1479
# -*- coding: utf-8 -*-
"""Setup the Brie application"""

import logging

import transaction
from tg import config

from brie.config.environment import load_environment

__all__ = ['setup_app']

log = logging.getLogger(__name__)


def setup_app(command, conf, vars):
    """Place any commands to setup brie here"""
    load_environment(conf.global_conf, conf.local_conf)

    # Load the models
    from brie import model

    print "Creating tables"
    model.metadata.create_all(bind=config['pylons.app_globals'].sa_engine)

    manager = model.User()
    manager.user_name = u'manager'
    manager.display_name = u'Example manager'
    manager.email_address = u'[email protected]'
    manager.password = u'managepass'

    model.DBSession.add(manager)

    group = model.Group()
    group.group_name = u'managers'
    group.display_name = u'Managers Group'

    group.users.append(manager)

    model.DBSession.add(group)

    permission = model.Permission()
    permission.permission_name = u'manage'
    permission.description = u'This permission give an administrative right to the bearer'
    permission.groups.append(group)

    model.DBSession.add(permission)

    editor = model.User()
    editor.user_name = u'editor'
    editor.display_name = u'Example editor'
    editor.email_address = u'[email protected]'
    editor.password = u'editpass'

    model.DBSession.add(editor)

    model.DBSession.flush()
    transaction.commit()
    print "Successfully setup"
bsd-2-clause
-5,205,969,752,594,157,000
24.947368
90
0.698445
false
3.616137
false
false
false
papallas/baxter_cashier
scripts/baxter_cashier_manipulation/src/environment_factory.py
1
6798
#!/usr/bin/env python """ Factory for Environments. This file contains some static classes that represents environments in real life. If Baxter for example is placed somewhere in a real environment let's name it "Robotics Lab" then we wish to define obstacles around Baxter in this specific environment. In this class we achieve exactly this, for each environment that Baxter can be, we define the obstacles around him and using the Factory Pattern and Template design pattern we are able to have extensibility with a very nice way. If you need to define a new environment here are the steps: 1. Define a similar class with the one listed below: `RoboticsLabEnvironment` but make sure the obstacles implemented in `RoboticsLabEnvironment` match you own obstacles in your environment, and make sure you give a sensible name for the class. 2. In `EnvironmentFactory` class, define a top-level attribute with the name of your new class (see the one already there: `__robotics_lab_environment`) 3. Implement your getter, as like `def get_robotics_lab_environment():` and use similar logic to return your new class back. 4. In `moveit_controller.py` find the line `EnvironmentFactory.get_robotics_lab_environment()` and change it to match your new getter method. Copyright (C) 2016/2017 The University of Leeds and Rafael Papallas This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import copy from geometry_msgs.msg import PoseStamped class EnvironmentFactory: """ Environment Factory implementing the design pattern. In here are defined the getters for the different environments and is the class used in other scripts to generate the class environments required. """ _robotics_lab_environment = None @staticmethod def initialize(): """Initialise each environment.""" EnvironmentFactory._robotics_lab_environment = RoboticsLabEnvironment() @staticmethod def get_robotics_lab_environment(): """Will return the robotics lab environment.""" return EnvironmentFactory._robotics_lab_environment.clone() class Obstacle: """This represent an obstacle in real world.""" def __init__(self, obstalce_name, x, y, z, shape_size): """ Will configure the obstacle details and set it's attributes. - obstalce_name: is the name of the obstacle. - x, y and z: is the position or pose of the obstacle in the world. - shape_size: is a triple tuple with height, width and depth of the object or obstacle. """ self.name = obstalce_name # The pose of where the obstacle is self.pose = PoseStamped() self.pose.pose.position.x = x self.pose.pose.position.y = y self.pose.pose.position.z = z # Pose Header Frame ID is None because it needs to be set for the # specific scene, which is not available at the time the obstacle # is created. self.pose.header.frame_id = None # This is a triple tuple (h, w, z) representing the size of the # obstacle self.size = shape_size def set_frame_id(self, id): """ Will set the pose's header frame ID. 
It is important, for the obstacle to appear in the MoveIt Rviz to set this to `robot.get_planning_frame()`, since we don't have this info in here, we need to set this later. Make sure you have set this otherwise you will not be able to visualise the obstacle in Rviz. """ self.pose.header.frame_id = id class Environment: """This is the template class of the Template design pattern.""" # Obstacles represents a list of obstacles _obstacles = None def clone(self): """ Clone itself. Required method to clone itself when Factory is used to get the instance. """ pass def get_obstacles(self): """Will return the list with obstacles.""" return self._obstacles class RoboticsLabEnvironment(Environment): """ This class represent's University of Leeds, Robotic's Laboratory. The obstacles defiend here are specifically to that environment. This is a subclass of the environment template of the Template design pattern. """ def __init__(self): """ Default constructor. Will initialise the obstacles attribute to empty list and will call the method to create the obstacles. """ self._obstacles = [] self._create_obstalces() def _create_obstalces(self): """ Generate and append the obstacles to the class. In here are the obstacles relevant to this specific environment. """ side_wall = Obstacle(obstalce_name="side_wall", x=0.6, y=1, z=0, shape_size=(4, 0.2, 3)) self._obstacles.append(side_wall) back_wall = Obstacle(obstalce_name="back_wall", x=-1, y=0, z=0, shape_size=(0.2, 4, 3)) self._obstacles.append(back_wall) table = Obstacle(obstalce_name="table", x=0.7, y=-0.1, z=-0.53, shape_size=(0.8, 1.2, 0.7)) self._obstacles.append(table) camera_tripod = Obstacle(obstalce_name="camera_tripod", x=0.6, y=-1.2, z=-0.54, shape_size=(1, 0.3, 1.8)) self._obstacles.append(camera_tripod) # width, length, height def clone(self): """Required method for the Template design pattern.""" return copy.copy(self)
gpl-3.0
8,566,268,127,867,704,000
34.968254
79
0.604884
false
4.117505
false
false
false
fynjah/django-pimp-my-filter
filter_manager/views.py
1
7132
import datetime try: import simplejson as json except ImportError: from django.utils import simplejson as json from django.conf import settings from django.contrib import auth from django.contrib.auth.models import User from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, HttpResponseForbidden from django.shortcuts import render_to_response, RequestContext from django.core.context_processors import csrf from django.contrib.auth.decorators import login_required from django.db.models import Q from django.contrib.contenttypes.models import ContentType from django.contrib.contenttypes.generic import GenericRelation from filter_manager.models import Filter, Condition, LOGICAL_OPERATORS @login_required def save_filter(request): if request.method == "POST" and request.is_ajax(): if 'filter' in request.POST: new_filter = json.loads(request.POST['filter']) app_model = '%s.%s' % (new_filter['app'],new_filter['model']) if settings.PIMP_MY_FILTER['ALLOWED_MODELS']: if not app_model in settings.PIMP_MY_FILTER['ALLOWED_MODELS']: return HttpResponseForbidden('[{"error":"Forbidden."}]', mimetype='application/json; charset=utf8') else: return HttpResponseForbidden( '[{"error":"Forbidden. Check PIMP_MY_FILTER Settings."}]', mimetype='application/json; charset=utf8', ) ct = ContentType.objects.get_by_natural_key(new_filter['app'], new_filter['model']) if new_filter['quick'] == 'true': quick = True else: quick = False f = Filter(name=new_filter['name'], user_id=request.user.id, quick=quick, content_type = ct,) f.save() for k,c in new_filter['conditions'].iteritems(): data = c['value_data'] if (data['type'] == 'ForeignKey' or data['type'] == 'ManyToManyField' or data['type'] == 'OneToOneField'): value = data['fk_id'] elif (data['type'] == 'BooleanField' or data['type'] == 'NullBooleanField' or data['type'] == 'FieldFile' or data['type'] == 'FileField' or data['type'] == 'ImageField'): if c['value'] == 'on': value = True else: value = False else: value = c['value'] con = Condition(filter=f, operator = c['operator'], field_type = data['type'], value=value, field=c['field'],) con.save() r = {'filter_id':f.id} return HttpResponse(json.dumps(r, indent = 4 * ' '), mimetype='application/json; charset=utf8') else: return HttpResponseForbidden('[{"error":"Forbidden. 
Wrong headers."}]', mimetype='application/json; charset=utf8') @login_required def get_structure(request): if request.method == "POST" and request.is_ajax(): if 'app' in request.POST and 'model' in request.POST: fields = {} ct = ContentType.objects.get_by_natural_key(request.POST['app'], request.POST['model']) model = ContentType.model_class(ct) for i,x in enumerate(model._meta.get_all_field_names()): obj, m, direct, m2m = model._meta.get_field_by_name(x) if obj.name == 'id' or not direct or isinstance(obj, GenericRelation): continue f = {} f.update({"type":obj.get_internal_type()}) f.update({"name":obj.name}) fields.update( {i: f} ) r = {} r.update({'fields':fields}) r.update({'operators':LOGICAL_OPERATORS}) return HttpResponse(json.dumps(r, indent = 4 * ' '), mimetype='application/json; charset=utf8') return HttpResponseForbidden('[{"error":"Forbidden"}]', mimetype='application/json; charset=utf8') def use_filter_internal(filter_id): if filter_id: try: flt = Filter.objects.only('content_type').get(pk = filter_id) except Filter.DoesNotExist: return None model = ContentType.model_class(flt.content_type) kwargs = {} for c in flt.conditions.all(): field = None lookup = c.operator field = "%s%s" % (c.field, lookup) kwargs.update({field:c.value}) return model.objects.filter(**kwargs) else: return None @login_required def use_filter(request): if request.is_ajax(): if 'filter_id' in request.GET: try: flt = Filter.objects.only('content_type').get(pk = request.GET['filter_id']) except Filter.DoesNotExist: return HttpResponseForbidden('[{"error":"Filter Not found."}]', mimetype='application/json; charset=utf8') model = ContentType.model_class(flt.content_type) kwargs = {} for c in flt.conditions.all(): field = None lookup = c.operator field = "%s%s" % (c.field, lookup) kwargs.update({field:c.value}) qs = model.objects.filter(**kwargs) response = {} for i,q in enumerate(qs): field_list = {} for f in q._meta.get_all_field_names(): obj, model, direct, m2m = q._meta.get_field_by_name(f) if not direct or isinstance(obj, GenericRelation): continue if m2m: l = {} val = obj.value_from_object(q) for m in obj.value_from_object(q): l.update({m.pk:m.__unicode__()}) field_list.update({f:l}) elif obj.rel: val = q.__getattribute__(obj.name) if val: l = {val.pk:val.__unicode__()} field_list.update({obj.name:l}) else: field_list.update({f:None}) else: field_list.update({f:obj.value_to_string(q)}) response.update({i:field_list}) r = json.dumps(response, indent = 4 * ' ') return HttpResponse(r, mimetype='application/json; charset=utf8') return HttpResponseForbidden('[{"error":"Forbidden. 
Wrong headers."}]', mimetype='application/json; charset=utf8') @login_required def get_typeahead(request): if request.is_ajax() and request.method == "POST": if ('field' in request.POST and 'app' in request.POST and 'model' in request.POST): ct = ContentType.objects.get_by_natural_key(request.POST['app'], request.POST['model']) instance = ContentType.model_class(ct) f = dict([(x,x) for x in instance._meta.get_all_field_names() ]) try: o = f[request.POST['field']] o = instance._meta.get_field_by_name(o)[0] except KeyError: return HttpResponseForbidden('[{"error":"Forbidden"}]', mimetype='application/json; charset=utf8') o = o.related.parent_model obj_list = o.objects.all() lst = {} for i,obj in enumerate(obj_list): l = {} l.update({"id":obj.id}) l.update({"unicode":obj.__unicode__()}) #not sure about __unicode__, actually lst.update({i:l}) return HttpResponse(json.dumps(lst, indent = 4 * ' '), mimetype='application/json; charset=utf8') else: return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]', mimetype='application/json; charset=utf8') def get_filters_by_user(request): if request.is_ajax(): user_filters = Filter.objects.filter(Q(user = request.user.id)|Q(for_all = True)) f_list = {} for i,f in enumerate(user_filters): f_list.update({i:{'id':f.pk, 'name':f.name, 'quick':f.quick}}) return HttpResponse(json.dumps(f_list, indent = 4 * ' '), mimetype='application/json; charset=utf8') return HttpResponseForbidden('[{"error":"Forbidden. Wrong headers."}]', mimetype='application/json; charset=utf8')
bsd-3-clause
-1,585,255,892,386,726,400
32.483568
103
0.651711
false
3.221319
false
false
false
LaurentClaessens/phystricks
src/MathStructures.py
1
3272
# -*- coding: utf8 -*-

###########################################################################
#   This is part of the module phystricks
#
#   phystricks is free software: you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation, either version 3 of the License, or
#   (at your option) any later version.
#
#   phystricks is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with phystricks.py. If not, see <http://www.gnu.org/licenses/>.
###########################################################################

# copyright (c) Laurent Claessens, 2010,2011,2013-2017
# email: [email protected]

from sage.rings.rational import Rational
from sage.all import latex

from Utilities import *
from SmallComputations import MultipleBetween
from AngleMeasure import AngleMeasure


class PolarCoordinates(object):
    def __init__(self,r,value_degree=None,value_radian=None):
        self.r = r
        self.measure=AngleMeasure(value_degree=value_degree,value_radian=value_radian)
        self.degree=self.measure.degree
        self.radian=self.measure.radian
    def __str__(self):
        return "PolarCoordinates, r=%s,degree=%s,radian=%s"%(str(self.r),str(self.degree),str(self.radian))


def DegreeAngleMeasure(x):
    return AngleMeasure(value_degree=x)


def RadianAngleMeasure(x):
    return AngleMeasure(value_radian=x)


class AxesUnit(object):
    def __init__(self,numerical_value,latex_symbol=""):
        try :
            numerical_value=Rational(numerical_value)
        except TypeError :
            pass
        self.numerical_value=numerical_value
        self.latex_symbol=latex_symbol
    def symbol(self,x):
        return latex(x)+self.latex_symbol
    def place_list(self,mx,Mx,frac=1,mark_origin=True):
        """
        return a tuple of
        1. values that are all the integer multiple of <frac>*self.numerical_value between mx and Mx
        2. the multiple of the basis unit.

        Give <frac> as literal real. Recall that python evaluates 1/2 to 0. If you pass 0.5, it will be converted back to 1/2 for a nice display.
        """
        try :
            frac=Rational(frac)     # If the user enters "0.5", it is converted to 1/2
        except TypeError :
            pass
        if frac==0:
            raise ValueError,"frac is zero in AxesUnit.place_list(). Maybe you ignore that python evaluates 1/2 to 0 ? (writes literal 0.5 instead) \n Or are you trying to push me in an infinite loop ?"
        l=[]
        k=var("TheTag")
        for x in MultipleBetween(frac*self.numerical_value,mx,Mx,mark_origin):
            if self.latex_symbol == "":
                l.append((x,"$"+latex(x)+"$"))
            else :
                pos=(x/self.numerical_value)*k
                text="$"+latex(pos).replace("TheTag",self.latex_symbol)+"$"    # This risks to be Sage-version dependent.
                l.append((x,text))
        return l
gpl-3.0
3,012,877,965,020,561,000
40.417722
202
0.623472
false
3.778291
false
false
false
stefanwebb/tensorflow-models
tensorflow_models/models/vae_normal_obs.py
1
5404
# MIT License # # Copyright (c) 2017, Stefan Webb. All Rights Reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np import tensorflow as tf import tensorflow_models as tf_models def create_placeholders(settings): x = tf.placeholder(tf.float32, shape=tf_models.batchshape(settings), name='samples') z = tf.placeholder(tf.float32, shape=tf_models.latentshape(settings), name='codes') return x, z def create_prior(settings): dist_prior = tf_models.standard_normal(tf_models.latentshape(settings)) return tf.identity(dist_prior.sample(), name='p_z/sample') def create_encoder(settings, reuse=True): encoder_network = settings['architecture']['encoder']['fn'] x_placeholder = tf_models.samples_placeholder() assert(not x_placeholder is None) with tf.variable_scope('encoder', reuse=reuse): mean_z, diag_stdev_z = encoder_network(settings, x_placeholder, is_training=False) dist_z_given_x = tf.contrib.distributions.MultivariateNormalDiag(mean_z, diag_stdev_z) encoder = tf.identity(dist_z_given_x.sample(name='sample'), name='q_z_given_x/sample') return encoder def create_decoder(settings, reuse=True): if 'transformations' in settings and 'rescale' in settings['transformations']: min_val = settings['transformations']['rescale'][0] max_val = settings['transformations']['rescale'][1] else: min_val = 0. max_val = 1. 
decoder_network = settings['architecture']['decoder']['fn'] z_placeholder = tf_models.codes_placeholder() assert(not z_placeholder is None) with tf.variable_scope('decoder', reuse=reuse): mean_x, diag_stdev_x = decoder_network(settings, z_placeholder, is_training=False) dist_x_given_z = tf.contrib.distributions.MultivariateNormalDiag(mean_x, diag_stdev_x) decoder = tf.identity(tf.clip_by_value(dist_x_given_z.sample(), min_val, max_val), name='p_x_given_z/sample') return decoder def create_probs(settings, inputs, is_training, reuse=False): encoder_network = settings['architecture']['encoder']['fn'] decoder_network = settings['architecture']['decoder']['fn'] dist_prior = tf_models.standard_normal(tf_models.latentshape(settings)) # Use recognition network to determine mean and (log) variance of Gaussian distribution in latent space with tf.variable_scope('encoder', reuse=reuse): mean_z, diag_stdev_z = encoder_network(settings, inputs, is_training=is_training) dist_z_given_x = tf.contrib.distributions.MultivariateNormalDiag(mean_z, diag_stdev_z) # Draw one sample z from Gaussian distribution eps = tf.random_normal(tf_models.latentshape(settings), 0, 1, dtype=tf.float32) z_sample = tf.add(mean_z, tf.multiply(diag_stdev_z, eps)) # Use generator to determine mean of Bernoulli distribution of reconstructed input with tf.variable_scope('decoder', reuse=reuse): mean_x, diag_stdev_x = decoder_network(settings, z_sample, is_training=is_training) dist_x_given_z = tf.contrib.distributions.MultivariateNormalDiag(tf_models.flatten(mean_x), tf_models.flatten(diag_stdev_x)) #print('*** Debugging ***') #print('mean_x.shape', mean_x.shape) #print('diag_stdev_x.shape', diag_stdev_x.shape) #print('dist_x_given_z.sample().shape', dist_x_given_z.sample().shape) #print('dist_x_given_z.log_prob(tf_models.flatten(inputs)).shape', dist_x_given_z.log_prob(tf_models.flatten(inputs)).shape) lg_p_x_given_z = tf.identity(dist_x_given_z.log_prob(tf_models.flatten(inputs)), name='p_x_given_z/log_prob') lg_p_z = tf.identity(dist_prior.log_prob(z_sample), name='p_z/log_prob') lg_q_z_given_x = tf.identity(dist_z_given_x.log_prob(z_sample), name='q_z_given_x/log_prob') return lg_p_x_given_z, lg_p_z, lg_q_z_given_x # TODO: Fix this to be normal distribution! def lg_likelihood(x, z, settings, reuse=True, is_training=False): decoder_network = settings['architecture']['decoder']['fn'] with tf.variable_scope('model'): with tf.variable_scope('decoder', reuse=reuse): logits_x = decoder_network(settings, z, is_training=is_training) dist_x_given_z = tf.contrib.distributions.Bernoulli(logits=tf_models.flatten(logits_x), dtype=tf.float32) return tf.reduce_sum(dist_x_given_z.log_prob(tf_models.flatten(x)), 1) def lg_prior(z, settings, reuse=True, is_training=False): dist_prior = tf_models.standard_normal(z.shape) return dist_prior.log_prob(z)
mit
4,858,192,034,011,259,000
45.586207
125
0.744078
false
3.180695
false
false
false
lamestation/packthing
packthing/util.py
1
7019
import errno import os import platform import shutil import string import subprocess import sys import tarfile import zipfile from contextlib import contextmanager def get_platform(): _platform = dict() _platform["system"] = platform.system().lower() machine = platform.machine().lower() if machine == "x86_64": machine = "amd64" _platform["machine"] = machine return _platform def warning(*args): print("WARNING:" + " ".join(args)) def error(*objs): blocks = [] for b in " ".join(objs).split("\n"): if len(blocks) > 0: blocks.append(" " + b) else: blocks.append(b) print("\nERROR:" + "\n".join(blocks)) print() sys.exit(1) def subtitle(text): line = (80 - (len(text) + 2)) // 2 print("-" * line, text, "-" * (line + (len(text) % 2))) def title(text): line = (80 - (len(text) + 2)) // 2 print("=" * line, text.upper(), "=" * (line + (len(text) % 2))) def headline(func): def wrapper(*args, **kwargs): title(func.__name__) res = func(*args, **kwargs) return res return wrapper @contextmanager def pushd(newDir): previousDir = os.getcwd() os.chdir(newDir) yield os.chdir(previousDir) def copy(src, dest, verbose=True, permissions=0o644): destfile = os.path.join(dest, os.path.basename(src)) if verbose: print("Copy", src, "to dir", dest) mkdir(dest) shutil.copy(src, destfile) os.chmod(destfile, permissions) def command(args, verbose=True, strict=True, stdinput=None, abort=None): if abort is None: abort = True if verbose: print("-", " ".join(args)) if not args: error("Attempting to run empty command.") try: process = subprocess.Popen( args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE ) except OSError as e: if abort: error("Command '" + args[0] + "' not found; exiting.") return if stdinput is not None: stdinput = stdinput.encode() out, err = process.communicate(input=stdinput) out = out.decode() err = err.decode() if strict: if process.returncode: print(err) raise subprocess.CalledProcessError(process.returncode, args, err) return out, err def command_in_dir(args, newdir, verbose=True, strict=True, stdinput=None): if verbose: print("DIR:", newdir) with pushd(newdir): out, err = command(args, verbose=verbose, strict=strict) return out, err def table(path, version, url): return "%30s %10s %s" % (path, version, url) def make(path, args): with pushd(path): args.insert(0, "make") for m in ["make", "mingw32-make"]: args[0] = m failed = 0 try: subprocess.check_call(args) except OSError: failed = 1 except subprocess.CalledProcessError as e: error("Failed to build project '" + path + "'") if not failed: return def which(program): def is_exe(fpath): return os.path.isfile(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(program) if fpath: if is_exe(program): return program else: for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file return None def mkdir(path): try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def tar_archive(name, files): shortname = os.path.basename(name) name += ".tgz" archive = tarfile.open(name=name, mode="w:gz") for f in files: archive.add(name=f, arcname=os.path.join(shortname, f), recursive=False) archive.close() def zip_archive(name, files): shortname = os.path.basename(name) name += ".zip" archive = zipfile.ZipFile(name, "w") for f in files: archive.write( filename=f, arcname=os.path.join(shortname, f), compress_type=zipfile.ZIP_DEFLATED, ) archive.close() def 
from_scriptroot(filename): currentpath = os.path.dirname(os.path.abspath(__file__)) return os.path.join(currentpath, filename) def get_template_text(template): template = os.path.join("template", template) template = from_scriptroot(template) return open(template, "r").read() def get_template(template): return string.Template(get_template_text(template)) # python-chroot-builder # Copyright (C) 2012 Ji-hoon Kim # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ----------------------------------------- def ldd(filenames): libs = [] for x in filenames: p = subprocess.Popen(["ldd", x], stdout=subprocess.PIPE, stderr=subprocess.PIPE) result = p.stdout.readlines() for x in result: s = x.split() s.pop(1) s.pop() if len(s) == 2: libs.append(s) return libs # ----------------------------------------- def extract_libs(files, libs): resultlibs = [] for f in files: for l in ldd([which(f)]): for lib in libs: if l[0].find(lib) == -1: pass else: resultlibs.append(l) return sorted(list(set(tuple(lib) for lib in resultlibs))) def write(text, filename): f = open(filename, "w") f.seek(0) f.write(text) f.close() def create(text, filename, executable=False): print("Create", filename) mkdir(os.path.dirname(filename)) f = open(filename, "w") f.seek(0) f.write(text) f.close() if executable: os.chmod(filename, 0o755) else: os.chmod(filename, 0o644) def root(): if os.geteuid() != 0: error("This configuration requires root privileges!") def cksum(files): print("cksum:") for f in files: try: out, err = command(["cksum", f], verbose=False) except subprocess.CalledProcessError as e: error("Failed to checksum file:", f) print("| " + out.replace("\n", ""))
gpl-3.0
2,858,408,366,746,371,000
23.371528
88
0.579855
false
3.719661
false
false
false
tklengyel/patchwork
apps/patchwork/views/xmlrpc.py
1
13846
# Patchwork - automated patch tracking system # Copyright (C) 2008 Jeremy Kerr <[email protected]> # # This file is part of the Patchwork package. # # Patchwork is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Patchwork is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Patchwork; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Patchwork XMLRPC interface # from SimpleXMLRPCServer import SimpleXMLRPCDispatcher from django.http import HttpResponse, HttpResponseRedirect, \ HttpResponseServerError from django.core import urlresolvers from django.contrib.auth import authenticate from patchwork.models import Patch, Project, Person, State from patchwork.views import patch_to_mbox from django.views.decorators.csrf import csrf_exempt import sys import base64 import xmlrpclib class PatchworkXMLRPCDispatcher(SimpleXMLRPCDispatcher): def __init__(self): if sys.version_info[:3] >= (2,5,): SimpleXMLRPCDispatcher.__init__(self, allow_none=False, encoding=None) def _dumps(obj, *args, **kwargs): kwargs['allow_none'] = self.allow_none kwargs['encoding'] = self.encoding return xmlrpclib.dumps(obj, *args, **kwargs) else: def _dumps(obj, *args, **kwargs): return xmlrpclib.dumps(obj, *args, **kwargs) SimpleXMLRPCDispatcher.__init__(self) self.dumps = _dumps # map of name => (auth, func) self.func_map = {} def register_function(self, fn, auth_required): self.func_map[fn.__name__] = (auth_required, fn) def _user_for_request(self, request): auth_header = None if 'HTTP_AUTHORIZATION' in request.META: auth_header = request.META.get('HTTP_AUTHORIZATION') elif 'Authorization' in request.META: auth_header = request.META.get('Authorization') if auth_header is None or auth_header == '': raise Exception("No authentication credentials given") str = auth_header.strip() if not str.startswith('Basic '): raise Exception("Authentication scheme not supported") str = str[len('Basic '):].strip() try: decoded = base64.decodestring(str) username, password = decoded.split(':', 1) except: raise Exception("Invalid authentication credentials") return authenticate(username = username, password = password) def _dispatch(self, request, method, params): if method not in self.func_map.keys(): raise Exception('method "%s" is not supported' % method) auth_required, fn = self.func_map[method] if auth_required: user = self._user_for_request(request) if not user: raise Exception("Invalid username/password") params = (user,) + params return fn(*params) def _marshaled_dispatch(self, request): try: params, method = xmlrpclib.loads(request.body) response = self._dispatch(request, method, params) # wrap response in a singleton tuple response = (response,) response = self.dumps(response, methodresponse=1) except xmlrpclib.Fault, fault: response = self.dumps(fault) except: # report exception back to server response = self.dumps( xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value)), ) return response dispatcher = PatchworkXMLRPCDispatcher() # XMLRPC view function @csrf_exempt def xmlrpc(request): if request.method != 'POST': return 
HttpResponseRedirect( urlresolvers.reverse('patchwork.views.help', kwargs = {'path': 'pwclient/'})) response = HttpResponse() try: ret = dispatcher._marshaled_dispatch(request) response.write(ret) except Exception: return HttpResponseServerError() return response # decorator for XMLRPC methods. Setting login_required to true will call # the decorated function with a non-optional user as the first argument. def xmlrpc_method(login_required = False): def wrap(f): dispatcher.register_function(f, login_required) return f return wrap # We allow most of the Django field lookup types for remote queries LOOKUP_TYPES = ["iexact", "contains", "icontains", "gt", "gte", "lt", "in", "startswith", "istartswith", "endswith", "iendswith", "range", "year", "month", "day", "isnull" ] ####################################################################### # Helper functions ####################################################################### def project_to_dict(obj): """Return a trimmed down dictionary representation of a Project object which is OK to send to the client.""" return \ { 'id' : obj.id, 'linkname' : obj.linkname, 'name' : obj.name, } def person_to_dict(obj): """Return a trimmed down dictionary representation of a Person object which is OK to send to the client.""" # Make sure we don't return None even if the user submitted a patch # with no real name. XMLRPC can't marshall None. if obj.name is not None: name = obj.name else: name = obj.email return \ { 'id' : obj.id, 'email' : obj.email, 'name' : name, 'user' : unicode(obj.user).encode("utf-8"), } def patch_to_dict(obj): """Return a trimmed down dictionary representation of a Patch object which is OK to send to the client.""" return \ { 'id' : obj.id, 'date' : unicode(obj.date).encode("utf-8"), 'filename' : obj.filename(), 'msgid' : obj.msgid, 'name' : obj.name, 'project' : unicode(obj.project).encode("utf-8"), 'project_id' : obj.project_id, 'state' : unicode(obj.state).encode("utf-8"), 'state_id' : obj.state_id, 'submitter' : unicode(obj.submitter).encode("utf-8"), 'submitter_id' : obj.submitter_id, 'delegate' : unicode(obj.delegate).encode("utf-8"), 'delegate_id' : max(obj.delegate_id, 0), 'commit_ref' : max(obj.commit_ref, ''), } def bundle_to_dict(obj): """Return a trimmed down dictionary representation of a Bundle object which is OK to send to the client.""" return \ { 'id' : obj.id, 'name' : obj.name, 'n_patches' : obj.n_patches(), 'public_url' : obj.public_url(), } def state_to_dict(obj): """Return a trimmed down dictionary representation of a State object which is OK to send to the client.""" return \ { 'id' : obj.id, 'name' : obj.name, } ####################################################################### # Public XML-RPC methods ####################################################################### @xmlrpc_method(False) def pw_rpc_version(): """Return Patchwork XML-RPC interface version.""" return 1 @xmlrpc_method(False) def project_list(search_str="", max_count=0): """Get a list of projects matching the given filters.""" try: if len(search_str) > 0: projects = Project.objects.filter(linkname__icontains = search_str) else: projects = Project.objects.all() if max_count > 0: return map(project_to_dict, projects)[:max_count] else: return map(project_to_dict, projects) except: return [] @xmlrpc_method(False) def project_get(project_id): """Return structure for the given project ID.""" try: project = Project.objects.filter(id = project_id)[0] return project_to_dict(project) except: return {} @xmlrpc_method(False) def 
person_list(search_str="", max_count=0): """Get a list of Person objects matching the given filters.""" try: if len(search_str) > 0: people = (Person.objects.filter(name__icontains = search_str) | Person.objects.filter(email__icontains = search_str)) else: people = Person.objects.all() if max_count > 0: return map(person_to_dict, people)[:max_count] else: return map(person_to_dict, people) except: return [] @xmlrpc_method(False) def person_get(person_id): """Return structure for the given person ID.""" try: person = Person.objects.filter(id = person_id)[0] return person_to_dict(person) except: return {} @xmlrpc_method(False) def patch_list(filter={}): """Get a list of patches matching the given filters.""" try: # We allow access to many of the fields. But, some fields are # filtered by raw object so we must lookup by ID instead over # XML-RPC. ok_fields = [ "id", "name", "project_id", "submitter_id", "delegate_id", "state_id", "date", "commit_ref", "hash", "msgid", "max_count", ] dfilter = {} max_count = 0 for key in filter: parts = key.split("__") if parts[0] not in ok_fields: # Invalid field given return [] if len(parts) > 1: if LOOKUP_TYPES.count(parts[1]) == 0: # Invalid lookup type given return [] if parts[0] == 'project_id': dfilter['project'] = Project.objects.filter(id = filter[key])[0] elif parts[0] == 'submitter_id': dfilter['submitter'] = Person.objects.filter(id = filter[key])[0] elif parts[0] == 'state_id': dfilter['state'] = State.objects.filter(id = filter[key])[0] elif parts[0] == 'max_count': max_count = filter[key] else: dfilter[key] = filter[key] patches = Patch.objects.filter(**dfilter) if max_count > 0: return map(patch_to_dict, patches[:max_count]) else: return map(patch_to_dict, patches) except: return [] @xmlrpc_method(False) def patch_get(patch_id): """Return structure for the given patch ID.""" try: patch = Patch.objects.filter(id = patch_id)[0] return patch_to_dict(patch) except: return {} @xmlrpc_method(False) def patch_get_by_hash(hash): """Return structure for the given patch hash.""" try: patch = Patch.objects.filter(hash = hash)[0] return patch_to_dict(patch) except: return {} @xmlrpc_method(False) def patch_get_by_project_hash(project, hash): """Return structure for the given patch hash.""" try: patch = Patch.objects.filter(project__linkname = project, hash = hash)[0] return patch_to_dict(patch) except: return {} @xmlrpc_method(False) def patch_get_mbox(patch_id): """Return mbox string for the given patch ID.""" try: patch = Patch.objects.filter(id = patch_id)[0] return patch_to_mbox(patch).as_string() except: return "" @xmlrpc_method(False) def patch_get_diff(patch_id): """Return diff for the given patch ID.""" try: patch = Patch.objects.filter(id = patch_id)[0] return patch.content except: return "" @xmlrpc_method(True) def patch_set(user, patch_id, params): """Update a patch with the key,value pairs in params. 
Only some parameters can be set""" try: ok_params = ['state', 'commit_ref', 'archived'] patch = Patch.objects.get(id = patch_id) if not patch.is_editable(user): raise Exception('No permissions to edit this patch') for (k, v) in params.iteritems(): if k not in ok_params: continue if k == 'state': patch.state = State.objects.get(id = v) else: setattr(patch, k, v) patch.save() return True except: raise @xmlrpc_method(False) def state_list(search_str="", max_count=0): """Get a list of state structures matching the given search string.""" try: if len(search_str) > 0: states = State.objects.filter(name__icontains = search_str) else: states = State.objects.all() if max_count > 0: return map(state_to_dict, states)[:max_count] else: return map(state_to_dict, states) except: return [] @xmlrpc_method(False) def state_get(state_id): """Return structure for the given state ID.""" try: state = State.objects.filter(id = state_id)[0] return state_to_dict(state) except: return {}
gpl-2.0
-7,321,025,644,502,314,000
30.114607
79
0.560162
false
4.12206
false
false
false
razisayyed/django-ads
ads/conf.py
1
1926
from django.conf import settings
from appconf import AppConf
from django.utils.translation import ugettext_lazy as _

gettext = lambda s: s


class AdsConf(AppConf):

    class Meta:
        prefix = 'ads'

    GOOGLE_ADSENSE_CLIENT = None  # 'ca-pub-xxxxxxxxxxxxxxxx'

    ZONES = {
        'header': {
            'name': gettext('Header'),
            'ad_size': {
                'xs': '720x150',
                'sm': '800x90',
                'md': '800x90',
                'lg': '800x90',
                'xl': '800x90'
            },
            'google_adsense_slot': None,  # 'xxxxxxxxx',
            'google_adsense_format': None,  # 'auto'
        },
        'content': {
            'name': gettext('Content'),
            'ad_size': {
                'xs': '720x150',
                'sm': '800x90',
                'md': '800x90',
                'lg': '800x90',
                'xl': '800x90'
            },
            'google_adsense_slot': None,  # 'xxxxxxxxx',
            'google_adsense_format': None,  # 'auto'
        },
        'sidebar': {
            'name': gettext('Sidebar'),
            'ad_size': {
                'xs': '720x150',
                'sm': '800x90',
                'md': '800x90',
                'lg': '800x90',
                'xl': '800x90'
            }
        }
    }

    DEFAULT_AD_SIZE = '720x150'

    DEVICES = (
        ('xs', _('Extra small devices')),
        ('sm', _('Small devices')),
        ('md', _('Medium devices (Tablets)')),
        ('lg', _('Large devices (Desktops)')),
        ('xl', _('Extra large devices (Large Desktops)')),
    )

    VIEWPORTS = {
        'xs': 'd-block img-fluid d-sm-none',
        'sm': 'd-none img-fluid d-sm-block d-md-none',
        'md': 'd-none img-fluid d-md-block d-lg-none',
        'lg': 'd-none img-fluid d-lg-block d-xl-none',
        'xl': 'd-none img-fluid d-xl-block',
    }
apache-2.0
5,576,397,388,752,822,000
26.913043
61
0.419522
false
3.546961
false
false
false
lukasmonk/lucaschess
Code/GestorTurnOnLights.py
1
12214
import time from Code import ControlPosicion from Code import Gestor from Code import Jugada from Code import TurnOnLights from Code.QT import QTUtil from Code.QT import QTUtil2 from Code.Constantes import * class GestorTurnOnLights(Gestor.Gestor): def inicio(self, num_theme, num_block, tol): if hasattr(self, "reiniciando"): if self.reiniciando: return self.reiniciando = True self.num_theme = num_theme self.num_block = num_block self.tol = tol self.block = self.tol.get_block(self.num_theme, self.num_block) self.block.shuffle() self.calculation_mode = self.tol.is_calculation_mode() self.penaltyError = self.block.penaltyError(self.calculation_mode) self.penaltyHelp = self.block.penaltyHelp(self.calculation_mode) # self.factorDistancia = self.block.factorDistancia() # No se usa es menor que 1.0 self.av_seconds = self.block.av_seconds() if self.av_seconds: cat, ico = self.block.cqualification(self.calculation_mode) self.lb_previous = "%s - %0.2f\"" % (cat, self.av_seconds) else: self.lb_previous = None self.num_line = 0 self.num_lines = len(self.block) self.num_moves = 0 self.total_time_used = 0.0 self.ayudas = 0 self.errores = 0 self.dicFENayudas = {} # se muestra la flecha a partir de dos del mismo self.tipoJuego = kJugEntLight self.siJuegaHumano = False self.siTutorActivado = False self.pantalla.ponActivarTutor(False) self.ayudasPGN = 0 self.pantalla.activaJuego(True, False, siAyudas=False) self.pantalla.quitaAyudas(True, True) self.ponMensajero(self.mueveHumano) self.mostrarIndicador(True) self.reiniciando = False self.next_line_run() def pon_rotulos(self, next): r1 = _("Calculation mode") if self.calculation_mode else _("Memory mode") r1 += "<br>%s" % self.line.label if self.lb_previous: r1 += "<br><b>%s</b>" % self.lb_previous if self.num_line: av_secs, txt = self.block.calc_current(self.num_line - 1, self.total_time_used, self.errores, self.ayudas, self.calculation_mode) r1 += "<br><b>%s: %s - %0.2f\"" % (_("Current"), txt, av_secs) self.ponRotulo1(r1) if next is not None: r2 = "<b>%d/%d</b>" % (self.num_line + next, self.num_lines) else: r2 = None self.ponRotulo2(r2) def next_line(self): if self.num_line < self.num_lines: self.line = self.block.line(self.num_line) self.num_move = -1 self.ini_time = None cp = ControlPosicion.ControlPosicion() cp.leeFen(self.line.fen) self.partida.reset(cp) siBlancas = cp.siBlancas self.siJugamosConBlancas = siBlancas self.siRivalConBlancas = not siBlancas self.ponPosicion(self.partida.ultPosicion) self.ponPiezasAbajo(siBlancas) self.pgnRefresh(True) self.partida.pendienteApertura = False self.pon_rotulos(1) def next_line_run(self): liOpciones = [k_mainmenu, k_ayuda, k_reiniciar] self.pantalla.ponToolBar(liOpciones) self.next_line() QTUtil.xrefreshGUI() self.ponPosicionDGT() self.estado = kJugando self.siguienteJugada() def procesarAccion(self, clave): if clave == k_mainmenu: self.finPartida() elif clave == k_ayuda: self.ayuda() elif clave == k_reiniciar: self.reiniciar() elif clave == k_configurar: self.configurar(siSonidos=True, siCambioTutor=False) elif clave == k_utilidades: self.utilidades() elif clave == k_siguiente: self.next_line_run() def reiniciar(self): if self.estado == kJugando: if self.ini_time: self.total_time_used += time.time() - self.ini_time if self.total_time_used: self.block.new_reinit(self.total_time_used, self.errores, self.ayudas) self.total_time_used = 0.0 TurnOnLights.write_tol(self.tol) self.inicio(self.num_theme, self.num_block, self.tol) def siguienteJugada(self): if self.estado == kFinJuego: return self.estado = kJugando 
self.siJuegaHumano = False self.ponVista() siBlancas = self.partida.ultPosicion.siBlancas self.ponIndicador(siBlancas) self.refresh() siRival = siBlancas == self.siRivalConBlancas self.num_move += 1 if self.num_move >= self.line.total_moves(): self.finLinea() return if siRival: pv = self.line.get_move(self.num_move) desde, hasta, coronacion = pv[:2], pv[2:4], pv[4:] self.mueveRival(desde, hasta, coronacion) self.siguienteJugada() else: self.siJuegaHumano = True self.base_time = time.time() if not (self.calculation_mode and self.ini_time is None): # Se inicia salvo que sea el principio de la linea self.ini_time = self.base_time self.activaColor(siBlancas) if self.calculation_mode: self.tablero.setDispatchMove(self.dispatchMove) def dispatchMove(self): if self.ini_time is None: self.ini_time = time.time() def finLinea(self): self.num_line += 1 islast_line = self.num_line == self.num_lines if islast_line: #Previous ant_tm = self.block.av_seconds() ant_done = self.tol.done_level() ant_cat_level, nada = self.tol.cat_num_level() ant_cat_global = self.tol.cat_global() num_moves = self.block.num_moves() ta = self.total_time_used + self.errores*self.penaltyError + self.ayudas*self.penaltyHelp tm = ta/num_moves self.block.new_result(tm, self.total_time_used, self.errores, self.ayudas) TurnOnLights.write_tol(self.tol) cat_block, ico = TurnOnLights.qualification(tm, self.calculation_mode) cat_level, ico = self.tol.cat_num_level() cat_global = self.tol.cat_global() txt_more_time = "" txt_more_cat = "" txt_more_line = "" txt_more_global = "" if ant_tm is None or tm < ant_tm: txt_more_time = '<span style="color:red">%s</span>' % _("New record") done = self.tol.done_level() if done and (not ant_done): if not self.tol.islast_level(): txt_more_line = "%s<hr>" % _("Open the next level") if cat_level != ant_cat_level: txt_more_cat = '<span style="color:red">%s</span>' % _("New") if cat_global != ant_cat_global: txt_more_global = '<span style="color:red">%s</span>' % _("New") cErrores = '<tr><td align=right> %s </td><td> %d (x%d"=%d")</td></tr>' % (_('Errors'), self.errores, self.penaltyError, self.errores*self.penaltyError) if self.errores else "" cAyudas = '<tr><td align=right> %s </td><td> %d (x%d"=%d")</td></tr>' % (_('Hints'), self.ayudas, self.penaltyHelp, self.ayudas*self.penaltyHelp) if self.ayudas else "" mens = ('<hr><center><big>'+_('You have finished this block of positions') + '<hr><table>' + '<tr><td align=right> %s </td><td> %0.2f"</td></tr>' % (_('Time used'), self.total_time_used) + cErrores + cAyudas + '<tr><td align=right> %s: </td><td> %0.2f" %s</td></tr>' % (_('Time assigned'), ta, txt_more_time) + '<tr><td align=right> %s: </td><td> %d</td></tr>' % (_('Total moves'), num_moves) + '<tr><td align=right> %s: </td><td> %0.2f"</td></tr>' % (_('Average time'), tm) + '<tr><td align=right> %s: </td><td> %s</td></tr>' % (_('Block qualification'), cat_block) + '<tr><td align=right> %s: </td><td> %s %s</td></tr>' % (_('Level qualification'), cat_level, txt_more_cat) + '<tr><td align=right> %s: </td><td> %s %s</td></tr>' % (_('Global qualification'), cat_global, txt_more_global) + '</table></center></big><hr>' + txt_more_line ) self.pon_rotulos(None) QTUtil2.mensaje(self.pantalla, mens, _("Result of training")) self.total_time_used = 0 else: if self.tol.go_fast == True or (self.tol.go_fast is None and self.tol.work_level > 0): self.next_line_run() return QTUtil2.mensajeTemporal(self.pantalla, _("This line training is completed."), 1.3) self.pon_rotulos(0) self.estado = kFinJuego self.desactivaTodas() 
liOpciones = [k_mainmenu, k_reiniciar, k_configurar, k_utilidades] if not islast_line: liOpciones.append(k_siguiente) self.pantalla.ponToolBar(liOpciones) def mueveHumano(self, desde, hasta, coronacion=None): if self.ini_time is None: self.ini_time = self.base_time end_time = time.time() jg = self.checkMueveHumano(desde, hasta, coronacion) if not jg: return False movimiento = jg.movimiento().lower() if movimiento == self.line.get_move(self.num_move).lower(): self.movimientosPiezas(jg.liMovs) self.partida.ultPosicion = jg.posicion self.masJugada(jg, True) self.error = "" self.total_time_used += (end_time - self.ini_time) self.siguienteJugada() return True self.errores += 1 self.sigueHumano() return False def masJugada(self, jg, siNuestra): if self.siTerminada(): jg.siJaqueMate = jg.siJaque jg.siAhogado = not jg.siJaque self.partida.append_jg(jg) resp = self.partida.si3repetidas() if resp: jg.siTablasRepeticion = True rotulo = "" for j in resp: rotulo += "%d," % (j / 2 + 1,) rotulo = rotulo.strip(",") self.rotuloTablasRepeticion = rotulo if self.partida.ultPosicion.movPeonCap >= 100: jg.siTablas50 = True if self.partida.ultPosicion.siFaltaMaterial(): jg.siTablasFaltaMaterial = True self.ponFlechaSC(jg.desde, jg.hasta) self.beepExtendido(siNuestra) self.pgnRefresh(self.partida.ultPosicion.siBlancas) self.refresh() self.ponPosicionDGT() def mueveRival(self, desde, hasta, coronacion): siBien, mens, jg = Jugada.dameJugada(self.partida.ultPosicion, desde, hasta, coronacion) self.partida.ultPosicion = jg.posicion self.masJugada(jg, False) self.movimientosPiezas(jg.liMovs, True) self.error = "" def ayuda(self): self.ayudas += 1 mov = self.line.get_move(self.num_move).lower() self.tablero.markPosition(mov[:2]) fen = self.partida.ultPosicion.fen() if fen not in self.dicFENayudas: self.dicFENayudas[fen] = 1 else: self.dicFENayudas[fen] += 1 if self.dicFENayudas[fen] > 2: self.ponFlechaSC(mov[:2], mov[2:4]) def finPartida(self): self.procesador.inicio() self.procesador.showTurnOnLigths(self.tol.name) def finalX(self): self.procesador.inicio() return False def actualPGN(self): resp = '[Event "%s"]\n' % _("Turn on the lights") resp += '[Site "%s"]\n' % self.line.label.replace("<br>", " ").strip() resp += '[FEN "%s"\n' % self.partida.iniPosicion.fen() resp += "\n" + self.partida.pgnBase() return resp
gpl-2.0
7,487,070,209,496,803,000
34.923529
187
0.55682
false
3.16261
false
false
false
skosukhin/spack
var/spack/repos/builtin/packages/r-mzid/package.py
1
2260
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RMzid(RPackage):
    """A parser for mzIdentML files implemented using the XML package.
    The parser tries to be general and able to handle all types of
    mzIdentML files with the drawback of having less 'pretty' output than
    a vendor specific parser. Please contact the maintainer with any
    problems and supply an mzIdentML file so the problems can be fixed
    quickly."""

    homepage = "https://www.bioconductor.org/packages/mzID/"
    url = "https://git.bioconductor.org/packages/mzID"

    version('1.14.0', git='https://git.bioconductor.org/packages/mzID', commit='1c53aa6523ae61d3ebb13381381fc119d6cc6115')

    depends_on('r-xml', type=('build', 'run'))
    depends_on('r-plyr', type=('build', 'run'))
    depends_on('r-doparallel', type=('build', 'run'))
    depends_on('r-foreach', type=('build', 'run'))
    depends_on('r-iterators', type=('build', 'run'))
    depends_on('r-protgenerics', type=('build', 'run'))
    depends_on('[email protected]:3.4.9', when='@1.14.0')
lgpl-2.1
-8,712,464,723,545,602,000
48.130435
122
0.670796
false
3.779264
false
false
false
sg-/project_generator
project_generator/builders/gccarm.py
1
1789
# Copyright 2014 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
import logging

from .builder import Builder
from os.path import dirname


class MakefileGccArmBuilder(Builder):

    # http://www.gnu.org/software/make/manual/html_node/Running.html
    ERRORLEVEL = {
        0: 'success (0 warnings, 0 errors)',
        1: 'targets not already up to date',
        2: 'errors'
    }

    SUCCESSVALUE = 0

    def build_project(self, project_name, project_files, env_settings):
        # cwd: relpath(join(project_path, ("gcc_arm" + project)))
        # > make all
        path = dirname(project_files[0])
        logging.debug("Building GCC ARM project: %s" % path)

        args = ['make', 'all']

        try:
            ret_code = None
            ret_code = subprocess.call(args, cwd=path)
        except:
            logging.error("Error whilst calling make. Is it in your PATH?")
        else:
            if ret_code != self.SUCCESSVALUE:
                # Seems like something went wrong.
                logging.error("Build failed with the status: %s" % self.ERRORLEVEL[ret_code])
            else:
                logging.info("Build succeeded with the status: %s" % self.ERRORLEVEL[ret_code])
apache-2.0
6,599,894,252,396,413,000
34.078431
75
0.628284
false
4.075171
false
false
false
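
MakefileGccArmBuilder above boils down to running make all inside the generated project directory and mapping the exit status onto a log message. The following is a hedged, stand-alone sketch of that same pattern; the project path and target name are placeholders invented for the illustration, not part of project_generator.

import logging
import subprocess

# Hypothetical project directory produced by the generator; adjust as needed.
PROJECT_DIR = "generated_projects/gcc_arm_blinky"


def run_make(path, target="all"):
    """Run `make <target>` in `path` and report the result, mirroring the
    ERRORLEVEL mapping used by MakefileGccArmBuilder above."""
    try:
        # subprocess.call returns make's exit code (0 on success)
        ret = subprocess.call(["make", target], cwd=path)
    except OSError:
        logging.error("Error whilst calling make. Is it in your PATH?")
        return None
    if ret == 0:
        logging.info("Build succeeded (0 warnings, 0 errors)")
    else:
        logging.error("Build failed, make returned %d", ret)
    return ret


if __name__ == "__main__":
    run_make(PROJECT_DIR)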
pu239ppy/authentic2
authentic2/migrations/0011_auto__add_authenticationevent.py
1
4418
# -*- coding: utf-8 -*- from south.db import db from south.v2 import SchemaMigration from authentic2.compat import user_model_label class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'AuthenticationEvent' db.create_table(u'authentic2_authenticationevent', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('when', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('who', self.gf('django.db.models.fields.CharField')(max_length=80)), ('how', self.gf('django.db.models.fields.CharField')(max_length=10)), ('nonce', self.gf('django.db.models.fields.CharField')(max_length=255)), )) db.send_create_signal(u'authentic2', ['AuthenticationEvent']) def backwards(self, orm): # Deleting model 'AuthenticationEvent' db.delete_table(u'authentic2_authenticationevent') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, user_model_label: { 'Meta': {'object_name': user_model_label.split('.')[-1]}, }, u'authentic2.authenticationevent': { 'Meta': {'object_name': 'AuthenticationEvent'}, 'how': ('django.db.models.fields.CharField', [], {'max_length': '10'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'nonce': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'who': ('django.db.models.fields.CharField', [], {'max_length': '80'}) }, u'authentic2.deleteduser': { 'Meta': {'object_name': 'DeletedUser'}, 'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_model_label}) }, u'authentic2.userexternalid': { 'Meta': {'object_name': 'UserExternalId'}, 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'external_id': ('django.db.models.fields.CharField', [], {'max_length': '256'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'source': ('django.db.models.fields.URLField', [], {'max_length': '256'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_model_label}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['authentic2']
agpl-3.0
-636,708,911,938,553,300
57.906667
187
0.559982
false
3.642209
false
false
false
bgris/ODL_bgris
odl/trafos/util/ft_utils.py
1
23184
# Copyright 2014, 2015 The ODL development group # # This file is part of ODL. # # ODL is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ODL is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ODL. If not, see <http://www.gnu.org/licenses/>. """Utility functions for Fourier transforms on regularly sampled data.""" # Imports for common Python 2/3 codebase from __future__ import print_function, division, absolute_import from future import standard_library standard_library.install_aliases() from builtins import range import numpy as np from odl.discr import ( uniform_grid, DiscreteLp, uniform_partition_fromgrid, uniform_discr_frompartition) from odl.set import RealNumbers from odl.util import ( fast_1d_tensor_mult, is_real_dtype, is_scalar_dtype, is_real_floating_dtype, is_complex_floating_dtype, complex_dtype, dtype_repr, conj_exponent, normalized_scalar_param_list, normalized_axes_tuple) __all__ = ('reciprocal_grid', 'realspace_grid', 'reciprocal_space', 'dft_preprocess_data', 'dft_postprocess_data') def reciprocal_grid(grid, shift=True, axes=None, halfcomplex=False): """Return the reciprocal of the given regular grid. This function calculates the reciprocal (Fourier/frequency space) grid for a given regular grid defined by the nodes:: x[k] = x[0] + k * s, where ``k = (k[0], ..., k[d-1])`` is a ``d``-dimensional index in the range ``0 <= k < N`` (component-wise). The multi-index ``N`` is the shape of the input grid. This grid's reciprocal is then given by the nodes:: xi[j] = xi[0] + j * sigma, with the reciprocal grid stride ``sigma = 2*pi / (s * N)``. The minimum frequency ``xi[0]`` can in principle be chosen freely, but usually it is chosen in a such a way that the reciprocal grid is centered around zero. For this, there are two possibilities: 1. Make the grid point-symmetric around 0. 2. Make the grid "almost" point-symmetric around zero by shifting it to the left by half a reciprocal stride. In the first case, the minimum frequency (per axis) is given as:: xi_1[0] = -pi/s + pi/(s*n) = -pi/s + sigma/2. For the second case, it is:: xi_1[0] = -pi / s. Note that the zero frequency is contained in case 1 for an odd number of points, while for an even size, the second option guarantees that 0 is contained. If a real-to-complex (half-complex) transform is to be computed, the reciprocal grid has the shape ``M[i] = floor(N[i]/2) + 1`` in the last transform axis ``i``. Parameters ---------- grid : uniform `RectGrid` Original sampling grid,. shift : bool or sequence of bools, optional If ``True``, the grid is shifted by half a stride in the negative direction. With a sequence, this option is applied separately on each axis. axes : int or sequence of ints, optional Dimensions in which to calculate the reciprocal. The sequence must have the same length as ``shift`` if the latter is given as a sequence. ``None`` means all axes in ``grid``. halfcomplex : bool, optional If ``True``, return the half of the grid with last coordinate less than zero. 
This is related to the fact that for real-valued functions, the other half is the mirrored complex conjugate of the given half and therefore needs not be stored. Returns ------- reciprocal_grid : uniform `RectGrid` The reciprocal grid. """ if axes is None: axes = list(range(grid.ndim)) else: try: axes = [int(axes)] except TypeError: axes = list(axes) # List indicating shift or not per "active" axis, same length as axes shift_list = normalized_scalar_param_list(shift, length=len(axes), param_conv=bool) # Full-length vectors stride = grid.stride shape = np.array(grid.shape) rmin = grid.min_pt.copy() rmax = grid.max_pt.copy() rshape = list(shape) # Shifted axes (full length to avoid ugly double indexing) shifted = np.zeros(grid.ndim, dtype=bool) shifted[axes] = shift_list rmin[shifted] = -np.pi / stride[shifted] # Length min->max increases by double the shift, so we # have to compensate by a full stride rmax[shifted] = (-rmin[shifted] - 2 * np.pi / (stride[shifted] * shape[shifted])) # Non-shifted axes not_shifted = np.zeros(grid.ndim, dtype=bool) not_shifted[axes] = np.logical_not(shift_list) rmin[not_shifted] = ((-1.0 + 1.0 / shape[not_shifted]) * np.pi / stride[not_shifted]) rmax[not_shifted] = -rmin[not_shifted] # Change last axis shape and max if halfcomplex if halfcomplex: rshape[axes[-1]] = shape[axes[-1]] // 2 + 1 # - Odd and shifted: - stride / 2 # - Even and not shifted: + stride / 2 # - Otherwise: 0 last_odd = shape[axes[-1]] % 2 == 1 last_shifted = shift_list[-1] half_rstride = np.pi / (shape[axes[-1]] * stride[axes[-1]]) if last_odd and last_shifted: rmax[axes[-1]] = -half_rstride elif not last_odd and not last_shifted: rmax[axes[-1]] = half_rstride else: rmax[axes[-1]] = 0 return uniform_grid(rmin, rmax, rshape) def realspace_grid(recip_grid, x0, axes=None, halfcomplex=False, halfcx_parity='even'): """Return the real space grid from the given reciprocal grid. Given a reciprocal grid:: xi[j] = xi[0] + j * sigma, with a multi-index ``j = (j[0], ..., j[d-1])`` in the range ``0 <= j < M``, this function calculates the original grid:: x[k] = x[0] + k * s by using a provided ``x[0]`` and calculating the stride ``s``. If the reciprocal grid is interpreted as coming from a usual complex-to-complex FFT, it is ``N == M``, and the stride is:: s = 2*pi / (sigma * N) For a reciprocal grid from a real-to-complex (half-complex) FFT, it is ``M[i] = floor(N[i]/2) + 1`` in the last transform axis ``i``. To resolve the ambiguity regarding the parity of ``N[i]``, the it must be specified if the output shape should be even or odd, resulting in:: odd : N[i] = 2 * M[i] - 1 even: N[i] = 2 * M[i] - 2 The output stride is calculated with this ``N`` as above in this case. Parameters ---------- recip_grid : uniform `RectGrid` Sampling grid in reciprocal space. x0 : `array-like` Desired minimum point of the real space grid. axes : int or sequence of ints, optional Dimensions in which to calculate the real space grid. The sequence must have the same length as ``shift`` if the latter is given as a sequence. ``None`` means "all axes". halfcomplex : bool, optional If ``True``, interpret the given grid as the reciprocal as used in a half-complex FFT (see above). Otherwise, the grid is regarded as being used in a complex-to-complex transform. halfcx_parity : {'even', 'odd'} Use this parity for the shape of the returned grid in the last axis of ``axes`` in the case ``halfcomplex=True`` Returns ------- irecip : uniform `RectGrid` The inverse reciprocal grid. 
""" if axes is None: axes = list(range(recip_grid.ndim)) else: try: axes = [int(axes)] except TypeError: axes = list(axes) rstride = recip_grid.stride rshape = recip_grid.shape # Calculate shape of the output grid by adjusting in axes[-1] irshape = list(rshape) if halfcomplex: if str(halfcx_parity).lower() == 'even': irshape[axes[-1]] = 2 * rshape[axes[-1]] - 2 elif str(halfcx_parity).lower() == 'odd': irshape[axes[-1]] = 2 * rshape[axes[-1]] - 1 else: raise ValueError("`halfcomplex` parity '{}' not understood" "".format(halfcx_parity)) irmin = np.asarray(x0) irshape = np.asarray(irshape) irstride = np.copy(rstride) irstride[axes] = 2 * np.pi / (irshape[axes] * rstride[axes]) irmax = irmin + (irshape - 1) * irstride return uniform_grid(irmin, irmax, irshape) def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None): """Pre-process the real-space data before DFT. This function multiplies the given data with the separable function:: p(x) = exp(+- 1j * dot(x - x[0], xi[0])) where ``x[0]`` and ``xi[0]`` are the minimum coodinates of the real-space and reciprocal grids, respectively. The sign of the exponent depends on the choice of ``sign``. In discretized form, this function becomes an array:: p[k] = exp(+- 1j * k * s * xi[0]) If the reciprocal grid is not shifted, i.e. symmetric around 0, it is ``xi[0] = pi/s * (-1 + 1/N)``, hence:: p[k] = exp(-+ 1j * pi * k * (1 - 1/N)) For a shifted grid, we have :math:``xi[0] = -pi/s``, thus the array is given by:: p[k] = (-1)**k Parameters ---------- arr : `array-like` Array to be pre-processed. If its data type is a real non-floating type, it is converted to 'float64'. shift : bool or or sequence of bools, optional If ``True``, the grid is shifted by half a stride in the negative direction. With a sequence, this option is applied separately on each axis. axes : int or sequence of ints, optional Dimensions in which to calculate the reciprocal. The sequence must have the same length as ``shift`` if the latter is given as a sequence. Default: all axes. sign : {'-', '+'}, optional Sign of the complex exponent. out : `numpy.ndarray`, optional Array in which the result is stored. If ``out is arr``, an in-place modification is performed. For real data type, this is only possible for ``shift=True`` since the factors are complex otherwise. Returns ------- out : `numpy.ndarray` Result of the pre-processing. If ``out`` was given, the returned object is a reference to it. Notes ----- If ``out`` is not specified, the data type of the returned array is the same as that of ``arr`` except when ``arr`` has real data type and ``shift`` is not ``True``. In this case, the return type is the complex counterpart of ``arr.dtype``. """ arr = np.asarray(arr) if not is_scalar_dtype(arr.dtype): raise ValueError('array has non-scalar data type {}' ''.format(dtype_repr(arr.dtype))) elif is_real_dtype(arr.dtype) and not is_real_floating_dtype(arr.dtype): arr = arr.astype('float64') if axes is None: axes = list(range(arr.ndim)) else: try: axes = [int(axes)] except TypeError: axes = list(axes) shape = arr.shape shift_list = normalized_scalar_param_list(shift, length=len(axes), param_conv=bool) # Make a copy of arr with correct data type if necessary, or copy values. 
if out is None: if is_real_dtype(arr.dtype) and not all(shift_list): out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True) else: out = arr.copy() else: out[:] = arr if is_real_dtype(out.dtype) and not shift: raise ValueError('cannot pre-process real input in-place without ' 'shift') if sign == '-': imag = -1j elif sign == '+': imag = 1j else: raise ValueError("`sign` '{}' not understood".format(sign)) def _onedim_arr(length, shift): if shift: # (-1)^indices factor = np.ones(length, dtype=out.dtype) factor[1::2] = -1 else: factor = np.arange(length, dtype=out.dtype) factor *= -imag * np.pi * (1 - 1.0 / length) np.exp(factor, out=factor) return factor.astype(out.dtype, copy=False) onedim_arrs = [] for axis, shift in zip(axes, shift_list): length = shape[axis] onedim_arrs.append(_onedim_arr(length, shift)) fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out) return out def _interp_kernel_ft(norm_freqs, interp): """Scaled FT of a one-dimensional interpolation kernel. For normalized frequencies ``-1/2 <= xi <= 1/2``, this function returns:: sinc(pi * xi)**k / sqrt(2 * pi) where ``k=1`` for 'nearest' and ``k=2`` for 'linear' interpolation. Parameters ---------- norm_freqs : `numpy.ndarray` Normalized frequencies between -1/2 and 1/2 interp : {'nearest', 'linear'} Type of interpolation kernel Returns ------- ker_ft : `numpy.ndarray` Values of the kernel FT at the given frequencies """ # Numpy's sinc(x) is equal to the 'math' sinc(pi * x) ker_ft = np.sinc(norm_freqs) interp_ = str(interp).lower() if interp_ == 'nearest': pass elif interp_ == 'linear': ker_ft **= 2 else: raise ValueError("`interp` '{}' not understood".format(interp)) ker_ft /= np.sqrt(2 * np.pi) return ker_ft def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes, interp, sign='-', op='multiply', out=None): """Post-process the Fourier-space data after DFT. This function multiplies the given data with the separable function:: q(xi) = exp(+- 1j * dot(x[0], xi)) * s * phi_hat(xi_bar) where ``x[0]`` and ``s`` are the minimum point and the stride of the real-space grid, respectively, and ``phi_hat(xi_bar)`` is the FT of the interpolation kernel. The sign of the exponent depends on the choice of ``sign``. Note that for ``op='divide'`` the multiplication with ``s * phi_hat(xi_bar)`` is replaced by a division with the same array. In discretized form on the reciprocal grid, the exponential part of this function becomes an array:: q[k] = exp(+- 1j * dot(x[0], xi[k])) and the arguments ``xi_bar`` to the interpolation kernel are the normalized frequencies:: for 'shift=True' : xi_bar[k] = -pi + pi * (2*k) / N for 'shift=False' : xi_bar[k] = -pi + pi * (2*k+1) / N See [Pre+2007]_, Section 13.9 "Computing Fourier Integrals Using the FFT" for a similar approach. Parameters ---------- arr : `array-like` Array to be pre-processed. An array with real data type is converted to its complex counterpart. real_grid : uniform `RectGrid` Real space grid in the transform. recip_grid : uniform `RectGrid` Reciprocal grid in the transform shift : bool or sequence of bools If ``True``, the grid is shifted by half a stride in the negative direction in the corresponding axes. The sequence must have the same length as ``axes``. axes : int or sequence of ints Dimensions along which to take the transform. The sequence must have the same length as ``shifts``. interp : string or sequence of strings Interpolation scheme used in the real-space. sign : {'-', '+'}, optional Sign of the complex exponent. 
op : {'multiply', 'divide'}, optional Operation to perform with the stride times the interpolation kernel FT out : `numpy.ndarray`, optional Array in which the result is stored. If ``out is arr``, an in-place modification is performed. Returns ------- out : `numpy.ndarray` Result of the post-processing. If ``out`` was given, the returned object is a reference to it. """ arr = np.asarray(arr) if is_real_floating_dtype(arr.dtype): arr = arr.astype(complex_dtype(arr.dtype)) elif not is_complex_floating_dtype(arr.dtype): raise ValueError('array data type {} is not a complex floating point ' 'data type'.format(dtype_repr(arr.dtype))) if out is None: out = arr.copy() elif out is not arr: out[:] = arr if axes is None: axes = list(range(arr.ndim)) else: try: axes = [int(axes)] except TypeError: axes = list(axes) shift_list = normalized_scalar_param_list(shift, length=len(axes), param_conv=bool) if sign == '-': imag = -1j elif sign == '+': imag = 1j else: raise ValueError("`sign` '{}' not understood".format(sign)) op, op_in = str(op).lower(), op if op not in ('multiply', 'divide'): raise ValueError("kernel `op` '{}' not understood".format(op_in)) # Make a list from interp if that's not the case already try: # Duck-typed string check interp + '' except TypeError: pass else: interp = [str(interp).lower()] * arr.ndim onedim_arrs = [] for ax, shift, intp in zip(axes, shift_list, interp): x = real_grid.min_pt[ax] xi = recip_grid.coord_vectors[ax] # First part: exponential array onedim_arr = np.exp(imag * x * xi) # Second part: interpolation kernel len_dft = recip_grid.shape[ax] len_orig = real_grid.shape[ax] halfcomplex = (len_dft < len_orig) odd = len_orig % 2 fmin = -0.5 if shift else -0.5 + 1.0 / (2 * len_orig) if halfcomplex: # maximum lies around 0, possibly half a cell left or right of it if shift and odd: fmax = - 1.0 / (2 * len_orig) elif not shift and not odd: fmax = 1.0 / (2 * len_orig) else: fmax = 0.0 else: # not halfcomplex # maximum lies close to 0.5, half or full cell left of it if shift: # -0.5 + (N-1)/N = 0.5 - 1/N fmax = 0.5 - 1.0 / len_orig else: # -0.5 + 1/(2*N) + (N-1)/N = 0.5 - 1/(2*N) fmax = 0.5 - 1.0 / (2 * len_orig) freqs = np.linspace(fmin, fmax, num=len_dft) stride = real_grid.stride[ax] if op == 'multiply': onedim_arr *= stride * _interp_kernel_ft(freqs, intp) else: onedim_arr /= stride * _interp_kernel_ft(freqs, intp) onedim_arrs.append(onedim_arr.astype(out.dtype, copy=False)) fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out) return out def reciprocal_space(space, axes=None, halfcomplex=False, shift=True, **kwargs): """Return the range of the Fourier transform on ``space``. Parameters ---------- space : `DiscreteLp` Real space whose reciprocal is calculated. It must be uniformly discretized. axes : sequence of ints, optional Dimensions along which the Fourier transform is taken. Default: all axes halfcomplex : bool, optional If ``True``, take only the negative frequency part along the last axis for. For ``False``, use the full frequency space. This option can only be used if ``space`` is a space of real-valued functions. shift : bool or sequence of bools, optional If ``True``, the reciprocal grid is shifted by half a stride in the negative direction. With a boolean sequence, this option is applied separately to each axis. If a sequence is provided, it must have the same length as ``axes`` if supplied. Note that this must be set to ``True`` in the halved axis in half-complex transforms. 
Default: ``True`` impl : string, optional Implementation back-end for the created space. Default: ``'numpy'`` exponent : float, optional Create a space with this exponent. By default, the conjugate exponent ``q = p / (p - 1)`` of the exponent of ``space`` is used, where ``q = inf`` for ``p = 1`` and vice versa. dtype : optional Complex data type of the created space. By default, the complex counterpart of ``space.dtype`` is used. Returns ------- rspace : `DiscreteLp` Reciprocal of the input ``space``. If ``halfcomplex=True``, the upper end of the domain (where the half space ends) is chosen to coincide with the grid node. """ if not isinstance(space, DiscreteLp): raise TypeError('`space` {!r} is not a `DiscreteLp` instance' ''.format(space)) if not space.is_uniform: raise ValueError('`space` is not uniformly discretized') if axes is None: axes = tuple(range(space.ndim)) axes = normalized_axes_tuple(axes, space.ndim) if halfcomplex and space.field != RealNumbers(): raise ValueError('`halfcomplex` option can only be used with real ' 'spaces') exponent = kwargs.pop('exponent', None) if exponent is None: exponent = conj_exponent(space.exponent) dtype = kwargs.pop('dtype', None) if dtype is None: dtype = complex_dtype(space.dtype) else: if not is_complex_floating_dtype(dtype): raise ValueError('{} is not a complex data type' ''.format(dtype_repr(dtype))) impl = kwargs.pop('impl', 'numpy') # Calculate range recip_grid = reciprocal_grid(space.grid, shift=shift, halfcomplex=halfcomplex, axes=axes) # Make a partition with nodes on the boundary in the last transform axis # if `halfcomplex == True`, otherwise a standard partition. if halfcomplex: max_pt = {axes[-1]: recip_grid.max_pt[axes[-1]]} part = uniform_partition_fromgrid(recip_grid, max_pt=max_pt) else: part = uniform_partition_fromgrid(recip_grid) # Use convention of adding a hat to represent fourier transform of variable axis_labels = list(space.axis_labels) for i in axes: # Avoid double math label = axis_labels[i].replace('$', '') axis_labels[i] = '$\^{{{}}}$'.format(label) recip_spc = uniform_discr_frompartition(part, exponent=exponent, dtype=dtype, impl=impl, axis_labels=axis_labels) return recip_spc if __name__ == '__main__': from doctest import testmod, NORMALIZE_WHITESPACE testmod(optionflags=NORMALIZE_WHITESPACE)
gpl-3.0
-1,737,695,355,543,527,700
34.180577
79
0.598516
false
3.773438
false
false
false
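
The reciprocal_grid helper above is built around the relation sigma = 2*pi / (s * N) between the real-space stride s and the reciprocal stride sigma, with the shifted grid starting at -pi/s. Below is a small NumPy-only check of that bookkeeping using made-up grid parameters rather than ODL objects; it only verifies the arithmetic stated in the docstring.

import numpy as np

# Hypothetical 1-D sampling: N points with stride s (starting point is irrelevant here).
N, s = 8, 0.5

sigma = 2 * np.pi / (s * N)          # reciprocal stride from the docstring
xi0_shifted = -np.pi / s             # minimum frequency with shift=True
xi = xi0_shifted + sigma * np.arange(N)

# The shifted reciprocal grid covers [-pi/s, pi/s - sigma] and contains 0
# for an even number of points, as described above.
assert np.isclose(xi[0], -np.pi / s)
assert np.isclose(xi[-1], np.pi / s - sigma)
assert np.any(np.isclose(xi, 0.0))
print(xi)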
allenai/document-qa
docqa/text_preprocessor.py
1
7061
from collections import Counter from typing import List, Optional, Tuple import numpy as np from tqdm import tqdm from docqa.utils import flatten_iterable from docqa.data_processing.document_splitter import ExtractedParagraphWithAnswers, MergeParagraphs, ExtractedParagraph from docqa.data_processing.multi_paragraph_qa import ParagraphWithAnswers from docqa.configurable import Configurable from docqa.squad.squad_data import SquadCorpus from docqa.triviaqa.build_span_corpus import TriviaQaWebDataset class TextPreprocessor(Configurable): """ Preprocess text input, must be deterministic. Only used thus far adding special indicator tokens """ def encode_extracted_paragraph(self, question: List[str], paragraph: ExtractedParagraphWithAnswers): text, answers, _ = self.encode_paragraph(question, paragraph.text, paragraph.start == 0, paragraph.answer_spans) return ParagraphWithAnswers(text, answers) def encode_text(self, question: List[str], paragraph: ExtractedParagraph): text, _, _ = self.encode_paragraph(question, paragraph.text, paragraph.start == 0, np.zeros((0, 2), dtype=np.int32)) return text def encode_paragraph(self, question: List[str], paragraphs: List[List[str]], is_first, answer_spans: np.ndarray, token_spans=None) -> Tuple[List[str], np.ndarray, Optional[np.ndarray]]: """ Returns updated (and flattened) text, answer_spans, and token_spans """ raise NotImplementedError() def special_tokens(self) -> List[str]: return [] class WithIndicators(TextPreprocessor): """ Adds a document or group start token before the text, and a paragraph token between each between in each paragraph. """ PARAGRAPH_TOKEN = "%%PARAGRAPH%%" DOCUMENT_START_TOKEN = "%%DOCUMENT%%" PARAGRAPH_GROUP = "%%PARAGRAPH_GROUP%%" def __init__(self, remove_cross_answer: bool=True, para_tokens: bool=True, doc_start_token: bool=True): self.remove_cross_answer = remove_cross_answer self.doc_start_token = doc_start_token self.para_tokens = para_tokens def special_tokens(self) -> List[str]: tokens = [self.PARAGRAPH_GROUP] if self.doc_start_token: tokens.append(self.DOCUMENT_START_TOKEN) if self.para_tokens: tokens.append(self.PARAGRAPH_TOKEN) return tokens def encode_paragraph(self, question: List[str], paragraphs: List[List[str]], is_first, answer_spans: np.ndarray, inver=None): out = [] offset = 0 if self.doc_start_token and is_first: out.append(self.DOCUMENT_START_TOKEN) else: out.append(self.PARAGRAPH_GROUP) if inver is not None: inv_out = [np.zeros((1, 2), dtype=np.int32)] else: inv_out = None offset += 1 spans = answer_spans + offset out += paragraphs[0] offset += len(paragraphs[0]) on_ix = len(paragraphs[0]) if inv_out is not None: inv_out.append(inver[:len(paragraphs[0])]) for sent in paragraphs[1:]: if self.remove_cross_answer: remove = np.logical_and(spans[:, 0] < offset, spans[:, 1] >= offset) spans = spans[np.logical_not(remove)] if self.para_tokens: spans[spans[:, 0] >= offset, 0] += 1 spans[spans[:, 1] >= offset, 1] += 1 out.append(self.PARAGRAPH_TOKEN) if inv_out is not None: if len(inv_out) == 0 or len(inv_out[-1]) == 0: inv_out.append(np.zeros((1, 2), dtype=np.int32)) else: inv_out.append(np.full((1, 2), inv_out[-1][-1][1], dtype=np.int32)) offset += 1 out += sent offset += len(sent) if inv_out is not None: inv_out.append(inver[on_ix:on_ix+len(sent)]) on_ix += len(sent) return out, spans, None if inv_out is None else np.concatenate(inv_out) def __setstate__(self, state): if "state" in state: state["state"]["doc_start_token"] = True state["state"]["para_tokens"] = True else: if "doc_start_token" not in state: 
state["doc_start_token"] = True if "para_tokens" not in state: state["para_tokens"] = True super().__setstate__(state) def check_preprocess(): data = TriviaQaWebDataset() merge = MergeParagraphs(400) questions = data.get_dev() pre = WithIndicators(False) remove_cross = WithIndicators(True) rng = np.random.RandomState(0) rng.shuffle(questions) for q in tqdm(questions[:1000]): doc = rng.choice(q.all_docs, 1)[0] text = data.evidence.get_document(doc.doc_id, n_tokens=800) paras = merge.split_annotated(text, doc.answer_spans) para = paras[np.random.randint(0, len(paras))] built = pre.encode_extracted_paragraph(q.question, para) expected_text = flatten_iterable(para.text) if expected_text != [x for x in built.text if x not in pre.special_tokens()]: raise ValueError() expected = [expected_text[s:e+1] for s, e in para.answer_spans] expected = Counter([tuple(x) for x in expected]) actual = [tuple(built.text[s:e+1]) for s,e in built.answer_spans] actual_cleaned = Counter(tuple(z for z in x if z not in pre.special_tokens()) for x in actual) if actual_cleaned != expected: raise ValueError() r_built = remove_cross.encode_extracted_paragraph(q.question, para) rc = Counter(tuple(r_built.text[s:e + 1]) for s, e in r_built.answer_spans) removed = Counter() for w in actual: if all(x not in pre.special_tokens() for x in w): removed[w] += 1 if rc != removed: raise ValueError() def check_preprocess_squad(): data = SquadCorpus().get_train() remove_cross = WithIndicators(True) for doc in tqdm(data): for para in doc.paragraphs: q = para.questions[np.random.randint(0, len(para.questions))] text, ans, inv = remove_cross.encode_paragraph(q.words, para.text, para.paragraph_num == 0, q.answer.answer_spans, para.spans) if len(inv) != len(text): raise ValueError() for i in range(len(inv)-1): if inv[i, 0] > inv[i+1, 0]: raise ValueError() for (s1, e1), (s2, e2) in zip(ans, q.answer.answer_spans): if tuple(inv[s1]) != tuple(para.spans[s2]): raise ValueError() if tuple(inv[e1]) != tuple(para.spans[e2]): raise ValueError() if __name__ == "__main__": check_preprocess_squad()
apache-2.0
8,086,274,971,685,163,000
37.172973
129
0.584195
false
3.712408
false
false
false
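
WithIndicators above works by flattening the paragraphs into one token sequence, prepending a group or document marker, inserting a paragraph marker between paragraphs, and shifting every answer span that falls past an insertion point. Here is a stripped-down, dependency-free sketch of that span bookkeeping; the marker strings are taken from the record, while the helper name and toy inputs are invented for illustration and omit details such as cross-boundary answer removal.

import numpy as np

PARAGRAPH_TOKEN = "%%PARAGRAPH%%"
PARAGRAPH_GROUP = "%%PARAGRAPH_GROUP%%"


def flatten_with_markers(paragraphs, answer_spans):
    """Concatenate paragraphs with marker tokens, shifting spans as we go."""
    out = [PARAGRAPH_GROUP]
    spans = np.asarray(answer_spans) + 1      # account for the leading marker
    offset = 1 + len(paragraphs[0])
    out += paragraphs[0]
    for sent in paragraphs[1:]:
        # every span at or past the insertion point moves one slot right
        spans[spans[:, 0] >= offset, 0] += 1
        spans[spans[:, 1] >= offset, 1] += 1
        out.append(PARAGRAPH_TOKEN)
        offset += 1 + len(sent)
        out += sent
    return out, spans


tokens, spans = flatten_with_markers(
    [["the", "cat"], ["sat", "here"]],
    np.array([[2, 3]]))               # "sat here" in the original numbering
print(tokens)
print(spans)                          # -> [[4, 5]] after the two markers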
hylje/tekis
tekis/flatpages/migrations/0003_auto_20160221_0250.py
1
1533
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-21 00:50
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('flatpages', '0002_auto_20160221_0006'),
    ]

    operations = [
        migrations.CreateModel(
            name='Sponsor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('url', models.URLField()),
                ('logo', models.ImageField(upload_to='sponsors/')),
                ('titletext', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
            ],
            options={
                'ordering': ('name',),
            },
        ),
        migrations.AlterField(
            model_name='flatpage',
            name='menu_index',
            field=models.IntegerField(default=0, help_text='Menus are sorted ascending by this value. The first menu item in a category is the category link itself. <strong>Note:</strong> The first menu item in the top level category should be the front page.'),
        ),
        migrations.AlterField(
            model_name='flatpage',
            name='published',
            field=models.BooleanField(default=False, help_text='Published pages show up on the menu. Unpublished pages can be reached over direct link.'),
        ),
    ]
bsd-3-clause
-2,971,475,573,143,553,500
38.307692
262
0.580561
false
4.430636
false
false
false
kowey/attelo
attelo/harness/parse.py
1
5021
'''
Control over attelo parsers as might be needed for a test harness
'''

from __future__ import print_function
from os import path as fp
import os
import sys

from joblib import (delayed)

from ..io import (write_predictions_output)
from attelo.decoding.util import (prediction_to_triples)
from attelo.fold import (select_training, select_testing)
from attelo.harness.util import (makedirs)


def _eval_banner(econf, hconf, fold):
    """
    Which combo of eval parameters are we running now?
    """
    msg = ("Reassembling "
           "fold {fnum} [{dset}]\t"
           "parser: {parser}")
    return msg.format(fnum=fold,
                      dset=hconf.dataset,
                      parser=econf.parser.key)


def _tmp_output_filename(path, suffix):
    """
    Temporary filename for output file segment
    """
    return fp.join(fp.dirname(path),
                   '_' + fp.basename(path) + '.' + suffix)


def concatenate_outputs(mpack, output_path):
    """
    (For use after :py:func:`delayed_main_for_harness`)

    Concatenate temporary per-group outputs into a single
    combined output
    """
    tmpfiles = [_tmp_output_filename(output_path, d)
                for d in sorted(mpack.keys())]
    with open(output_path, 'wb') as file_out:
        for tfile in tmpfiles:
            with open(tfile, 'rb') as file_in:
                file_out.write(file_in.read())
    for tmpfile in tmpfiles:
        os.remove(tmpfile)


def _parse_group(dpack, parser, output_path):
    '''
    parse a single group and write its output

    score the predictions if we have

    :rtype Count or None
    '''
    dpack = parser.transform(dpack)
    # we trust the parser to select what it thinks is its best prediction
    prediction = prediction_to_triples(dpack)
    write_predictions_output(dpack, prediction, output_path)


def jobs(mpack, parser, output_path):
    """
    Return a list of delayed decoding jobs for the various
    documents in this group
    """
    res = []
    tmpfiles = [_tmp_output_filename(output_path, d)
                for d in mpack.keys()]
    for tmpfile in tmpfiles:
        if fp.exists(tmpfile):
            os.remove(tmpfile)
    for onedoc, dpack in mpack.items():
        tmp_output_path = _tmp_output_filename(output_path, onedoc)
        res.append(delayed(_parse_group)(dpack, parser, tmp_output_path))
    return res


def learn(hconf, econf, dconf, fold):
    """
    Run the learners for the given configuration
    """
    if fold is None:
        subpacks = dconf.pack
        parent_dir = hconf.combined_dir_path()
    else:
        subpacks = select_training(dconf.pack, dconf.folds, fold)
        parent_dir = hconf.fold_dir_path(fold)

    if not os.path.exists(parent_dir):
        os.makedirs(parent_dir)

    cache = hconf.model_paths(econf.learner, fold)
    print('learning ', econf.key, '...', file=sys.stderr)
    dpacks = subpacks.values()
    targets = [d.target for d in dpacks]
    econf.parser.payload.fit(dpacks, targets, cache=cache)


def delayed_decode(hconf, dconf, econf, fold):
    """
    Return possible futures for decoding groups within
    this model/decoder combo for the given fold
    """
    if fold is None and hconf.test_evaluation is None:
        return []
    if _say_if_decoded(hconf, econf, fold, stage='decoding'):
        return []

    output_path = hconf.decode_output_path(econf, fold)
    makedirs(fp.dirname(output_path))
    if fold is None:
        subpack = dconf.pack
    else:
        subpack = select_testing(dconf.pack, dconf.folds, fold)
    parser = econf.parser.payload
    return jobs(subpack, parser, output_path)


def decode_on_the_fly(hconf, dconf, fold):
    """
    Learn each parser, returning decoder jobs as each is learned.

    Return a decoder job generator that should hopefully allow us
    to effectively learn and decode in parallel.
    """
    for econf in hconf.evaluations:
        learn(hconf, econf, dconf, fold)
        for job in delayed_decode(hconf, dconf, econf, fold):
            yield job


def _say_if_decoded(hconf, econf, fold, stage='decoding'):
    """
    If we have already done the decoding for a given config
    and fold, say so and return True
    """
    if fp.exists(hconf.decode_output_path(econf, fold)):
        print(("skipping {stage} {parser} "
               "(already done)").format(stage=stage,
                                        parser=econf.parser.key),
              file=sys.stderr)
        return True
    else:
        return False


def post_decode(hconf, dconf, econf, fold):
    """
    Join together output files from this model/decoder combo
    """
    if _say_if_decoded(hconf, econf, fold, stage='reassembly'):
        return

    print(_eval_banner(econf, hconf, fold), file=sys.stderr)
    if fold is None:
        subpack = dconf.pack
    else:
        subpack = select_testing(dconf.pack, dconf.folds, fold)
    concatenate_outputs(subpack,
                        hconf.decode_output_path(econf, fold))
gpl-3.0
2,330,291,924,736,310,300
28.710059
73
0.626967
false
3.651636
false
false
false
DailyActie/Surrogate-Model
surrogate/selection/selRoulette.py
1
2509
# MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <[email protected]>
# License: MIT License
# Create: 2016-12-02

import random
from operator import attrgetter


def selRoulette(individuals, k=1):
    """Select *k* individuals from the input *individuals* using *k*
    spins of a roulette. The selection is made by looking only at the first
    objective of each individual. The list returned contains references to
    the input *individuals*.

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :returns: A list of selected individuals.

    This function uses the :func:`~random.random` function from the python
    base :mod:`random` module.

    .. warning::
       The roulette selection by definition cannot be used for minimization
       or when the fitness can be smaller or equal to 0.
    """
    s_inds = sorted(individuals, key=attrgetter("fitness"), reverse=True)
    # TODO 20161204 individual property fitness.values[]
    # sum_fits = sum(ind.fitness.values[0] for ind in individuals)
    sum_fits = sum(ind.fitness for ind in individuals)

    chosen = []
    for i in xrange(k):
        u = random.random() * sum_fits
        sum_ = 0
        for ind in s_inds:
            # sum_ += ind.fitness.values[0]
            sum_ += ind.fitness
            if sum_ > u:
                chosen.append(ind)
                break

    return chosen
mit
1,467,734,263,457,390,600
38.203125
80
0.707852
false
4.099673
false
false
false
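
The selRoulette routine above implements fitness-proportionate (roulette-wheel) selection: the wheel is spun k times, and each spin walks the cumulative fitness until it passes a uniform draw. Below is a minimal, self-contained sketch of the same idea, assuming a toy Individual type with a single scalar fitness; the names are illustrative only, and range replaces the Python 2 xrange so the sketch runs under Python 3.

import random
from operator import attrgetter
from collections import namedtuple

# Hypothetical minimal individual type; the real project attaches richer
# fitness objects, but this sketch only needs one scalar fitness value.
Individual = namedtuple("Individual", ["name", "fitness"])


def roulette_pick(individuals, k=1, rng=random):
    """Fitness-proportionate selection mirroring the selRoulette logic above."""
    s_inds = sorted(individuals, key=attrgetter("fitness"), reverse=True)
    sum_fits = sum(ind.fitness for ind in individuals)
    chosen = []
    for _ in range(k):
        u = rng.random() * sum_fits          # spin the wheel
        acc = 0.0
        for ind in s_inds:
            acc += ind.fitness               # walk the wheel sectors
            if acc > u:
                chosen.append(ind)
                break
    return chosen


if __name__ == "__main__":
    pop = [Individual("a", 1.0), Individual("b", 3.0), Individual("c", 6.0)]
    # "c" should be picked roughly 60% of the time over many spins.
    print(roulette_pick(pop, k=5))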
aroth-arsoft/arsoft-web-crashupload
app/crashdump/utils.py
1
24246
#!/usr/bin/python # -*- coding: utf-8 -*- # kate: space-indent on; indent-width 4; mixedindent off; indent-mode python; crashdump_use_jinja2 = False def _(msg): return msg def tag_a(name, title=None, href=None, alt=None): from xml.etree.ElementTree import Element, tostring a = Element('a') a.text = name if href: a.set('href', href) if title: a.set('title', title) if alt: a.set('alt', alt) return tostring(a, encoding="utf8", method='html').decode() def _hex_format(number, prefix='0x', width=None, bits=None): if isinstance(number, str): try: number = int(number) except ValueError: number = None if number is None: return '(none)' if bits is not None: if bits == 32: number = number & 0xffffffff if width is None: width = 8 elif bits == 64: number = number & 0xffffffffffffffff if width is None: width = 16 if width is None: if number > 2**48: width = 16 elif number > 2**40: width = 12 elif number > 2**32: width = 10 elif number > 2**24: width = 8 elif number > 2**16: width = 6 elif number > 2**8: width = 4 else: width = 2 fmt = '%%0%ix' % width return prefix + fmt % number def hex_format(number, prefix='0x', width=None, bits=None): if isinstance(number, list): nums = [] for n in number: nums.append(_hex_format(n, prefix, width, bits)) return ','.join(nums) else: return _hex_format(number, prefix, width, bits) def hex_format_bits(number, bits): return hex_format(number, bits=bits) def addr_format(number, prefix='0x', bits=64): if number == 0: return 'NULL' elif number < 256: return hex_format(number, 'NULL+' + prefix, bits=bits) else: return hex_format(number, prefix, bits=bits) def addr_format_64(number, prefix='0x'): if number == 0: return 'NULL' elif number < 256: return hex_format(number, 'NULL+' + prefix, bits=64) else: return hex_format(number, prefix, bits=64) def addr_format_32(number, prefix='0x'): if number == 0: return 'NULL' elif number < 256: return hex_format(number, 'NULL+' + prefix, bits=32) else: return hex_format(number, prefix, bits=32) def addr_format_bits(number, bits=64): return addr_format(number, bits=bits) def exception_code(platform_type, code, name): if platform_type is None: return 'Platform unknown' elif platform_type == 'Linux': return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Unix_signal') elif platform_type == 'Windows NT': return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Windows_NT') elif platform_type == 'Windows': return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Microsoft_Windows') else: return tag_a(str(name) + '(' + hex_format(code) + ')', href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type)) def format_bool_yesno(val): if isinstance(val, str) or isinstance(val, unicode): try: val = bool(val) except ValueError: val = None if val is None: return '(none)' elif val == True: return _('yes') elif val == False: return _('no') else: return _('neither') def format_source_line(source, line, line_offset=None, source_url=None): if source is None: return _('unknown') else: title = str(source) + ':' + str(line) if line_offset is not None: title += '+' + hex_format(line_offset) if source_url is not None: href = source_url else: href='file:///' + str(source) return tag_a(title, href=href) def format_function_plus_offset(function, funcoff=None): if function is None: return _('unknown') else: if funcoff: return str(function) + '+' + hex_format(funcoff) else: return str(function) def str_or_unknown(str): if str is None: return 
_('unknown') else: return str def format_cpu_type(cputype): cputype = cputype.lower() if cputype == 'amd64': href='http://en.wikipedia.org/wiki/X86-64' title = 'x86-64 (also known as x64, x86_64 and AMD64)' elif cputype == 'x86': href='http://en.wikipedia.org/wiki/X86' title = 'x86 (also known as i386)' elif cputype == 'mips': href='http://en.wikipedia.org/wiki/MIPS_instruction_set' title = 'MIPS instruction set' elif cputype == 'alpha': href='http://en.wikipedia.org/wiki/DEC_Alpha' title = 'Alpha, originally known as Alpha AXP' elif cputype == 'alpha64': href='http://en.wikipedia.org/wiki/DEC_Alpha' title = 'Alpha64, originally known as Alpha AXP' elif cputype == 'powerpc': href='http://en.wikipedia.org/wiki/PowerPC' title = 'PowerPC' elif cputype == 'powerpc64': href='http://en.wikipedia.org/wiki/Ppc64' title = 'PowerPC64 or ppc64' elif cputype == 'arm': href='http://en.wikipedia.org/wiki/ARM_architecture' title = 'ARM' elif cputype == 'arm64': href='http://en.wikipedia.org/wiki/ARM_architecture#64-bit' title = 'ARM 64-bit' elif cputype == 'sparc': href='http://en.wikipedia.org/wiki/SPARC' title = 'SPARC ("scalable processor architecture")' elif cputype == 'ia64': href='http://en.wikipedia.org/wiki/Itanium' title = 'Intel Itanium architecture (IA-64)' elif cputype == 'msil': href='http://en.wikipedia.org/wiki/Common_Intermediate_Language' title = 'Microsoft Intermediate Language (MSIL)' elif cputype == 'x64 wow': href='http://en.wikipedia.org/wiki/WoW64' title = 'Microsoft WoW64' else: href = 'http://en.wikipedia.org/wiki/Central_processing_unit' title = 'Unknown:%s' % cputype return tag_a(title, title=cputype, href=href) def format_cpu_vendor(vendor): if vendor == 'AuthenticAMD': title = 'AMD' href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices' elif vendor == 'GenuineIntel': title = 'Intel' href = 'http://en.wikipedia.org/wiki/Intel' elif vendor == 'Microsoft Hv': title = 'Microsoft Hyper-V' href = 'http://en.wikipedia.org/wiki/Hyper-V' elif vendor == 'VMwareVMware': title = 'VMware' href = 'http://en.wikipedia.org/wiki/VMware' elif vendor == 'KVMKVMKVMKVM': title = 'KVM' href = 'http://en.wikipedia.org/wiki/Kernel-based_Virtual_Machine' elif vendor == 'XenVMMXenVMM': title = 'Xen' href = 'http://en.wikipedia.org/wiki/Xen' else: title = vendor href = 'http://en.wikipedia.org/wiki/List_of_x86_manufacturers' return tag_a(title, title=vendor, href=href) def format_cpu_name(vendor, name): # http://en.wikipedia.org/wiki/CPUID # http://www.sandpile.org/x86/cpuid.htm if vendor == 'AuthenticAMD': if name is None: title = 'Unknown AMD CPU' href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices' elif name.startswith('AMD Ryzen'): href = 'https://en.wikipedia.org/wiki/Ryzen' title = 'AMD Ryzen' elif name.startswith('AMD FX'): href = 'http://en.wikipedia.org/wiki/List_of_AMD_FX_microprocessors' title = 'AMD FX-series' elif name.startswith('AMD Phenom'): href = 'https://en.wikipedia.org/wiki/List_of_AMD_Phenom_microprocessors' title = 'AMD Phenom family' elif name.startswith('AMD Opteron'): href = 'https://en.wikipedia.org/wiki/List_of_AMD_Opteron_microprocessors' title = 'AMD Opteron family' elif name.startswith('AMD Sempron'): href = 'https://en.wikipedia.org/wiki/List_of_AMD_Sempron_microprocessors' title = 'AMD Sempron family' elif name.startswith('AMD Turion'): href = 'https://en.wikipedia.org/wiki/List_of_AMD_Turion_microprocessors' title = 'AMD Turion family' elif name.startswith('AMD A'): href = 
'https://en.wikipedia.org/wiki/List_of_AMD_accelerated_processing_unit_microprocessors' title = 'AMD APU series' else: title = 'Unknown AMD CPU' href = 'http://en.wikipedia.org/wiki/Advanced_Micro_Devices' title = title + ' (%s)' % name elif vendor == 'GenuineIntel': if name is None: title = 'Unknown Intel CPU' href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors' elif name.startswith('Intel(R) Core(TM) i3'): title = 'Intel Core i3 series' href = 'http://en.wikipedia.org/wiki/Intel_Core' elif name.startswith('Intel(R) Core(TM) i5'): title = 'Intel Core i5 series' href = 'http://en.wikipedia.org/wiki/Intel_Core' elif name.startswith('Intel(R) Core(TM) i7'): title = 'Intel Core i7 series' href = 'http://en.wikipedia.org/wiki/Intel_Core' elif name.startswith('Intel(R) Core(TM) i9'): title = 'Intel Core i9 series' href = 'http://en.wikipedia.org/wiki/Intel_Core' elif name.startswith('Intel(R) Core(TM)'): title = 'Unknown Intel Core series' href = 'http://en.wikipedia.org/wiki/Intel_Core' elif name.startswith('Intel(R) Xeon(R)') or name.startswith('Intel(R) Xeon(TM)'): title = 'Intel Xeon series' href = 'http://en.wikipedia.org/wiki/Xeon' else: title = 'Unknown Intel CPU' href = 'https://en.wikipedia.org/wiki/List_of_Intel_microprocessors' title = title + ' (%s)' % name else: title = name href = 'http://en.wikipedia.org/wiki/List_of_x86_manufacturers' return tag_a(name, title=title, href=href) def format_distribution_id(distro_id): if distro_id == 'Debian': name = 'Debian' href = 'http://www.debian.org' elif distro_id == 'Ubuntu': name = 'Ubuntu' href = 'http://www.ubuntu.com' else: name = distro_id href = 'http://distrowatch.com/' + distro_id return tag_a(name, title=distro_id, href=href) def format_distribution_codename(distro_id, distro_codename): if distro_id == 'Debian': name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize()) href = 'http://www.debian.org/%s%s' % (distro_id.capitalize(), distro_codename.capitalize()) elif distro_id == 'Ubuntu': name = '%s %s' % (distro_id.capitalize(), distro_codename.capitalize()) href = 'http://ubuntuguide.org/wiki/%s_%s' % (distro_id.capitalize(), distro_codename.capitalize()) else: name = distro_id href = 'http://distrowatch.com/' + distro_id return tag_a(name, title=distro_id, href=href) def format_seconds(s): if s is None: return 'None' elif s >= 3600: hr = int(float(s) / 3600.0) from math import fmod m = fmod(float(s), 3600.0) / 60.0 return '%ihr %0.1fmin' % (hr, m) elif s >= 60: m = float(s) / 60.0 return '%0.1fmin' % m elif s >= 1: return '%0.1fs' % s else: return '%0.1fms' % ( s * 1000.0 ) def format_milliseconds(ms): if ms is None: return 'None' elif ms > 1000: s = float(ms) / 1000.0 return format_seconds(s) else: return '%ims' % ms def format_trust_level(tl): if tl == 0 or tl is None: return 'Unknown' elif tl == 1: return 'Stack scan' elif tl == 2: return 'CFI scan' elif tl == 3: return 'FP' elif tl == 4: return 'CFI' elif tl == 5: return 'External' elif tl == 6: return 'IP' else: return 'unknown(%i)' % tl _suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] def format_size(nbytes): if isinstance(nbytes, str): try: nbytes = int(nbytes) except ValueError: nbytes = None if nbytes == 0: return '0 B' elif nbytes is None: return 'None' i = 0 while nbytes >= 1024 and i < len(_suffixes)-1: nbytes /= 1024. 
i += 1 f = ('%.2f' % nbytes).rstrip('0').rstrip('.') return '%s&nbsp;%s' % (f, _suffixes[i]) def format_memory_usagetype(usage): if usage == 0 or usage is None: return 'Unknown' elif usage == 1: return 'Stack' elif usage == 2: return 'TEB' elif usage == 3: return 'PEB' elif usage == 4: return 'Process Parameters' elif usage == 5: return 'Environment' elif usage == 6: return 'IP' elif usage == 7: return 'Process Heap Handles' elif usage == 8: return 'Process Heap' elif usage == 9: return 'TLS' elif usage == 10: return 'Thread info block' else: return 'unknown(%i)' % usage def format_gl_extension_name(ext): khronos_extension_base_url = 'https://www.khronos.org/registry/OpenGL/extensions' unknown_extension_url = 'https://www.khronos.org/opengl/wiki/OpenGL_Extension' title = ext name = ext href = unknown_extension_url vendor = None ext_name = None if ext.startswith('GL_'): vendor_end = ext.index('_', 3) if vendor_end > 0: vendor = ext[3:vendor_end] ext_name = ext[3:] elif ext.startswith('GLX_') or ext.startswith('WGL_'): vendor_end = ext.index('_', 4) if vendor_end > 0: vendor = ext[4:vendor_end] ext_name = ext if vendor and ext_name: href = khronos_extension_base_url + '/%s/%s.txt' % (vendor, ext_name) return tag_a(name, title=title, href=href) def format_version_number(num): if isinstance(num, str) or isinstance(num, unicode): try: num = int(num) except ValueError: num = None if num is None: return 'None' m, n, o, p = (num >> 48) & 0xffff, (num >> 32) & 0xffff, (num >> 16) & 0xffff, (num >> 0) & 0xffff return '%i.%i.%i.%i' % (m, n, o, p) def format_platform_type(platform_type): if platform_type is None: return _('Platform unknown') elif platform_type == 'Linux': return tag_a('Linux', href='https://en.wikipedia.org/wiki/Linux') elif platform_type == 'Windows NT': return tag_a('Windows NT',href='https://en.wikipedia.org/wiki/Windows_NT') elif platform_type == 'Windows': return tag_a('Windows', href='https://en.wikipedia.org/wiki/Microsoft_Windows') else: return tag_a(platform_type, href='https://en.wikipedia.org/wiki/Special:Search/' + str(platform_type)) def _get_version_from_string(number_str): elems = number_str.split('.') major = 0 minor = 0 patch = 0 build = 0 if len(elems) >= 1: major = int(elems[0]) if len(elems) >= 2: minor = int(elems[1]) if len(elems) >= 3: patch = int(elems[2]) if len(elems) >= 4: build = int(elems[3]) return major, minor, patch, build def _get_version_from_numbers(os_version_number, os_build_number): print('_get_version_from_numbers %s, %s' % (os_version_number, os_build_number)) if isinstance(os_version_number, int): major = os_version_number >> 48 & 0xffff minor = os_version_number >> 32 & 0xffff patch = os_version_number >> 16 & 0xffff build = os_version_number & 0xffff if build == 0 and os_build_number: build = int(os_build_number) if os_build_number is not None else 0 else: major, minor, patch, build = _get_version_from_string(os_version_number) #print('%x, %s -> %i.%i.%i.%i' % (os_version_number, os_build_number, major, minor, patch, build)) return major, minor, patch, build def get_os_version_number(platform_type, os_version_number, os_build_number): if platform_type is None or os_version_number is None: return 0 if platform_type == 'Linux': major, minor, patch, build = _get_version_from_string(os_version_number) elif platform_type == 'Windows NT': major, minor, patch, build = _get_version_from_string(os_version_number) if major >= 10: build = patch patch = 0 else: major = 0 minor = 0 patch = 0 build = 0 ret = (major << 48) | (minor << 32) | (patch << 16) | 
build print('ver in %s -> %x' % (os_version_number, ret)) return ret def get_os_build_number(platform_type, os_version_number, os_build_number): if platform_type is None or os_version_number is None: return 0 if platform_type == 'Linux': build = 0 elif platform_type == 'Windows NT': major, minor, patch, build = _get_version_from_string(os_version_number) if major >= 10: build = patch else: build = 0 print('build in %s -> %x' % (os_version_number, build)) return build def os_version_info(platform_type, os_version_number, os_build_number): ret = {'text': 'unknown' } if platform_type is None or os_version_number is None: return ret major, minor, patch, build = _get_version_from_numbers(os_version_number, os_build_number) if platform_type == 'Linux': ret['text'] = 'Linux %i.%i.%i.%i' % (major, minor, patch, build) ret['href'] = 'https://en.wikipedia.org/wiki/Linux' elif platform_type == 'Windows NT': productName = 'Windows %i.%i' % (major, minor) marketingName = None if (major < 6): productName = "Windows XP" ret['short'] = 'WinXP' ret['href'] = 'https://en.wikipedia.org/wiki/Windows_XP' elif (major == 6 and minor == 0): productName = "Windows Vista" ret['short'] = 'WinVista' ret['href'] = 'https://en.wikipedia.org/wiki/Windows_Vista' elif (major == 6 and minor == 1): productName = "Windows 7" ret['short'] = 'Win7' ret['href'] = 'https://en.wikipedia.org/wiki/Windows_7' elif (major == 6 and minor == 2): productName = "Windows 8" ret['short'] = 'Win8' ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8' elif (major == 6 and minor == 3): productName = "Windows 8.1" ret['short'] = 'Win8.1' ret['href'] = 'https://en.wikipedia.org/wiki/Windows_8' elif (major == 10): ret['href'] = 'https://en.wikipedia.org/wiki/Windows_10' # See https://en.wikipedia.org/wiki/Windows_10_version_history if build <= 10240: ret['short'] = 'Win10' productName = "Windows 10" marketingName = '' elif(build <= 10586): ret['short'] = 'Win10/1511' productName = "Windows 10 Version 1511" marketingName = "November Update" elif (build <= 14393): ret['short'] = 'Win10/1607' productName = "Windows 10 Version 1607" marketingName = "Anniversary Update" elif (build <= 15063): ret['short'] = 'Win10/1703' productName = "Windows 10 Version 1703" marketingName = "Creators Update" elif (build <= 16299): ret['short'] = 'Win10/1709' productName = "Windows 10 Version 1709" marketingName = "Fall Creators Update" elif (build <= 17134): ret['short'] = 'Win10/1803' productName = "Windows 10 Version 1803" marketingName = "April 2018 Update" elif (build <= 18204): ret['short'] = 'Win10/1809' productName = "Windows 10 Version 1809" marketingName = "October 2018 Update" elif (build <= 18362): ret['short'] = 'Win10/1903' productName = "Windows 10 Version 1903" marketingName = "May 2019 Update" elif (build <= 18363): ret['short'] = 'Win10/1909' productName = "Windows 10 Version 1909" marketingName = "November 2019 Update" elif (build <= 19041): ret['short'] = 'Win10/2004' productName = "Windows 10 Version 2004" marketingName = "May 2020 Update" elif (build <= 19042): ret['short'] = 'Win10/1903' productName = "Windows 10 Version 20H2" marketingName = '' # TBA else: ret['short'] = 'Win10/TBA' productName = 'Windows 10 Build %i' % build if marketingName: ret['text'] = '%s (%s)' % (productName, marketingName) else: ret['text'] = productName ret['full'] = ret['text'] + ' %i.%i.%i.%i' % (major, minor, patch, build) elif platform_type == 'Windows': ret['text'] = 'Windows %i.%i' % (major, minor) ret['href'] = 
'https://en.wikipedia.org/wiki/Microsoft_Windows' return ret def format_os_version(platform_type, os_version_number, os_build_number): info = os_version_info(platform_type, os_version_number, os_build_number) if 'href' in info: return tag_a(info.get('text'), href=info.get('href')) else: return info.get('text') def format_os_version_short(platform_type, os_version_number, os_build_number): info = os_version_info(platform_type, os_version_number, os_build_number) if 'short' in info: return info.get('short') else: return info.get('text') def language_from_qlocale_language_enum(num): _codes = { 0: 'Any language', 31: 'English', 42: 'German', } if num in _codes: return _codes[num] else: return str(num) # See https://doc.qt.io/qt-5/qlocale.html#Country-enum def country_from_qlocale_country_enum(num): _codes = { 0: 'Any country', 82: 'Germany', 224: 'United Kingdom', 225: 'United States', } if num in _codes: return _codes[num] else: return str(num) # https://doc.qt.io/qt-5/qlocale.html#Script-enum def script_from_qlocale_script_enum(num): _codes = { 0: 'Any script', 1: 'Arabic', 2: 'Cyrillic', 16: 'Greek', 7: 'Latin', } if num in _codes: return _codes[num] else: return str(num) def thread_extra_info(thread): if thread is None: return _('N/A') elif thread.main_thread: return '*@' if thread.exception else '@' elif thread.rpc_thread: return '*[RPC]' if thread.exception else '[RPC]' elif thread.exception: return '*' else: return '' def format_thread(thread): if thread is None: return _('N/A') else: if thread.main_thread: ret = _('Main thread') elif thread.rpc_thread: ret = _('RPC thread') else: ret = _('Thread') ret = ret + ' ' + hex_format(thread.id) if thread.name: ret = ret + ' ' + thread.name if thread.exception: ret = ret + ' ' + _('with exception') return ret def format_stack_frame(frame): if frame is None: return _('N/A') else: if frame.function is None: offset = frame.addr - frame.module_base if frame.module: return frame.module + '+' + hex_format(offset) else: return frame.addr else: return format_function_plus_offset(frame.function, frame.funcoff)
gpl-3.0
-138,188,937,514,413,520
34.39562
137
0.558443
false
3.501733
false
false
false
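Editor's note: the crash-report formatting helpers in the record above pack an OS version into a single 64-bit integer made of four 16-bit fields (major, minor, patch, build) and unpack it the same way. A minimal standalone sketch of that round trip; the function names here are illustrative and not part of the original module:

def pack_version(major, minor, patch, build):
    # four 16-bit fields in one 64-bit integer, mirroring get_os_version_number()
    return (major << 48) | (minor << 32) | (patch << 16) | build

def unpack_version(num):
    # inverse operation, mirroring format_version_number() and
    # _get_version_from_numbers() in the record above
    return ((num >> 48) & 0xffff, (num >> 32) & 0xffff,
            (num >> 16) & 0xffff, num & 0xffff)

assert unpack_version(pack_version(10, 0, 0, 19041)) == (10, 0, 0, 19041)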
libicocco/poser-hand-generator
createGraspICRA09.py
1
3429
# creatGraspICRA09.py - script for creating a hand poses database
#
# Copyright (c) 2009 Javier Romero
#
# Author: Javier Romero <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA

import poser
import linecache
import os
import setCamAZEL
import setTexture
from os.path import join

scene = poser.Scene()
basedir = os.path.dirname(os.path.abspath(__file__))
dir = join(basedir, 'out')
lightdir = join(basedir, 'lights')
taxonomyDir = join(basedir, 'taxonomy')
texture = join(basedir, 'Hand Texture2.TIF')
listpath = join(basedir, 'poses', 'handjointssavinglist.txt')
#lights=["light1.lt2","light2.lt2","light3.lt2","light4.lt2"]
lights = ["light1.lt2"]
nAz = 24
nEl = 12
nRo = 9
nFrames = 6
grasps = ["largeDiameter", "smallDiameter", "mediumWrap", "adductedThumb",
          "lightTool", "prismatic4Finger", "prismatic3Finger",
          "prismatic2Finger", "palmarPinch", "powerDisk", "powerSphere",
          "precisionDisk", "precisionSphere", "tripod", "fixedHook",
          "lateral", "indexFingerExtension", "extensionType", "distalType",
          "writingTripod", "tripodVariation", "parallelExtension",
          "adductionGrip", "tipPinch", "lateralTripod", "sphere4Finger",
          "quadpod", "sphere3Finger", "stick", "palmarGrasp", "ringGrasp",
          "ventralGrasp", "inferiorPincerGrasp"]

#poser.SetNumRenderThreads(4)
#poser.SetRenderInSeparateProcess(1)

for graspIndex in range(len(grasps)):
    outdir = join(dir, '%02d' % (graspIndex+1))
    if not os.path.isdir(outdir):
        os.mkdir(outdir)
    for lightindex in range(len(lights)):
        jointFileName0 = join(taxonomyDir, "rest.txt")
        jointFileName1 = join(taxonomyDir, grasps[graspIndex] + ".txt")
        graspCode = (graspIndex)*(len(lights)) + lightindex + 1

        # close and discard changes
        poser.CloseDocument(1)
        poser.OpenDocument(join(taxonomyDir, grasps[graspIndex] + ".pz3"))
        scene.LoadLibraryLight(lightdir+lights[lightindex])
        setTexture.setTexture(texture)
        linecache.checkcache(jointFileName0)
        linecache.checkcache(jointFileName1)
        setCamAZEL.setRenderOptions(scale=0)
        gnd = scene.Actor("GROUND")
        gnd.SetVisible(0)
        gnd.SetVisibleInRender(0)
        gnd.SetVisibleInReflections(0)
        ffly = scene.CurrentFireFlyOptions()
        ffly.SetManual(1)
        setCamAZEL.multiViewSeqRender(basedir, nAz, nEl, nRo, outdir,
                                      jointFileName0, jointFileName1,
                                      nFrames, graspCode, listpath=listpath,
                                      fullSphere=True, f=70,
                                      camName="RHand Camera")
gpl-2.0
4,005,655,278,173,334,000
39.313253
79
0.656168
false
3.384995
false
false
false
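Editor's note: the Poser script above assigns one numeric grasp code per (grasp, light) combination and one zero-padded output directory per grasp. A sketch of just that indexing arithmetic in plain Python, with a truncated grasp list for illustration and no Poser API calls:

from os.path import join

grasps = ["largeDiameter", "smallDiameter", "mediumWrap"]  # truncated for illustration
lights = ["light1.lt2"]

for graspIndex in range(len(grasps)):
    outdir = join("out", "%02d" % (graspIndex + 1))            # out/01, out/02, ...
    for lightindex in range(len(lights)):
        graspCode = graspIndex * len(lights) + lightindex + 1  # 1-based code
        print(graspCode, outdir, grasps[graspIndex], lights[lightindex])

With a single light, as in the original script, graspCode reduces to graspIndex + 1.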
jeffmacinnes/pyneal
pyneal_scanner/utils/Siemens_utils.py
1
30222
""" Set of classes and methods specific to Siemens scanning environments """ from __future__ import print_function from __future__ import division import os from os.path import join import sys import time import re import json import glob import logging from threading import Thread from queue import Queue import numpy as np import pydicom import nibabel as nib from nibabel.nicom import dicomreaders import zmq # regEx for Siemens style file naming Siemens_filePattern = re.compile('\d{3}_\d{6}_\d{6}.dcm') # regEx for pulling the volume field out of the mosaic file name Siemens_mosaicVolumeNumberField = re.compile('(?<=\d{6}_)\d{6}') Siemens_mosaicSeriesNumberField = re.compile('(?<=\d{3}_)\d{6}(?=_\d{6}.dcm)') class Siemens_DirStructure(): """ Finding the names and paths of series directories in a Siemens scanning environment. In Siemens environments, using the ideacmdtool, the scanner is set up to export data in real-time to a shared directory that is accessible from a remote workstation (running Pyneal Scanner). For functional data, Siemens scanners store reconstructed slices images by taking all of the slices for a single volume, and placing them side-by-side in a larger "mosaic" dicom image. A scan will produce one mosaic image per volume. For anatomical data, dicom images for each 2D slice will be written as separate files, numbered sequentially, and saved in the `sessionDir`. All dicom images for all scans across a single session will be stored in the same directory. We'll call this directory the `sessionDir`. A single `sessionDir` will hold all of the mosaic files for all of the series for the current session. The series number is contained in the filename, which follows the pattern: [session#]_[series#]_[vol#].dcm These files will appear in real-time as the scan progresses. This class contains methods to retrieve the current `sessionDir`, show the current series that are present, and monitor the `sessionDir` for the appearance of new series files. 
""" def __init__(self, scannerSettings): """ Initialize the class Parameters ---------- scannerSettings : object class attributes represent all of the settings unique to the current scanning environment (many of them read from `scannerConfig.yaml`) See Also -------- general_utils.ScannerSettings """ # initialize class attributes if 'scannerSessionDir' in scannerSettings.allSettings: self.sessionDir = scannerSettings.allSettings['scannerSessionDir'] else: print('No scannerSessionDir found in scannerConfig file') sys.exit() def print_currentSeries(self): """ Find all of the series present in given sessionDir, and print them all, along with time since last modification, and directory size """ # find the sessionDir, if not already found if self.sessionDir is None: self.findSessionDir() print('Session Dir: ') print('{}'.format(self.sessionDir)) # find all mosaic files in the sessionDir self.uniqueSeries = self.getUniqueSeries() if len(self.uniqueSeries) == 0: print('No mosaic files found in {}'.format(self.sessionDir)) else: # print out info on each unique series in sessionDir currentTime = int(time.time()) print('Unique Series: ') for series in sorted(self.uniqueSeries): # get list of all dicoms that match this series number thisSeriesDicoms = glob.glob(join(self.sessionDir, ('*_' + series + '_*.dcm'))) # get time since last modification for last dicom in list lastModifiedTime = os.stat(thisSeriesDicoms[-1]).st_mtime timeElapsed = currentTime - lastModifiedTime m, s = divmod(timeElapsed, 60) time_string = '{} min, {} s ago'.format(int(m), int(s)) print(' {}\t{} files \t{}'.format(series, len(thisSeriesDicoms), time_string)) def getUniqueSeries(self): """ Return a list of unique series numbers from the filenames of the files found in the sessionDir """ uniqueSeries = [] self.allMosaics = [f for f in os.listdir(self.sessionDir) if Siemens_filePattern.match(f)] if len(self.allMosaics) > 0: # find unique series numbers among all mosaics seriesNums = [] for f in self.allMosaics: seriesNums.append(Siemens_mosaicSeriesNumberField.search(f).group()) uniqueSeries = set(seriesNums) return uniqueSeries def waitForNewSeries(self, interval=.1): """ Listen for the appearance of new series files Once a scan starts, new series mosaic files will be created in the `sessionDir`. By the time this function is called, this class should already have the `sessionDir` defined Parameters ---------- interval : float, optional time, in seconds, to wait between polling for a new directory Returns ------- newSeries : string seriesNum of the new series """ keepWaiting = True existingSeries = self.getUniqueSeries() while keepWaiting: # get all of the unique series again currentSeries = self.getUniqueSeries() # compare against existing series diff = currentSeries - existingSeries if len(diff) > 0: newSeries = diff.pop() keepWaiting = False # pause before searching directories again time.sleep(interval) # return the found series name return newSeries class Siemens_BuildNifti(): """ Tools to build a 3D or 4D Nifti image from all of the dicom mosaic images in a directory. Input is a path to a series directory containing dicom images (either mosaic images for functional data, or 2D slice image for anatomical data). 
Image parameters, like voxel spacing and dimensions, are obtained automatically from the info in the dicom tags End result is a Nifti1 formatted 3D (anat) or 4D (func) file in RAS+ orientation """ def __init__(self, seriesDir, seriesNum): """ Initialize class, and set/obtain basic class attributes like file paths and scan parameters Parameters ---------- seriesDir : string full path to the directory containing the raw dicom mosaic files for each volume in the series seriesNum : string series number of the series that you'd like to build the nifti image from """ # initialize attributes self.seriesDir = seriesDir self.seriesNum = seriesNum self.niftiImage = None # make a list of the specified raw dicom mosaic files in this dir rawDicoms = glob.glob(join(self.seriesDir, ('*_' + str(self.seriesNum).zfill(6) + '_*.dcm'))) # figure out what type of image this is, 4d or 3d self.scanType = self._determineScanType(rawDicoms[0]) # build the nifti image if self.scanType == 'anat': self.niftiImage = self.buildAnat(rawDicoms) elif self.scanType == 'func': self.niftiImage = self.buildFunc(rawDicoms) def buildAnat(self, dicomFiles): """ Build a 3D structural/anatomical image from list of dicom files Given a list of `dicomFiles`, build a 3D anatomical image from them. Figure out the image dimensions and affine transformation to map from voxels to mm from the dicom tags Parameters ---------- dicomFiles : list list containing the file names (file names ONLY, no path) of all dicom slice images to be used in constructing the final nifti image Returns ------- anatImage_RAS : Nifti1Image nifti-1 formated image of the 3D anatomical data, oriented in RAS+ See Also -------- nibabel.nifti1.Nifti1Image() """ # read the first dicom in the list to get overall image dimensions dcm = pydicom.dcmread(join(self.seriesDir, dicomFiles[0]), stop_before_pixels=1) sliceDims = (getattr(dcm, 'Columns'), getattr(dcm, 'Rows')) self.nSlicesPerVol = len(dicomFiles) sliceThickness = getattr(dcm, 'SliceThickness') ### Build 3D array of voxel data # create an empty array to store the slice data imageMatrix = np.zeros(shape=( sliceDims[0], sliceDims[1], self.nSlicesPerVol), dtype='int16') # Use the InstanceNumber tag to order the slices. This works for anat # 3D images only, since the instance numbers do not repeat as they would # with functional data with multiple volumes sliceDict = {} for s in dicomFiles: dcm = pydicom.dcmread(join(self.seriesDir, s)) sliceDict[dcm.InstanceNumber] = join(self.seriesDir, s) # sort by InStackPositionNumber and assemble the image for sliceIdx, ISPN in enumerate(sorted(sliceDict.keys())): dcm = pydicom.dcmread(sliceDict[ISPN]) # extract the pixel data as a numpy array. Transpose # so that the axes order go [cols, rows] pixel_array = dcm.pixel_array.T # place in the image matrix imageMatrix[:, :, sliceIdx] = pixel_array ### create the affine transformation to map from vox to mm space # in order to do this, we need to get some values from the first and # last slices in the volume. 
firstSlice = sliceDict[sorted(sliceDict.keys())[0]] lastSlice = sliceDict[sorted(sliceDict.keys())[-1]] dcm_first = pydicom.dcmread(firstSlice) dcm_last = pydicom.dcmread(lastSlice) self.pixelSpacing = getattr(dcm_first, 'PixelSpacing') self.firstSlice_IOP = np.array(getattr(dcm_first, 'ImageOrientationPatient')) self.firstSlice_IPP = np.array(getattr(dcm_first, 'ImagePositionPatient')) self.lastSlice_IPP = np.array(getattr(dcm_last, 'ImagePositionPatient')) # now we can build the affine affine = self.buildAffine() ### Build a Nifti object, reorder it to RAS+ anatImage = nib.Nifti1Image(imageMatrix, affine=affine) anatImage_RAS = nib.as_closest_canonical(anatImage) # reoder to RAS+ print('Nifti image dims: {}'.format(anatImage_RAS.shape)) return anatImage_RAS def buildFunc(self, dicomFiles): """ Build a 4D functional image from list of dicom files Given a list of dicomFile paths, build a 4d functional image. For Siemens scanners, each dicom file is assumed to represent a mosaic image comprised of mulitple slices. This tool will split apart the mosaic images, and construct a 4D nifti object. The 4D nifti object contain a voxel array ordered like RAS+ as well the affine transformation to map between vox and mm space Parameters ---------- dicomFiles : list list containing the file names (file names ONLY, no path) of all dicom mosaic images to be used in constructing the final nifti image """ imageMatrix = None affine = None TR = None # make dicomFiles store the full path dicomFiles = [join(self.seriesDir, f) for f in dicomFiles] ### Loop over all dicom mosaic files nVols = len(dicomFiles) for mosaic_dcm_fname in dicomFiles: ### Parse the mosaic image into a 3D volume # we use the nibabel mosaic_to_nii() method which does a lot of the # heavy-lifting of extracting slices, arranging in a 3D array, and # grabbing the affine dcm = pydicom.dcmread(mosaic_dcm_fname) # create dicom object # for mosaic files, the instanceNumber tag will correspond to the # volume number (using a 1-based indexing, so subtract by 1) volIdx = dcm.InstanceNumber - 1 # convert the dicom object to nii thisVol = dicomreaders.mosaic_to_nii(dcm) # convert to RAS+ thisVol_RAS = nib.as_closest_canonical(thisVol) if TR is None: TR = dcm.RepetitionTime / 1000 # construct the imageMatrix if it hasn't been made yet if imageMatrix is None: imageMatrix = np.zeros(shape=(thisVol_RAS.shape[0], thisVol_RAS.shape[1], thisVol_RAS.shape[2], nVols), dtype=np.uint16) # construct the affine if it isn't made yet if affine is None: affine = thisVol_RAS.affine # Add this data to the image matrix imageMatrix[:, :, :, volIdx] = thisVol_RAS.get_fdata() ### Build a Nifti object funcImage = nib.Nifti1Image(imageMatrix, affine=affine) pixDims = np.array(funcImage.header.get_zooms()) pixDims[3] = TR funcImage.header.set_zooms(pixDims) return funcImage def buildAffine(self): """ Build the affine matrix that will transform the data to RAS+. This function should only be called once the required data has been extracted from the dicom tags from the relevant slices. The affine matrix is constructed by using the information in the ImageOrientationPatient and ImagePositionPatient tags from the first and last slices in a volume. However, note that those tags will tell you how to orient the image to DICOM reference coordinate space, which is LPS+. In order to to get to RAS+ we have to invert the first two axes. 
Notes ----- For more info on building this affine, please see the documentation at: http://nipy.org/nibabel/dicom/dicom_orientation.html http://nipy.org/nibabel/coordinate_systems.html """ ### Get the ImageOrientation values from the first slice, # split the row-axis values (0:3) and col-axis values (3:6) # and then invert the first and second values of each rowAxis_orient = self.firstSlice_IOP[0:3] * np.array([-1, -1, 1]) colAxis_orient = self.firstSlice_IOP[3:6] * np.array([-1, -1, 1]) ### Get the voxel size along Row and Col axis voxSize_row = float(self.pixelSpacing[0]) voxSize_col = float(self.pixelSpacing[1]) ### Figure out the change along the 3rd axis by subtracting the # ImagePosition of the last slice from the ImagePosition of the first, # then dividing by 1/(total number of slices-1), then invert to # make it go from LPS+ to RAS+ slAxis_orient = (self.firstSlice_IPP - self.lastSlice_IPP) / (1 - self.nSlicesPerVol) slAxis_orient = slAxis_orient * np.array([-1, -1, 1]) ### Invert the first two values of the firstSlice ImagePositionPatient. # This tag represents the translation needed to take the origin of our 3D voxel # array to the origin of the LPS+ reference coordinate system. Since we want # RAS+, need to invert those first two axes voxTranslations = self.firstSlice_IPP * np.array([-1, -1, 1]) ### Assemble the affine matrix affine = np.matrix([ [rowAxis_orient[0] * voxSize_row, colAxis_orient[0] * voxSize_col, slAxis_orient[0], voxTranslations[0]], [rowAxis_orient[1] * voxSize_row, colAxis_orient[1] * voxSize_col, slAxis_orient[1], voxTranslations[1]], [rowAxis_orient[2] * voxSize_row, colAxis_orient[2] * voxSize_col, slAxis_orient[2], voxTranslations[2]], [0, 0, 0, 1] ]) return affine def _determineScanType(self, dicomFile): """ Figure out what type of scan this is, anat or func This tool will determine the scan type from a given dicom file. Possible scan types are either single 3D volume (anat), or a 4D dataset built up of 2D slices (func). The scan type is determined by reading the `MRAcquisitionType` tag from the dicom file Parameters ---------- dcmFile : string file name of dicom file from the current series that you would like to open to read the imaging parameters from Returns ------- scanType : string either 'anat' or 'func' depending on scan type stored in dicom tag """ # read the dicom file dcm = pydicom.dcmread(join(self.seriesDir, dicomFile), stop_before_pixels=1) if getattr(dcm, 'MRAcquisitionType') == '3D': scanType = 'anat' elif getattr(dcm, 'MRAcquisitionType') == '2D': scanType = 'func' else: print('Cannot determine a scan type from this image!') sys.exit() return scanType def get_scanType(self): """ Return the scan type """ return self.scanType def get_niftiImage(self): """ Return the constructed Nifti Image """ return self.niftiImage def write_nifti(self, output_path): """ Write the nifti file to disk Parameters ---------- outputPath : string full path, including filename, you want to use to save the nifti image """ nib.save(self.niftiImage, output_path) print('Image saved at: {}'.format(output_path)) class Siemens_monitorSessionDir(Thread): """ Class to monitor for new mosaic images to appear in the sessionDir. This class will run independently in a separate thread. 
Each new mosaic file that appears and matches the current series number will be added to the Queue for further processing """ def __init__(self, sessionDir, seriesNum, dicomQ, interval=.2): """ Initialize the class, and set basic class attributes Parameters ---------- sessionDir : string full path to the session directory where new dicom mosaic files will appear seriesNum : string series number assigned to the new series dicomQ : object instance of python queue class to hold new dicom files before they have been processed. This class will add items to that queue. interval : float, optional time, in seconds, to wait before repolling the seriesDir to check for any new files """ # start the thread upon completion Thread.__init__(self) # set up logger self.logger = logging.getLogger(__name__) # initialize class parameters self.interval = interval # interval for polling for new files self.sessionDir = sessionDir # full path to series directory self.seriesNum = seriesNum # series number of current series self.dicomQ = dicomQ # queue to store dicom mosaic files self.alive = True # thread status self.numMosaicsAdded = 0 # counter to keep track of # mosaics self.queued_mosaic_files = set() # empty set to store names of queued mosaic def run(self): # function that runs while the Thread is still alive while self.alive: # create a set of all mosaic files with the current series num #currentMosaics = set(os.listdir(self.seriesDir)) currentMosaics = set(glob.glob(join(self.sessionDir, ('*_' + str(self.seriesNum).zfill(6) + '_*.dcm')))) # grab only the ones that haven't already been added to the queue newMosaics = [f for f in currentMosaics if f not in self.queued_mosaic_files] # loop over each of the new mosaic files, add each to queue for f in newMosaics: mosaic_fname = join(self.sessionDir, f) try: self.dicomQ.put(mosaic_fname) except: self.logger.error('failed on: {}'.format(mosaic_fname)) print(sys.exc_info()) sys.exit() if len(newMosaics) > 0: self.logger.debug('Put {} new mosaic file on the queue'.format(len(newMosaics))) self.numMosaicsAdded += len(newMosaics) # now update the set of mosaics added to the queue self.queued_mosaic_files.update(set(newMosaics)) # pause time.sleep(self.interval) def get_numMosaicsAdded(self): """ Return the cumulative number of mosaic files added to the queue thus far """ return self.numMosaicsAdded def stop(self): """ Set the `alive` flag to False, stopping thread """ self.alive = False class Siemens_processMosaic(Thread): """ Class to process each mosaic file in the queue. This class will run in a separate thread. While running, it will pull 'tasks' off of the queue and process each one. Processing each task involves reading the mosaic file, converting it to a 3D Nifti object, reordering it to RAS+, and then sending the volume out over the pynealSocket """ def __init__(self, dicomQ, pynealSocket, interval=.2): """ Initialize the class Parameters ---------- dicomQ : object instance of python queue class that will store the dicom slice file names. This class will pull items from that queue. pynealSocket : object instance of ZMQ style socket that will be used to communicate with Pyneal. This class will use this socket to send image data and headers to Pyneal during the real-time scan. 
See also: general_utils.create_pynealSocket() interval : float, optional time, in seconds, to wait before repolling the queue to see if there are any new file names to process """ # start the threat upon creation Thread.__init__(self) # set up logger self.logger = logging.getLogger(__name__) # initialize class parameters self.dicomQ = dicomQ self.interval = interval # interval between polling queue for new files self.alive = True self.pynealSocket = pynealSocket self.totalProcessed = 0 # counter for total number of slices processed def run(self): self.logger.debug('Siemens_processMosaic started') # function to run on loop while self.alive: # if there are any mosaic files in the queue, process them if not self.dicomQ.empty(): numMosaicsInQueue = self.dicomQ.qsize() # loop through all mosaics currently in queue & process for m in range(numMosaicsInQueue): # retrieve file name from queue mosaic_dcm_fname = self.dicomQ.get(True, 2) # ensure the file has copied completely file_size = 0 while True: file_info = os.stat(mosaic_dcm_fname) if file_info.st_size == 0 or file_info.st_size > file_size: file_size = file_info.st_size else: break # process this mosaic self.processMosaicFile(mosaic_dcm_fname) # complete this task, thereby clearing it from the queue self.dicomQ.task_done() # log how many were processed self.totalProcessed += numMosaicsInQueue self.logger.debug('Processed {} tasks from the queue ({} total)'.format(numMosaicsInQueue, self.totalProcessed)) # pause for a bit time.sleep(self.interval) def processMosaicFile(self, mosaic_dcm_fname): """ Process a given mosaic dicom file This method will read the dicom mosaic file. Convert to a nifti object that will provide the 3D voxel array for this mosaic. Reorder to RAS+, and then send to the pynealSocket Parameters ---------- mosaic_dcm_fname : string full path to the dicom mosaic file that you want to process """ ### Figure out the volume index for this mosaic by reading # the field from the file name itself mosaicFile_root, mosaicFile_name = os.path.split(mosaic_dcm_fname) volIdx = int(Siemens_mosaicVolumeNumberField.search(mosaicFile_name).group(0)) - 1 self.logger.info('Volume {} processing'.format(volIdx)) ### Parse the mosaic image into a 3D volume # we use the nibabel mosaic_to_nii() method which does a lot of the # heavy-lifting of extracting slices, arranging in a 3D array, and # grabbing the affine dcm = pydicom.dcmread(mosaic_dcm_fname) # create dicom object thisVol = dicomreaders.mosaic_to_nii(dcm) # convert to nifti # convert to RAS+ thisVol_RAS = nib.as_closest_canonical(thisVol) # get the data as a contiguous array (required for ZMQ) thisVol_RAS_data = np.ascontiguousarray(thisVol_RAS.get_fdata()) ### Create a header with metadata info volHeader = { 'volIdx': volIdx, 'dtype': str(thisVol_RAS_data.dtype), 'shape': thisVol_RAS_data.shape, 'affine': json.dumps(thisVol_RAS.affine.tolist()), 'TR': str(dcm.RepetitionTime / 1000)} ### Send the voxel array and header to the pynealSocket self.sendVolToPynealSocket(volHeader, thisVol_RAS_data) def sendVolToPynealSocket(self, volHeader, voxelArray): """ Send the volume data to Pyneal Send the image data and header information for the specified volume to Pyneal via the `pynealSocket`. 
Parameters ---------- volHeader : dict key:value pairs for all of the relevant metadata for this volume voxelArray : numpy array 3D numpy array of voxel data from the volume, reoriented to RAS+ """ self.logger.debug('TO pynealSocket: vol {}'.format(volHeader['volIdx'])) ### Send data out the socket, listen for response self.pynealSocket.send_json(volHeader, zmq.SNDMORE) # header as json self.pynealSocket.send(voxelArray, flags=0, copy=False, track=False) pynealSocketResponse = self.pynealSocket.recv_string() # log the success self.logger.debug('FROM pynealSocket: {}'.format(pynealSocketResponse)) # check if that was the last volume, and if so, stop if 'STOP' in pynealSocketResponse: self.stop() def stop(self): """ set the `alive` flag to False, stopping the thread """ self.alive = False def Siemens_launch_rtfMRI(scannerSettings, scannerDirs): """ Launch a real-time session in a Siemens environment. This method should be called from pynealScanner.py before starting the scanner. Once called, this method will take care of: - monitoring the sessionDir for new series files to appear (and then returing the new series number) - set up the socket connection to send volume data over - creating a Queue to store newly arriving DICOM files - start a separate thread to monitor the new series appearing - start a separate thread to process DICOMs that are in the Queue """ # Create a reference to the logger. This assumes the logger has already # been created and customized by pynealScanner.py logger = logging.getLogger(__name__) #### SET UP PYNEAL SOCKET (this is what we'll use to #### send data (e.g. header, volume voxel data) to remote connections) # figure out host and port number to use host = scannerSettings.get_pynealSocketHost() port = scannerSettings.get_pynealSocketPort() logger.debug('Scanner Socket Host: {}'.format(host)) logger.debug('Scanner Socket Port: {}'.format(port)) # create a socket connection from .general_utils import create_pynealSocket pynealSocket = create_pynealSocket(host, port) logger.debug('Created pynealSocket') # wait for remote to connect on pynealSocket logger.info('Connecting to pynealSocket...') while True: msg = 'hello from pyneal_scanner ' pynealSocket.send_string(msg) msgResponse = pynealSocket.recv_string() if msgResponse == msg: break logger.info('pynealSocket connected') ### Wait for a new series directory appear logger.info('Waiting for new series files to appear...') seriesNum = scannerDirs.waitForNewSeries() logger.info('New Series Number: {}'.format(seriesNum)) ### Start threads to A) watch for new mosaic files, and B) process # them as they appear # initialize the dicom queue to keep store newly arrived # dicom mosaic images, and keep track of which have been processed dicomQ = Queue() # create instance of class that will monitor sessionDir for new mosaic # images to appear. Pass in a copy of the dicom queue. Start the thread scanWatcher = Siemens_monitorSessionDir(scannerDirs.sessionDir, seriesNum, dicomQ) scanWatcher.start() # create an instance of the class that will grab mosaic dicoms # from the queue, reformat the data, and pass over the socket # to pyneal. Start the thread going mosaicProcessor = Siemens_processMosaic(dicomQ, pynealSocket) mosaicProcessor.start()
mit
7,839,139,264,983,837,000
38.713535
128
0.623552
false
4.091795
false
false
false
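Editor's note: Siemens_BuildNifti.buildAffine in the record above explains how the voxel-to-RAS+ affine is assembled from the DICOM ImageOrientationPatient and ImagePositionPatient tags, flipping the first two axes because those tags are defined in LPS+ space. The following standalone sketch mirrors that construction but takes the tag values as plain arguments instead of reading them from pydicom objects; it is illustrative only:

import numpy as np

def affine_from_dicom_tags(iop_first, ipp_first, ipp_last, pixel_spacing, n_slices):
    flip = np.array([-1, -1, 1])                    # LPS+ -> RAS+ sign flip
    row_axis = np.asarray(iop_first[0:3]) * flip    # first IOP triplet
    col_axis = np.asarray(iop_first[3:6]) * flip    # second IOP triplet
    slice_axis = (np.asarray(ipp_first) - np.asarray(ipp_last)) / (1 - n_slices)
    slice_axis = slice_axis * flip
    origin = np.asarray(ipp_first) * flip           # translation to the RAS+ origin

    affine = np.eye(4)
    affine[:3, 0] = row_axis * float(pixel_spacing[0])
    affine[:3, 1] = col_axis * float(pixel_spacing[1])
    affine[:3, 2] = slice_axis
    affine[:3, 3] = origin
    return affine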
rnelson/adventofcode
advent2015/day08.py
1
2659
#!/usr/bin/env python
"""
http://adventofcode.com/day/8

Part 1
------
Space on the sleigh is limited this year, and so Santa will be bringing his
list as a digital copy. He needs to know how much space it will take up when
stored.

It is common in many programming languages to provide a way to escape special
characters in strings. For example, C, JavaScript, Perl, Python, and even PHP
handle special characters in very similar ways.

However, it is important to realize the difference between the number of
characters in the code representation of the string literal and the number of
characters in the in-memory string itself.

(examples removed because the interpreter was complaining about the
escaping - ha)

Disregarding the whitespace in the file, what is the number of characters of
code for string literals minus the number of characters in memory for the
values of the strings in total for the entire file?

For example, given the four strings above, the total number of characters of
string code (2 + 5 + 10 + 6 = 23) minus the total number of characters in
memory for string values (0 + 3 + 7 + 1 = 11) is 23 - 11 = 12.

Part 2
------
Now, let's go the other way. In addition to finding the number of characters
of code, you should now encode each code representation as a new string and
find the number of characters of the new encoded representation, including
the surrounding double quotes.

(examples removed because the interpreter was complaining about the
escaping - ha)

Your task is to find the total number of characters to represent the newly
encoded strings minus the number of characters of code in each original
string literal. For example, for the strings above, the total encoded length
(6 + 9 + 16 + 11 = 42) minus the characters in the original code
representation (23, just like in the first part of this puzzle) is
42 - 23 = 19.
"""
from __future__ import print_function

import os
import re
import sys

INFILE = 'inputs/input08.txt'


def main():
    total_length = 0
    unescaped_length = 0
    escaped_length = 0

    with open(INFILE) as f:
        # Part 1
        for line in f:
            input = line.strip()
            total_length += len(input)

            unescaped = input[1:-1].decode('string_escape')
            unescaped_length += len(unescaped)

            escaped = '"{}"'.format(re.escape(input))
            escaped_length += len(escaped)

    msg = '[Python] Puzzle 8-1: {}'
    print(msg.format(total_length - unescaped_length))

    # Part 2
    msg = '[Python] Puzzle 8-2: {}'
    print(msg.format(escaped_length - total_length))


if __name__ == '__main__':
    main()
mit
7,221,743,905,681,149,000
29.918605
69
0.703272
false
3.921829
false
false
false
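Editor's note: the Advent of Code solution above depends on Python 2's 'string_escape' codec to measure the in-memory length of each literal. The same two counts can be obtained by scanning the literal directly, which also runs on Python 3; a sketch with illustrative names:

def decoded_len(literal):
    # literal includes the surrounding double quotes
    body = literal[1:-1]
    i = n = 0
    while i < len(body):
        if body[i] == '\\':
            i += 4 if body[i + 1] == 'x' else 2   # \xNN spans 4 chars, \\ and \" span 2
        else:
            i += 1
        n += 1
    return n

def encoded_len(literal):
    # every backslash or double quote gains one escape character,
    # plus two new surrounding quotes
    return len(literal) + literal.count('\\') + literal.count('"') + 2

For the four example strings in the puzzle statement these give 23 - 11 = 12 and 42 - 23 = 19, matching the totals quoted in the docstring.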