Dataset schema (one row per source file; value ranges and distinct-value counts as reported by the dataset viewer):

    repo_name        string   5 to 92 chars
    path             string   4 to 221 chars
    copies           string   19 distinct values
    size             string   4 to 6 chars
    content          string   766 to 896k chars
    license          string   15 distinct values
    hash             int64    -9,223,277,421,539,062,000 to 9,223,102,107B
    line_mean        float64  6.51 to 99.9
    line_max         int64    32 to 997
    alpha_frac       float64  0.25 to 0.96
    autogenerated    bool     1 class
    ratio            float64  1.5 to 13.6
    config_test      bool     2 classes
    has_no_keywords  bool     2 classes
    few_assignments  bool     1 class
repo_name: delete/estofadora | path: estofadora/bills/views.py | copies: 1 | size: 1602
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.core.urlresolvers import reverse

from .forms import BillForm
from .models import Bill


@login_required
def new(request):
    context = {}
    if request.method == 'POST':
        form = BillForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Cadastrada com sucesso!')
            return redirect(reverse('bills:new'))
    else:
        form = BillForm()
    context['form'] = form
    context['section'] = 'bill_new'
    return render(request, 'bills/new.html', context)


@login_required
def list(request):
    context = {}
    if request.method == 'POST':
        bill_name = request.POST.get('name')
        bills = Bill.objects.filter(
            name__icontains=bill_name
        ).order_by('-date_to_pay')
    else:
        bills = Bill.objects.all().order_by('-date_to_pay')
    context['bills'] = bills
    context['section'] = 'bills'
    return render(request, 'bills/list.html', context)


@login_required
def delete(request, pk):
    bill = get_object_or_404(Bill, pk=pk)
    bill.delete()
    messages.success(request, 'Conta removida com sucesso!')
    return redirect(reverse('bills:list'))


@login_required
def mark_as_paid(request, pk):
    bill = get_object_or_404(Bill, pk=pk)
    bill.is_paid = True
    bill.save()
    messages.success(request, 'Conta marcada como paga!')
    return redirect(reverse('bills:list'))
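The reverse() calls above only assume URL names 'new', 'list', 'delete' and 'mark_as_paid' under a 'bills' namespace. A minimal urls.py satisfying them might look like this sketch (the routes and regexes are assumptions, not taken from the repository; the namespace would come from the project-level include()):

# Hypothetical bills/urls.py for the views above (Django 1.x style,
# matching the django.core.urlresolvers import in the views module).
from django.conf.urls import url

from . import views

urlpatterns = [
    url(r'^$', views.list, name='list'),
    url(r'^new/$', views.new, name='new'),
    url(r'^delete/(?P<pk>\d+)/$', views.delete, name='delete'),
    url(r'^paid/(?P<pk>\d+)/$', views.mark_as_paid, name='mark_as_paid'),
]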
license: mit | hash: 4,154,725,326,265,776,600 | line_mean: 22.910448 | line_max: 64 | alpha_frac: 0.644195 | autogenerated: false | ratio: 3.423077 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: theepicsnail/SuperBot2 | path: Core.py | copies: 1 | size: 5362
from PluginManager import PluginManager
from PluginDispatcher import PluginDispatcher
from Configuration import ConfigFile
from Util import call
from re import match
from sys import path
from os import getcwd
from Util import dictJoin
from Logging import LogFile

path.append(getcwd())

log = LogFile("Core")


class Core:
    _PluginManager = None
    _PluginDispatcher = None
    _ResponseObject = None
    _Connector = None
    _Config = None

    def _LoadConnector(self, ConName):
        try:
            con = __import__("%s.Connector" % ConName, globals(), locals(),
                             "Connector")
            log.debug("Got connector:", con)
            cls = getattr(con, "Connector", None)
        except:
            log.exception("Exception while loading connector")
            cls = None
        log.debug("Connectors class", cls)
        if cls:
            c = cls()
            log.debug("Connector constructed")
            return c
        log.critical("No connector")
        return cls

    def HandleEvent(self, event):
        log.dict(event, "HandleEvent")
        pm = self._PluginManager
        if not pm:
            log.warning("No plugin manager")
            return
        pd = self._PluginDispatcher
        if not pd:
            log.warning("No plugin dispatcher")
            return
        ro = self._ResponseObject
        if not ro:
            log.warning("no response object")

        matches = pm.GetMatchingFunctions(event)
        log.debug("Matched %i hook(s)." % len(matches))
        for inst, func, args, servs in matches:
            newEvent = dictJoin(event,
                                dictJoin(args, {"self": inst, "response": ro}))
            log.debug("Services found for plugin:", servs)
            if servs:
                log.debug("Event before processing:", newEvent)
                servDict = {}
                servDict["event"] = newEvent
                servDict["pm"] = self._PluginManager
                servDict["pd"] = self._PluginDispatcher
                servDict["ro"] = self._ResponseObject
                servDict["c"] = self._Connector
                servDict["core"] = self
                servDict["config"] = self._Config
                for servName in servs:
                    serv = pm.GetService(servName)
                    log.debug("Processing service", servName, serv)
                    call(serv.onEvent, servDict)
            if servs:
                log.dict(newEvent, "Event after processing:")
                #issue 5 fix goes here
                newEvent.update(servDict)
            pd.Enqueue((func, newEvent))

    def __init__(self):
        self._Config = ConfigFile("Core")
        if not self._Config:
            log.critical("No log file loaded!")
            return
        ConName = self._Config["Core", "Provider"]
        if ConName == None:
            log.critical("No Core:Provider in Core.cfg")
            del self._Connector
            return
        self._Connector = self._LoadConnector(ConName)
        if self._Connector:
            self._PluginManager = PluginManager(ConName)
            self._PluginDispatcher = PluginDispatcher()
            self._Connector.SetEventHandler(self.HandleEvent)
            self._ResponseObject = self._Connector.GetResponseObject()
            self._PluginDispatcher.SetResponseHandler(
                self._Connector.HandleResponse)

    def Start(self):
        if not self._Connector:
            log.warning("Could not start, no connector.")
            return
        log.debug("Starting")
        log.debug("Auto loading plugins")
        self.AutoLoad()
        log.debug("Auto load complete")
        if self._Connector:
            log.debug("Connector starting")
            self._Connector.Start()
        #else log error?

    def Stop(self):
        log.debug("Stopping")
        if self._PluginDispatcher:
            self._PluginDispatcher.Stop()
        if self._PluginManager:
            self._PluginManager.Stop()
        if self._Connector:
            self._Connector.Stop()

    def AutoLoad(self):
        if not self._PluginManager:
            return
        pm = self._PluginManager
        log.note("Starting autoload", "Root:" + pm.root)
        cf = ConfigFile(pm.root, "Autoload")
        lines = ["Configuration:"]
        for i in cf:
            lines.append(i)
            for j in cf[i]:
                lines.append("  %s=%s" % (j, cf[i, j]))
        log.debug(*lines)
        if cf:
            log.debug("Autoloading plugins.")
            names = cf["Plugins", "Names"]
            log.debug("Autoloading plugins", names)
            if names:
                for name in names.split():
                    pm.LoadPlugin(name)
            log.debug("Autoloading finished.")
            pd = self._PluginDispatcher
            handler = pd.GetResponseHandler()
            log.debug("Updating dedicated thread pool",
                      self._ResponseObject, handler)
            pd.EnsureDedicated(pm.GetDedicated(), self._ResponseObject, handler)
        else:
            log.note("No Autoload configuration file")


if __name__ == "__main__":
    try:
        c = Core()
        try:
            c.Start()
        except:
            log.exception("Exception while starting.")
        c.Stop()
    except:
        log.exception("Exception while stopping.")
    log.debug("End of core")
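Core never names a concrete connector: it imports <Provider>.Connector from Core.cfg and only ever calls a handful of methods on it. A minimal stub satisfying that implicit interface, inferred purely from the call sites above (not an actual SuperBot2 connector), might be:

# Hypothetical <Provider>/Connector.py. Connector(), SetEventHandler(),
# GetResponseObject(), HandleResponse(), Start() and Stop() are the only
# members Core ever touches.
class Connector:
    def __init__(self):
        self._handler = None

    def SetEventHandler(self, handler):
        # Core passes Core.HandleEvent here
        self._handler = handler

    def GetResponseObject(self):
        # whatever plugins should use to respond; Core only passes it along
        return self

    def HandleResponse(self, response):
        # PluginDispatcher hands plugin responses back through this
        pass

    def Start(self):
        # a real connector would block here, feeding events to the handler
        if self._handler:
            self._handler({"type": "connected"})

    def Stop(self):
        pass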
license: mit | hash: -4,258,690,189,499,016,000 | line_mean: 29.99422 | line_max: 88 | alpha_frac: 0.55166 | autogenerated: false | ratio: 4.575085 | config_test: true | has_no_keywords: false | few_assignments: false
repo_name: ryanpstauffer/market-vis | path: marketvis/quotes.py | copies: 1 | size: 5030
# -*- coding: utf-8 -*-
"""
[Python 2.7 (Mayavi is not yet compatible with Python 3+)]
Created on Wed Dec 16 22:44:15 2015

@author: Ryan Stauffer
https://github.com/ryanpstauffer/market-vis

[This module referenced http://www.theodor.io/scraping-google-finance-data-using-pandas/]

Market Visualization Prototype
Quotes Module
"""

from datetime import datetime, date
import pandas as pd
import json
import urllib
import urllib2
import os


def getIntradayData(ticker, interval_seconds=61, num_days=10):
    # Specify URL string based on function inputs.
    urlString = 'http://www.google.com/finance/getprices?q={0}'.format(ticker.upper())
    urlString += "&i={0}&p={1}d&f=d,c".format(interval_seconds, num_days)

    # Request the text, and split by each line
    r = urllib2.urlopen(urllib2.Request(urlString)).read()
    r = r.splitlines()

    # Split each line by a comma, starting at the 8th line
    r = [line.split(',') for line in r[7:]]

    # Save data in Pandas DataFrame
    df = pd.DataFrame(r, columns=['Datetime', ticker])

    # Convert UNIX to Datetime format
    df['Datetime'] = df['Datetime'].apply(lambda x: datetime.fromtimestamp(int(x[1:])))
    df.index = df['Datetime']
    return df[ticker]


def getDailyData(ticker, startDate, endDate=date.today()):
    '''Daily quotes from Google Finance API. Date format='yyyy-mm-dd' '''
    ticker = ticker.upper()
    urlString = "http://www.google.com/finance/historical?q={0}".format(ticker)
    urlString += "&startdate={0}&enddate={1}&output=csv".format(
        startDate.strftime('%b %d, %Y'), endDate.strftime('%b %d, %Y'))

    # Convert URL output to dataframe
    df = pd.read_csv(urllib.urlopen(urlString))

    # Convert strings to Datetime format
    df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%d-%b-%y'))

    # Index by date
    df.index = df[df.columns[0]]
    df.drop(df.columns[0], axis=1, inplace=True)
    return df


def getLastPrice(ticker):
    '''Returns last price and date time of a given ticker (from Google Finance API)'''
    # Specify URL string based on function inputs.
    urlString = 'http://www.google.com/finance/info?client=ig&q={0}'.format(ticker.upper())

    # Request the text, and split by each line
    r = urllib2.urlopen(urllib2.Request(urlString)).read()
    obj = json.loads(r[3:])
    print(obj)
    price = float(obj[0]['l'])
    return price


def buildDailyPriceData(tickerList, startDate, endDate):
    print('Pulling Market Data for S&P 500 from {0} to {1}'.format(
        startDate.strftime('%Y%m%d'), endDate.strftime('%Y%m%d')))

    # Build SP500 daily price data (for saving)
    firstTicker = tickerList[0]
    print(firstTicker)
    firstTickerData = getDailyData(firstTicker, startDate, endDate)
    firstTickerData.rename(columns={'Close': firstTicker}, inplace=True)
    df = firstTickerData[firstTicker]
    for ticker in tickerList[1:]:
        print(ticker)
        newTicker = getDailyData(ticker, startDate, endDate)
        if not newTicker.empty:
            newTicker.rename(columns={'Close': ticker}, inplace=True)
            df = pd.concat([df, newTicker[ticker]], axis=1, join='outer')

    # Google returns data w/ most recent at the top, this puts data in chrono order
    stockPrices = df.sort_index()
    print('Pulled data for {0} stocks from {1} to {2}'.format(
        len(stockPrices.columns), startDate.strftime('%Y%m%d'),
        endDate.strftime('%Y%m%d')))
    return stockPrices


def buildDummyData():
    '''Builds Daily Price Data from a backup .csv file
    Used for offline testing purposes
    '''
    # Select Dates
    startDate = datetime.strptime('20120101', '%Y%m%d')
    endDate = datetime.strptime('20130101', '%Y%m%d')

    # Load dataset from .csv
    print("Pulling Market Data from .csv")
    dataLoc = os.path.join(os.path.dirname(__file__), "Resources/SP500_daily_price_data.csv")
    df = pd.read_csv(dataLoc)

    # Convert strings to Datetime format
    df[df.columns[0]] = df[df.columns[0]].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
    df.index = df[df.columns[0]]
    df.drop(df.columns[0], axis=1, inplace=True)

    # Build Price Table
    stockPrices = df[startDate:endDate]
    print('Pulled data for {0} stocks from {1} to {2}'.format(
        len(stockPrices.columns), startDate.strftime('%Y%m%d'),
        endDate.strftime('%Y%m%d')))
    return stockPrices


def createIndexedPricing(stockPrices, startingIndexValue):
    '''Takes a stock prices table and converts to indexed pricing
    (i.e. all prices are relative based on a common starting index value)
    Inputs:
        stockPrices => a pandas DataFrame
        startingIndexValue => the value that all prices will start at
    '''
    # Build Returns Table
    stockReturns = stockPrices.pct_change(1)

    # Build Indexed Price Table (indexed to 100)
    indexedPrices = stockReturns + 1
    indexedPrices.iloc[0] = startingIndexValue
    indexedPrices = indexedPrices.cumprod(axis=0)
    return indexedPrices
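createIndexedPricing only needs a pandas DataFrame of prices, so it can be exercised offline (the Google Finance endpoints used above have long been retired). A small sketch with made-up tickers and prices:

# Offline sketch of createIndexedPricing; 'AAA'/'BBB' and the prices are invented.
import pandas as pd

prices = pd.DataFrame(
    {'AAA': [10.0, 11.0, 12.1], 'BBB': [50.0, 45.0, 54.0]},
    index=pd.date_range('2012-01-02', periods=3))
indexed = createIndexedPricing(prices, 100)
print(indexed)  # both columns start at 100, then move by each day's return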
license: mit | hash: -1,957,792,777,954,780,700 | line_mean: 35.456522 | line_max: 146 | alpha_frac: 0.669384 | autogenerated: false | ratio: 3.389488 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: caspartse/QQ-Groups-Spider | path: vendor/pyexcel/constants.py | copies: 1 | size: 3090
""" pyexcel.constants ~~~~~~~~~~~~~~~~~~~ Constants appeared in pyexcel :copyright: (c) 2015-2017 by Onni Software Ltd. :license: New BSD License """ # flake8: noqa DEFAULT_NA = '' DEFAULT_NAME = 'pyexcel sheet' DEFAULT_SHEET_NAME = 'pyexcel_sheet1' MESSAGE_WARNING = "We do not overwrite files" MESSAGE_WRITE_ERROR = "Cannot write sheet" MESSAGE_ERROR_02 = "No valid parameters found!" MESSAGE_DATA_ERROR_NO_SERIES = "No column names or row names found" MESSAGE_DATA_ERROR_EMPTY_COLUMN_LIST = "Column list is empty. Do not waste resource" MESSAGE_DATA_ERROR_COLUMN_LIST_INTEGER_TYPE = "Column list should be a list of integers" MESSAGE_DATA_ERROR_COLUMN_LIST_STRING_TYPE = "Column list should be a list of integers" MESSAGE_INDEX_OUT_OF_RANGE = "Index out of range" MESSAGE_DATA_ERROR_EMPTY_CONTENT = "Nothing to be pasted!" MESSAGE_DATA_ERROR_DATA_TYPE_MISMATCH = "Data type mismatch" MESSAGE_DATA_ERROR_ORDEREDDICT_IS_EXPECTED = "Please give a ordered list" MESSAGE_DEPRECATED_ROW_COLUMN = "Deprecated usage. Please use [row, column]" MESSAGE_DEPRECATED_OUT_FILE = "Depreciated usage of 'out_file'. please use dest_file_name" MESSAGE_DEPRECATED_CONTENT = "Depreciated usage of 'content'. please use file_content" MESSAGE_NOT_IMPLEMENTED_01 = "Please use attribute row or column to extend sheet" MESSAGE_NOT_IMPLEMENTED_02 = "Confused! What do you want to put as column names" MESSAGE_READONLY = "This attribute is readonly" MESSAGE_ERROR_NO_HANDLER = "No suitable plugins imported or installed" MESSAGE_UNKNOWN_IO_OPERATION = "Internal error: an illegal source action" MESSAGE_UPGRADE = "Please upgrade the plugin '%s' according to \ plugin compactibility table." _IMPLEMENTATION_REMOVED = "Deprecated since 0.3.0! Implementation removed" IO_FILE_TYPE_DOC_STRING = """ Get/Set data in/from {0} format You could obtain content in {0} format by dot notation:: {1}.{0} And you could as well set content by dot notation:: {1}.{0} = the_io_stream_in_{0}_format if you need to pass on more parameters, you could use:: {1}.get_{0}(**keywords) {1}.set_{0}(the_io_stream_in_{0}_format, **keywords) """ OUT_FILE_TYPE_DOC_STRING = """ Get data in {0} format You could obtain content in {0} format by dot notation:: {1}.{0} if you need to pass on more parameters, you could use:: {1}.get_{0}(**keywords) """ IN_FILE_TYPE_DOC_STRING = """ Set data in {0} format You could set content in {0} format by dot notation:: {1}.{0} if you need to pass on more parameters, you could use:: {1}.set_{0}(the_io_stream_in_{0}_format, **keywords) """ VALID_SHEET_PARAMETERS = ['name_columns_by_row', 'name_rows_by_column', 'colnames', 'rownames', 'transpose_before', 'transpose_after'] # for sources # targets SOURCE = 'source' SHEET = 'sheet' BOOK = 'book' # actions READ_ACTION = 'read' WRITE_ACTION = 'write' RW_ACTION = 'read-write' FILE_TYPE_NOT_SUPPORTED_FMT = "File type '%s' is not supported for %s."
license: mit | hash: 4,937,172,543,752,419,000 | line_mean: 31.1875 | line_max: 90 | alpha_frac: 0.680583 | autogenerated: false | ratio: 3.39934 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: rarcotvmw/capirca | path: lib/pcap.py | copies: 1 | size: 15928
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Pcap filter generator.

This generates a pcap packet filter expression that either:
1) Matches (i.e., captures) the packets that match the ACCEPT clauses
   specified in a given policy, or
2) Matches the opposite of that, i.e., the DENY or REJECT clauses.
Supports tcp flags matching and icmptypes, including ipv6/icmpv6, but not
much else past the standard address, port, and protocol conditions.

Note that this is still alpha and will likely require more testing prior to
having more confidence in it.

Stolen liberally from packetfilter.py.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import datetime

from lib import aclgenerator
from lib import nacaddr
import logging


class Error(Exception):
  """Base error class."""


class UnsupportedActionError(Error):
  """Raised when we see an unsupported action."""


class UnsupportedTargetOption(Error):
  """Raised when we see an unsupported option."""


class Term(aclgenerator.Term):
  """Generate pcap filter to match a policy term."""

  _PLATFORM = 'pcap'
  _ACTION_TABLE = {
      'accept': '',
      'deny': '',
      'reject': '',
      'next': '',
      }

  _TCP_FLAGS_TABLE = {
      'syn': 'tcp-syn',
      'ack': 'tcp-ack',
      'fin': 'tcp-fin',
      'rst': 'tcp-rst',
      'urg': 'tcp-urg',
      'psh': 'tcp-push',
      'all': '(tcp-syn|tcp-ack|tcp-fin|tcp-rst|tcp-urg|tcp-push)',
      'none': '(tcp-syn&tcp-ack&tcp-fin&tcp-rst&tcp-urg&tcp-push)',
      }

  _PROTO_TABLE = {
      'ah': 'proto \\ah',
      'esp': 'proto \\esp',
      'icmp': 'proto \\icmp',
      'icmpv6': 'icmp6',
      'ip': 'proto \\ip',
      'ip6': 'ip6',
      'igmp': 'proto \\igmp',
      'igrp': 'igrp',
      'pim': 'proto \\pim',
      'tcp': 'proto \\tcp',
      'udp': 'proto \\udp',
      # bpf supports "\vrrp", but some winpcap versions don't recognize it,
      # so use the IANA protocol number for it:
      'vrrp': 'proto 112',
      'hopopt': 'ip6 protochain 0',
      }

  def __init__(self, term, filter_name, af='inet', direction=''):
    """Setup a new term.

    Args:
      term: A policy.Term object to represent in packetfilter.
      filter_name: The name of the filter chain to attach the term to.
      af: Which address family ('inet' or 'inet6') to apply the term to.
      direction: Direction of the flow.

    Raises:
      aclgenerator.UnsupportedFilterError: Filter is not supported.
    """
    super(Term, self).__init__(term)
    self.term = term  # term object
    self.filter = filter_name  # actual name of filter
    self.options = []
    self.default_action = 'deny'
    self.af = af
    self.direction = direction

  def __str__(self):
    """Render config output from this term object."""
    # Verify platform specific terms. Skip whole term if platform does not
    # match.
    if self.term.platform:
      if self._PLATFORM not in self.term.platform:
        return ''
    if self.term.platform_exclude:
      if self._PLATFORM in self.term.platform_exclude:
        return ''

    conditions = []

    # if terms does not specify action, use filter default action
    if not self.term.action:
      self.term.action[0].value = self.default_action
    if str(self.term.action[0]) not in self._ACTION_TABLE:
      raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
          '\n', self.term.name, self.term.action[0],
          'action not currently supported.'))

    # source address
    term_saddrs = self._CheckAddressAf(self.term.source_address)
    if not term_saddrs:
      logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
                                                   direction='source',
                                                   af=self.af))
      return ''
    conditions.append(self._GenerateAddrStatement(
        term_saddrs, self.term.source_address_exclude))

    # destination address
    term_daddrs = self._CheckAddressAf(self.term.destination_address)
    if not term_daddrs:
      logging.debug(self.NO_AF_LOG_ADDR.substitute(term=self.term.name,
                                                   direction='destination',
                                                   af=self.af))
      return ''
    conditions.append(self._GenerateAddrStatement(
        term_daddrs, self.term.destination_address_exclude))

    # protocol
    if self.term.protocol_except:
      raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
          '\n', self.term.name,
          'protocol_except logic not currently supported.'))
    conditions.append(self._GenerateProtoStatement(self.term.protocol))

    conditions.append(self._GeneratePortStatement(
        self.term.source_port, 'src'))
    conditions.append(self._GeneratePortStatement(
        self.term.destination_port, 'dst'))

    # icmp-type
    icmp_types = ['']
    if self.term.icmp_type:
      if self.af != 'mixed':
        af = self.af
      elif self.term.protocol == ['icmp']:
        af = 'inet'
      elif self.term.protocol == ['icmp6']:
        af = 'inet6'
      else:
        raise aclgenerator.UnsupportedFilterError('%s %s %s' % (
            '\n', self.term.name,
            'icmp protocol is not defined or not supported.'))
      icmp_types = self.NormalizeIcmpTypes(
          self.term.icmp_type, self.term.protocol, af)

    if 'icmp' in self.term.protocol:
      conditions.append(self._GenerateIcmpType(icmp_types,
                                               self.term.icmp_code))

    # tcp options
    if 'tcp' in self.term.protocol:
      conditions.append(self._GenerateTcpOptions(self.term.option))

    cond = Term.JoinConditionals(conditions, 'and')

    # Note that directionally-based pcap filter requires post-processing to
    # replace 'localhost' with whatever the IP(s) of the local machine happen
    # to be. This bit of logic ensures there's a placeholder with the
    # appropriate booleans around it. We also have to check that there exists
    # some form of condition already, else we'll end up with something overly
    # broad like 'dst net localhost' (e.g., 'default-deny').
    if cond and self.direction == 'in':
      cond = Term.JoinConditionals(['dst net localhost', cond], 'and')
    elif cond and self.direction == 'out':
      cond = Term.JoinConditionals(['src net localhost', cond], 'and')

    return cond + '\n'

  def _CheckAddressAf(self, addrs):
    """Verify that the requested address-family matches the address's family."""
    if not addrs:
      return ['any']
    if self.af == 'mixed':
      return addrs
    af_addrs = []
    af = self.NormalizeAddressFamily(self.af)
    for addr in addrs:
      if addr.version == af:
        af_addrs.append(addr)
    return af_addrs

  @staticmethod
  def JoinConditionals(condition_list, operator):
    """Join conditionals using the specified operator.

    Filters out empty elements and blank strings.

    Args:
      condition_list: a list of str()-able items to join.
      operator: the join string.

    Returns:
      A string consisting of the joined elements. If all elements are False
      or whitespace-only, the empty string.
    """
    condition_list = filter(None, condition_list)
    condition_list = [str(x).strip(' ') for x in condition_list
                      if str(x).strip()]
    if not condition_list:
      return ''

    op = ' %s ' % (operator)
    res = '(%s)' % (op.join(condition_list))
    return res

  def _GenerateAddrStatement(self, addrs, exclude_addrs):
    addrlist = []
    for d in nacaddr.CollapseAddrListRecursive(addrs):
      if d != 'any' and str(d) != '::/0':
        addrlist.append('dst net %s' % (d))

    excludes = []
    if exclude_addrs:
      for d in nacaddr.CollapseAddrListRecursive(exclude_addrs):
        if d != 'any' and str(d) != '::/0':
          excludes.append('not dst net %s' % (d))
        else:
          # excluding 'any' doesn't really make sense ...
          return ''

    if excludes:
      return Term.JoinConditionals(
          [Term.JoinConditionals(addrlist, 'or'),
           Term.JoinConditionals(excludes, 'or')], 'and not')
    else:
      return Term.JoinConditionals(addrlist, 'or')

  def _GenerateProtoStatement(self, protocols):
    return Term.JoinConditionals(
        [self._PROTO_TABLE[p] for p in protocols], 'or')

  def _GeneratePortStatement(self, ports, direction):
    conditions = []
    # term.destination_port is a list of tuples containing the start and end
    # ports of the port range. In the event it is a single port, the start
    # and end ports are the same.
    for port_tuple in ports:
      if port_tuple[0] == port_tuple[1]:
        conditions.append('%s port %s' % (direction, port_tuple[0]))
      else:
        conditions.append('%s portrange %s-%s' % (
            direction, port_tuple[0], port_tuple[1]))
    return Term.JoinConditionals(conditions, 'or')

  def _GenerateTcpOptions(self, options):
    opts = [str(x) for x in options]
    tcp_flags_set = []
    tcp_flags_check = []
    for next_opt in opts:
      if next_opt == 'tcp-established':
        tcp_flags_set.append(self._TCP_FLAGS_TABLE['ack'])
        tcp_flags_check.extend([self._TCP_FLAGS_TABLE['ack']])
      else:
        # Iterate through flags table, and create list of tcp-flags to append
        for next_flag in self._TCP_FLAGS_TABLE:
          if next_opt.find(next_flag) == 0:
            tcp_flags_check.append(self._TCP_FLAGS_TABLE.get(next_flag))
            tcp_flags_set.append(self._TCP_FLAGS_TABLE.get(next_flag))

    if tcp_flags_check:
      return '(tcp[tcpflags] & (%s) == (%s))' % ('|'.join(tcp_flags_check),
                                                 '|'.join(tcp_flags_set))
    return ''

  def _GenerateIcmpType(self, icmp_types, icmp_code):
    rtr_str = ''
    if icmp_types:
      code_strings = ['']
      if icmp_code:
        code_strings = [' and icmp[icmpcode] == %d' % code
                        for code in icmp_code]
      rtr_str = Term.JoinConditionals(
          ['icmp[icmptype] == %d%s' % (x, y)
           for y in code_strings for x in icmp_types], 'or')
    return rtr_str


class PcapFilter(aclgenerator.ACLGenerator):
  """Generates filters and terms from provided policy object.

  Note that since pcap isn't actually a firewall grammar, this generates a
  filter that only matches that which would be accepted by the specified
  policy.
  """

  _PLATFORM = 'pcap'
  _DEFAULT_PROTOCOL = 'all'
  SUFFIX = '.pcap'
  _TERM = Term

  def __init__(self, *args, **kwargs):
    """Initialize a PcapFilter generator.

    Takes standard ACLGenerator arguments, as well as an 'invert' kwarg. If
    this argument is true, the pcap filter will be reversed, such that it
    matches all those packets that would be denied by the specified policy.
    """
    self._invert = False
    if 'invert' in kwargs:
      self._invert = kwargs['invert']
      del kwargs['invert']
    super(PcapFilter, self).__init__(*args, **kwargs)

  def _BuildTokens(self):
    """Build supported tokens for platform.

    Returns:
      tuple containing both supported tokens and sub tokens
    """
    supported_tokens, supported_sub_tokens = super(
        PcapFilter, self)._BuildTokens()
    supported_tokens |= {'logging', 'icmp_code'}
    supported_tokens -= {'verbatim'}

    supported_sub_tokens.update(
        {'action': {'accept', 'deny', 'reject', 'next'},
         'option': {
             'tcp-established',
             'established',
             'syn', 'ack', 'fin', 'rst', 'urg', 'psh', 'all', 'none'},
        })
    return supported_tokens, supported_sub_tokens

  def _TranslatePolicy(self, pol, exp_info):
    self.pcap_policies = []
    current_date = datetime.datetime.utcnow().date()
    exp_info_date = current_date + datetime.timedelta(weeks=exp_info)

    good_afs = ['inet', 'inet6', 'mixed']
    good_options = ['in', 'out']
    direction = ''

    for header, terms in pol.filters:
      filter_type = None
      if self._PLATFORM not in header.platforms:
        continue

      filter_options = header.FilterOptions(self._PLATFORM)[1:]
      filter_name = header.FilterName(self._PLATFORM)

      # ensure all options after the filter name are expected
      for opt in filter_options:
        if opt not in good_afs + good_options:
          raise UnsupportedTargetOption('%s %s %s %s' % (
              '\nUnsupported option found in', self._PLATFORM,
              'target definition:', opt))

      if 'in' in filter_options:
        direction = 'in'
      elif 'out' in filter_options:
        direction = 'out'

      # Check for matching af
      for address_family in good_afs:
        if address_family in filter_options:
          # should not specify more than one AF in options
          if filter_type is not None:
            raise aclgenerator.UnsupportedFilterError('%s %s %s %s' % (
                '\nMay only specify one of', good_afs,
                'in filter options:', filter_options))
          filter_type = address_family
      if filter_type is None:
        filter_type = 'mixed'

      # add the terms
      accept_terms = []
      deny_terms = []
      term_names = set()
      for term in terms:
        if term.name in term_names:
          raise aclgenerator.DuplicateTermError(
              'You have a duplicate term: %s' % term.name)

        if term.expiration:
          if term.expiration <= exp_info_date:
            logging.info('INFO: Term %s in policy %s expires '
                         'in less than two weeks.', term.name, filter_name)
          if term.expiration <= current_date:
            logging.warn('WARNING: Term %s in policy %s is expired and '
                         'will not be rendered.', term.name, filter_name)
            continue

        if not term:
          continue

        if term.action[0] == 'accept':
          accept_terms.append(self._TERM(term, filter_name, filter_type,
                                         direction))
        elif term.action[0] == 'deny' or term.action[0] == 'reject':
          deny_terms.append(self._TERM(term, filter_name, filter_type,
                                       direction))

      self.pcap_policies.append((header, filter_name, filter_type,
                                 accept_terms, deny_terms))

  def __str__(self):
    """Render the output of the PF policy into config."""
    target = []

    for (unused_header, unused_filter_name, unused_filter_type, accept_terms,
         deny_terms) in self.pcap_policies:
      accept = []
      for term in accept_terms:
        term_str = str(term)
        if term_str:
          accept.append(str(term))
      accept_clause = Term.JoinConditionals(accept, 'and')

      deny = []
      for term in deny_terms:
        term_str = str(term)
        if term_str:
          deny.append(str(term))
      deny_clause = Term.JoinConditionals(deny, 'and')

      if self._invert:
        target.append(
            Term.JoinConditionals([deny_clause, accept_clause], 'and not'))
      else:
        target.append(
            Term.JoinConditionals([accept_clause, deny_clause], 'and not'))

    return '\nor\n'.join(target) + '\n'
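A sketch of driving this generator the way capirca generators are usually driven (the './def' definitions directory and the .pol file path are assumptions, not from this repository):

# Sketch in the usual capirca style; paths and policy file are assumptions.
from lib import naming
from lib import policy
from lib import pcap

defs = naming.Naming('./def')  # network / service definition files
pol = policy.ParsePolicy(open('./policies/sample.pol').read(), defs)

print(pcap.PcapFilter(pol, 2))               # capture what the policy accepts
print(pcap.PcapFilter(pol, 2, invert=True))  # capture what it denies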
license: apache-2.0 | hash: 6,208,498,579,205,639,000 | line_mean: 32.674419 | line_max: 80 | alpha_frac: 0.604847 | autogenerated: false | ratio: 3.881092 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: gmr/infoblox | path: infoblox/record.py | copies: 1 | size: 15975
""" Base Record Object """ import logging from infoblox import exceptions from infoblox import mapping LOGGER = logging.getLogger(__name__) class Record(mapping.Mapping): """This object is extended by specific Infoblox record types and implements the core API behavior of a record class. Attributes that map to other infoblox records will be instances of those record types. :param infoblox.Session session: The infoblox session object :param str reference_id: The infoblox _ref value for the record :param dict kwargs: Key-value pairs that when passed in, if the a key matches an attribute of the record, the value will be assigned. """ view = 'default' _ref = None _repr_keys = ['_ref'] _return_ignore = ['view'] _save_ignore = [] _search_by = [] _session = None _supports = [] _wapi_type = 'record' def __init__(self, session, reference_id=None, **kwargs): """Create a new instance of the Record passing in the Infoblox session object and the reference id for the record. """ super(Record, self).__init__(**kwargs) self._session = session self._ref = reference_id self._search_values = self._build_search_values(kwargs) if self._ref or self._search_values: self.fetch() def __repr__(self): return '<%s %s>' % (self.__class__.__name__, ' '.join(['%s=%s' % (key, getattr(self, key)) for key in self._repr_keys])) def delete(self): """Remove the item from the infoblox server. :rtype: bool :raises: AssertionError :raises: ValueError :raises: infoblox.exceptions.ProtocolError """ if not self._ref: raise ValueError('Object has no reference id for deletion') if 'save' not in self._supports: raise AssertionError('Can not save this object type') response = self._session.delete(self._path) if response.status_code == 200: self._ref = None self.clear() return True try: error = response.json() raise exceptions.ProtocolError(error['text']) except ValueError: raise exceptions.ProtocolError(response.content) def fetch(self): """Attempt to fetch the object from the Infoblox device. If successful the object will be updated and the method will return True. :rtype: bool :raises: infoblox.exceptions.ProtocolError """ LOGGER.debug('Fetching %s, %s', self._path, self._search_values) response = self._session.get(self._path, self._search_values, {'_return_fields': self._return_fields}) if response.status_code == 200: values = response.json() self._assign(values) return bool(values) elif response.status_code >= 400: try: error = response.json() raise exceptions.ProtocolError(error['text']) except ValueError: raise exceptions.ProtocolError(response.content) return False def reference_id(self): """Return a read-only handle for the reference_id of this object. """ return str(self._ref) def save(self): """Update the infoblox with new values for the specified object, or add the values if it's a new object all together. 
:raises: AssertionError :raises: infoblox.exceptions.ProtocolError """ if 'save' not in self._supports: raise AssertionError('Can not save this object type') values = {} for key in [key for key in self.keys() if key not in self._save_ignore]: if not getattr(self, key) and getattr(self, key) != False: continue if isinstance(getattr(self, key, None), list): value = list() for item in getattr(self, key): if isinstance(item, dict): value.append(item) elif hasattr(item, '_save_as'): value.append(item._save_as()) elif hasattr(item, '_ref') and getattr(item, '_ref'): value.append(getattr(item, '_ref')) else: LOGGER.warning('Cant assign %r', item) values[key] = value elif getattr(self, key, None): values[key] = getattr(self, key) if not self._ref: response = self._session.post(self._path, values) else: values['_ref'] = self._ref response = self._session.put(self._path, values) LOGGER.debug('Response: %r, %r', response.status_code, response.content) if 200 <= response.status_code <= 201: self.fetch() return True else: try: error = response.json() raise exceptions.ProtocolError(error['text']) except ValueError: raise exceptions.ProtocolError(response.content) def _assign(self, values): """Assign the values passed as either a dict or list to the object if the key for each value matches an available attribute on the object. :param dict values: The values to assign """ LOGGER.debug('Assigning values: %r', values) if not values: return keys = self.keys() if not self._ref: keys.append('_ref') if isinstance(values, dict): for key in keys: if values.get(key): if isinstance(values.get(key), list): items = list() for item in values[key]: if isinstance(item, dict): if '_ref' in item: obj_class = get_class(item['_ref']) if obj_class: items.append(obj_class(self._session, **item)) else: items.append(item) setattr(self, key, items) else: setattr(self, key, values[key]) elif isinstance(values, list): self._assign(values[0]) else: LOGGER.critical('Unhandled return type: %r', values) def _build_search_values(self, kwargs): """Build the search criteria dictionary. It will first try and build the values from already set attributes on the object, falling back to the passed in kwargs. :param dict kwargs: Values to build the dict from :rtype: dict """ criteria = {} for key in self._search_by: if getattr(self, key, None): criteria[key] = getattr(self, key) elif key in kwargs and kwargs.get(key): criteria[key] = kwargs.get(key) return criteria @property def _path(self): return self._ref if self._ref else self._wapi_type @property def _return_fields(self): return ','.join([key for key in self.keys() if key not in self._return_ignore]) class Host(Record): """Implements the host record type. Example:: session = infoblox.Session(infoblox_host, infoblox_user, infoblox_password) host = infoblox.Host(session, name='foo.bar.net') """ aliases = [] comment = None configure_for_dns = True disable = False dns_aliases = [] dns_name = None extattrs = None ipv4addrs = [] ipv6addrs = [] name = None rrset_order = 'cyclic' ttl = None use_ttl = False zone = None _repr_keys = ['name', 'ipv4addrs', 'ipv6addrs'] _save_ignore = ['dns_name', 'host', 'zone'] _search_by = ['name', 'ipv4addr', 'ipv6addr', 'mac'] _supports = ['delete', 'save'] _wapi_type = 'record:host' def __init__(self, session, reference_id=None, name=None, **kwargs): """Create a new instance of a Host object. If a reference_id or valid search criteria are passed in, the object will attempt to load the values for the host from the Infoblox device. 
When creating a new host or adding an ip address, use the Host.add_ipv4_address and Host.add_ipv6_address methods:: host.add_ipv4addr('1.2.3.4') Valid search criteria: name, ipv4addr, ipv6addr, mac :param infobox.Session session: The established session object :param str reference_id: The Infoblox reference id for the host :param str host: The host's FQDN :param dict kwargs: Optional keyword arguments """ self.name = name super(Host, self).__init__(session, reference_id, **kwargs) def add_ipv4addr(self, ipv4addr): """Add an IPv4 address to the host. :param str ipv4addr: The IP address to add. :raises: ValueError """ for addr in self.ipv4addrs: if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or (isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)): raise ValueError('Already exists') self.ipv4addrs.append({'ipv4addr': ipv4addr}) def remove_ipv4addr(self, ipv4addr): """Remove an IPv4 address from the host. :param str ipv4addr: The IP address to remove """ for addr in self.ipv4addrs: if ((isinstance(addr, dict) and addr['ipv4addr'] == ipv4addr) or (isinstance(addr, HostIPv4) and addr.ipv4addr == ipv4addr)): self.ipv4addrs.remove(addr) break def add_ipv6addr(self, ipv6addr): """Add an IPv6 address to the host. :param str ipv6addr: The IP address to add. :raises: ValueError """ for addr in self.ipv6addrs: if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or (isinstance(addr, HostIPv4) and addr.ipv6addr == ipv6addr)): raise ValueError('Already exists') self.ipv6addrs.append({'ipv6addr': ipv6addr}) def remove_ipv6addr(self, ipv6addr): """Remove an IPv6 address from the host. :param str ipv6addr: The IP address to remove """ for addr in self.ipv6addrs: if ((isinstance(addr, dict) and addr['ipv6addr'] == ipv6addr) or (isinstance(addr, HostIPv4) and addr.ipv6addr == ipv6addr)): self.ipv6addrs.remove(addr) break class HostIPv4(Record): """Implements the host_ipv4addr record type. """ bootfile = None bootserver = None configure_for_dhcp = None deny_bootp = None discovered_data = None enable_pxe_lease_time = None host = None ignore_client_requested_options = None ipv4addr = None last_queried = None mac = None match_client = None network = None nextserver = None options = None pxe_lease_time = None use_bootfile = None use_bootserver = None use_deny_bootp = None use_for_ea_inheritance = None use_ignore_client_requested_options = None use_nextserver = None use_options = None use_pxe_lease_time = None _repr_keys = ['ipv4addr'] _search_by = ['ipv4addr'] _wapi_type = 'record:host_ipv4addr' def __init__(self, session, reference_id=None, ipv4addr=None, **kwargs): """Create a new instance of a HostIPv4 object. If a reference_id or valid search criteria are passed in, the object will attempt to load the values for the host_ipv4addr from the Infoblox device. Valid search criteria: ipv4addr :param infobox.Session session: The established session object :param str reference_id: The Infoblox reference id for the host :param str ipv4addr: The ipv4 address :param dict kwargs: Optional keyword arguments """ self.ipv4addr = str(ipv4addr) super(HostIPv4, self).__init__(session, reference_id, **kwargs) def _save_as(self): return {'ipv4addr': self.ipv4addr} class HostIPv6(Record): """Implements the host_ipv6addr record type. 
""" address_type = None configure_for_dhcp = True discovered_data = None domain_name = None domain_name_servers = [] duid = None host = None ipv6addr = None ipv6bits = None ipv6prefix_bits = None match_client = None options = None preferred_lifetime = 27000 use_domain_name = False use_domain_name_servers = False use_for_ea_inheritance = False use_options = False use_valid_lifetime = False valid_lifetime = 43200 _repr_keys = ['ipv6addr', 'ipv6bits', 'ipv6prefix_bits'] _save_ignore = ['host'] _search_by = ['ipv6addr'] _wapi_type = 'record:host_ipv6addr' def __init__(self, session, reference_id=None, ipv6addr=None, ipv6bits=None, ipv6prefix_bits=None, **kwargs): """Create a new instance of a HostIPv6 object. If a reference_id or valid search criteria are passed in, the object will attempt to load the values for the host_ipv6addr from the Infoblox device. Valid search criteria: ipv6addr :param infobox.Session session: The established session object :param str reference_id: The Infoblox reference id for the host :param str ipv6addr: The ipv6 address :param str ipv6bits: The ipv6 address bit count :param str ipv6prefix_bits: The ipv6 address prefix bit count :param dict kwargs: Optional keyword arguments """ self.ipv6addr = str(ipv6addr) self.ipv6bits = str(ipv6bits) self.ipv6prefix_bits = str(ipv6prefix_bits) super(HostIPv6, self).__init__(session, reference_id, **kwargs) def _save_as(self): return {'ipv6addr': self.ipv6addr, 'ipv6bits': self.ipv6bits, 'ipv6prefix_bits': self.ipv6prefix_bits} class IPv4Address(Record): """Implements the ipv4address record type. """ dhcp_client_identifier = None extattrs = None fingerprint = None ip_address = None is_conflict = None lease_state = None mac_address = None names = None network = None network_view = None objects = None status = None types = None usage = None username = None _repr_keys = ['ip_address'] _search_by = ['ip_address'] _supports = ['fetch', 'put'] _wapi_type = 'record:host_ipv4addr' def __init__(self, session, reference_id=None, ipv4addr=None, **kwargs): """Create a new instance of a HostIPv4 object. If a reference_id or valid search criteria are passed in, the object will attempt to load the values for the host_ipv4addr from the Infoblox device. Valid search criteria: ipv4addr :param infobox.Session session: The established session object :param str reference_id: The Infoblox reference id for the host :param str ipv4addr: The ipv4 address :param dict kwargs: Optional keyword arguments """ self.ipv4addr = str(ipv4addr) super(IPv4Address, self).__init__(session, reference_id, **kwargs) def get_class(reference): class_name = reference.split('/')[0].split(':')[1] LOGGER.debug('Class: %s', class_name) return CLASS_MAP.get(class_name) CLASS_MAP = {'host': Host, 'host_ipv4addr': HostIPv4, 'host_ipv6addr': HostIPv6, 'ipv4address': IPv4Address}
license: bsd-3-clause | hash: -1,247,889,201,822,977,500 | line_mean: 32.420502 | line_max: 80 | alpha_frac: 0.57759 | autogenerated: false | ratio: 4.168841 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: aleju/self-driving-truck | path: lib/plotting.py | copies: 1 | size: 13772
"""Classes to handle plotting during the training.""" from __future__ import print_function, division import math import cPickle as pickle from collections import OrderedDict import numpy as np import matplotlib.pyplot as plt import time GROWTH_BY = 500 class History(object): def __init__(self): self.line_groups = OrderedDict() @staticmethod def from_string(s): return pickle.loads(s) def to_string(self): return pickle.dumps(self, protocol=-1) @staticmethod def load_from_filepath(fp): #return json.loads(open(, "r").read()) with open(fp, "r") as f: history = pickle.load(f) return history def save_to_filepath(self, fp): with open(fp, "w") as f: pickle.dump(self, f, protocol=-1) def add_group(self, group_name, line_names, increasing=True): self.line_groups[group_name] = LineGroup(group_name, line_names, increasing=increasing) def add_value(self, group_name, line_name, x, y, average=False): self.line_groups[group_name].lines[line_name].append(x, y, average=average) def get_group_names(self): return list(self.line_groups.iterkeys()) def get_groups_increasing(self): return [group.increasing for group in self.line_groups.itervalues()] def get_max_x(self): return max([group.get_max_x() for group in self.line_groups.itervalues()]) def get_recent_average(self, group_name, line_name, nb_points): ys = self.line_groups[group_name].lines[line_name].ys[-nb_points:] return np.average(ys) class LineGroup(object): def __init__(self, group_name, line_names, increasing=True): self.group_name = group_name self.lines = OrderedDict([(name, Line()) for name in line_names]) self.increasing = increasing self.xlim = (None, None) def get_line_names(self): return list(self.lines.iterkeys()) def get_line_xs(self): #return [line.xs for line in self.lines.itervalues()] """ for key, line in self.lines.items(): if not hasattr(line, "last_index"): print(self.group_name, key, "no last index") else: print(self.group_name, key, "OK") print(type(line.xs), type(line.ys), type(line.counts), type(line.datetimes)) """ return [line.get_xs() for line in self.lines.itervalues()] def get_line_ys(self): #return [line.ys for line in self.lines.itervalues()] return [line.get_ys() for line in self.lines.itervalues()] def get_max_x(self): #return max([max(line.xs) if len(line.xs) > 0 else 0 for line in self.lines.itervalues()]) return max([np.maximum(line.get_xs()) if line.last_index > -1 else 0 for line in self.lines.itervalues()]) """ class Line(object): def __init__(self, xs=None, ys=None, counts=None, datetimes=None): self.xs = xs if xs is not None else [] self.ys = ys if ys is not None else [] self.counts = counts if counts is not None else [] self.datetimes = datetimes if datetimes is not None else [] self.last_index = -1 def append(self, x, y, average=False): # legacy (for loading from pickle) #if not hasattr(self, "counts"): # self.counts = [1] * len(self.xs) # --- if not average or len(self.xs) == 0 or self.xs[-1] != x: self.xs.append(x) self.ys.append(float(y)) # float to get rid of numpy self.counts.append(1) self.datetimes.append(time.time()) else: count = self.counts[-1] self.ys[-1] = ((self.ys[-1] * count) + y) / (count+1) self.counts[-1] += 1 self.datetimes[-1] = time.time() """ class Line(object): def __init__(self, xs=None, ys=None, counts=None, datetimes=None): zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY) self.xs = xs if xs is not None else np.copy(zeros) self.ys = ys if ys is not None else zeros.astype(np.float32) self.counts = counts if counts is not None else zeros.astype(np.uint16) self.datetimes = 
datetimes if datetimes is not None else zeros.astype(np.uint64) self.last_index = -1 # for legacy as functions, replace with properties def get_xs(self): # legacy if isinstance(self.xs, list): self._legacy_convert_from_list_to_np() return self.xs[0:self.last_index+1] def get_ys(self): return self.ys[0:self.last_index+1] def get_counts(self): return self.counts[0:self.last_index+1] def get_datetimes(self): return self.datetimes[0:self.last_index+1] def _legacy_convert_from_list_to_np(self): #print("is list!") print("[plotting] Converting from list to numpy...") self.last_index = len(self.xs) - 1 self.xs = np.array(self.xs, dtype=np.int32) self.ys = np.array(self.ys, dtype=np.float32) self.counts = np.array(self.counts, dtype=np.uint16) self.datetimes = np.array([int(dt*1000) for dt in self.datetimes], dtype=np.uint64) def append(self, x, y, average=False): # legacy (for loading from pickle) #if not hasattr(self, "counts"): # self.counts = [1] * len(self.xs) # --- #legacy if isinstance(self.xs, list): self._legacy_convert_from_list_to_np() if (self.last_index+1) == self.xs.shape[0]: #print("growing from %d by %d..." % (self.xs.shape[0], GROWTH_BY), self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape) zeros = np.tile(np.array([0], dtype=np.int32), GROWTH_BY) self.xs = np.append(self.xs, np.copy(zeros)) self.ys = np.append(self.ys, zeros.astype(np.float32)) self.counts = np.append(self.counts, zeros.astype(np.uint16)) self.datetimes = np.append(self.datetimes, zeros.astype(np.uint64)) #print("growing done", self.xs.shape, self.ys.shape, self.counts.shape, self.datetimes.shape) first_entry = (self.last_index == -1) if not average or first_entry or self.xs[self.last_index] != x: idx = self.last_index + 1 self.xs[idx] = x self.ys[idx] = y self.counts[idx] = 1 self.datetimes[idx] = int(time.time()*1000) self.last_index = idx else: idx = self.last_index count = self.counts[idx] self.ys[idx] = ((self.ys[idx] * count) + y) / (count+1) self.counts[idx] = count + 1 self.datetimes[idx] = int(time.time()*1000) #print("added", x, y, average) #print(self.xs[self.last_index-10:self.last_index+10+1]) #print(self.ys[self.last_index-10:self.last_index+10+1]) #print(self.counts[self.last_index-10:self.last_index+10+1]) #print(self.datetimes[self.last_index-10:self.last_index+10+1]) class LossPlotter(object): def __init__(self, titles, increasing, save_to_fp): assert len(titles) == len(increasing) n_plots = len(titles) self.titles = titles self.increasing = dict([(title, incr) for title, incr in zip(titles, increasing)]) self.xlim = dict([(title, (None, None)) for title in titles]) self.colors = ["red", "blue", "cyan", "magenta", "orange", "black"] self.nb_points_max = 500 self.save_to_fp = save_to_fp self.start_batch_idx = 0 self.autolimit_y = False self.autolimit_y_multiplier = 5 #self.fig, self.axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 20)) nrows = max(1, int(math.sqrt(n_plots))) ncols = int(math.ceil(n_plots / nrows)) width = ncols * 10 height = nrows * 10 self.fig, self.axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(width, height)) if nrows == 1 and ncols == 1: self.axes = [self.axes] else: self.axes = self.axes.flat title_to_ax = dict() for idx, (title, ax) in enumerate(zip(self.titles, self.axes)): title_to_ax[title] = ax self.title_to_ax = title_to_ax self.fig.tight_layout() self.fig.subplots_adjust(left=0.05) def plot(self, history): for plot_idx, title in enumerate(self.titles): ax = self.title_to_ax[title] group_name = title group_increasing = self.increasing[title] 
group = history.line_groups[title] line_names = group.get_line_names() #print("getting line x/y...", time.time()) line_xs = group.get_line_xs() line_ys = group.get_line_ys() #print("getting line x/y FIN", time.time()) """ print("title", title) print("line_names", line_names) for i, xx in enumerate(line_xs): print("line_xs i: ", xx) for i, yy in enumerate(line_ys): print("line_ys i: ", yy) """ if any([len(xx) > 0 for xx in line_xs]): xs_min = min([min(xx) for xx in line_xs if len(xx) > 0]) xs_max = max([max(xx) for xx in line_xs if len(xx) > 0]) xlim = self.xlim[title] xlim = [ max(xs_min, self.start_batch_idx) if xlim[0] is None else min(xlim[0], xs_max-1), xs_max+1 if xlim[1] is None else xlim[1] ] if xlim[0] < 0: xlim[0] = max(xs_max - abs(xlim[0]), 0) if xlim[1] < 0: xlim[1] = max(xs_max - abs(xlim[1]), 1) else: # none of the lines has any value, so just use dummy values # to avoid min/max of empty sequence errors xlim = [ 0 if self.xlim[title][0] is None else self.xlim[title][0], 1 if self.xlim[title][1] is None else self.xlim[title][1] ] self._plot_group(ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim) self.fig.savefig(self.save_to_fp) # this seems to be slow sometimes def _line_to_xy(self, line_x, line_y, xlim, limit_y_min=None, limit_y_max=None): def _add_point(points_x, points_y, curr_sum, counter): points_x.append(batch_idx) y = curr_sum / counter if limit_y_min is not None and limit_y_max is not None: y = np.clip(y, limit_y_min, limit_y_max) elif limit_y_min is not None: y = max(y, limit_y_min) elif limit_y_max is not None: y = min(y, limit_y_max) points_y.append(y) nb_points = 0 for i in range(len(line_x)): batch_idx = line_x[i] if xlim[0] <= batch_idx < xlim[1]: nb_points += 1 point_every = max(1, int(nb_points / self.nb_points_max)) points_x = [] points_y = [] curr_sum = 0 counter = 0 for i in range(len(line_x)): batch_idx = line_x[i] if xlim[0] <= batch_idx < xlim[1]: curr_sum += line_y[i] counter += 1 if counter >= point_every: _add_point(points_x, points_y, curr_sum, counter) counter = 0 curr_sum = 0 if counter > 0: _add_point(points_x, points_y, curr_sum, counter) return points_x, points_y def _plot_group(self, ax, group_name, group_increasing, line_names, line_xs, line_ys, xlim): ax.cla() ax.grid() if self.autolimit_y and any([len(line_xs) > 0 for line_xs in line_xs]): min_x = min([np.min(line_x) for line_x in line_xs]) max_x = max([np.max(line_x) for line_x in line_xs]) min_y = min([np.min(line_y) for line_y in line_ys]) max_y = max([np.max(line_y) for line_y in line_ys]) if group_increasing: if max_y > 0: limit_y_max = None limit_y_min = max_y / self.autolimit_y_multiplier if min_y > limit_y_min: limit_y_min = None else: if min_y > 0: limit_y_max = min_y * self.autolimit_y_multiplier limit_y_min = None if max_y < limit_y_max: limit_y_max = None if limit_y_min is not None: ax.plot((min_x, max_x), (limit_y_min, limit_y_min), c="purple") if limit_y_max is not None: ax.plot((min_x, max_x), (limit_y_max, limit_y_max), c="purple") # y achse range begrenzen yaxmin = min_y if limit_y_min is None else limit_y_min yaxmax = max_y if limit_y_max is None else limit_y_max yrange = yaxmax - yaxmin yaxmin = yaxmin - (0.05 * yrange) yaxmax = yaxmax + (0.05 * yrange) ax.set_ylim([yaxmin, yaxmax]) else: limit_y_min = None limit_y_max = None for line_name, line_x, line_y, line_col in zip(line_names, line_xs, line_ys, self.colors): #print("line to xy...", time.time()) x, y = self._line_to_xy(line_x, line_y, xlim, limit_y_min=limit_y_min, limit_y_max=limit_y_max) 
#print("line to xy FIN", time.time()) #print("plotting ax...", time.time()) ax.plot(x, y, color=line_col, linewidth=1.0) #print("plotting ax FIN", time.time()) ax.set_title(group_name)
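A usage sketch of the History/LossPlotter pair (the group and line names, loss values and the output path are made up):

# Sketch: record a fake training curve, then render and save it.
history = History()
history.add_group("loss", ["train", "val"], increasing=False)
for batch_idx in range(1000):
    history.add_value("loss", "train", batch_idx, 1.0 / (1 + batch_idx))
    if batch_idx % 10 == 0:
        history.add_value("loss", "val", batch_idx, 1.2 / (1 + batch_idx))

plotter = LossPlotter(history.get_group_names(),
                      history.get_groups_increasing(),
                      save_to_fp="train_plot.png")
plotter.plot(history)  # redraws every group and saves the figure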
license: mit | hash: 3,102,420,107,428,294,000 | line_mean: 38.348571 | line_max: 149 | alpha_frac: 0.548141 | autogenerated: false | ratio: 3.368885 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: waile23/todo | path: models/pduser.py | copies: 1 | size: 2906
# -*- coding: utf-8 -*-
from basemodel import *
import md5
import math
import sys


class PDuser(BaseModel):
    '''model autocreate by createModel'''
    table_name = 'pd_user'
    #db_name = 'todo_local'
    db_name = web.config.write_db_name

    def _format_user(self, row):
        if hasattr(row, 'u_logo'):
            if not row.u_logo:
                row.u_logo = "/static/img/default_logo.png"
        return row

    def load_by_id(self, id, iscache=True, isformat=True):
        mkey = self.create_pri_cache_key(u_id=id)
        ret = BaseModel.memget(mkey)
        if not iscache or not ret:
            rows = self.reader().select(self.table_name, where="u_id=$uid",
                                        vars={"uid": id})
            for row in rows:
                if isformat:
                    ret = self._format_user(row)
                else:
                    ret = row
                break
            BaseModel.memset(mkey, ret)
        return ret

    def check_name(self, name, loginid=0):
        ret = self.reader().select(self.table_name,
                                   where="u_name=$name and u_id not in ($loginid)",
                                   vars={"name": name, "loginid": loginid})
        for v in ret:
            return True
        return False

    def check_name_count(self, name):
        ret = self.reader().select(self.table_name, what="count(1) as count",
                                   where="u_name=$name", vars={"name": name})
        for v in ret:
            return v.count
        return 0

    def check_email(self, email, loginid=0):
        ret = self.reader().select(self.table_name,
                                   where="u_email=$email and u_id not in ($loginid)",
                                   vars={"email": email, "loginid": loginid})
        for v in ret:
            return True
        return False

    def user_list(self, page=0, size=15, iscache=True, isformat=True):
        mkey = md5.new(self.__class__.__name__ + "." +
                       sys._getframe().f_code.co_name +
                       "_page_" + str(page) + "_size_" + str(size)).hexdigest()
        ret = BaseModel.memget(mkey)
        if not iscache or not ret:
            ret = []
            ret_i = self.reader().select(self.table_name,
                                         order="u_create_time desc",
                                         limit=size, offset=page * size)
            for row in ret_i:
                if isformat:
                    ret.append(self._format_user(row))
                else:
                    ret.append(row)
            BaseModel.memset(mkey, ret)
        return ret

    def loaduser_by_email(self, email):
        rows = self.reader().select(self.table_name, where="u_email=$email",
                                    vars={"email": email})
        ret = None
        for row in rows:
            ret = row
            break
        return ret

    def loaduser_by_social(self, fr, auth):
        # parametrized query; the original concatenated fr/auth directly into
        # the WHERE clause, which is open to SQL injection
        rows = self.reader().select(self.table_name,
                                    where="u_from=$fr and u_auth=$auth",
                                    vars={"fr": fr, "auth": auth})
        ret = None
        for row in rows:
            ret = row
            break
        return ret

    def insert_by_list(self, rows):
        ret = self.writer().multiple_insert(self.table_name, rows)
        for i in ret:
            self.memdel(self.create_pri_cache_key(u_id=i))
        return ret

    def update_by_insert(self, row):
        sql = ["update"]
        sql.append(self.table_name)
        sql.append("set")
        tmp = []
        for k in row:
            tmp.append(k + "=$" + k)
        sql.append(",".join(tmp))
        sql.append("where u_id=$u_id")
        sqlstr = " ".join(sql)
        self.writer().query(sqlstr, row)
        self.memdel(self.create_pri_cache_key(u_id=row.u_id))


pduser = PDuser()  # public instance
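A usage sketch of the model (assumes web.py and the BaseModel cache/database plumbing are configured; the id and email are placeholders):

# Usage sketch only; relies on the BaseModel reader/writer and memcache setup.
user = pduser.load_by_id(42)                 # cache-backed primary-key lookup
taken = pduser.check_email('a@example.com')  # True if the email is in use
recent = pduser.user_list(page=0, size=15)   # newest users, cached per page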
license: mit | hash: -6,926,692,520,643,417,000 | line_mean: 26.415094 | line_max: 136 | alpha_frac: 0.646249 | autogenerated: false | ratio: 2.698236 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: Michal-Fularz/codingame_solutions | path: codingame_solutions/medium/medium_The_Paranoid_Android.py | copies: 1 | size: 3099
__author__ = 'Amin'

# COMPLETED
# PYTHON 3.x

import sys
import math


class Floor:
    def __init__(self, width, contains_exit=False, exit_position=-1):
        self.width = width
        self.__contains_elevator = False
        self.__elevator_position = -1
        self.__contains_exit = contains_exit
        self.__exit_position = exit_position

    def add_exit(self, exit_position):
        self.__contains_exit = True
        self.__exit_position = exit_position

    def add_elevator(self, elevator_position):
        self.__contains_elevator = True
        self.__elevator_position = elevator_position

    def should_be_blocked(self, position, direction):
        flag_should_be_blocked = False
        if self.__contains_elevator:
            if position > self.__elevator_position and direction == "RIGHT" or \
                    position < self.__elevator_position and direction == "LEFT":
                flag_should_be_blocked = True
        elif self.__contains_exit:
            if position > self.__exit_position and direction == "RIGHT" or \
                    position < self.__exit_position and direction == "LEFT":
                flag_should_be_blocked = True
        return flag_should_be_blocked


class Drive:
    def __init__(self):
        self.floors = []
        self.load_from_input()

    def load_from_input(self):
        # nb_floors: number of floors
        # width: width of the area
        # nb_rounds: maximum number of rounds
        # exit_floor: floor on which the exit is found
        # exit_pos: position of the exit on its floor
        # nb_total_clones: number of generated clones
        # nb_additional_elevators: ignore (always zero)
        # nb_elevators: number of elevators
        nb_floors, width, nb_rounds, exit_floor, exit_pos, nb_total_clones, \
            nb_additional_elevators, nb_elevators = [int(i) for i in input().split()]

        for i in range(nb_floors):
            self.floors.append(Floor(width))
        self.floors[exit_floor].add_exit(exit_pos)

        for i in range(nb_elevators):
            # elevator_floor: floor on which this elevator is found
            # elevator_pos: position of the elevator on its floor
            elevator_floor, elevator_pos = [int(j) for j in input().split()]
            self.floors[elevator_floor].add_elevator(elevator_pos)


if __name__ == '__main__':
    drive = Drive()
    flag_do_the_blocking = False

    # game loop
    while 1:
        # clone_floor: floor of the leading clone
        # clone_pos: position of the leading clone on its floor
        # direction: direction of the leading clone: LEFT or RIGHT
        clone_floor, clone_pos, direction = input().split()
        clone_floor = int(clone_floor)
        clone_pos = int(clone_pos)

        flag_do_the_blocking = drive.floors[clone_floor].should_be_blocked(clone_pos, direction)

        # Write an action using print
        # To debug: print("Debug messages...", file=sys.stderr)

        # action: WAIT or BLOCK
        if flag_do_the_blocking:
            print("BLOCK")
        else:
            print("WAIT")
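The blocking rule in Floor.should_be_blocked can be checked without the CodinGame I/O loop; a clone is blocked whenever it walks away from the elevator (or exit) on its floor:

# Quick deterministic check of the blocking rule (widths/positions invented).
f = Floor(width=10)
f.add_elevator(4)
print(f.should_be_blocked(6, "RIGHT"))  # True: walking away from the elevator at 4
print(f.should_be_blocked(2, "RIGHT"))  # False: walking toward it
print(f.should_be_blocked(2, "LEFT"))   # True: walking away from it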
license: mit | hash: 7,778,575,852,018,126,000 | line_mean: 32.322581 | line_max: 149 | alpha_frac: 0.603743 | autogenerated: false | ratio: 3.811808 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: garthylou/Libreosteo | path: libreosteoweb/api/file_integrator.py | copies: 1 | size: 19791
# This file is part of LibreOsteo.
#
# LibreOsteo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LibreOsteo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LibreOsteo. If not, see <http://www.gnu.org/licenses/>.
import logging
import csv
from django.utils.translation import ugettext_lazy as _
import random
from libreosteoweb.models import Patient, ExaminationType, ExaminationStatus
from datetime import date, datetime
from .utils import enum, Singleton, _unicode

logger = logging.getLogger(__name__)

_CSV_BUFFER_SIZE = 1024 * 1024 * 10


class Extractor(object):
    def extract(self, instance):
        """
        Return a dict with keys 'patient' and 'examination', each giving an
        extract of the content as a list of dicts that contain the line
        number and the content.
        """
        result = {}
        extract_patient = self.extract_file(instance.file_patient)
        extract_examination = self.extract_file(instance.file_examination)
        result['patient'] = extract_patient
        result['examination'] = extract_examination
        return result

    def analyze(self, instance):
        """
        Return a dict with keys 'patient' and 'examination' which indicates if:
        - the expected file has the correct type,
        - the file is valid,
        - the file is not empty,
        - and lists any errors found.
        """
        logger.info("* Analyze the instance")
        result = {}
        (type_file, is_valid, is_empty, errors) = self.analyze_file(instance.file_patient)
        result['patient'] = (type_file, is_valid, is_empty, errors)
        (type_file, is_valid, is_empty, errors) = self.analyze_file(instance.file_examination)
        result['examination'] = (type_file, is_valid, is_empty, errors)
        return result

    def analyze_file(self, internal_file):
        if not bool(internal_file):
            return ('', False, True, [])
        try:
            handler = AnalyzerHandler()
            report = handler.analyze(internal_file)
        except:
            logger.exception('Analyze failed.')
            return ('', False, True, [_('Analyze failed on this file')])
        if report.type == FileCsvType.PATIENT:
            return ('patient', report.is_valid, report.is_empty, [])
        if report.type == FileCsvType.EXAMINATION:
            return ('examination', report.is_valid, report.is_empty, [])
        else:
            return ('patient', False, True, [_('Cannot recognize the patient file')])

    def extract_file(self, internal_file):
        if not bool(internal_file):
            return {}
        result = {}
        try:
            content = FileContentProxy().get_content(internal_file, line_filter=filter)
            nb_row = content['nb_row'] - 1
            if nb_row > 0:
                idx = sorted(
                    random.sample(range(1, nb_row + 1), min(5, nb_row)))
                logger.info("indexes = %s " % idx)
                for i in idx:
                    result['%s' % (i + 1)] = content['content'][i - 1]
        except:
            logger.exception('Extractor failed.')
        logger.info("result is %s" % result)
        return result

    def get_content(self, internal_file):
        return FileContentProxy().get_content(internal_file, line_filter=filter)

    def unproxy(self, internal_file):
        FileContentProxy().unproxy(internal_file, line_filter=filter)


def filter(line):
    logger.debug("filtering ...")
    if not hasattr(line, 'decode'):
        logger.debug("no decode available")
        return line
    result_line = None
    try:
        logger.debug("Try to decode against utf-8")
        result_line = line.decode('utf-8')
    except:
        logger.debug("Fail to decode against utf-8")
        pass
    if result_line is None:
        try:
            logger.debug("Try to decode against iso-8859-1")
            result_line = line.decode('iso-8859-1')
        except:
            logger.info("Fail to decode against iso-8859-1")
            result_line = _(
                'Cannot read the content file. Check the encoding.')
    return result_line


FileCsvType = enum('FileCsvType', 'PATIENT', 'EXAMINATION')


class AnalyzeReport(object):
    def __init__(self, is_empty, is_valid, internal_type):
        self.is_empty = is_empty
        self.is_valid = is_valid
        self.type = internal_type

    def is_empty(self):
        return self.is_empty

    def is_valid(self):
        return self.is_valid

    def type(self):
        return self.type


class Analyzer(object):
    """ Performs the analysis on the content. It should be inherited. """
    identifier = None
    type = None

    def __init__(self, content=None):
        self.content = content

    def is_instance(self):
        if self.content is not None:
            try:
                self._parse_header(self.content['header'])
                return True
            except ValueError:
                return False
        return False

    def _parse_header(self, header):
        _unicode(header[:]).lower().index(self.__class__.identifier)

    def get_report(self):
        is_empty = self.content.nb_row <= 1
        # is_valid should check the number of columns
        is_valid = len(self.content.header) == self.__class__.field_number
        return AnalyzeReport(is_empty, is_valid, self.__class__.type)


class AnalyzerPatientFile(Analyzer):
    identifier = 'nom de famille'
    type = FileCsvType.PATIENT
    field_number = 24

    def __init__(self, content=None):
        super(self.__class__, self).__init__(content=content)


class AnalyzerExaminationFile(Analyzer):
    identifier = 'conclusion'
    type = FileCsvType.EXAMINATION
    field_number = 14

    def __init__(self, content=None):
        super(self.__class__, self).__init__(content=content)


class FileContentAdapter(dict):
    def __init__(self, ourfile, line_filter=None):
        self.file = ourfile
        self['content'] = None
        self.filter = line_filter
        if self.filter is None:
            self.filter = self.passthrough

    def __getattr__(self, attr):
        return self[attr]

    def get_content(self):
        if self['content'] is None:
            reader = self._get_reader()
            rownum = 0
            header = None
            content = []
            for row in reader:
                # Save header row.
                if rownum == 0:
                    header = [self.filter(c) for c in row]
                else:
                    content.append([self.filter(c) for c in row])
                rownum += 1
            self.file.close()
            self['content'] = content
            self['nb_row'] = rownum
            self['header'] = header
        return self

    def _get_reader(self):
        if not bool(self.file):
            return None
        self.file.open(mode='r')
        logger.info("* Try to guess the dialect on csv")
        csv_buffer = self.file.read(_CSV_BUFFER_SIZE)
        # Compatibility with python2 and python3
        dialect = csv.Sniffer().sniff(csv_buffer)
        self.file.seek(0)
        reader = csv.reader(self.file, dialect)
        return reader

    def passthrough(self, line):
        return line


class DecodeCsvReader(object):
    def __init__(self, underlying_instance, decode_filter):
        self.reader_instance = underlying_instance
        self.filter = decode_filter

    def __next__(self):
        return self.filter(next(self.reader_instance))

    def __iter__(self):
        return self


class FileContentKey(object):
    def __init__(self, ourfile, line_filter):
        self.file = ourfile
        self.line_filter = line_filter

    def __hash__(self):
        return hash((self.file, self.line_filter))

    def __eq__(self, other):
        return (self.file, self.line_filter) == (other.file, other.line_filter)

    def __ne__(self, other):
        # Not strictly necessary, but to avoid having both x==y and x!=y
        # True at the same time
        return not (self == other)


class FileContentProxy(object):
    __metaclass__ = Singleton
    file_content = {}

    def get_content(self, ourfile, line_filter=None):
        key = FileContentKey(ourfile, line_filter)
        try:
            return self.file_content[key]
        except KeyError:
            self.file_content[key] = FileContentAdapter(
                ourfile, line_filter).get_content()
            return self.file_content[key]

    def unproxy(self, ourfile, line_filter=None):
        key = FileContentKey(ourfile, line_filter)
        try:
            self.file_content[key] = None
        except:
            pass


class AnalyzerHandler(object):
    analyzers = [AnalyzerPatientFile, AnalyzerExaminationFile]

    def analyze(self, ourfile):
        if not bool(ourfile):
            return AnalyzeReport(False, False, None)
        content = self.get_content(ourfile)
        for analyzer in self.analyzers:
            instance = analyzer(content)
            if instance.is_instance():
                return instance.get_report()
        logger.warning("No Analyzer found")
        return AnalyzeReport(False, False, None)

    def get_content(self, ourfile):
        return FileContentProxy().get_content(ourfile, line_filter=filter)

    def filter(self, line):
        result_line = None
        try:
            result_line = line.decode('utf-8')
        except:
            pass
        if result_line is None:
            try:
                result_line = line.decode('iso-8859-1')
            except:
                result_line = _(
                    'Cannot read the content file. Check the encoding.')
        return result_line


class InvalidIntegrationFile(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class IntegratorHandler(object):
    def integrate(self, file, file_additional=None, user=None):
        integrator = IntegratorFactory().get_instance(file)
        if integrator is None:
            raise InvalidIntegrationFile(
                "This file %s is not valid to be integrated." % (file))
        result = integrator.integrate(file, file_additional=file_additional,
                                      user=user)
        return result

    def post_processing(self, files):
        extractor = Extractor()
        for f in files:
            extractor.unproxy(f)


class IntegratorFactory(object):
    def __init__(self, serializer_class=None):
        self.extractor = Extractor()
        self.serializer_class = serializer_class

    def get_instance(self, file):
        result = self.extractor.analyze_file(file)
        if not result[1]:
            return None
        if result[0] == 'patient':
            from .serializers import PatientSerializer
            return IntegratorPatient(serializer_class=PatientSerializer)
        elif result[0] == 'examination':
            from .serializers import ExaminationSerializer
            return IntegratorExamination(
                serializer_class=ExaminationSerializer)


class FilePatientFactory(object):
    def __init__(self):
        from .serializers import PatientSerializer
        self.serializer_class = PatientSerializer

    def get_serializer(self, row):
        try:
            data = {
                'family_name': row[1],
                'original_name': row[2],
                'first_name': row[3],
                'birth_date': self.get_date(row[4]),
                'sex': self.get_sex_value(row[5]),
                'address_street': row[6],
                'address_complement': row[7],
                'address_zipcode': row[8],
                'address_city': row[9],
                'email': row[10],
                'phone': row[11],
                'mobile_phone': row[12],
                'job': row[13],
                'hobbies': row[14],
                'smoker': self.get_boolean_value(row[15]),
                'laterality': self.get_laterality_value(row[16]),
                'important_info': row[17],
                'current_treatment': row[18],
                'surgical_history': row[19],
                'medical_history': row[20],
                'family_history': row[21],
                'trauma_history': row[22],
                'medical_reports': row[23],
                'creation_date': self.get_default_date(),
                'consent_check': False
            }
            serializer = self.serializer_class(data=data)
        except ValueError as e:
            logger.exception("Exception when creating patient.")
            serializer = {'errors': ["%s" % e]}
        except:
            logger.exception("Exception when creating patient.")
            serializer = {'errors': [_('Cannot read this line.')]}
        return serializer

    def get_sex_value(self, value):
        if value.upper() == 'F':
            return 'F'
        else:
            return 'M'

    def get_laterality_value(self, value):
        if value.upper() == 'G' or value.upper() == 'L':
            return 'L'
        else:
            return 'R'

    def get_boolean_value(self, value):
        if value.lower() == 'o' or value.lower() == 'oui' or value.lower(
        ) == 'true' or value.lower() == 't':
            return True
        else:
            return False

    def get_default_date(self):
        return date(2011, 1, 1)

    def get_date(self, value):
        f = "%d/%m/%Y"
        return datetime.strptime(value, f).date()


class AbstractIntegrator(object):
    def integrate(self, file, file_additional=None, user=None):
        pass


class IntegratorPatient(AbstractIntegrator):
    def __init__(self, serializer_class=None):
        self.extractor = Extractor()
        self.serializer_class = serializer_class

    def integrate(self, file, file_additional=None, user=None):
        content = self.extractor.get_content(file)
        nb_line = 0
        errors = []
        factory = FilePatientFactory()
        for idx, r in enumerate(content['content']):
            serializer = factory.get_serializer(r)
            try:
                serializer['errors']
                errors.append((idx + 2, serializer['errors']))
            except KeyError:
                if serializer.is_valid():
                    serializer.save()
                    nb_line += 1
                else:
                    # idx + 2 because : we have header and the index start from 0
                    # To have the line number we have to add 2 to the index....
                    errors.append((idx + 2, serializer.errors))
                    logger.info("errors detected, data is = %s " %
                                serializer.initial_data)
        return (nb_line, errors)


class IntegratorExamination(AbstractIntegrator):
    def __init__(self, serializer_class=None):
        self.extractor = Extractor()
        self.serializer_class = serializer_class
        self.patient_table = None

    def integrate(self, file, file_additional=None, user=None):
        if file_additional is None:
            return (0, [_('Missing patient file to integrate it.')])
        content = self.extractor.get_content(file)
        nb_line = 0
        errors = []
        for idx, r in enumerate(content['content']):
            logger.info("* Load line from content")
            try:
                patient = self.get_patient(int(r[0]), file_additional)
                data = {
                    'date': self.get_date(r[1], with_time=True),
                    'reason': r[2],
                    'reason_description': r[3],
                    'orl': r[4],
                    'visceral': r[5],
                    'pulmo': r[6],
                    'uro_gyneco': r[7],
                    'periphery': r[8],
                    'general_state': r[9],
                    'medical_examination': r[10],
                    'diagnosis': r[11],
                    'treatments': r[12],
                    'conclusion': r[13],
                    'patient': patient.id,
                    'therapeut': user.id,
                    'type': ExaminationType.NORMAL,
                    'status': ExaminationStatus.NOT_INVOICED,
                    'status_reason': u'%s' % _('Imported examination'),
                }
                serializer = self.serializer_class(data=data)
                if serializer.is_valid():
                    serializer.save()
                    nb_line += 1
                else:
                    # idx + 2 because : we have header and the index start from 0
                    # To have the line number we have to add 2 to the index....
                    errors.append((idx + 2, serializer.errors))
                    logger.info("errors detected, data is = %s, errors = %s " %
                                (data, serializer.errors))
            except ValueError as e:
                logger.exception("Exception when creating examination.")
                errors.append((idx + 2, {
                    'general_problem':
                    _('There is a problem when reading this line :') + _unicode(e)
                }))
            except:
                logger.exception("Exception when creating examination.")
                errors.append((idx + 2, {
                    'general_problem':
                    _('There is a problem when reading this line.')
                }))
        return (nb_line, errors)

    def get_date(self, value, with_time=False):
        f = "%d/%m/%Y"
        if with_time:
            return datetime.strptime(value, f)
        return datetime.strptime(value, f).date()

    def get_patient(self, numero, file_patient):
        if not bool(file_patient):
            return None
        if self.patient_table is None:
            self._build_patient_table(file_patient)
        return self.patient_table[numero]

    def _build_patient_table(self, file_patient):
        content = self.extractor.get_content(file_patient)
        self.patient_table = {}
        factory = FilePatientFactory()
        for c in content['content']:
            serializer = factory.get_serializer(c)
            # remove validators to get a validated data through filters
            serializer.validators = []
            serializer.is_valid()
            self.patient_table[int(c[0])] = Patient.objects.filter(
                family_name=serializer.validated_data['family_name'],
                first_name=serializer.validated_data['first_name'],
                birth_date=serializer.validated_data['birth_date']).first()
            logger.info("found patient %s " % self.patient_table[int(c[0])])
gpl-3.0
5,506,196,519,202,931,000
33.090426
95
0.539235
false
4.314585
false
false
false
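A minimal standalone sketch of the encoding-fallback idea used by the `filter` function in the file above: try UTF-8 first, then fall back to ISO-8859-1. The function name and the sample bytes below are illustrative, not part of LibreOsteo.

def decode_with_fallback(raw, encodings=('utf-8', 'iso-8859-1')):
    """Return raw bytes decoded with the first encoding that works."""
    if not hasattr(raw, 'decode'):          # already text
        return raw
    for enc in encodings:
        try:
            return raw.decode(enc)
        except UnicodeDecodeError:
            continue
    raise ValueError('Cannot read the content. Check the encoding.')

assert decode_with_fallback(b'caf\xc3\xa9') == u'caf\xe9'   # valid UTF-8
assert decode_with_fallback(b'caf\xe9') == u'caf\xe9'       # Latin-1 fallback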
valsson/MD-MC-Codes-2016
HarmonicOscillator-MD/HarmonicOscillator-MD-Verlet.py
1
4262
#! /usr/bin/env python

import numpy as np
import matplotlib.pyplot as plt
from DataTools import writeDataToFile
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--time-step', dest='time_step', required=False)
parser.add_argument('--output-file', dest='fn_out', required=False)
args = parser.parse_args()

# Parameters of potential
m = 1.0
k = (2.0 * np.pi)**2
angular_freq = np.sqrt(k / m)
freq = angular_freq / (2.0 * np.pi)
period = 1.0 / freq

# MD Parameters
if args.time_step:
    time_step = np.float64(args.time_step)
else:
    time_step = 0.01 * period

if args.fn_out:
    fn_out = args.fn_out
else:
    fn_out = 'results.data'

showPlots = False
#num_periods = 20
#num_steps = np.int(np.rint( (num_periods*period)/time_step ))
num_steps = 10000

# initial position and velocity at t=0
initial_position = 2.0
initial_velocity = 0.0


def getPotentialEnergy(x):
    potential_ener = 0.5 * k * x**2
    return potential_ener
#-------------------------------


def getForce(x):
    force = -k * x
    return force
#-------------------------------


def getAcceleration(x):
    return getForce(x) / m
#-------------------------------


def getPotentialAndForce(x):
    return (getPotentialEnergy(x), getForce(x))
#-------------------------------


def getKineticEnergy(v):
    kinetic_ener = 0.5 * m * v**2
    return kinetic_ener
#-------------------------------


def getTotalEnergy(x, v):
    return getPotentialEnergy(x) + getKineticEnergy(v)
#-------------------------------


# analytical solution:
phi = np.arctan(-initial_velocity / (initial_position * angular_freq))
amplitude = initial_position / np.cos(phi)
conserved_energy = getPotentialEnergy(amplitude)
# ----------------------

times = []
positions = []
velocities = []
pot_energies = []
kin_energies = []
tot_energies = []

time = 0.0
curr_position = initial_position
prev_position = curr_position - initial_velocity * time_step + \
    0.5 * getAcceleration(curr_position) * time_step**2
curr_velocity = initial_velocity

for i in range(num_steps):
    if (i + 1) % (num_steps // 10) == 0:
        print('MD step {0:6d} of {1:6d}'.format(i + 1, num_steps))
    # get force at t
    acceleration = getAcceleration(curr_position)
    # get new position at t+dt
    new_position = 2.0 * curr_position - prev_position + acceleration * time_step**2
    # get velocity at t
    curr_velocity = (new_position - prev_position) / (2.0 * time_step)
    # get energies at t
    curr_pot_ener = getPotentialEnergy(curr_position)
    curr_kin_ener = getKineticEnergy(curr_velocity)
    curr_tot_ener = curr_pot_ener + curr_kin_ener
    #
    times.append(time)
    positions.append(curr_position)
    velocities.append(curr_velocity)
    pot_energies.append(curr_pot_ener)
    kin_energies.append(curr_kin_ener)
    tot_energies.append(curr_tot_ener)
    #
    prev_position = curr_position
    curr_position = new_position
    time += time_step
#
#----------------------------------------

times = np.array(times)
positions = np.array(positions)
velocities = np.array(velocities)
pot_energies = np.array(pot_energies)
kin_energies = np.array(kin_energies)
tot_energies = np.array(tot_energies)

positions_analytical = amplitude * np.cos(angular_freq * times + phi)
velocities_analytical = -angular_freq * amplitude * np.sin(angular_freq * times + phi)

writeDataToFile(fn_out,
                [times, positions, velocities, pot_energies, kin_energies,
                 tot_energies, positions_analytical, velocities_analytical],
                ['time', 'pos', 'vel', 'pot_ene', 'kin_ene', 'tot_ene',
                 'pos_an', 'vel_an'],
                constantsNames=['time_step', 'period', 'amplitude', 'k', 'm',
                                'phi', 'conserved_energy'],
                constantsValues=[time_step, period, amplitude, k, m, phi,
                                 conserved_energy],
                dataFormat='%15.8f')

if showPlots:
    plt.figure(1)
    plt.plot(times, tot_energies)
    plt.plot(times, pot_energies)
    plt.plot(times, kin_energies)
    plt.show()
    plt.figure(2)
    plt.plot(times, pot_energies)
    plt.show()
    plt.figure(3)
    plt.plot(times, kin_energies)
    plt.show()
    plt.figure(4)
    plt.plot(times, velocities)
    plt.show()
    plt.figure(5)
    plt.plot(times, positions)
    plt.plot(times, positions_analytical)
    plt.show()
    plt.figure(6)
    plt.plot(times, positions - positions_analytical)
    plt.show()
#
mit
8,504,365,888,325,456,000
26.320513
125
0.638667
false
3.159377
false
false
false
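The integrator above uses the position-Verlet recurrence x_{n+1} = 2 x_n - x_{n-1} + a(x_n) dt^2, seeding x_{-1} from a Taylor expansion. A self-contained sketch of just that recurrence, checked against the analytic solution (parameter values here are illustrative):

import math

def verlet_trajectory(x0, v0, k=1.0, m=1.0, dt=0.001, n_steps=1000):
    a = lambda x: -k / m * x
    prev = x0 - v0 * dt + 0.5 * a(x0) * dt**2   # x_{-1} from a Taylor expansion
    curr = x0
    xs = []
    for _ in range(n_steps):
        xs.append(curr)
        curr, prev = 2.0 * curr - prev + a(curr) * dt**2, curr
    return xs

xs = verlet_trajectory(2.0, 0.0)
# Compare the last stored sample, at t = (n_steps - 1) * dt, against
# x(t) = x0 cos(omega t); Verlet's global error is O(dt^2), so the
# tolerance is loose.
omega = 1.0
t = 0.999
assert abs(xs[-1] - 2.0 * math.cos(omega * t)) < 1e-3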
OCA/business-requirement
business_requirement_sale/models/business_requirement.py
1
1458
# Copyright 2019 Tecnativa - Victor M.M. Torres
# Copyright 2019 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).

from odoo import api, fields, models


class BusinessRequirement(models.Model):
    _inherit = 'business.requirement'

    sale_order_ids = fields.One2many(
        comodel_name='sale.order',
        inverse_name='business_requirement_id',
        string='Sales Orders',
    )
    sale_order_count = fields.Integer(
        string='Sales Orders Count',
        compute='_compute_sale_order_count',
    )

    @api.multi
    @api.depends('sale_order_ids')
    def _compute_sale_order_count(self):
        groups = self.env['sale.order'].read_group(
            domain=[('business_requirement_id', 'in', self.ids)],
            fields=['business_requirement_id'],
            groupby=['business_requirement_id'],
        )
        data = {
            x['business_requirement_id'][0]: x['business_requirement_id_count']
            for x in groups
        }
        for rec in self:
            rec.sale_order_count = data.get(rec.id, 0)

    @api.multi
    def open_orders(self):
        action = self.env.ref('sale.action_quotations').read()[0]
        if len(self) == 1:
            action['context'] = {
                'search_default_business_requirement_id': self.id,
            }
        else:
            action['domain'] = [('business_requirement_id', 'in', self.ids)]
        return action
agpl-3.0
-5,606,639,854,425,939,000
31.4
79
0.584362
false
3.681818
false
false
false
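The `_compute_sale_order_count` pattern above (one grouped query, then a dict lookup per record) can be sketched in plain Python. The `groups` payload below mimics the shape Odoo's `read_group` returns for a many2one grouping field; the ids and names are made up:

# Each group dict carries the grouping field as an (id, display_name)
# pair plus a generated "<field>_count" key.
groups = [
    {'business_requirement_id': (7, 'BR-0007'), 'business_requirement_id_count': 3},
    {'business_requirement_id': (9, 'BR-0009'), 'business_requirement_id_count': 1},
]
data = {g['business_requirement_id'][0]: g['business_requirement_id_count']
        for g in groups}
record_ids = [7, 8, 9]
counts = {rec_id: data.get(rec_id, 0) for rec_id in record_ids}
assert counts == {7: 3, 8: 0, 9: 1}   # records with no orders default to 0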
deepmind/open_spiel
open_spiel/python/algorithms/external_sampling_mccfr_test.py
1
4567
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for open_spiel.python.algorithms.external_sampling_mccfr."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import absltest
import numpy as np

from open_spiel.python.algorithms import exploitability
from open_spiel.python.algorithms import external_sampling_mccfr
import pyspiel

SEED = 39823987


class ExternalSamplingMCCFRTest(absltest.TestCase):

  def test_external_sampling_leduc_2p_simple(self):
    np.random.seed(SEED)
    game = pyspiel.load_game("leduc_poker")
    es_solver = external_sampling_mccfr.ExternalSamplingSolver(
        game, external_sampling_mccfr.AverageType.SIMPLE)
    for _ in range(10):
      es_solver.iteration()
    conv = exploitability.nash_conv(game, es_solver.average_policy())
    print("Leduc2P, conv = {}".format(conv))
    self.assertLess(conv, 5)

    # ensure that to_tabular() works on the returned policy and
    # the tabular policy is equivalent
    tabular_policy = es_solver.average_policy().to_tabular()
    conv2 = exploitability.nash_conv(game, tabular_policy)
    self.assertEqual(conv, conv2)

  def test_external_sampling_leduc_2p_full(self):
    np.random.seed(SEED)
    game = pyspiel.load_game("leduc_poker")
    es_solver = external_sampling_mccfr.ExternalSamplingSolver(
        game, external_sampling_mccfr.AverageType.FULL)
    for _ in range(10):
      es_solver.iteration()
    conv = exploitability.nash_conv(game, es_solver.average_policy())
    print("Leduc2P, conv = {}".format(conv))
    self.assertLess(conv, 5)

  def test_external_sampling_kuhn_2p_simple(self):
    np.random.seed(SEED)
    game = pyspiel.load_game("kuhn_poker")
    es_solver = external_sampling_mccfr.ExternalSamplingSolver(
        game, external_sampling_mccfr.AverageType.SIMPLE)
    for _ in range(10):
      es_solver.iteration()
    conv = exploitability.nash_conv(game, es_solver.average_policy())
    print("Kuhn2P, conv = {}".format(conv))
    self.assertLess(conv, 1)

  def test_external_sampling_kuhn_2p_full(self):
    np.random.seed(SEED)
    game = pyspiel.load_game("kuhn_poker")
    es_solver = external_sampling_mccfr.ExternalSamplingSolver(
        game, external_sampling_mccfr.AverageType.FULL)
    for _ in range(10):
      es_solver.iteration()
    conv = exploitability.nash_conv(game, es_solver.average_policy())
    print("Kuhn2P, conv = {}".format(conv))
    self.assertLess(conv, 1)

  # Liar's dice takes too long, so disable this test. Leave code for reference.
  # pylint: disable=g-unreachable-test-method
  def disabled_test_external_sampling_liars_dice_2p_simple(self):
    np.random.seed(SEED)
    game = pyspiel.load_game("liars_dice")
    es_solver = external_sampling_mccfr.ExternalSamplingSolver(
        game, external_sampling_mccfr.AverageType.SIMPLE)
    for _ in range(1):
      es_solver.iteration()
    conv = exploitability.nash_conv(game, es_solver.average_policy())
    print("Liar's dice, conv = {}".format(conv))
    self.assertLess(conv, 2)

  def test_external_sampling_kuhn_3p_simple(self):
    np.random.seed(SEED)
    game = pyspiel.load_game("kuhn_poker", {"players": 3})
    es_solver = external_sampling_mccfr.ExternalSamplingSolver(
        game, external_sampling_mccfr.AverageType.SIMPLE)
    for _ in range(10):
      es_solver.iteration()
    conv = exploitability.nash_conv(game, es_solver.average_policy())
    print("Kuhn3P, conv = {}".format(conv))
    self.assertLess(conv, 2)

  def test_external_sampling_kuhn_3p_full(self):
    np.random.seed(SEED)
    game = pyspiel.load_game("kuhn_poker", {"players": 3})
    es_solver = external_sampling_mccfr.ExternalSamplingSolver(
        game, external_sampling_mccfr.AverageType.FULL)
    for _ in range(10):
      es_solver.iteration()
    conv = exploitability.nash_conv(game, es_solver.average_policy())
    print("Kuhn3P, conv = {}".format(conv))
    self.assertLess(conv, 2)


if __name__ == "__main__":
  absltest.main()
apache-2.0
8,920,101,350,981,107,000
37.70339
79
0.708123
false
3.307024
true
false
false
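A minimal driver mirroring what these tests exercise, assuming open_spiel and its Python bindings are installed; the iteration count is arbitrary, just larger than the smoke tests above:

import numpy as np
import pyspiel
from open_spiel.python.algorithms import exploitability, external_sampling_mccfr

np.random.seed(0)
game = pyspiel.load_game("kuhn_poker")
solver = external_sampling_mccfr.ExternalSamplingSolver(
    game, external_sampling_mccfr.AverageType.SIMPLE)
for _ in range(1000):
    solver.iteration()
# NashConv measures distance from a Nash equilibrium; it should shrink
# toward 0 as iterations accumulate.
print(exploitability.nash_conv(game, solver.average_policy()))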
SGenheden/Scripts
Mol/parse_optq.py
1
2134
# Author: Samuel Genheden [email protected]

"""
Program to parse RESP charges and make a Gromacs residue template file (.rtp)

Atoms in the PDB file need to be in the same order as in the charge file.
The atom types file needs to have an atom type definition on each line

NAME1 TYPE1
NAME2 TYPE2
...

Used in membrane engineering project

Examples
--------
parse_optq.py -f model0_1.pdb -q qout -o model0.rtp -t atypes.txt
  Make an rtp file based on model0_1 and qout
"""

import argparse

import parmed

if __name__ == '__main__':

    argparser = argparse.ArgumentParser(description="Script to parse optimal charges")
    argparser.add_argument('-f', '--file', help="the PDB file")
    argparser.add_argument('-q', '--qout', help="the output charges", default="qout")
    argparser.add_argument('-o', '--out', help="the output RTP file")
    argparser.add_argument('-t', '--types', help="a file with atom types")
    args = argparser.parse_args()

    struct = parmed.load_file(args.file)

    qline = ""
    with open(args.qout, "r") as f:
        line = f.readline()
        while line:
            qline += line.strip() + " "
            line = f.readline()
    charges = list(map(float, qline.strip().split()))

    for atom, charge in zip(struct.atoms, charges):
        print("%4s%10.6f" % (atom.name, charge))

    if args.out is not None:
        atype = {}
        with open(args.types, "r") as f:
            for line in f.readlines():
                a, t = line.strip().split()
                atype[a] = t

        with open(args.out, "w") as f:
            f.write("[ bondedtypes ]\n")
            f.write("1 5 9 2 1 3 1 0\n\n")
            f.write("[ UNK ]\n\n")
            f.write("[ atoms ]\n")
            for i, (atom, charge) in enumerate(zip(struct.atoms, charges)):
                f.write("%5s %6s %10.6f %3d\n" % (atom.name, atype[atom.name], charge, i))
            f.write("\n[ bonds ]\n")
            for bond in struct.bonds:
                f.write("%5s %5s\n" % (bond.atom1.name, bond.atom2.name))
            f.write("\n")
mit
8,400,268,021,729,467,000
32.34375
90
0.559044
false
3.339593
false
false
false
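The qout parsing above (concatenate all lines, then split on whitespace) reduces to a small helper, since read() plus split() handles newlines and repeated spaces in one go. A standalone sketch with a throwaway temp file:

import os
import tempfile

def read_charges(path):
    """Read whitespace/newline-separated floats from a RESP qout file."""
    with open(path, "r") as f:
        return [float(tok) for tok in f.read().split()]

# Quick self-test.
tmp = tempfile.NamedTemporaryFile(mode="w", suffix=".qout", delete=False)
tmp.write("0.123 -0.456\n0.333\n")
tmp.close()
assert read_charges(tmp.name) == [0.123, -0.456, 0.333]
os.unlink(tmp.name)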
joliva/wiki-appengine
main.py
1
12161
#!/usr/bin/env python

import cgi, re, os, logging, string
import hmac, random
from datetime import datetime

import webapp2, jinja2

from google.appengine.ext import db
from google.appengine.api import memcache

template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
                               autoescape=False)

UNAME_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
UPASS_RE = re.compile(r"^.{3,20}$")
UEMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")

COOKIE_SALT = 'KISSMYGRITS'


def valid_username(username):
    return UNAME_RE.match(username)


def valid_password(password):
    return UPASS_RE.match(password)


def valid_email(email):
    return email == "" or UEMAIL_RE.match(email)


def make_salt():
    # salt will be a random six character string
    return ''.join([chr(random.randint(97, 122)) for idx in xrange(6)])


def make_password_hash(password):
    if password:
        salt = make_salt()
        return hmac.new(salt, password).hexdigest() + ('|%s' % salt)
    else:
        return None


class WikiUsers(db.Model):
    username = db.StringProperty(required=True)
    password_hash = db.StringProperty(required=True)
    email = db.StringProperty()
    created = db.DateTimeProperty(auto_now_add=True)

    @staticmethod
    def get_user(username):
        user = None
        if username:
            qry = "SELECT * FROM WikiUsers WHERE username = '%s'" % username
            #logging.info('query = %s', qry)
            user = db.GqlQuery(qry).get()
        return user

    @staticmethod
    def create_user(user):
        # assumes properties of user were previously validated
        if user:
            user = WikiUsers(**user)
            key = user.put()


class WikiEntry(db.Model):
    name = db.StringProperty(required=True, indexed=True)
    content = db.TextProperty(required=True)
    created = db.DateTimeProperty(auto_now_add=True, indexed=True)


class Handler(webapp2.RequestHandler):
    def update_cache(self, name, value):
        # store in cache
        logging.info('insert %s into cache', name)
        memcache.set(name, {'cached_time': datetime.now(), 'content': value})

    def store(self, name, content):
        # insert new wiki entry into datastore
        p = WikiEntry(name=name, content=content)
        key = p.put()
        # update cache
        self.update_cache(name, content)

    def retrieve(self, name, id=None):
        if id != None and id != '':
            value = WikiEntry.get_by_id(int(id)).content
            return {'cached_time': datetime.now(), 'content': value}
        else:
            # attempt first to get page from cache
            value = memcache.get(name)
            if value:
                return value
            else:
                logging.info('%s is not in the cache', name)
                # attempt to retrieve from database
                query = "SELECT * FROM WikiEntry WHERE name='%s' ORDER BY created DESC LIMIT 1" % name
                entry = db.GqlQuery(query).get()
                if entry:
                    self.update_cache(name, entry.content)
                    value = memcache.get(name)
                    return value
                else:
                    logging.info('%s is not in the DB', name)
                    return None

    def retrieve_all(self, name):
        # attempt to retrieve from database
        query = "SELECT * FROM WikiEntry WHERE name='%s' ORDER BY created DESC" % name
        entries = db.GqlQuery(query).fetch(100)
        return entries

    def write(self, *a, **kw):
        self.response.out.write(*a, **kw)

    def render_str(self, template, **params):
        t = jinja_env.get_template(template)
        return t.render(params)

    def render(self, template, **kw):
        self.write(self.render_str(template, **kw))

    def create_cookie(self, value):
        # cookie format: value|salted hash
        if value:
            return '%s|' % value + hmac.new(COOKIE_SALT, value).hexdigest()
        else:
            return None

    def store_cookie(self, key, value):
        if key and value:
            self.response.set_cookie(key, value=self.create_cookie(value), path='/')

    def remove_cookie(self, key):
        if key:
            self.response.set_cookie(key, value='', path='/')
            #self.response.delete_cookie(key)

    def get_cookie(self, key):
        # cookie format: value|salted hash
        if key:
            hashed_value = self.request.cookies.get(key)
            if hashed_value:
                value, salted_hash = hashed_value.split('|')
                if hashed_value == ('%s|' % value) + hmac.new(COOKIE_SALT, value).hexdigest():
                    return value
        return None


class Signup(Handler):
    def get(self):
        self.render('signup.html')

    def post(self):
        username = self.request.get("username")
        password = self.request.get("password")
        verify = self.request.get("verify")
        email = self.request.get("email")

        err_name = ""
        err_pass = ""
        err_vpass = ""
        err_email = ""
        err = False

        if not valid_username(username):
            err_name = "That's not a valid username."
            err = True

        if WikiUsers.get_user(username) != None:
            err_name = "That user already exists"
            err = True

        if not valid_password(password):
            password = ""
            verify = ""
            err_pass = "That's not a valid password."
            err = True
        elif verify != password:
            password = ""
            verify = ""
            err_vpass = "Your passwords didn't match."
            err = True

        if not valid_email(email):
            err_email = "That's not a valid email."
            err = True

        if err == True:
            args = {"username": username, "password": password,
                    "verify": verify, "email": email,
                    "err_name": err_name, "err_pass": err_pass,
                    "err_vpass": err_vpass, "err_email": err_email}
            self.render('signup.html', **args)
        else:
            # save new user into DB
            user = {}
            user['username'] = username
            user['password_hash'] = make_password_hash(password)
            user['email'] = email
            WikiUsers.create_user(user)

            # save login session cookie
            self.store_cookie('username', username)
            self.redirect(FRONT_URL)


class Login(Handler):
    def get(self):
        self.render('login.html')

    def post(self):
        username = self.request.get("username")
        password = self.request.get("password")

        err = False
        if username and password:
            # validate login credentials
            user = WikiUsers.get_user(username)
            if user:
                # password hash: hmac.new(salt, password).hexdigest() + '|' + salt
                password_hash = user.password_hash.encode('ascii')
                logging.info('password_hash = %s', password_hash)
                hashval, salt = password_hash.split('|')
                logging.info('hashval = %s salt=%s', hashval, salt)
                if hashval == hmac.new(salt, password).hexdigest():
                    # save login session cookie
                    self.store_cookie('username', username)
                    self.redirect(FRONT_URL)
                    return

        args = {"username": username, "password": password, "error": 'Invalid Login'}
        self.render('login.html', **args)


class Logout(Handler):
    def get(self):
        self.remove_cookie('username')
        self.redirect(FRONT_URL)


class WikiPage(Handler):
    def get(self, name):
        if name == '':
            name = '_front'
        logging.info('name=%s', name)
        id = self.request.get('id')

        # attempt to retrieve page from DB
        value = self.retrieve(name, id)
        if value == None:
            # redirect to an edit page to create the new entry
            logging.info('redirect to page to add new wiki topic: %s', BASE_EDIT + name)
            self.redirect(BASE_EDIT + name)
        else:
            # display the page
            now = datetime.now()
            delta_secs = (now - value['cached_time']).seconds

            if self.request.get('cause') == 'logoff':
                self.remove_cookie('username')
                self.redirect(BASE_URL + name)  # reload page

            # determine if user logged in to set header
            username = self.get_cookie('username')
            if username:
                edit_link = BASE_EDIT + name
                edit_status = 'edit'
                edit_user_sep = ' | '
                hist_link = BASE_HIST + name
                hist_status = 'history'
                wiki_user = '&lt%s&gt' % username
                login_link = BASE_URL + name + '?cause=logoff'
                login_status = 'logout'
                login_signup_sep = ''
                signup_link = ''
                signup_status = ''
            else:
                edit_link = BASE_URL + name
                edit_status = ''
                edit_user_sep = ''
                hist_link = BASE_HIST + name
                hist_status = 'history'
                wiki_user = ''
                login_link = BASE_URL + '/login'
                login_status = 'login'
                login_signup_sep = ' | '
                signup_link = BASE_URL + '/signup'
                signup_status = 'signup'

            args = dict(topic=name, content=value['content'],
                        cache_time=delta_secs, edit_link=edit_link,
                        edit_status=edit_status, edit_user_sep=edit_user_sep,
                        hist_link=hist_link, hist_status=hist_status,
                        wiki_user=wiki_user, login_link=login_link,
                        login_status=login_status,
                        login_signup_sep=login_signup_sep,
                        signup_link=signup_link, signup_status=signup_status)
            self.render('entry.html', **args)


class HistPage(Handler):
    def get(self, name):
        if self.request.get('cause') == 'logoff':
            self.remove_cookie('username')
            self.redirect(BASE_HIST + name)  # reload page

        # determine if user logged in to set header
        username = self.get_cookie('username')
        if username:
            edit_link = BASE_EDIT + name
            edit_status = 'edit'
            edit_user_sep = ''
            wiki_user = '&lt%s&gt' % username
            login_link = BASE_HIST + name + '?cause=logoff'
            login_status = 'logout'
            login_signup_sep = ''
            signup_link = ''
            signup_status = ''
        else:
            edit_link = BASE_URL + name
            edit_status = 'view'
            edit_user_sep = ''
            wiki_user = ''
            login_link = BASE_URL + '/login'
            login_status = 'login'
            login_signup_sep = ' | '
            signup_link = BASE_URL + '/signup'
            signup_status = 'signup'

        entries = self.retrieve_all(name)

        args = dict(topic=name, edit_link=edit_link, edit_status=edit_status,
                    edit_user_sep=edit_user_sep, wiki_user=wiki_user,
                    login_link=login_link, login_status=login_status,
                    login_signup_sep=login_signup_sep, signup_link=signup_link,
                    signup_status=signup_status, entries=entries)
        self.render('history.html', **args)


class EditPage(Handler):
    def get(self, name):
        if self.request.get('cause') == 'logoff':
            self.remove_cookie('username')
            self.redirect(BASE_URL + name)  # reload page

        # determine if user logged in to set header
        username = self.get_cookie('username')
        if username:
            edit_link = BASE_URL + name
            edit_status = 'view'
            edit_user_sep = ''
            wiki_user = '&lt%s&gt' % username
            login_link = BASE_URL + name + '?cause=logoff'
            login_status = 'logout'
            login_signup_sep = ''
            signup_link = ''
            signup_status = ''

            id = self.request.get('id')

            # attempt to retrieve page from DB
            value = self.retrieve(name, id)
            if value:
                content = value['content']
            else:
                content = ''

            args = dict(topic=name, content=content, edit_link=edit_link,
                        edit_status=edit_status, edit_user_sep=edit_user_sep,
                        wiki_user=wiki_user, login_link=login_link,
                        login_status=login_status,
                        login_signup_sep=login_signup_sep,
                        signup_link=signup_link, signup_status=signup_status)
            self.render('editentry.html', **args)
        else:
            edit_link = ''
            edit_status = ''
            edit_user_sep = ''
            wiki_user = ''
            login_link = BASE_URL + '/login'
            login_status = 'login'
            login_signup_sep = ' | '
            signup_link = BASE_URL + '/signup'
            signup_status = 'signup'

            args = dict(topic=name,
                        msg='Not Authorized to create topic if not logged in.',
                        edit_link=edit_link, edit_status=edit_status,
                        edit_user_sep=edit_user_sep, wiki_user=wiki_user,
                        login_link=login_link, login_status=login_status,
                        login_signup_sep=login_signup_sep,
                        signup_link=signup_link, signup_status=signup_status)

            self.response.set_status(401)
            self.render('unauthorized.html', **args)

    def post(self, name):
        # validate field
        content = self.request.get('content')

        # save into datastore and cache
        self.store(name, content)

        # redirect to entry permalink
        self.redirect(BASE_URL + name)


class Flush(Handler):
    def get(self):
        memcache.flush_all()


BASE_URL = '/wiki'
FRONT_URL = BASE_URL + '/'
BASE_EDIT = BASE_URL + '/_edit'
BASE_HIST = BASE_URL + '/_history'
PAGE_RE = r'(/(?:[a-zA-Z0-9_-]+/?)*)'

routes = [
    (BASE_URL + '/signup/?', Signup),
    (BASE_URL + '/login/?', Login),
    (BASE_URL + '/logout/?', Logout),
    (BASE_URL + '/flush/?', Flush),
    (BASE_EDIT + PAGE_RE + '/', EditPage),
    (BASE_EDIT + PAGE_RE, EditPage),
    (BASE_HIST + PAGE_RE + '/', HistPage),
    (BASE_HIST + PAGE_RE, HistPage),
    (BASE_URL + PAGE_RE + '/', WikiPage),
    (BASE_URL + PAGE_RE, WikiPage)
]

app = webapp2.WSGIApplication(routes, debug=True)
bsd-3-clause
1,276,170,337,536,588,000
25.904867
172
0.649864
false
3.018367
false
false
false
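The session cookie scheme above stores value|HMAC(secret, value) and recomputes the MAC on every read. A self-contained sketch of that idea; the secret is illustrative, SHA-256 and compare_digest are hardening choices the original (MD5 default, plain ==) does not make, and values are assumed not to contain '|':

import hashlib
import hmac

SECRET = b'KISSMYGRITS'  # illustrative only; use a random secret in production

def sign(value):
    mac = hmac.new(SECRET, value.encode('utf-8'), hashlib.sha256).hexdigest()
    return '%s|%s' % (value, mac)

def verify(cookie):
    value, _, mac = cookie.partition('|')
    expected = hmac.new(SECRET, value.encode('utf-8'), hashlib.sha256).hexdigest()
    # compare_digest avoids leaking the match position through timing
    return value if hmac.compare_digest(mac, expected) else None

assert verify(sign('alice')) == 'alice'
assert verify(sign('alice')[:-1] + 'x') is None   # tampered MAC fails ('x' is not hex)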
psychopy/psychopy
psychopy/hardware/forp.py
1
6704
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).

"""fORP fibre optic (MR-compatible) response devices by CurrentDesigns:
http://www.curdes.com/
This class is only useful when the fORP is connected via the serial port.

If you're connecting via USB, just treat it like a standard keyboard.
E.g., use a Keyboard component, and typically listen for Allowed keys
``'1', '2', '3', '4', '5'``. Or use ``event.getKeys()``.
"""

# Jeremy Gray and Dan Grupe developed the asKeys and baud parameters

from __future__ import absolute_import, print_function

from builtins import object
from psychopy import logging, event
import sys
from collections import defaultdict

try:
    import serial
except ImportError:
    serial = False

BUTTON_BLUE = 1
BUTTON_YELLOW = 2
BUTTON_GREEN = 3
BUTTON_RED = 4
BUTTON_TRIGGER = 5
# Maps bit patterns to character codes
BUTTON_MAP = [
    (0x01, BUTTON_BLUE),
    (0x02, BUTTON_YELLOW),
    (0x04, BUTTON_GREEN),
    (0x08, BUTTON_RED),
    (0x10, BUTTON_TRIGGER)]


class ButtonBox(object):
    """Serial line interface to the fORP MRI response box.

    To use this object class, select the box use setting `serialPort`, and
    connect the serial line. To emulate key presses with a serial connection,
    use `getEvents(asKeys=True)` (e.g., to be able to use a RatingScale
    object during scanning). Alternatively connect the USB cable and use fORP
    to emulate a keyboard.

    fORP sends characters at 800Hz, so you should check the buffer frequently.
    Also note that the trigger event from the fORP is typically extremely
    short (occurs for a single 800Hz epoch).
    """

    def __init__(self, serialPort=1, baudrate=19200):
        """
        :Parameters:

            `serialPort` :
                should be a number (where 1=COM1, ...)
            `baud` :
                the communication rate (baud), eg, 57600
        """
        super(ButtonBox, self).__init__()
        if not serial:
            raise ImportError("The module serial is needed to connect to "
                              "fORP. On most systems this can be installed "
                              "with\n\t easy_install pyserial")

        self.port = serial.Serial(serialPort - 1, baudrate=baudrate,
                                  bytesize=8, parity='N', stopbits=1,
                                  timeout=0.001)
        if not self.port.isOpen():
            self.port.open()
        self.buttonStatus = defaultdict(bool)  # Defaults to False
        self.rawEvts = []
        self.pressEvents = []

    def clearBuffer(self):
        """Empty the input buffer of all characters"""
        self.port.flushInput()

    def clearStatus(self):
        """
        Resets the pressed statuses, so getEvents will return pressed
        buttons, even if they were already pressed in the last call.
        """
        for k in self.buttonStatus:
            self.buttonStatus[k] = False

    def getEvents(self, returnRaw=False, asKeys=False, allowRepeats=False):
        """Returns a list of unique events (one event per button pressed)
        and also stores a copy of the full list of events since last
        getEvents() (stored as ForpBox.rawEvts)

        `returnRaw` :
            return (not just store) the full event list

        `asKeys` :
            If True, will also emulate pyglet keyboard events, so that
            button 1 will register as a keyboard event with value "1",
            and as such will be detectable using `event.getKeys()`

        `allowRepeats` :
            If True, this will return pressed buttons even if they were held
            down between calls to getEvents(). If the fORP is on the "Eprime"
            setting, you will get a stream of button presses while a button is
            held down. On the "Bitwise" setting, you will get a set of all
            currently pressed buttons every time a button is pressed or
            released. This option might be useful if you think your
            participant may be holding the button down before you start
            checking for presses.
        """
        nToGet = self.port.inWaiting()
        evtStr = self.port.read(nToGet)
        self.rawEvts = []
        self.pressEvents = []
        if allowRepeats:
            self.clearStatus()

        # for each character convert to an ordinal int value (from the ascii
        # chr)
        for thisChr in evtStr:
            pressCode = ord(thisChr)
            self.rawEvts.append(pressCode)
            decodedEvents = self._generateEvents(pressCode)
            self.pressEvents += decodedEvents
            if asKeys:
                for code in decodedEvents:
                    event._onPygletKey(symbol=code, modifiers=0)
                    # better as: emulated='fORP_bbox_asKey', but need to
                    # adjust event._onPygletKey and the symbol conversion
                    # pyglet.window.key.symbol_string(symbol).lower()

        # return the abbreviated list if necessary
        if returnRaw:
            return self.rawEvts
        else:
            return self.getUniqueEvents()

    def _generateEvents(self, pressCode):
        """For a given button press, returns a list buttons that went from
        unpressed to pressed. Also flags any unpressed buttons as unpressed.

        `pressCode` :
            a number with a bit set for every button currently pressed.
        """
        curStatuses = self.__class__._decodePress(pressCode)
        pressEvents = []
        for button, pressed in curStatuses:
            if pressed and not self.buttonStatus[button]:
                # We're transitioning to pressed...
                pressEvents.append(button)
                self.buttonStatus[button] = True
            if not pressed:
                self.buttonStatus[button] = False
        return pressEvents

    @classmethod
    def _decodePress(kls, pressCode):
        """Returns a list of buttons and whether they're pressed, given a
        character code.

        `pressCode` :
            A number with a bit set for every button currently pressed. Will
            be between 0 and 31.
        """
        return [(mapping[1], bool(mapping[0] & pressCode))
                for mapping in BUTTON_MAP]

    def getUniqueEvents(self, fullEvts=False):
        """Returns a Python set of the unique (unordered) events of either
        a list given or the current rawEvts buffer
        """
        if fullEvts:
            return set(self.rawEvts)
        return set(self.pressEvents)
gpl-3.0
-6,914,705,715,617,184,000
35.835165
79
0.616945
false
4.205772
false
false
false
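The `_decodePress` translation above is a plain bitmask test; a standalone sketch with the same BUTTON_MAP layout (names instead of the numeric codes used by PsychoPy):

BUTTON_MAP = [(0x01, 'blue'), (0x02, 'yellow'), (0x04, 'green'),
              (0x08, 'red'), (0x10, 'trigger')]

def decode_press(press_code):
    """Return the set of button names whose bits are set in press_code."""
    return {name for mask, name in BUTTON_MAP if press_code & mask}

assert decode_press(0x05) == {'blue', 'green'}   # bits 0 and 2 set
assert decode_press(0x00) == set()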
nuchi/httpserver
httpserver.py
1
1065
#!/usr/bin/env python

import socket

from http_handler import Handler_thread

MAX_CONNECTIONS = 5


class HTTPserver(object):
    def __init__(self, localOnly=False, port=80, max_connections=MAX_CONNECTIONS):
        self.port = port
        self.max_connections = max_connections
        if localOnly:
            self.hostname = '127.0.0.1'
        else:
            self.hostname = socket.gethostname()
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    def serve(self):
        self.server.bind((self.hostname, self.port))
        self.server.listen(self.max_connections)
        while True:
            client_socket, address = self.server.accept()
            ht = Handler_thread()
            ht.daemon = True
            ht.run(client_socket)

    def close(self):
        self.server.close()


def create_and_run_server(localOnly=True, port=8000):
    new_server = HTTPserver(localOnly=localOnly, port=port)
    try:
        new_server.serve()
    except KeyboardInterrupt:
        print('\nClosing server.')
        pass
    finally:
        new_server.close()


if __name__ == '__main__':
    create_and_run_server()
mit
-7,448,993,895,145,337,000
24.380952
79
0.712676
false
2.991573
false
false
false
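A sketch of exercising a raw socket server like the one above from a client. Run the server in another process first; the host and port below match the `create_and_run_server` defaults, and the response format depends on the Handler_thread implementation, which is not shown here:

import socket

def fetch(host='127.0.0.1', port=8000, path='/'):
    """Send a bare HTTP/1.0 GET and return the raw response bytes."""
    s = socket.create_connection((host, port), timeout=5)
    try:
        s.sendall(('GET %s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host)).encode('ascii'))
        chunks = []
        while True:
            data = s.recv(4096)
            if not data:       # server closed the connection
                break
            chunks.append(data)
        return b''.join(chunks)
    finally:
        s.close()

# print(fetch())  # uncomment with the server running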
elaeon/dsignature
creacion_firma/forms.py
1
3487
# -*- coding: utf-8 -*-
from django import forms
from django.forms import ModelForm
from creacion_firma.models import FirmarCertificado, NominaSubida, User
import datetime


class UserForm(forms.Form):
    nombre = forms.CharField(max_length=150,
                             widget=forms.TextInput(attrs={"style": "width: 400px"}))
    correo_electronico = forms.EmailField(max_length=100)
    password = forms.CharField(widget=forms.PasswordInput)


class FirmarCertificadoForm(ModelForm):
    user = forms.ModelChoiceField(
        queryset=User.objects.all().order_by("username"), required=True)

    class Meta:
        model = FirmarCertificado
        exclude = ('certificado',)


class SubirNominaForm(forms.Form):
    anteriores = forms.ModelChoiceField(
        queryset=NominaSubida.objects.filter(visible=True), required=False)
    nombre = forms.CharField(
        max_length=50,
        widget=forms.TextInput(attrs={"style": "width: 150px"}),
        help_text="QNA, Reyes, etc",
        required=False)
    numero = forms.IntegerField(required=False)
    year = forms.IntegerField(label=u"Año", required=False)
    tipo = forms.ChoiceField(choices=(("ord", "Ordinaria"),
                                      ("ext", "Extraordinaria")),
                             required=False)
    pdf = forms.FileField()
    xml = forms.FileField()

    def clean(self):
        cleaned_data = super(SubirNominaForm, self).clean()
        anteriores_nomina = cleaned_data.get("anteriores")
        nomina = cleaned_data.get("nombre")
        if not (anteriores_nomina or nomina):
            msg = "Elija un nombre o escriba uno"
            self.add_error('anteriores', msg)
            self.add_error('nombre', msg)


class SubirNominaXMLForm(forms.Form):
    anteriores = forms.ModelChoiceField(
        queryset=NominaSubida.objects.filter(visible=True), required=False)
    nombre = forms.CharField(
        max_length=50,
        widget=forms.TextInput(attrs={"style": "width: 150px"}),
        help_text="QNA, Reyes, etc",
        required=False)
    numero = forms.IntegerField(required=False)
    year = forms.IntegerField(label=u"Año", required=False)
    tipo = forms.ChoiceField(choices=(("ord", "Ordinaria"),
                                      ("ext", "Extraordinaria")),
                             required=False)
    xml = forms.FileField()

    def clean(self):
        cleaned_data = super(SubirNominaXMLForm, self).clean()
        anteriores_nomina = cleaned_data.get("anteriores")
        nomina = cleaned_data.get("nombre")
        if not (anteriores_nomina or nomina):
            msg = "Elija un nombre o escriba uno"
            self.add_error('anteriores', msg)
            self.add_error('nombre', msg)


class LoginForm(forms.Form):
    usuario = forms.CharField(max_length=150)
    password = forms.CharField(max_length=32, widget=forms.PasswordInput)


class SelectYearForm(forms.Form):
    year = forms.ChoiceField(label="Año", choices=((y, y) for y in xrange(2015, 2020)))


class FirmaOSinForm(forms.Form):
    tipo = forms.ChoiceField(label="Tipo",
                             choices=(("f", "firmado"), ("nf", "no firmado")))


class NominasFilterYear(forms.Form):
    def __init__(self, *args, **kwargs):
        if "year" in kwargs:
            self.year = kwargs["year"]
            del kwargs["year"]
        else:
            self.year = datetime.date.today().year
        super(NominasFilterYear, self).__init__(*args, **kwargs)
        self.fields['nomina'] = forms.ModelChoiceField(
            queryset=NominaSubida.objects.filter(year=self.year).order_by("-numero", "nombre", "tipo")
        )
gpl-3.0
-8,077,302,436,457,668,000
34.917526
103
0.650689
false
3.463221
false
false
false
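The cross-field rule in the two clean() methods above (either pick an existing nomina or type a new name) is just one predicate; a framework-free sketch with a hypothetical helper name:

def check_nomina_choice(anteriores, nombre):
    """Return a dict of field -> error message; empty when the input is valid."""
    errors = {}
    if not (anteriores or (nombre and nombre.strip())):
        msg = "Elija un nombre o escriba uno"
        errors['anteriores'] = msg
        errors['nombre'] = msg
    return errors

assert check_nomina_choice(None, "QNA") == {}              # new name given
assert set(check_nomina_choice(None, "")) == {'anteriores', 'nombre'}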
mdinacci/rtw
demos/proto2/src/proto2.py
1
15023
# -*- coding: utf-8-*-

"""
Author: Marco Dinacci <[email protected]>
Copyright © 2008-2009
"""

from pandac.PandaModules import *
loadPrcFile("../res/Config.prc")
#loadPrcFileData("", "want-directtools 1")
#loadPrcFileData("", "want-tk 1")

import direct.directbase.DirectStart
from direct.gui.OnscreenText import OnscreenText
from direct.directtools.DirectGeometry import LineNodePath
from direct.showbase.DirectObject import DirectObject
from pandac.PandaModules import *
from direct.task.Task import Task

from mdlib.panda.entity import *
from mdlib.panda.core import AbstractScene, AbstractLogic, AbstractApplication
from mdlib.panda.data import GOM
from mdlib.panda.input import *
from mdlib.panda.utils import *
from mdlib.types import Types

import sys, math

#base.wireframeOn()


class Camera(object):
    ZOOM = 30
    TARGET_DISTANCE = 10

    def __init__(self):
        base.disableMouse()
        base.camera.setPos(0, 0, 0)

    def followTarget(self, target):
        self.target = target
        self.update()

    def getPos(self):
        return base.camera.getPos()

    def zoomOut(self):
        base.camera.setY(base.camera, - self.ZOOM)

    def zoomIn(self):
        base.camera.setY(base.camera, self.ZOOM)

    def update(self):
        base.camera.setPos(self.target.nodepath.getPos() - \
                           self.target.forward * self.TARGET_DISTANCE)
        z = self.target.jumpZ
        base.camera.setZ(self.target.nodepath.getZ() - z + 1)
        pos = self.target.nodepath.getPos()
        pos.setZ(pos.getZ() - z)
        base.camera.lookAt(pos)
        base.camera.setZ(self.target.nodepath.getZ() - z + 3)


HEIGHT_TRACK = 0.5


class GameLogic(AbstractLogic):
    DUMMY_VALUE = -999

    # the view is not really the view but just the scene for now.
    def __init__(self, view):
        super(GameLogic, self).__init__(view)

        self.env = GOM.createEntity(environment_params)
        self.view.addEntity(self.env)

        self.track = GOM.createEntity(new_track_params)
        self.track.nodepath.setCollideMask(BitMask32(1))
        self.view.addEntity(self.track)

        self.ball = GOM.createEntity(ball_params)
        self.ball.nodepath.showTightBounds()
        collSphere = self.ball.nodepath.find("**/ball")
        collSphere.node().setIntoCollideMask(BitMask32(2))
        collSphere.node().setFromCollideMask(BitMask32.allOff())
        self.view.addEntity(self.ball)

        self.player = GOM.createEntity(player_params)
        self.player.nodepath.setPos(self.ball.nodepath.getPos())
        self.player.nodepath.setQuat(self.track.nodepath, Quat(1, 0, 0, 0))
        self.ball.forward = Vec3(0, 1, 0)
        self.view.addEntity(self.player)

        # normally the view should create it
        self.cam = Camera()
        self.cam.followTarget(self.ball)
        self.camGroundZ = -999
        self.view.cam = self.cam

        # HACK
        self.view.player = self.player
        self.view.ball = self.ball
        self.view.track = self.track

        self.lastTile = ""
        self.tileType = "neutral"
        self.lastTileType = "neutral"

        self._setupCollisionDetection()

    def update(self, task):
        self.inputMgr.update()
        return task.cont

    def updatePhysics(self, task):
        dt = globalClock.getDt()
        if dt > .2:
            return task.cont

        self.camGroundZ = self.DUMMY_VALUE
        ballIsCollidingWithGround = False

        # keep the collision node perpendicular to the track, this is necessary
        # since the ball rolls all the time
        self.ballCollNodeNp.setQuat(self.track.nodepath, Quat(1, 0, 0, 0))

        # check track collisions
        # TODO must optimise this, no need to check the whole track,
        # but only the current segment
        self.picker.traverse(self.track.nodepath)
        if self.pq.getNumEntries() > 0:
            self.pq.sortEntries()
            firstGroundContact = self.DUMMY_VALUE
            firstTile = None
            for i in range(self.pq.getNumEntries()):
                entry = self.pq.getEntry(i)
                z = entry.getSurfacePoint(render).getZ()
                # check camera collision. There can be more than one
                if entry.getFromNodePath() == self.cameraCollNodeNp:
                    if z > firstGroundContact:
                        firstGroundContact = z
                        firstTile = entry.getIntoNodePath()
                # check ball's ray collision with ground
                elif entry.getFromNodePath() == self.ballCollNodeNp:
                    np = entry.getIntoNodePath()
                    #print np
                    self.tileType = np.findAllTextures().getTexture(0).getName()
                    self.ball.RayGroundZ = z
                    ballIsCollidingWithGround = True
                    if entry != self.lastTile:
                        self.lastTile = entry
            self.camGroundZ = firstGroundContact

        if ballIsCollidingWithGround == False:
            if self.ball.isJumping():
                print("no ball-ground contact but jumping")
                pass
            else:
                print("no ball-ground contact, losing")
                self.ball.getLost()
                self.view.gameIsAlive = False
                return task.done  # automatically stop the task

        # check for rays colliding with the ball
        self.picker.traverse(self.ball.nodepath)
        if self.pq.getNumEntries() > 0:
            self.pq.sortEntries()
            if self.pq.getNumEntries() == 1:
                entry = self.pq.getEntry(0)
                if entry.getFromNodePath() == self.cameraCollNodeNp:
                    self.camBallZ = entry.getSurfacePoint(render).getZ()
            else:
                raise AssertionError("must always be 1")

        #if self.camGroundZ > self.camBallZ:
            # ground collision happened before ball collision, this means
            # that the ball is descending a slope
            # Get the row colliding with the cam's ray, get two rows after,
            # set all of them transparent
            # TODO store the rows in a list, as I have to set the transparency
            # back to 0 after the ball has passed
            #pass
            #row = firstTile.getParent()
            #row.setSa(0.8)
            #row.setTransparency(TransparencyAttrib.MAlpha)

        forward = self.view._rootNode.getRelativeVector(self.player.nodepath,
                                                        Vec3(0, 1, 0))
        forward.setZ(0)
        forward.normalize()
        speedVec = forward * dt * self.ball.speed
        self.ball.forward = forward
        self.ball.speedVec = speedVec

        self.player.nodepath.setPos(self.player.nodepath.getPos() + speedVec)
        self.player.nodepath.setZ(self.ball.RayGroundZ + self.ball.jumpZ + \
                                  self.ball.physics.radius + HEIGHT_TRACK)

        # rotate the ball
        self.ball.nodepath.setP(self.ball.nodepath.getP() - 1 * dt * \
                                self.ball.speed * self.ball.spinningFactor)

        # set the ball to the position of the controller node
        self.ball.nodepath.setPos(self.player.nodepath.getPos())
        # rotate the controller to follow the direction of the ball
        self.player.nodepath.setH(self.ball.nodepath.getH())

        return task.cont

    def resetGame(self):
        self.player.nodepath.setPos(Point3(12, 7, .13))
        self.ball.nodepath.setPos(Point3(12, 7, .13))
        self.ball.nodepath.setQuat(Quat(1, 0, 0, 0))
        self.view.gameIsAlive = True

    def updateLogic(self, task):
        # steer
        if self.keyMap["right"] == True:
            right = self.view._rootNode.getRelativeVector(self.player.nodepath,
                                                          Vec3(1, 0, 0))
            if self.ball.speed > 0:
                self.ball.turnRight()

        if self.keyMap["left"] == True:
            if self.ball.speed > 0:
                self.ball.turnLeft()

        if self.keyMap["forward"] == True:
            self.ball.accelerate()
        else:
            self.ball.decelerate()

        if self.keyMap["backward"] == True:
            self.ball.brake()

        if self.keyMap["jump"] == True:
            self.ball.jump()
            self.keyMap["jump"] = False

        # special actions
        if self.tileType == "neutral":
            self.ball.neutral()
        elif self.tileType == "jump":
            if self.lastTileType != "jump":
                self.ball.jump()
        elif self.tileType == "accelerate":
            self.ball.sprint()
        elif self.tileType == "slow":
            self.ball.slowDown()

        self.lastTileType = self.tileType

        if self.ball.speed < 0:
            self.ball.speed = 0

        return task.cont

    def setKey(self, key, value):
        self.keyMap[key] = value

    def debugPosition(self):
        for text in aspect2d.findAllMatches("**/text").asList():
            text.getParent().removeNode()

        OnscreenText(text="Camera's Ray-Ball: %s" % self.camBallZ, style=1,
                     fg=(1, 1, 1, 1), pos=(-0.9, -0.45), scale=.07)
        OnscreenText(text="Camera's Ray-Ground : %s" % self.camGroundZ, style=1,
                     fg=(1, 1, 1, 1), pos=(-0.9, -0.55), scale=.07)
        OnscreenText(text="Camera: %s" % base.camera.getZ(), style=1,
                     fg=(1, 1, 1, 1), pos=(-0.9, -0.65), scale=.07)
        OnscreenText(text="Ball ray-plane: %s" % self.ball.RayGroundZ, style=1,
                     fg=(1, 1, 1, 1), pos=(-0.9, -0.75), scale=.07)

    def _setupCollisionDetection(self):
        self.pq = CollisionHandlerQueue();

        # ball-ground collision setup
        self.ballCollNodeNp = self.ball.nodepath.attachCollisionRay(
            "ball-ground",
            0, 0, 10,   # origin
            0, 0, -1,   # direction
            BitMask32(1), BitMask32.allOff())
        self.ballCollNodeNp.setQuat(self.track.nodepath, Quat(1, 0, 0, 0))
        self.ballCollNodeNp.show()

        # camera-ball collision setup
        bmFrom = BitMask32(1); bmFrom.setBit(1)
        self.cameraCollNodeNp = base.camera.attachCollisionRay(
            "camera-ball",
            0, 0, 0,
            0, 1, 0,
            bmFrom, BitMask32.allOff())
        self.cameraCollNodeNp.setQuat(base.camera.getQuat() + Quat(.1, 0, 0, 0))
        self.cameraCollNodeNp.show()

        self.picker = CollisionTraverser()
        self.picker.setRespectPrevTransform(True)
        self.picker.addCollider(self.ballCollNodeNp, self.pq)
        self.picker.addCollider(self.cameraCollNodeNp, self.pq)

    def _subscribeToEvents(self):
        self.keyMap = {"left": False, "right": False, "forward": False, \
                       "backward": False, "jump": False}

        self.inputMgr = InputManager(base)
        self.inputMgr.createSchemeAndSwitch("game")

        self.inputMgr.bindCallback("arrow_left", self.setKey, ["left", True],
                                   scheme="game")
        self.inputMgr.bindCallback("arrow_right", self.setKey, ["right", True])
        self.inputMgr.bindCallback("arrow_up", self.setKey, ["forward", True])
        self.inputMgr.bindCallback("arrow_left-up", self.setKey, ["left", False])
        self.inputMgr.bindCallback("arrow_right-up", self.setKey, ["right", False])
        self.inputMgr.bindCallback("arrow_up-up", self.setKey, ["forward", False])
        self.inputMgr.bindCallback("arrow_down", self.setKey, ["backward", True])
        self.inputMgr.bindCallback("arrow_down-up", self.setKey, ["backward", False])
        self.inputMgr.bindCallback("space", self.setKey, ["jump", True])
        self.inputMgr.bindCallback("c", self.view.switchCamera)
        self.inputMgr.bindCallback("d", self.debugPosition)


class World(AbstractScene):
    def __init__(self):
        super(World, self).__init__()

        self.lines = render.attachNewNode("lines")
        loader.loadModelCopy("models/misc/xyzAxis").reparentTo(render)
        self.setSceneGraphNode(render)
        #self._setupCollisionDetection()
        self._setupLights()
        self.gameIsAlive = True

    def update(self, task):
        #dt = globalClock.getDt()
        #if dt > .2: return task.cont
        if self.gameIsAlive:
            self.cam.update()
        self.lines.removeNode()
        self.lines = render.attachNewNode("lines")
        return task.cont

    def switchCamera(self):
        base.oobe()

    def _setupLights(self):
        lAttrib = LightAttrib.makeAllOff()
        ambientLight = AmbientLight( "ambientLight" )
        ambientLight.setColor( Vec4(.55, .55, .55, 1) )
        lAttrib = lAttrib.addLight( ambientLight )
        directionalLight = DirectionalLight( "directionalLight" )
        directionalLight.setDirection( Vec3( 0, 0, -1 ) )
        directionalLight.setColor( Vec4( 0.375, 0.375, 0.375, 1 ) )
        directionalLight.setSpecularColor(Vec4(1, 1, 1, 1))
        lAttrib = lAttrib.addLight( directionalLight )


class GameApplication(AbstractApplication):
    def _subscribeToEvents(self):
        base.accept("escape", self.shutdown)
        base.accept("r", self.restartGame)

    def _createLogicAndView(self):
        self.scene = World()
        self.logic = GameLogic(self.scene)

    def restartGame(self):
        taskMgr.remove("update-input")
        taskMgr.remove("update-logic")
        taskMgr.remove("update-physics")
        taskMgr.remove("update-scene")
        self.logic.resetGame()
        self.start()

    def start(self):
        taskMgr.add(self.logic.update, "update-input")
        taskMgr.add(self.logic.updateLogic, "update-logic")
        taskMgr.add(self.logic.updatePhysics, "update-physics")
        taskMgr.add(self.scene.update, "update-scene")

    def shutdown(self):
        sys.exit()


# set a fixed frame rate
from pandac.PandaModules import ClockObject
FPS = 40
globalClock = ClockObject.getGlobalClock()
#globalClock.setMode(ClockObject.MLimited)
#globalClock.setFrameRate(FPS)

if __name__ == '__main__':
    GameApplication().start()
    run()
mit
6,073,786,822,308,098,000
35.28744
91
0.563174
false
3.806893
false
false
false
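The chase-camera placement in Camera.update above reduces to "target position minus forward vector times distance, raised a bit". A sketch with plain tuples instead of Panda3D nodes; the height offset is illustrative:

def camera_pos(target_pos, forward, distance=10.0, height=3.0):
    """Place the camera `distance` units behind the target along its
    forward vector, raised by `height` (forward assumed normalized)."""
    tx, ty, tz = target_pos
    fx, fy, fz = forward
    return (tx - fx * distance, ty - fy * distance, tz + height)

# Target at the origin moving along +y: the camera sits 10 units behind on -y.
assert camera_pos((0.0, 0.0, 0.0), (0.0, 1.0, 0.0)) == (0.0, -10.0, 3.0)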
hlzz/dotfiles
graphics/cgal/Documentation/conversion_tools/markup_replacement.py
1
1846
#!/usr/bin/python2

# replace markup #, ##, ### by \section, \subsection, \subsubsection.
# anchor names are preserved and generated from the section name otherwise
# The script is not perfect and might miss some specific cases

from sys import argv
from os import path
import string
import re

anchors = {}


def generate_anchor(chapter, text):
    pattern = re.compile('[\W_]+')
    words = text.split()
    i = 1
    res = chapter + pattern.sub('', words[0])
    while len(res) < 40 and i < len(words):
        word = pattern.sub('', words[i])
        res += word
        i += 1
    if anchors.has_key(res):
        anchors[res] += 1
        res += "_" + str(anchors[res])
    else:
        anchors[res] = 0
    return res


f = file(argv[1])

regexp_line = re.compile('^\s*#')
#~ regexp_section = re.compile('^\s*#\s*([ a-b().,]+)\s*#(.*)')
regexp_section = re.compile('^\s*(#+)\s*([0-9a-zA-Z (),.:?%-`\']+[0-9a-zA-Z.?`)])\s*#+(.*)')
regexp_anchor = re.compile('^\s*{#([0-9a-zA-Z_]+)}')

result = ""
diff = False
chapter = path.abspath(argv[1]).split('/')[-2]

for line in f.readlines():
    if regexp_line.match(line):
        m = regexp_section.search(line)
        if m:
            values = m.groups()
            anchor = ''
            if len(values) == 2:
                anchor = generate_anchor(chapter, values[1])
            else:
                anchor = regexp_anchor.match(values[2])
                if anchor:
                    anchor = anchor.group(1)
                else:
                    anchor = generate_anchor(chapter, values[1])
            if len(values[0]) == 1:
                result += "\section " + anchor + " " + values[1] + "\n"
            elif len(values[0]) == 2:
                result += "\subsection " + anchor + " " + values[1] + "\n"
            elif len(values[0]) == 3:
                result += "\subsubsection " + anchor + " " + values[1] + "\n"
            else:
                print("Error while processing " + argv[1])
                assert False
            diff = True
        else:
            result += line
    else:
        result += line
f.close()

if diff:
    f = file(argv[1], 'w')
    f.write(result)
    f.close()
bsd-3-clause
-7,124,903,640,389,768,000
24.638889
90
0.591008
false
3.041186
false
false
false
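The anchor generation above (strip non-word characters, concatenate words up to ~40 characters, deduplicate with a counter) in a self-contained, Python-3-friendly form; the sample chapter and heading are made up:

import re

_seen = {}
_clean = re.compile(r'[\W_]+')

def make_anchor(chapter, heading, max_len=40):
    """Build a chapter-prefixed anchor from heading words; dedupe repeats."""
    words = heading.split()
    slug = chapter + _clean.sub('', words[0])
    i = 1
    while len(slug) < max_len and i < len(words):
        slug += _clean.sub('', words[i])
        i += 1
    if slug in _seen:
        _seen[slug] += 1
        return slug + "_" + str(_seen[slug])
    _seen[slug] = 0
    return slug

assert make_anchor("Kernel", "The Main Section") == "KernelTheMainSection"
assert make_anchor("Kernel", "The Main Section") == "KernelTheMainSection_1"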
Aegeaner/spark
python/pyspark/testing/utils.py
1
3566
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import glob
import os
import struct
import sys
import unittest

from pyspark import SparkContext, SparkConf

have_scipy = False
have_numpy = False
try:
    import scipy.sparse
    have_scipy = True
except:
    # No SciPy, but that's okay, we'll skip those tests
    pass
try:
    import numpy as np
    have_numpy = True
except:
    # No NumPy, but that's okay, we'll skip those tests
    pass

SPARK_HOME = os.environ["SPARK_HOME"]


def read_int(b):
    return struct.unpack("!i", b)[0]


def write_int(i):
    return struct.pack("!i", i)


class QuietTest(object):
    def __init__(self, sc):
        self.log4j = sc._jvm.org.apache.log4j

    def __enter__(self):
        self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
        self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.log4j.LogManager.getRootLogger().setLevel(self.old_level)


class PySparkTestCase(unittest.TestCase):

    def setUp(self):
        self._old_sys_path = list(sys.path)
        class_name = self.__class__.__name__
        self.sc = SparkContext('local[4]', class_name)

    def tearDown(self):
        self.sc.stop()
        sys.path = self._old_sys_path


class ReusedPySparkTestCase(unittest.TestCase):

    @classmethod
    def conf(cls):
        """
        Override this in subclasses to supply a more specific conf
        """
        return SparkConf()

    @classmethod
    def setUpClass(cls):
        cls.sc = SparkContext('local[4]', cls.__name__, conf=cls.conf())

    @classmethod
    def tearDownClass(cls):
        cls.sc.stop()


class ByteArrayOutput(object):
    def __init__(self):
        self.buffer = bytearray()

    def write(self, b):
        self.buffer += b

    def close(self):
        pass


def search_jar(project_relative_path, jar_name_prefix):
    project_full_path = os.path.join(
        os.environ["SPARK_HOME"], project_relative_path)

    # We should ignore the following jars
    ignored_jar_suffixes = ("javadoc.jar", "sources.jar", "test-sources.jar", "tests.jar")

    # Search jar in the project dir using the jar name_prefix for both sbt build and maven
    # build because the artifact jars are in different directories.
    sbt_build = glob.glob(os.path.join(
        project_full_path, "target/scala-*/%s*.jar" % jar_name_prefix))
    maven_build = glob.glob(os.path.join(
        project_full_path, "target/%s*.jar" % jar_name_prefix))
    jar_paths = sbt_build + maven_build
    jars = [jar for jar in jar_paths if not jar.endswith(ignored_jar_suffixes)]

    if not jars:
        return None
    elif len(jars) > 1:
        raise Exception("Found multiple JARs: %s; please remove all but one" % (", ".join(jars)))
    else:
        return jars[0]
apache-2.0
-146,794,885,754,362,100
27.07874
97
0.668256
false
3.623984
true
false
false
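The point of ReusedPySparkTestCase above is that setUpClass creates a single SparkContext shared by every test in the class, avoiding per-test startup cost. A hypothetical test module built on these helpers (the class and test names are made up; it assumes PySpark is installed, SPARK_HOME is set, and the module above is importable as pyspark.testing.utils):

import unittest

from pyspark.testing.utils import ReusedPySparkTestCase, QuietTest


class WordCountTest(ReusedPySparkTestCase):
    # self.sc is created once per class, so both tests share one context.
    def test_count(self):
        rdd = self.sc.parallelize(["a", "b", "a"])
        self.assertEqual(rdd.count(), 3)

    def test_quiet_failure(self):
        # QuietTest raises the log level to FATAL around an intentionally
        # failing job, keeping the test output readable.
        with QuietTest(self.sc):
            with self.assertRaises(Exception):
                self.sc.parallelize([1]).map(lambda x: x / 0).collect()


if __name__ == "__main__":
    unittest.main()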
rahulraj/web_projects
assignment2/src/photogallery/generator/galleryitemfactory.py
1
6059
import os
import re
import os.path
from iptcinfo import IPTCInfo

from galleryitem import JpegPicture, JpegDirectory, directory_name_to_html_file_name
from ..utils.inject import assign_injectables


def is_jpeg_file(file_name):
    """
    Determine if a file is labeled as a JPEG.

    Args:
        file_name the name of the file.

    Returns:
        True if the file ends with .jpg.
    """
    return file_is_of_type(file_name, 'jpg')


def is_css_file(file_name):
    """
    Determine if a file is labeled as CSS.

    Args:
        file_name the name of the file.

    Returns:
        True if the file ends with .css.
    """
    return file_is_of_type(file_name, 'css')


def is_js_file(file_name):
    """
    Determine if a file is labeled as JavaScript.

    Args:
        file_name the name of the file.

    Returns:
        True if the file ends with .js.
    """
    return file_is_of_type(file_name, 'js')


def file_is_of_type(file_name, extension):
    """
    Return whether a file is of a certain type.

    Args:
        file_name the name of the file to test.
        extension the part of the name after the . which will be checked
            with a regular expression.

    Returns:
        True if file_name ends with extension.
    """
    type_re = re.compile(r'\.%s' % extension)
    return type_re.search(file_name) != None


class GalleryItemFactory(object):
    """
    Class to bootstrap the application by reading the disk and creating
    GalleryItems from the existing JPEGs and subdirectories.
    """

    def __init__(self, lookup_table, should_prompt,
                 iptc_info_constructor=IPTCInfo,
                 list_directory=os.listdir,
                 is_directory=os.path.isdir):
        """
        Constructor for GalleryItemFactory

        Args:
            lookup_table the lookup_table that the files use to search IPTCInfo.data.
            should_prompt whether the program should prompt the user for directory names.
            iptc_info_constructor the constructor for IPTCInfo objects that the files
                will use to lookup metadata (defaults to IPTCInfo).
            list_directory the function that takes a path and lists the files in it,
                defaults to os.listdir
            is_directory a function that takes a file name and returns true if it is
                a directory (defaults to os.path.isdir).
        """
        assign_injectables(self, locals())

    def create_directory(self, path, parent_path=None):
        """
        Creates a JpegDirectory object with the appropriate GalleryItems

        Args:
            path the path to the directory that the JPEGs are stored in.
            parent_path the directory one level up of path; if we are creating a
                subdirectory this will be used to populate back_href. It can be
                None if we are creating the top-most directory.

        Returns:
            A JpegDirectory containing GalleryItems wrapped around all the
            appropriate contents of the directory referred to by path.

        Raises:
            Any exception thrown when trying to extract IPTC information from
            a JPEG file. See the documentation of try_create_jpeg_picture for
            details.
        """
        file_names = self.list_directory(path)
        jpeg_names = filter(is_jpeg_file, file_names)
        path_contents = []
        for name in jpeg_names:
            maybe_jpeg_picture = self.try_create_jpeg_picture(path, name)
            if maybe_jpeg_picture is not None:
                path_contents.append(maybe_jpeg_picture)
        subdirectories = self.create_subdirectories(file_names, path)
        path_contents.extend(subdirectories)
        back_href = self.maybe_get_back_href(parent_path)
        return JpegDirectory(path, path_contents, self.should_prompt,
                             back_href=back_href)

    def try_create_jpeg_picture(self, path, name):
        """
        Given a path and the name of a file ending in .jpg, tries to create a
        JpegPicture object out of it.

        Args:
            path the path to the directory the file is in.
            name the name of the file.

        Returns:
            A JpegPicture object, if creating it was successful.
            None if creating the JpegPicture failed for some reason that does
            not warrant crashing the program.

        Raises:
            Any exception raised when trying to extract IPTC information from
            the JPEG, that is not an IOError or an exception with the message
            'No IPTC data found.' In those two cases, simply skips the file
            and prints a message saying so.
        """
        full_jpeg_name = os.path.join(path, name)
        try:
            return JpegPicture(name,
                               directory_name_to_html_file_name(path),
                               self.iptc_info_constructor(full_jpeg_name),
                               self.lookup_table)
        except IOError:
            print "I was unable to open the file ", name, " for some reason"
            print "Maybe it's corrupted?"
            print "Skipping it..."
            return None
        except Exception as possible_iptc_exception:
            if str(possible_iptc_exception) == 'No IPTC data found.':
                print "I was unable to get IPTC data from the file %s" % name
                print "Skipping it..."
                return None
            else:
                raise possible_iptc_exception  # Some other exception

    def maybe_get_back_href(self, path):
        """
        Given a nullable path name, turns it into a href that can be used to
        write an anchor tag pointing to a HTML file. If path is None,
        propagates the None by returning it.

        Args:
            path the path name, or None if it is not applicable.
        """
        if path is None:
            return None
        else:
            return directory_name_to_html_file_name(path)

    def create_subdirectories(self, file_names, path):
        """
        Helper methods to find the subdirectories of path and create
        JpegDirectories for them, fully initializing their contents too.

        Args:
            file_names the names of the files in path.
            path the root directory path to process.
        """
        full_file_names = [os.path.join(path, name) for name in file_names]
        directory_names = filter(self.is_directory, full_file_names)
        jpeg_directories = [self.create_directory(directory_name, parent_path=path) \
                            for directory_name in directory_names]
        return jpeg_directories
mit
-6,736,790,274,765,474,000
31.575269
84
0.674039
false
3.952381
false
false
false
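The constructor above deliberately takes its file-system helpers (list_directory, is_directory) and the IPTCInfo constructor as parameters, so the factory can be exercised without touching the disk. A rough sketch of that injection, with made-up fakes; it assumes the module is importable as galleryitemfactory and that the real JpegPicture/JpegDirectory classes accept the fake IPTC object:

from galleryitemfactory import GalleryItemFactory

class FakeIPTCInfo(object):
    # Stand-in for IPTCInfo: exposes an empty .data mapping, reads nothing.
    def __init__(self, file_name):
        self.data = {}

def fake_listdir(path):
    return ['one.jpg', 'two.jpg']   # pretend directory listing

def fake_isdir(path):
    return False                    # no subdirectories in this fake tree

factory = GalleryItemFactory(
    lookup_table={}, should_prompt=False,
    iptc_info_constructor=FakeIPTCInfo,
    list_directory=fake_listdir,
    is_directory=fake_isdir)
directory = factory.create_directory('albums/holiday')  # hypothetical path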
mjtamlyn/archery-scoring
scores/migrations/0001_initial.py
1
2398
# -*- coding: utf-8 -*-
from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('entries', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Arrow',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('arrow_value', models.PositiveIntegerField()),
                ('arrow_of_round', models.PositiveIntegerField()),
                ('is_x', models.BooleanField(default=False)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Dozen',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('total', models.PositiveIntegerField()),
                ('dozen', models.PositiveIntegerField()),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Score',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('score', models.PositiveIntegerField(default=0, db_index=True)),
                ('hits', models.PositiveIntegerField(default=0)),
                ('golds', models.PositiveIntegerField(default=0)),
                ('xs', models.PositiveIntegerField(default=0)),
                ('alteration', models.IntegerField(default=0)),
                ('retired', models.BooleanField(default=False)),
                ('disqualified', models.BooleanField(default=False)),
                ('target', models.OneToOneField(to='entries.TargetAllocation', on_delete=models.CASCADE)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='dozen',
            name='score',
            field=models.ForeignKey(to='scores.Score', on_delete=models.CASCADE),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='arrow',
            name='score',
            field=models.ForeignKey(to='scores.Score', on_delete=models.CASCADE),
            preserve_default=True,
        ),
    ]
bsd-3-clause
7,393,244,661,349,838,000
35.892308
114
0.525855
false
4.864097
false
false
false
gemrb/gemrb
gemrb/GUIScripts/bg1/ImportFile.py
1
2330
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# character generation, import (GUICG20)

import GemRB
from GUIDefines import *
import GUICommon
import CharGenCommon

# import from a character sheet
ImportWindow = 0
TextAreaControl = 0

def OnLoad():
    global ImportWindow, TextAreaControl

    ImportWindow = GemRB.LoadWindow(20, "GUICG")

    TextAreaControl = ImportWindow.GetControl(4)
    TextAreaControl.SetText(10963)

    TextAreaControl = ImportWindow.GetControl(2)
    TextAreaControl.ListResources(CHR_EXPORTS)

    DoneButton = ImportWindow.GetControl(0)
    DoneButton.SetText (11973)
    DoneButton.SetState(IE_GUI_BUTTON_DISABLED)

    CancelButton = ImportWindow.GetControl(1)
    CancelButton.SetText (13727)

    DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, DonePress)
    CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CancelPress)
    TextAreaControl.SetEvent(IE_GUI_TEXTAREA_ON_SELECT, SelectPress)
    ImportWindow.ShowModal(MODAL_SHADOW_NONE)
    return

def SelectPress():
    DoneButton = ImportWindow.GetControl(0)
    DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
    return

def DonePress():
    ImportWindow.Close()
    FileName = TextAreaControl.QueryText()
    Slot = GemRB.GetVar("Slot")
    GemRB.CreatePlayer(FileName, Slot | 0x8000, 1)
    GemRB.SetToken ("CHARNAME", GemRB.GetPlayerName (Slot))
    GemRB.SetToken ("SmallPortrait", GemRB.GetPlayerPortrait (Slot, 1)["ResRef"])
    GemRB.SetToken ("LargePortrait", GemRB.GetPlayerPortrait (Slot, 0)["ResRef"])
    GemRB.SetVar ("ImportedChar", 1)
    CharGenCommon.jumpTo("appearance")
    return

def CancelPress():
    ImportWindow.Close()
    GemRB.SetNextScript(GemRB.GetToken("NextScript"))
    return
gpl-2.0
-4,098,847,976,789,444,000
29.657895
81
0.777682
false
3.200549
false
false
false
datafolklabs/cement
cement/core/extension.py
1
3997
"""Cement core extensions module.""" import sys from abc import abstractmethod from ..core import exc from ..core.interface import Interface from ..core.handler import Handler from ..utils.misc import minimal_logger LOG = minimal_logger(__name__) class ExtensionInterface(Interface): """ This class defines the Extension Interface. Handlers that implement this interface must provide the methods and attributes defined below. In general, most implementations should sub-class from the provided :class:`ExtensionHandler` base class as a starting point. """ class Meta: """Handler meta-data.""" #: The string identifier of the interface. interface = 'extension' @abstractmethod def load_extension(self, ext_module): """ Load an extension whose module is ``ext_module``. For example, ``cement.ext.ext_json``. Args: ext_module (str): The name of the extension to load """ pass # pragma: no cover @abstractmethod def load_extensions(self, ext_list): """ Load all extensions from ``ext_list``. Args: ext_list (list): A list of extension modules to load. For example: ``['cement.ext.ext_json', 'cement.ext.ext_logging']`` """ pass # pragma: no cover class ExtensionHandler(ExtensionInterface, Handler): """ This handler implements the Extention Interface, which handles loading framework extensions. All extension handlers should sub-class from here, or ensure that their implementation meets the requirements of this base class. """ class Meta: """ Handler meta-data (can be passed as keyword arguments to the parent class). """ #: The string identifier of the handler. label = 'cement' def __init__(self, **kw): super().__init__(**kw) self.app = None self._loaded_extensions = [] def get_loaded_extensions(self): """ Get all loaded extensions. Returns: list: A list of loaded extensions. """ return self._loaded_extensions def list(self): """ Synonymous with ``get_loaded_extensions()``. Returns: list: A list of loaded extensions. """ return self._loaded_extensions def load_extension(self, ext_module): """ Given an extension module name, load or in other-words ``import`` the extension. Args: ext_module (str): The extension module name. For example: ``cement.ext.ext_logging``. Raises: cement.core.exc.FrameworkError: Raised if ``ext_module`` can not be loaded. """ # If its not a full module path then preppend our default path if ext_module.find('.') == -1: ext_module = 'cement.ext.ext_%s' % ext_module if ext_module in self._loaded_extensions: LOG.debug("framework extension '%s' already loaded" % ext_module) return LOG.debug("loading the '%s' framework extension" % ext_module) try: if ext_module not in sys.modules: __import__(ext_module, globals(), locals(), [], 0) if hasattr(sys.modules[ext_module], 'load'): sys.modules[ext_module].load(self.app) if ext_module not in self._loaded_extensions: self._loaded_extensions.append(ext_module) except ImportError as e: raise exc.FrameworkError(e.args[0]) def load_extensions(self, ext_list): """ Given a list of extension modules, iterate over the list and pass individually to ``self.load_extension()``. Args: ext_list (list): A list of extension module names (str). """ for ext in ext_list: self.load_extension(ext)
bsd-3-clause
2,490,373,445,105,531,400
26.565517
79
0.589192
false
4.521493
false
false
false
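The contract load_extension() enforces above is small: an extension is just an importable module, optionally exposing load(app), and bare names are expanded to 'cement.ext.ext_<name>'. A sketch of that contract using a stub module planted in sys.modules under a made-up dotted name, so no real package is needed; it assumes ExtensionHandler can be instantiated directly here (real cement may require framework setup first) and that the module above is importable as cement.core.extension:

import sys
import types

from cement.core.extension import ExtensionHandler

# Build a throwaway extension module in memory and register it by name.
stub = types.ModuleType('demo.ext_stub')
stub.loaded_app = None
def load(app):
    stub.loaded_app = app   # record the app the framework handed us
stub.load = load
sys.modules['demo.ext_stub'] = stub

handler = ExtensionHandler()
handler.app = 'fake-app'                  # normally set by the framework
handler.load_extension('demo.ext_stub')   # dotted name: used as-is, no prefix
assert handler.list() == ['demo.ext_stub']
assert stub.loaded_app == 'fake-app'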
xjw1001001/IGCexpansion
test/Ancestral_reconstruction/PAML/parse reconstructed fasta.py
1
7314
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 10 08:23:33 2017

@author: xjw1001001
"""
#only when PAML in desktop is available,the yeast version only

from Bio import Seq, SeqIO, AlignIO
from Bio.Phylo.PAML import codeml, baseml
import numpy as np

paralog_list = [['YLR406C', 'YDL075W'],
                ['YER131W', 'YGL189C'],
                ['YML026C', 'YDR450W'],
                ['YNL301C', 'YOL120C'],
                ['YNL069C', 'YIL133C'],
                ['YMR143W', 'YDL083C'],
                ['YJL177W', 'YKL180W'],
                ['YBR191W', 'YPL079W'],
                ['YER074W', 'YIL069C'],
                ['YDR418W', 'YEL054C'],
                ['YBL087C', 'YER117W'],
                ['YLR333C', 'YGR027C'],
                ['YMR142C', 'YDL082W'],
                ['YER102W', 'YBL072C'],
                ]

for pair in paralog_list:
    primalline = []
    fastaline = []
    with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) + '/out/construct.fasta', 'r') as f:
        for line in f.readlines():
            primalline.append(line)
            sline = '>' + line
            sline = sline.replace('node #14', 'Root' + pair[0])
            sline = sline.replace(' ', '')
            sline = sline.replace('\n', '')
            sline = sline.replace('node#15', 'N0' + pair[0])
            for i in range(5):
                sline = sline.replace('node#' + str(15 + 1 + i), 'N' + str(1 + i) + pair[1])
                sline = sline.replace('node#' + str(20 + 1 + i), 'N' + str(1 + i) + pair[0])
            sline = sline.replace(pair[0], pair[0] + '\n')
            sline = sline.replace(pair[1], pair[1] + '\n')
            fastaline.append(sline)

    f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) + '.fasta', 'w+')
    for line in fastaline:
        f1.write(line)
        f1.write('\n')
    f1.close()

#ERa_ERb
pair = ['ERa', 'ERb']
primalline = []
fastaline = []
substitution_dict = {'node#39': 'N14ERa', 'node#38': 'N8ERa', 'node#37': 'N7ERa', 'node#36': 'N6ERa', 'node#41': 'N9ERa', 'node#40': 'N5ERa',
                     'node#35': 'N4ERa', 'node#44': 'N13ERa', 'node#46': 'N12ERa', 'node#47': 'N11ERa', 'node#45': 'N10ERa',
                     'node#43': 'N3ERa', 'node#42': 'N2ERa', 'node#34': 'N1ERa',
                     'node#53': 'N14ERb', 'node#52': 'N8ERb', 'node#51': 'N7ERb', 'node#50': 'N6ERb', 'node#55': 'N9ERb', 'node#54': 'N5ERb',
                     'node#49': 'N4ERb', 'node#58': 'N13ERb', 'node#60': 'N12ERb', 'node#61': 'N11ERb', 'node#59': 'N10ERb',
                     'node#57': 'N3ERb', 'node#56': 'N2ERb', 'node#48': 'N1ERb'}
with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) + '/out/construct.fasta', 'r') as f:
    for line in f.readlines():
        primalline.append(line)
        sline = '>' + line
        sline = sline.replace('node #32', 'Root' + pair[0])
        sline = sline.replace(' ', '')
        sline = sline.replace('\n', '')
        sline = sline.replace('node#33', 'N0' + pair[0])
        for i in substitution_dict.keys():
            sline = sline.replace(i, substitution_dict[i])
        sline = sline.replace(pair[0], pair[0] + '\n')
        sline = sline.replace(pair[1], pair[1] + '\n')
        fastaline.append(sline)

f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) + '.fasta', 'w+')
for line in fastaline:
    f1.write(line)
    f1.write('\n')
f1.close()

#ARa_ERa
pair = ['ARa', 'ERa']
primalline = []
fastaline = []
substitution_dict = {'node#36': 'N12ERa', 'node#35': 'N11ERa', 'node#34': 'N7ERa', 'node#33': 'N6ERa', 'node#32': 'N5ERa', 'node#37': 'N8ERa',
                     'node#31': 'N4ERa', 'node#41': 'N10ERa', 'node#40': 'N9ERa', 'node#39': 'N3ERa', 'node#38': 'N2ERa',
                     'node#30': 'N1ERa',
                     'node#48': 'N12ARa', 'node#47': 'N11ARa', 'node#46': 'N7ARa', 'node#45': 'N6ARa', 'node#44': 'N5ARa', 'node#49': 'N8ARa',
                     'node#43': 'N4ARa', 'node#53': 'N10ARa', 'node#52': 'N9ARa', 'node#51': 'N3ARa', 'node#50': 'N2ARa',
                     'node#42': 'N1ARa', 'node#29': 'N0ERa', 'node#28': 'RootERa'}
with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) + '/out/construct.fasta', 'r') as f:
    for line in f.readlines():
        primalline.append(line)
        sline = '>' + line
        sline = sline.replace(' ', '')
        sline = sline.replace('\n', '')
        for i in substitution_dict.keys():
            sline = sline.replace(i, substitution_dict[i])
        sline = sline.replace(pair[0], pair[0] + '\n')
        sline = sline.replace(pair[1], pair[1] + '\n')
        fastaline.append(sline)

f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) + '.fasta', 'w+')
for line in fastaline:
    f1.write(line)
    f1.write('\n')
f1.close()

#ARGRMRPR
pairlist = [['AR', 'MR'], ['AR', 'GR'], ['AR', 'PR'], ['MR', 'GR'], ['MR', 'PR'], ['PR', 'GR']]
for pair in pairlist:
    primalline = []
    fastaline = []
    substitution_dict = {'node#25': 'N4' + pair[0], 'node#31': 'N9' + pair[0], 'node#30': 'N7' + pair[0],
                         'node#32': 'N8' + pair[0], 'node#29': 'N6' + pair[0], 'node#28': 'N5' + pair[0],
                         'node#27': 'N3' + pair[0], 'node#26': 'N2' + pair[0], 'node#24': 'N1' + pair[0],
                         'node#34': 'N4' + pair[1], 'node#40': 'N9' + pair[1], 'node#39': 'N7' + pair[1],
                         'node#41': 'N8' + pair[1], 'node#38': 'N6' + pair[1], 'node#37': 'N5' + pair[1],
                         'node#36': 'N3' + pair[1], 'node#35': 'N2' + pair[1], 'node#33': 'N1' + pair[1],
                         'node#23': 'N0' + pair[0], 'node#22': 'ROOT' + pair[0],
                         }
    with open('/Users/xjw1001001/Desktop/PAML/output/' + '_'.join(pair) + '/out/construct.fasta', 'r') as f:
        for line in f.readlines():
            primalline.append(line)
            sline = '>' + line
            sline = sline.replace(' ', '')
            sline = sline.replace('\n', '')
            for i in substitution_dict.keys():
                sline = sline.replace(i, substitution_dict[i])
            sline = sline.replace(pair[0], pair[0] + '\n')
            sline = sline.replace(pair[1], pair[1] + '\n')
            fastaline.append(sline)

    f1 = open('/Users/xjw1001001/Desktop/PAML/PAMLfasta/PAML_' + '_'.join(pair) + '.fasta', 'w+')
    for line in fastaline:
        f1.write(line)
        f1.write('\n')
    f1.close()

PAML_parameter_dict = {}
path = '/Users/xjw1001001/Desktop/PAML/'
paralog_list = [['YLR406C', 'YDL075W'],  #pair#TODO: other data
                ['YER131W', 'YGL189C'],
                ['YML026C', 'YDR450W'],
                ['YNL301C', 'YOL120C'],
                ['YNL069C', 'YIL133C'],
                ['YMR143W', 'YDL083C'],
                ['YJL177W', 'YKL180W'],
                ['YBR191W', 'YPL079W'],
                ['YER074W', 'YIL069C'],
                ['YDR418W', 'YEL054C'],
                ['YBL087C', 'YER117W'],
                ['YLR333C', 'YGR027C'],
                ['YMR142C', 'YDL082W'],
                ['YER102W', 'YBL072C'],
                ['EDN', 'ECP'], ['ERa', 'ERb'], ['AR', 'MR'], ['AR', 'GR'], ['AR', 'PR'],
                ['MR', 'GR'], ['MR', 'PR'], ['PR', 'GR']
                ]

for pair in paralog_list:  #parameters: kappa(-5), omega(-1), tau,branches
    PAML_parameter_dict['_'.join(pair)] = {}
    codeml_result = codeml.read(path + 'output/' + '_'.join(pair) + '/out/' + '_'.join(pair) + '_codeml')
    #baseml_result = baseml.read('/Users/xjw1001001/Documents/GitHub/IGCexpansion2/test/Ancestral_reconstruction/PAML/output/' + '_'.join(pair) + '/' + '_'.join(pair) + '_baseml')
    parameter_list = codeml_result['NSsites'][0]['parameters']['parameter list'].split(' ')
    PAML_parameter_dict['_'.join(pair)]['kappa'] = parameter_list[-5]
    PAML_parameter_dict['_'.join(pair)]['omega'] = parameter_list[-1]
gpl-3.0
-1,790,204,341,473,735,700
45.592357
179
0.537462
false
2.703882
false
false
false
qinjian623/dlnotes
tutorials/tensorflow/mnist_softmax.py
1
2619
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""A very simple MNIST classifier.

See extensive documentation at
http://tensorflow.org/tutorials/mnist/beginners/index.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse

# Import data
from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

FLAGS = None


def main(_):
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

  # Create the model
  x = tf.placeholder(tf.float32, [None, 784])
  W = tf.Variable(tf.zeros([784, 10]))
  b = tf.Variable(tf.zeros([10]))
  y = tf.matmul(x, W) + b

  # Define loss and optimizer
  y_ = tf.placeholder(tf.float32, [None, 10])

  # The raw formulation of cross-entropy,
  #
  #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(tf.softmax(y)),
  #                                 reduction_indices=[1]))
  #
  # can be numerically unstable.
  #
  # So here we use tf.nn.softmax_cross_entropy_with_logits on the raw
  # outputs of 'y', and then average across the batch.
  cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, y_))
  train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

  sess = tf.InteractiveSession()
  # Train
  tf.initialize_all_variables().run()
  for _ in range(50000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

  # Test trained model
  correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
  accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  print(sess.run(accuracy, feed_dict={x: mnist.test.images,
                                      y_: mnist.test.labels}))


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--data_dir', type=str, default='/tmp/data',
                      help='Directory for storing data')
  FLAGS = parser.parse_args()
  tf.app.run()
gpl-3.0
-4,167,562,545,988,799,000
33.012987
80
0.66323
false
3.563265
false
false
false
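The comment in the file above warns that taking log(softmax(y)) directly is numerically unstable, which is why the loss is computed on raw logits. A minimal NumPy sketch of the failure and of the log-sum-exp fix (the logit values are chosen only to force the overflow):

import numpy as np

logits = np.array([1000.0, 0.0, -1000.0])

# Naive: softmax then log. exp(1000) overflows to inf, so the result is nan/-inf.
naive = np.log(np.exp(logits) / np.sum(np.exp(logits)))

# Stable: compute log softmax via the log-sum-exp trick (shift by the max first).
shifted = logits - np.max(logits)
stable = shifted - np.log(np.sum(np.exp(shifted)))

print(naive)   # [  nan -inf -inf] plus overflow warnings
print(stable)  # [    0. -1000. -2000.] (exp(-1000) underflows harmlessly to 0)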
rinigus/osmscout-server
scripts/import/prepare_distribution.py
1
5119
#!/usr/bin/env python

# This script prepares files before uploading them for distribution
# This has to be run after all imports are finished

import json, pickle, os, stat, shutil
from mapbox_country_pack import world_pack as mapboxgl_world_pack

root_dir = "distribution"
bucket = open("bucket_name", "r").read().strip()
url_base = "http://data.modrana.org/osm_scout_server"
#url_base = "https://kuqrhldx.e24files.com"

url_specs = {
    "base": url_base,
    "type": "url",
    #"osmscout": "osmscout-27",
    "geocoder_nlp": "geocoder-nlp-29",
    "postal_global": "postal-global-2",
    "postal_country": "postal-country-2",
    "mapnik_global": "mapnik-global-1",
    "mapnik_country": "mapnik-country-24",
    "mapboxgl_country": "mapboxgl-16",
    "mapboxgl_global": "mapboxgl-16",
    "mapboxgl_glyphs": "mapboxgl-16",
    "valhalla": "valhalla-24",
}

dist = json.loads( open("countries.json", "r").read() )

dist["postal/global"] = { "id": "postal/global",
                          "type": "postal/global",
                          "postal_global": { "path": "postal/global-v1" } }

dist["mapnik/global"] = { "id": "mapnik/global",
                          "type": "mapnik/global",
                          "mapnik_global": { "path": "mapnik/global" } }

dist["mapboxgl/glyphs"] = { "id": "mapboxgl/glyphs",
                            "type": "mapboxgl/glyphs",
                            "mapboxgl_glyphs": { "path": "mapboxgl/glyphs" } }

dist["url"] = url_specs

# could make it smarter in future to check whether the files have
# changed since the last upload
toupload = []
upload_commands = "#!/bin/bash\nset -e\nrm -f digest.md5\n"

def uploader(dirname, targetname, extra="/"):
    global toupload, upload_commands
    toupload.append([dirname, targetname])
    upload_commands += "echo\necho " + dirname + "\n"
    sd = dirname.replace("/", "\/")
    st = targetname.replace("/", "\/")
    upload_commands += "md5deep -t -l -r " + dirname + " | sed 's/%s/%s/g' >> digest.md5\n" % (sd, st)
    upload_commands += "s3cmd --config=.s3cfg sync " + dirname + extra + " s3://" + bucket + "/" + targetname + extra + " --acl-public --signature-v2 " + "\n"

def getprop(dirname):
    props = {}
    for p in ["size", "size-compressed", "timestamp", "version"]:
        v = open(dirname + "." + p, "r").read().split()[0]
        props[p] = v
    return props

# fill database details
for d in dist:
    for sub in dist[d]:
        if "packages" in dist[d][sub]:
            continue # this item is distributed via packages
        try:
            rpath = dist[d][sub]["path"]
            print(rpath)
        except:
            continue
        locdir = root_dir + "/" + rpath
        remotedir = url_specs[sub] + "/" + rpath
        dist[d][sub].update( getprop(locdir) )
        uploader(locdir, remotedir)

uploader(root_dir + "/valhalla", url_specs["valhalla"] + "/valhalla")
uploader(root_dir + "/mapboxgl/packages", url_specs["mapboxgl_country"] + "/mapboxgl/packages")

# add mapbox global object after uploader commands are ready
dist["mapboxgl/global"] = { "id": "mapboxgl/global",
                            "type": "mapboxgl/global",
                            "mapboxgl_global": mapboxgl_world_pack() }

# save provided countries
fjson = open("provided/countries_provided.json", "w")
fjson.write( json.dumps( dist, sort_keys=True, indent=4, separators=(',', ': ')) )
fjson.close()

uploader("provided/countries_provided.json", "countries_provided.json", extra = "")

upload_commands += "bzip2 -f digest.md5\n"
uploader("digest.md5.bz2", "digest.md5.bz2", extra = "")
upload_commands += "echo\necho 'Set S3 permissions'\n"
upload_commands += "s3cmd --config=.s3cfg setacl s3://" + bucket + "/ --acl-public --recursive\n"
upload_commands += "mv digest.md5 digest.md5.bz2.md5\n"
uploader("digest.md5.bz2.md5", "digest.md5.bz2.md5", extra = "")

# save uploader script
fscript = open("uploader.sh", "w")
fscript.write( upload_commands )
fscript.write( "echo\necho 'Set S3 permissions'\n" )
fscript.write( "s3cmd --config=.s3cfg setacl s3://" + bucket + "/ --acl-public --recursive\n" )
fscript.write( "s3cmd --config=.s3cfg setacl s3://" + bucket + "/ --acl-private\n" )
fscript.close()
st = os.stat('uploader.sh')
os.chmod('uploader.sh', st.st_mode | stat.S_IEXEC)

print("Check uploader script and run it")

# generate public_html folder for testing
testing_mirror = "public_http"
shutil.rmtree(testing_mirror, ignore_errors=True)
os.mkdir(testing_mirror)
os.symlink("../provided/countries_provided.json",
           os.path.join(testing_mirror, "countries_provided.json"))
distlink = { "geocoder_nlp": "geocoder-nlp",
             "mapboxgl_country": "mapboxgl",
             "mapnik_country": "mapnik",
             "mapnik_global": "mapnik",
             #"osmscout": "osmscout",
             "postal_country": "postal",
             "postal_global": "postal",
             "valhalla": "valhalla" }
for t in ["geocoder_nlp", "mapboxgl_country",
          "mapnik_country", "mapnik_global",
          #"osmscout",
          "postal_country", "postal_global",
          "valhalla" ]:
    d = os.path.join(testing_mirror, url_specs[t])
    os.mkdir(d)
    os.symlink( "../../distribution/" + distlink[t], os.path.join(d, distlink[t]) )
gpl-3.0
1,752,026,904,201,722,600
33.126667
158
0.621606
false
3.021842
true
false
false
jose-caballero/cvmfsreplica
cvmfsreplica/cvmfsreplicaex.py
1
1122
#! /usr/bin/env python
#
# exception classes for cvmfsreplica project


class ServiceConfigurationFailure(Exception):
    """
    Exception to be raised when basic service configuration cannot be read
    """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class RepositoriesConfigurationFailure(Exception):
    """
    Exception to be raised when basic repositories configuration cannot be read
    """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class PluginConfigurationFailure(Exception):
    """
    Exception to be raised when a plugin configuration cannot be read
    """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class AcceptancePluginFailed(Exception):
    """
    Exception to be raised when an Acceptance Plugin failed
    and it has an attribute should_abort = True
    """
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
gpl-3.0
-3,819,787,098,476,026,400
22.87234
64
0.635472
false
4.382813
true
false
false
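The four exception classes above carry identical bodies; if one were refactoring, a shared base class expresses the same hierarchy more compactly and also lets callers catch every project failure at once. A sketch (CvmfsreplicaError is a made-up base-class name, not part of the original module):

class CvmfsreplicaError(Exception):
    """Common base so callers can catch any cvmfsreplica failure at once."""
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)


class ServiceConfigurationFailure(CvmfsreplicaError):
    """Basic service configuration cannot be read."""


class RepositoriesConfigurationFailure(CvmfsreplicaError):
    """Basic repositories configuration cannot be read."""


class PluginConfigurationFailure(CvmfsreplicaError):
    """A plugin configuration cannot be read."""


class AcceptancePluginFailed(CvmfsreplicaError):
    """An Acceptance plugin failed and has should_abort = True."""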
SchulzLab/SOS
install_script.py
1
11581
#!/usr/bin/env python
import os
from optparse import OptionParser
import subprocess
import sys
#import commands


class install_script():
    def __init__(self):
        self.prog_installed = []

    def obtaining_tar(self, prog, path):
        if (prog == 6):
            os.chdir(path)
            # Before obtaining the tar file of the corresponding tool, we always check whether the folder
            # exists in the path. If it exists then we throw an exception otherwise we download the tool.
            # Checking and downloading oases
            chk = self.checkfolder("oases")
            if(chk == False):
                os.system("git clone --recursive http://github.com/dzerbino/oases.git")
            else:
                print ("The path already contains a folder named oases. Please rename the folder or remove it from the path")
                sys.exit()
            # Checking and downloading SEECER. This is not the version mentioned in the manuscript of
            # SEECER. This is the modified version which was used for the SOS manuscript.
            chk1 = self.checkfolder("SEECER.tar.gz")
            if(chk1 == False):
                os.system("wget https://zenodo.org/record/3686150/files/SEECER.tar.gz?download=1")
                os.system("tar -zxvf SEECER.tar.gz")
            else:
                print ("The path already contains a folder named SEECER.tar.gz. Please rename it or remove it from the path")
            # Checking and downloading salmon
            chk2 = self.checkfolder("salmon-1.1.0_linux_x86_64.tar.gz")
            if(chk2 == False):
                # To get the latest version of salmon, please change the link in the next three lines
                print("-----salmon installation-------")
                os.system("wget https://github.com/COMBINE-lab/salmon/releases/download/v1.1.0/salmon-1.1.0_linux_x86_64.tar.gz >"+path+"/LogFiles/salmon.txt 2> "+path+"/LogFiles/salmonError.txt")
                os.system("tar -zxvf salmon-1.1.0_linux_x86_64.tar.gz >"+path+"/LogFiles/salmon.txt 2> "+path+"/LogFiles/salmonError.txt")
                self.prog_installed.append(path+"/salmon-1.1.0_linux_x86_64.tar.gz")
            else:
                print ("The path already contains a folder named salmon-1.1.0_linux_x86_64.tar.gz. Please rename it or remove it from the path")
                sys.exit()
            chk3 = self.checkfolder("ORNA")
            if(chk3 == False):
                os.system("git clone https://github.com/SchulzLab/ORNA")
                self.prog_installed.append(path+"/ORNA")
            else:
                print ("The path already contains a folder named ORNA. Please rename it or remove it from the path")
            chk4 = self.checkfolder("KREATION")
            if(chk4 == False):
                print("-----KREATION installation-------")
                os.system("git clone https://github.com/SchulzLab/KREATION >"+path+"/LogFiles/KREATION.txt 2> "+path+"/LogFiles/KreationError.txt")
                self.prog_installed.append(path+"/KREATION")
            else:
                print ("The path already contains a folder named KREATION. Please rename it or remove it from the path")

        if(prog==1):
            os.chdir(path)
            chk6 = self.checkfolder("oases")
            if(chk6 == False):
                os.system("git clone http://github.com/dzerbino/oases.git >"+path+"/LogFiles/Oases.txt 2> "+path+"/LogFiles/OasesError.txt")
            else:
                print ("The path already contains a folder named oases. Please rename the folder or remove it from the path")
                sys.exit()

        if(prog==2):
            os.chdir(path)
            output = subprocess.check_output("uname")
            chk2 = self.checkfolder("salmon-1.1.0_linux_x86_64")
            if(chk2 == False):
                print("-----salmon installation-------")
                os.system("wget https://github.com/COMBINE-lab/salmon/releases/download/v1.1.0/salmon-1.1.0_linux_x86_64.tar.gz >"+path+"/LogFiles/salmon.txt 2> "+path+"/LogFiles/salmonError.txt")
                os.system("tar -zxvf salmon-1.1.0_linux_x86_64.tar.gz >"+path+"/LogFiles/salmon.txt 2> "+path+"/LogFiles/salmonError.txt")
                self.prog_installed.append(path+"/salmon-1.1.0_linux_x86_64.tar.gz")
                chksalmon = self.checkfolder(path+"/salmon-latest_linux_x86_64/bin/salmon")
                if(chksalmon==False):
                    print("Salmon did not install correctly. Please try again")
                    sys.exit()
                else:
                    print("Salmon installed successfully")
            else:
                print ("The path already contains a folder named salmon-1.1.0_linux_x86_64.tar.gz. Please rename it or remove it from the path")
                sys.exit()

        if (prog == 3):
            os.chdir(path)
            chk2 = self.checkfolder("ORNA")
            if(chk2 == False):
                os.system("git clone https://github.com/SchulzLab/ORNA >"+path+"/LogFiles/ORNA.txt 2> "+path+"/LogFiles/ORNAError.txt")
                self.prog_installed.append(path+"/ORNA")
            else:
                print ("The path already contains a folder named ORNA. Please rename it or remove it from the path")

        if (prog == 4):
            os.chdir(path)
            # NOTE: the original line here was `s,t = subprocess.check_output("which cd-hit-est")`,
            # which crashes at runtime (check_output returns a single value and raises on a
            # non-zero exit). Using the exit status of `which` keeps the intended
            # "is it on $PATH" check.
            s = subprocess.call("which cd-hit-est", shell=True,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if(s != 0):
                uc = input("cd-hit is not found in the environment variables. Do you want to install (y/n) : ")
                if(uc == "y"):
                    os.system("git clone https://github.com/weizhongli/cdhit >"+path+"/LogFiles/cdhit.txt 2> "+path+"/LogFiles/cdhitError.txt")
                    self.install_cdhit(path)
                    os.chdir(path)
            else:
                print ("Please remember that cd-hit-est is required for the running of KREATION and must be in the environment variable $PATH")
            chk2 = self.checkfolder("KREATION")
            if(chk2 == False):
                print("-----KREATION installation-------")
                os.system("git clone https://github.com/SchulzLab/KREATION >"+path+"/LogFiles/KREATION.txt 2> "+path+"/LogFiles/KreationError.txt")
                self.prog_installed.append(path+"/KREATION")
                chkkreation = self.checkfolder(path+"/KREATION/KREATION.py")
                if(chkkreation==False):
                    print("KREATION did not install correctly. Please try again")
                    sys.exit()
                else:
                    print("KREATION installed successfully")
            else:
                print ("The path already contains a folder named KREATION. Please rename it or remove it from the path")

        if (prog == 5):
            os.chdir(path)
            chk1 = self.checkfolder("SEECER.tar.gz")
            if(chk1 == False):
                print("-----SEECER installation-----")
                os.system("wget https://zenodo.org/record/3686150/files/SEECER.tar.gz > "+path+"/LogFiles/Seecer.txt 2> "+path+"/LogFiles/SeecerError.txt")
                os.system("tar -zxvf SEECER.tar.gz > "+path+"/LogFiles/Seecer.txt 2> "+path+"/LogFiles/SeecerError.txt")
                chkkreation = self.checkfolder(path+"/SEECER-0.1.3/SEECER/bin/run_seecer.sh")
                if(chkkreation==False):
                    print("SEECER did not install correctly. Please try again")
                    sys.exit()
                else:
                    print("SEECER installed successfully")
            else:
                print ("The path already contains a folder named SEECER.tar.gz. Please rename it or remove it from the path")

        if(prog==8):
            os.chdir(path)
            chk5 = self.checkfolder("velvet")
            if(chk5 == False):
                os.system("git clone http://github.com/dzerbino/velvet.git >"+path+"/LogFiles/Velvet.txt 2> "+path+"/LogFiles/VelvetError.txt")
            else:
                print ("The path already contains a folder named velvet. Please rename the folder or remove it from the path")
                sys.exit()

    def install_oases(self, path, cs):
        print("------Oases installation------")
        path2 = path + "/oases"
        os.chdir(path2)
        os.system("make "+cs+" > "+path+"/LogFiles/Oases.txt 2> "+path+"/LogFiles/OasesError.txt")
        self.prog_installed.append(path2)
        chk = self.checkfolder(path+"/oases/oases")
        if(chk==False):
            print("Oases did not install correctly. Please try again")
            sys.exit()
        else:
            print("Oases installed successfully")

    def install_orna(self, path):
        print("------ORNA installation------")
        path2 = path + "/ORNA"
        os.chdir(path2)
        os.system("bash install.sh > "+path+"/LogFiles/ORNA.txt 2> "+path+"/LogFiles/ORNAError.txt")
        self.prog_installed.append(path2)
        chk = self.checkfolder(path+"/ORNA/build/bin/ORNA")
        if(chk==False):
            print("ORNA did not install correctly. Please try again")
            sys.exit()
        else:
            print("ORNA installed successfully")

    def install_velvet(self, path, cs):
        path1 = path + "/velvet"
        os.chdir(path1)
        print("------Velvet installation------")
        os.system("make "+cs+" > "+path+"/LogFiles/velvet.txt 2> "+path+"/LogFiles/VelvetError.txt")
        self.prog_installed.append(path1)
        chk = self.checkfolder(path+"/velvet/velvetg") and self.checkfolder(path+"/velvet/velveth")
        if(chk==False):
            print("velvet did not install correctly. Please try again")
            sys.exit()
        else:
            print("velvet installed successfully")

    def install_cdhit(self, path):
        path1 = path + "/cdhit"
        os.chdir(path1)
        print("------cd-hit-est installation------")
        os.system("make > "+path+"/LogFiles/cdhit.txt 2> "+path+"/LogFiles/cdHitError.txt")

    def getoptions(self):
        parser = OptionParser()
        parser.add_option("-f", "--folder", dest="foldername", help="destination folder")
        (options, args) = parser.parse_args()
        return options

    def checkfolder(self, program):
        var = os.path.exists(program)
        return var


########### MAIN PROGRAM ###########
x = install_script()
y1 = x.getoptions()
if(y1.foldername != None):
    try:
        os.chdir(y1.foldername)
    except:
        uc = input("folder "+ y1.foldername + " does not exists. Do you want to create one (y/n) : ")
        if(uc == "y"):
            os.system("mkdir " + y1.foldername)
            os.chdir(y1.foldername)
        else:
            sys.exit()

pwd = os.getcwd()
os.system("mkdir LogFiles")
print ("Programs to install :")
print ("1. OASES")
print ("2. SALMON")
print ("3. ORNA")
print ("4. KREATION")
print ("5. SEECER")
print ("6. ALL")
print ("7. QUIT")
x1 = input("Enter the option number (if multiple options then separate it by comma): ")
y = x1.split(",")
acs = ""
vd = ""
flg = 0
cs = ""
a13 = ""

if("7" in y):
    print("Thank you. It was nice working for you")
    sys.exit()

if "6" in y:
    # Obtaining and installing oases and velvet
    vc = input("Execution of Oases requires velvet. Do you want to install velvet (y/n) : ")
    if(vc == "y"):
        ch = input("Do you want to include additional compilation settings for velvet (refer to velvet manual for details) y/n : ")
        if(ch == "y"):
            print("Enter the additional compilation settings of velvet separated by space (for instance - \'MAXKMERLENGTH=57\'):")
            a1 = input()
            a11 = a1.split()
            for a2 in a11:
                a2 = a2.replace("'", "")
                a2 = "\'" + a2 + "\'"
                a13 = a13 + " " + a2
            cs = cs + a13
        flg = 1
        cs = cs + "\'VELVET_DIR="+pwd+"/velvet\'"
    if(vc == "n"):
        vd = input("Enter the location of velvet : ")
        cs = cs + " \'VELVET_DIR=" + vd + "\'"
    x.obtaining_tar(1, pwd)
    if (flg == 1):
        x.obtaining_tar(8, pwd)
        x.install_velvet(pwd, cs)
    x.install_oases(pwd, cs)
    # Obtaining salmon
    x.obtaining_tar(2, pwd)
    # Obtaining ORNA
    x.obtaining_tar(3, pwd)
    x.install_orna(pwd)
    # Obtaining KREATION
    x.obtaining_tar(4, pwd)
    # Obtaining SEECER
    x.obtaining_tar(5, pwd)
else:
    for i in y:
        if(int(i) == 1):
            vc = input("Execution of Oases requires velvet. Do you want to install velvet (y/n) : ")
            if(vc == "y"):
                ch = input("Do you want to include additional compilation settings for velvet (refer to velvet manual for details) y/n : ")
                if(ch == "y"):
                    print("Enter the additional compilation settings of velvet separated by space (for instance - \'MAXKMERLENGTH=57\'):")
                    a1 = input()
                    a11 = a1.split()
                    for a2 in a11:
                        a2 = a2.replace("'", "")
                        a2 = "\'" + a2 + "\'"
                        a13 = a13 + " " + a2
                    cs = cs + a13
                flg = 1
                cs = cs + " \'VELVET_DIR="+pwd+"/velvet\'"
            if(vc == "n"):
                vd = input("Enter the location of velvet : ")
                if("\\" not in vd):
                    cs = cs + " \'VELVET_DIR=" + pwd + "\\" + vd + "\'"
                else:
                    cs = cs + " \'VELVET_DIR=" + vd + "\'"
            x.obtaining_tar(1, pwd)
            if(flg == 1):
                x.obtaining_tar(8, pwd)
                x.install_velvet(pwd, cs)
            x.install_oases(pwd, cs)
        elif(int(i)==3):
            x.obtaining_tar(3, pwd)
            x.install_orna(pwd)
        else:
            x.obtaining_tar(int(i), pwd)
mit
928,657,617,958,597,400
36.723127
186
0.656075
false
2.848254
false
false
false
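The cd-hit-est probe in obtaining_tar is the fragile spot in this installer (the commented-out `import commands` at the top suggests the code was ported from commands.getstatusoutput, whose 256 exit status the old check expected). On Python 3, shutil.which is a simpler, portable way to test whether a binary is on $PATH; a small sketch:

import shutil

def on_path(binary):
    """Return True if `binary` resolves to an executable on $PATH."""
    return shutil.which(binary) is not None

if not on_path("cd-hit-est"):
    print("cd-hit-est not found; KREATION needs it on $PATH")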
spirali/elphie
elphie/textparser.py
1
1946
def normalize_tokens(tokens):
    # Remove empty texts
    tokens = [kv for kv in tokens if kv[0] != "text" or kv[1]]

    # Merge lines
    i = 1
    while i < len(tokens):
        token_name, value = tokens[i]
        if token_name == "newline" and tokens[i - 1][0] == "newline":
            value2 = tokens[i - 1][1]
            del tokens[i]
            del tokens[i - 1]
            tokens.insert(i - 1, ("newline", value + value2))
            continue
        i += 1

    # Remove trailing empty lines
    if tokens and tokens[-1][0] == "newline":
        tokens = tokens[:-1]
    return tokens


def parse_text(text, escape_char="~", begin_char="{", end_char="}"):
    result = []
    start = 0
    i = 0
    counter = 0
    while i < len(text):
        c = text[i]
        if c == escape_char:
            result.append(("text", text[start:i]))
            i += 1
            start = i
            while i < len(text) and text[i] != begin_char:
                i += 1
            result.append(("begin", text[start:i]))
            i += 1
            start = i
            counter += 1
        elif c == end_char:
            result.append(("text", text[start:i]))
            result.append(("end", None))
            i += 1
            start = i
            counter -= 1
            if counter < 0:
                raise Exception("Invalid format, too many closing characters")
        else:
            i += 1
    if i != start:
        result.append(("text", text[start:i]))

    final_result = []
    for r in result:
        if r[0] != "text":
            final_result.append(r)
            continue
        lines = r[1].split("\n")
        final_result.append(("text", lines[0]))
        for line in lines[1:]:
            final_result.append(("newline", 1))
            final_result.append(("text", line))

    if counter > 0:
        raise Exception("Invalid format, unclosed command")
    return normalize_tokens(final_result)
bsd-2-clause
5,328,669,194,339,669,000
28.044776
78
0.482014
false
3.907631
false
false
false
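A quick worked example of the tokenizer above (assuming the module is importable as textparser): markup like "~b{...}" becomes begin/end tokens around plain text, and runs of newlines collapse into a single ("newline", count) token:

from textparser import parse_text

tokens = parse_text("hello ~b{world}\n\nbye")
print(tokens)
# [('text', 'hello '), ('begin', 'b'), ('text', 'world'), ('end', None),
#  ('newline', 2), ('text', 'bye')]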
abacuspix/NFV_project
Build_Web_With_Flask/Building web applications with Flask_Code/chapter08/ex05.py
1
1529
# coding:utf-8

from flask import Flask, render_template, session, flash
from flask.ext.sqlalchemy import SQLAlchemy

app = Flask(__name__)
# strong secret key!!
app.config['SECRET_KEY'] = '\xa6\xb5\x0e\x7f\xd3}\x0b-\xaa\x03\x03\x82\x10\xbe\x1e0u\x93,{\xd4Z\xa3\x8f'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ex05.sqlite'
db = SQLAlchemy(app)


class Product(db.Model):
    __tablename__ = 'products'
    id = db.Column(db.Integer, primary_key=True)
    sku = db.Column(db.String(30), unique=True)
    name = db.Column(db.String(255), nullable=False)

    def __unicode__(self):
        return self.name


@app.route("/cart/add/<sku>")
def add_to_cart_view(sku):
    product = Product.query.filter_by(sku=sku).first()

    if product is not None:
        session['cart'] = session.get('cart') or dict()
        item = session['cart'].get(product.sku) or dict()
        item['qty'] = item.get('qty', 0) + 1
        session['cart'][product.sku] = item
        flash(u'%s add to cart. Total: %d' % (product, item['qty']))

    return render_template('cart.html')


def init():
    """
    Initializes and populates the database
    """
    db.create_all()

    if Product.query.count() == 0:
        db.session.add_all([
            Product(sku='010', name='Boots'),
            Product(sku='020', name='Gauntlets'),
            Product(sku='030', name='Helmets'),
        ])
        db.session.commit()


if __name__ == '__main__':
    app.debug = True

    with app.test_request_context():
        init()

    app.run()
mit
-965,825,808,537,951,900
24.5
104
0.59843
false
3.120408
false
false
false
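Because the view stores the cart in the signed session cookie, it can be exercised end-to-end with Flask's test client, which keeps cookies between requests. A sketch (it assumes the module above is saved as ex05.py and that a templates/cart.html file exists for render_template to find):

from ex05 import app, init

app.testing = True
with app.test_request_context():
    init()  # create the tables and the three demo products

with app.test_client() as client:
    client.get('/cart/add/010')   # Boots
    client.get('/cart/add/010')   # Boots again -> qty should be 2
    with client.session_transaction() as session:
        assert session['cart']['010']['qty'] == 2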
mozilla/bztools
auto_nag/history.py
1
16781
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.

from pprint import pprint

from libmozdata.bugzilla import Bugzilla

from auto_nag import logger


class History(object):
    BOT = "[email protected]"

    def __init__(self):
        super(History, self).__init__()

    def get_bugs(self):
        logger.info("History: get bugs: start...")

        def bug_handler(bug, data):
            data.add(bug["id"])

        fields = {
            "changedby": [
                "keywords",
                "product",
                "component",
                "assigned_to",
                "cf_crash_signature",
                "everconfirmed",
                "cf_has_regression_range",
                "cf_has_str",
                "priority",
                "bug_severity",
                "resolution",
                "bug_status",
                "bug_type",
                "cf_status_firefox68",
                "cf_status_firefox67",
                "cf_status_firefox66",
                "cf_status_firefox65",
                "cf_status_firefox64",
                "cf_status_firefox63",
                "cf_status_firefox62",
            ],
            "equals": ["commenter", "setters.login_name"],
        }

        queries = []
        bugids = set()
        for op, fs in fields.items():
            for f in fs:
                params = {"include_fields": "id", "f1": f, "o1": op, "v1": History.BOT}
                queries.append(
                    Bugzilla(params, bughandler=bug_handler, bugdata=bugids, timeout=20)
                )

        for q in queries:
            q.get_data().wait()

        logger.info("History: get bugs: end.")

        return bugids

    def get_bug_info(self, bugids):
        logger.info("History: get bugs info: start...")

        def history_handler(bug, data):
            bugid = str(bug["id"])
            for h in bug["history"]:
                if h["who"] == History.BOT:
                    del h["who"]
                    data[bugid].append(h)

        def comment_handler(bug, bugid, data):
            bugid = str(bugid)
            for comment in bug["comments"]:
                if comment["author"] == History.BOT:
                    text = comment["text"]
                    data[bugid].append(
                        {"comment": text, "date": comment["creation_time"]}
                    )

        data = {str(bugid): [] for bugid in bugids}

        Bugzilla(
            list(data.keys()),
            historyhandler=history_handler,
            historydata=data,
            commenthandler=comment_handler,
            commentdata=data,
            timeout=960,
        ).get_data().wait()

        logger.info("History: get bugs info: end.")

        return data

    def cleanup(self, data):
        # res is a dictionary: change_date_time => change or comment
        res = {}
        for bugid, info in data.items():
            res[bugid] = x = {}
            for c in info:
                if "changes" in c:
                    when = c["when"]
                    del c["when"]
                    if when not in x:
                        x[when] = {"changes": c["changes"]}
                    else:
                        x[when]["changes"] += c["changes"]
                if "comment" in c:
                    when = c["date"]
                    del c["date"]
                    if when not in x:
                        x[when] = {"comment": c["comment"]}
                    else:
                        x[when]["comment"] = c["comment"]
        return res

    def get_pc(self, changes):
        p = ""
        c = ""
        for change in changes:
            if change.get("field_name") == "component" and "added" in change:
                c = change["added"]
            if change.get("field_name") == "product" and "added" in change:
                p = change["added"]
        return "{}::{}".format(p, c)

    def get_ni(self, changes):
        for change in changes:
            if change.get("field_name") == "flagtypes.name" and "added" in change:
                c = change["added"]
                ni = "needinfo?("
                if c.startswith(ni):
                    return c[len(ni):-1]
        return ""

    def guess_tool(self, data):
        res = []
        no_tool = []

        for bugid, info in data.items():
            for date, i in info.items():
                if "comment" in i:
                    c = i["comment"]
                    if c.startswith("Crash volume for signature"):
                        continue

                    tool = None
                    if c.startswith(
                        "The leave-open keyword is there and there is no activity for"
                    ):
                        tool = "leave_open_no_activity"
                    elif c.startswith("Closing because no crashes reported for"):
                        tool = "no_crashes"
                    elif c.startswith("Moving to p3 because no activity for at least"):
                        tool = "old_p2_bug"
                    elif c.startswith("Moving to p2 because no activity for at least"):
                        tool = "old_p1_bug"
                    elif c.startswith(
                        "There's a r+ patch which didn't land and no activity in this bug"
                    ) or c.startswith(
                        "There are some r+ patches which didn't land and no activity in this bug for"
                    ):
                        tool = "not_landed"
                    elif c.startswith(
                        "The meta keyword is there, the bug doesn't depend on other bugs and there is no activity for"
                    ):
                        tool = "meta_no_deps_no_activity"
                    elif (
                        "[mozregression](https://wiki.mozilla.org/Auto-tools/Projects/Mozregression)"
                        in c
                    ):
                        tool = "has_str_no_range"
                    elif (
                        "as the bug is tracked by a release manager for the current nightly"
                        in c
                    ):
                        tool = "mismatch_priority_tracking_nightly"
                    elif (
                        "as the bug is tracked by a release manager for the current beta"
                        in c
                    ):
                        tool = "mismatch_priority_tracking_beta"
                    elif (
                        "as the bug is tracked by a release manager for the current release"
                        in c
                    ):
                        tool = "mismatch_priority_tracking_release"
                    elif c.startswith("The priority flag is not set for this bug.\n:"):
                        tool = "no_priority"
                    elif c.startswith(
                        "The priority flag is not set for this bug and there is no activity for"
                    ):
                        tool = "ni_triage_owner"

                    if tool is None:
                        no_tool.append((bugid, info))
                    else:
                        extra = self.get_ni(i.get("changes", []))
                        res.append({"tool": tool, "date": date, "bugid": bugid, "extra": extra})
                else:
                    changes = i["changes"]
                    N = len(res)
                    for change in changes:
                        if change.get("added") == "meta":
                            res.append({"tool": "summary_meta_missing", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif change.get("field_name") in {"component", "product"}:
                            res.append({"tool": "component", "date": date, "bugid": bugid, "extra": self.get_pc(changes)})
                            break
                        elif change.get("field_name") == "cf_has_str":
                            res.append({"tool": "has_str_no_hasstr", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif change.get("removed") == "leave-open":
                            res.append({"tool": "leave_open", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif change.get("field_name") == "assigned_to":
                            res.append({"tool": "no_assignee", "date": date, "bugid": bugid, "extra": change["added"]})
                            break
                        elif (
                            change.get("field_name", "").startswith("cf_status_firefox")
                            and change.get("added") == "affected"
                        ):
                            res.append({"tool": "nighty_reopened", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif (
                            change.get("field_name") == "status"
                            and change.get("added") == "ASSIGNED"
                        ):
                            res.append({"tool": "assignee_but_unconfirmed", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif (
                            change.get("field_name") == "keywords"
                            and change.get("added") == "regression"
                        ):
                            res.append({"tool": "regression", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif (
                            change.get("field_name") == "severity"
                            and change.get("added") == "major"
                        ):
                            res.append({"tool": "tracked_bad_severity", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif change.get("field_name") == "cf_crash_signature":
                            res.append({"tool": "copy_duplicate_info", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif (
                            change.get("field_name") == "keywords"
                            and change.get("removed") == "stalled"
                        ):
                            res.append({"tool": "regression", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif (
                            change.get("field_name") == "type"
                            and change.get("added") == "defect"
                        ):
                            res.append({"tool": "regression_but_type_enhancement_task", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif (
                            change.get("field_name") == "keywords"
                            and change.get("removed") == "dupeme"
                        ):
                            res.append({"tool": "closed_dupeme", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif (
                            change.get("field_name") == "keywords"
                            and change.get("added") == "dupeme"
                        ):
                            res.append({"tool": "dupeme_whiteboard_keyword", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif change.get("field_name") == "summary" and change.get(
                            "added"
                        ).startswith("[meta]"):
                            res.append({"tool": "meta_summary_missing", "date": date, "bugid": bugid, "extra": ""})
                            break
                        elif change.get("field_name", "").startswith(
                            "cf_status_firefox"
                        ) and change.get("added") in {
                            "?",
                            "fixed",
                            "verified",
                            "unaffected",
                        }:
                            res.append({"tool": "missing_beta_status", "date": date, "bugid": bugid, "extra": ""})
                            break

                    if len(res) == N:
                        no_tool.append((bugid, info))

        if no_tool:
            pprint(no_tool)

        return res

    def get(self):
        bugids = self.get_bugs()
        bugs = self.get_bug_info(bugids)
        bugs = self.cleanup(bugs)
        history = self.guess_tool(bugs)

        return history
bsd-3-clause
1,119,390,109,281,556,700
38.859857
118
0.311722
false
5.892205
false
false
false
econ-ark/HARK
HARK/ConsumptionSaving/tests/test_SmallOpenEconomy.py
1
1397
import copy
from HARK import distribute_params
from HARK.ConsumptionSaving.ConsAggShockModel import (
    AggShockConsumerType,
    SmallOpenEconomy,
    init_cobb_douglas,
)
from HARK.distribution import Uniform
import numpy as np
import unittest


class testSmallOpenEconomy(unittest.TestCase):
    def test_small_open(self):
        agent = AggShockConsumerType()
        agent.AgentCount = 100  # Very low number of agents for the sake of speed
        agent.cycles = 0

        # Make agents heterogeneous in their discount factor
        agents = distribute_params(
            agent, "DiscFac", 3, Uniform(bot=0.90, top=0.94)  # Impatient agents
        )

        # Make an economy with those agents living in it
        small_economy = SmallOpenEconomy(
            agents=agents,
            Rfree=1.03,
            wRte=1.0,
            KtoLnow=1.0,
            **copy.copy(init_cobb_douglas)
        )

        small_economy.act_T = 400  # Short simulation history
        small_economy.max_loops = 3  # Give up quickly for the sake of time
        small_economy.make_AggShkHist()  # Simulate a history of aggregate shocks
        small_economy.verbose = False  # Turn off printed messages

        # Give data about the economy to all the agents in it
        for this_type in small_economy.agents:
            this_type.get_economy_data(small_economy)

        small_economy.solve()
apache-2.0
-2,018,076,852,372,516,600
32.261905
81
0.652112
false
3.501253
false
false
false
reeshupatel/demo
keystone/openstack/common/lockutils.py
1
12121
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref

from oslo.config import cfg

from keystone.openstack.common import fileutils
from keystone.openstack.common.gettextutils import _, _LE, _LI
from keystone.openstack.common import log as logging


LOG = logging.getLogger(__name__)


util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Enables or disables inter-process locks.'),
    cfg.StrOpt('lock_path',
               default=os.environ.get("KEYSTONE_LOCK_PATH"),
               help='Directory to use for lock files.')
]


CONF = cfg.CONF
CONF.register_opts(util_opts)


def set_defaults(lock_path):
    cfg.set_defaults(util_opts, lock_path=lock_path)


class _FileLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        self.lockfile = None
        self.fname = name

    def acquire(self):
        basedir = os.path.dirname(self.fname)

        if not os.path.exists(basedir):
            fileutils.ensure_tree(basedir)
            LOG.info(_LI('Created lock path: %s'), basedir)

        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                LOG.debug('Got file lock "%s"', self.fname)
                return True
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise threading.ThreadError(_("Unable to acquire lock on"
                                                  " `%(filename)s` due to"
                                                  " %(exception)s") %
                                                {
                                                    'filename': self.fname,
                                                    'exception': e,
                                                })

    def __enter__(self):
        self.acquire()
        return self

    def release(self):
        try:
            self.unlock()
            self.lockfile.close()
            LOG.debug('Released file lock "%s"', self.fname)
        except IOError:
            LOG.exception(_LE("Could not release the acquired lock `%s`"),
                          self.fname)

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

    def exists(self):
        return os.path.exists(self.fname)

    def trylock(self):
        raise NotImplementedError()

    def unlock(self):
        raise NotImplementedError()


class _WindowsLock(_FileLock):
    def trylock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)


class _FcntlLock(_FileLock):
    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)


class _PosixLock(object):
    def __init__(self, name):
        # Hash the name because it's not valid to have POSIX semaphore
        # names with things like / in them. Then use base64 to encode
        # the digest() instead taking the hexdigest() because the
        # result is shorter and most systems can't have shm sempahore
        # names longer than 31 characters.
        h = hashlib.sha1()
        h.update(name.encode('ascii'))
        self.name = str((b'/' + base64.urlsafe_b64encode(
            h.digest())).decode('ascii'))

    def acquire(self, timeout=None):
        self.semaphore = posix_ipc.Semaphore(self.name,
                                             flags=posix_ipc.O_CREAT,
                                             initial_value=1)
        self.semaphore.acquire(timeout)
        return self

    def __enter__(self):
        self.acquire()
        return self

    def release(self):
        self.semaphore.release()
        self.semaphore.close()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.release()

    def exists(self):
        try:
            semaphore = posix_ipc.Semaphore(self.name)
        except posix_ipc.ExistentialError:
            return False
        else:
            semaphore.close()
        return True


if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
    FileLock = _WindowsLock
else:
    import base64
    import fcntl
    import hashlib

    import posix_ipc
    InterProcessLock = _PosixLock
    FileLock = _FcntlLock

_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()


def _get_lock_path(name, lock_file_prefix, lock_path=None):
    # NOTE(mikal): the lock name cannot contain directory
    # separators
    name = name.replace(os.sep, '_')
    if lock_file_prefix:
        sep = '' if lock_file_prefix.endswith('-') else '-'
        name = '%s%s%s' % (lock_file_prefix, sep, name)

    local_lock_path = lock_path or CONF.lock_path

    if not local_lock_path:
        # NOTE(bnemec): Create a fake lock path for posix locks so we don't
        # unnecessarily raise the RequiredOptError below.
        if InterProcessLock is not _PosixLock:
            raise cfg.RequiredOptError('lock_path')
        local_lock_path = 'posixlock:/'

    return os.path.join(local_lock_path, name)


def external_lock(name, lock_file_prefix=None, lock_path=None):
    LOG.debug('Attempting to grab external lock "%(lock)s"',
              {'lock': name})

    lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path)

    # NOTE(bnemec): If an explicit lock_path was passed to us then it
    # means the caller is relying on file-based locking behavior, so
    # we can't use posix locks for those calls.
    if lock_path:
        return FileLock(lock_file_path)
    return InterProcessLock(lock_file_path)


def remove_external_lock_file(name, lock_file_prefix=None):
    """Remove an external lock file when it's not used anymore
    This will be helpful when we have a lot of lock files
    """
    with internal_lock(name):
        lock_file_path = _get_lock_path(name, lock_file_prefix)
        try:
            os.remove(lock_file_path)
        except OSError:
            LOG.info(_LI('Failed to remove file %(file)s'),
                     {'file': lock_file_path})


def internal_lock(name):
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    LOG.debug('Got semaphore "%(lock)s"', {'lock': name})
    return sem


@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
      lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
      should work across multiple processes. This means that if two different
      workers both run a method decorated with @synchronized('mylock',
      external=True), only one of them will execute at a time.
    """
    int_lock = internal_lock(name)
    with int_lock:
        if external and not CONF.disable_process_locking:
            ext_lock = external_lock(name, lock_file_prefix, lock_path)
            with ext_lock:
                yield ext_lock
        else:
            yield int_lock
    LOG.debug('Released semaphore "%(lock)s"', {'lock': name})


def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
           ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
           ...

        @synchronized('mylock')
        def bar(self, *args):
           ...

    This way only one of either foo or bar can be executing at a time.
    """

    def wrap(f):
        @functools.wraps(f)
        def inner(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug('Got semaphore / lock "%(function)s"',
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug('Semaphore / lock released "%(function)s"',
                          {'function': f.__name__})
        return inner
    return wrap


def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils

        synchronized = lockutils.synchronized_with_prefix('nova-')

        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
           ...

    The lock_file_prefix argument is used to provide lock files on disk with a
    meaningful prefix.
    """

    return functools.partial(synchronized, lock_file_prefix=lock_file_prefix)


def main(argv):
    """Create a dir for locks and pass it to command from arguments

    If you run this:
    python -m openstack.common.lockutils python setup.py testr <etc>

    a temporary directory will be created for all your locks and passed to all
    your tests in an environment variable. The temporary dir will be deleted
    afterwards and the return value will be preserved.
    """
    lock_dir = tempfile.mkdtemp()
    os.environ["KEYSTONE_LOCK_PATH"] = lock_dir
    try:
        ret_val = subprocess.call(argv[1:])
    finally:
        shutil.rmtree(lock_dir, ignore_errors=True)
    return ret_val


if __name__ == '__main__':
    sys.exit(main(sys.argv))
apache-2.0
1,740,347,212,759,408,400
30.98153
78
0.613646
false
4.179655
false
false
false
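A minimal usage sketch for the lockutils record above, assuming the module is importable under the keystone path shown and that the lock directory is writable; the lock name, prefix, directory path and function name are illustrative, not part of the record.

from keystone.openstack.common import lockutils

# Point the file-based locks at a writable directory (path is illustrative).
lockutils.set_defaults('/var/run/keystone/locks')

@lockutils.synchronized('catalog-rebuild', lock_file_prefix='keystone-',
                        external=True)
def rebuild_catalog():
    # Only one worker runs this at a time: the internal semaphore is
    # taken first, then the inter-process file/posix lock.
    pass

rebuild_catalog()

# The same lock can also be held explicitly via the context manager form.
with lockutils.lock('catalog-rebuild', lock_file_prefix='keystone-',
                    external=True):
    pass  # critical section: the external lock is held here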
Namax0r/resistor-calculator
resistor_calculator.py
1
9566
#!/usr/bin/env python # Basic version handling try: # Python2 import Tkinter as tk except ImportError: # Python3 import tkinter as tk from tkinter.ttk import Combobox from tkinter import messagebox # Small utility that adds dot notation access to dictionary attributes class dotdict(dict): __getattr__ = dict.get __setattr__ = dict.__setitem__ __delattr__ = dict.__delitem__ # Main view window root = tk.Tk() # Store width and height in variable for ease of change window_width = 300 window_height = 380 # Set min and max size of a GUI window root.minsize(window_width, window_height) root.maxsize(window_width, window_height) # Var is used to store our result var_result = tk.StringVar() var_max = tk.StringVar() var_min = tk.StringVar() # Create dictionary of colors and values d = { #Values of the band are stored as string to allow concatenation of the numbers. 'band':{ 'black': "0", 'brown': "1", 'red': "2", 'orange': "3", 'yellow': "4", 'green': "5", 'blue': "6", 'violet': "7", 'gray': "8", 'white': "9" }, 'multiplier':{ 'black': 1, 'brown': 10, 'red': 100, 'orange': 1000, 'yellow': 10000, 'green': 100000, 'blue': 1000000, 'violet': 10000000 }, 'tolerance':{ 'brown': 0.01, 'red': 0.02, 'green': 0.005, 'blue': 0.025, 'violet': 0.010, 'gray': 0.005, 'gold': 0.05, 'silver': 0.10 } } # Enable dot notation on the dictionary d = dotdict(d) class ResistorCalculator: def __init__(self, parent, title): self.parent = parent self.parent.title(title) self.parent.protocol("WM_DELETE_WINDOW", self.close_program) # Define variables to store values of comboboxes self.band1_var_result = 0 self.band2_var_result = 0 self.band3_var_result = 0 self.multiplier_var_result = 0 self.tolerance_var_result = 0 self.build_window() # Function to destroy the window when [X] is pressed def close_program(self, event=None): self.parent.destroy() # Function called when '<<ComboboxSelected>>' event is triggered def combobox_handler(self, event): #store values of comboboxes in variables. self.band1_var_result = self.band1_var.get() self.band2_var_result = self.band2_var.get() self.band3_var_result = self.band3_var.get() self.multiplier_var_result = self.multiplier_var.get() self.tolerance_var_result = self.tolerance_var.get() # Function to handle error, when there are not enough arguments for formula to calculate properly. def error_not_enough_args(self): tk.messagebox.showinfo("Error", "Not enough arguments to calculate. 
Please select more values.")

    # Function to add a unit mark at the end of a result
    def add_mark(self, val, mark):
        return '{0} {1}'.format(val, mark)

    # Function to calculate the resistors
    def calculate_resistor(self):
        try:
            # If there are only 2 bands to add, change the formula to skip the band3
            if self.band3_var_result == " ":
                bands = d.band[self.band1_var_result] + d.band[self.band2_var_result]
            else:
                bands = d.band[self.band1_var_result] + d.band[self.band2_var_result] + d.band[self.band3_var_result]
            # Convert the string into an int so we can do mathematical operations on it
            int_bands = int(bands)

            # Set multiplier and tolerance
            multiplier = d.multiplier[self.multiplier_var_result]
            tolerance = d.tolerance[self.tolerance_var_result]

            # Calculate the resistance and its tolerance spread
            formula = int_bands * multiplier
            max_resistance = formula + (formula * tolerance)
            min_resistance = formula - (formula * tolerance)

            if formula < 1000:
                result_max = max_resistance
                result_min = min_resistance
                result_normal = formula
            # From 1000 ohms upwards, display the value in kilo-ohms.
            elif formula < 1000000:
                result_max = self.add_mark(max_resistance / 1000.0, "kΩ")
                result_min = self.add_mark(min_resistance / 1000.0, "kΩ")
                result_normal = self.add_mark(formula / 1000.0, "kΩ")
            # From one million ohms upwards, display the value in mega-ohms.
            else:
                result_max = self.add_mark(max_resistance / 1000000.0, "MΩ")
                result_min = self.add_mark(min_resistance / 1000000.0, "MΩ")
                result_normal = self.add_mark(formula / 1000000.0, "MΩ")

            # Set the variables that display the result in the GUI
            var_result.set(result_normal)
            var_max.set(result_max)
            var_min.set(result_min)
        # KeyError is raised when there are not enough values to calculate
        except KeyError:
            self.error_not_enough_args()

    # Function to build the GUI window and all of its widgets.
def build_window(self): # Band 1 band1_label = tk.Label(self.parent, text="Band 1" ) band1_label.grid(row=0, column=0, ipadx=30, pady=5) self.band1_var = tk.StringVar() band1_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.band1_var) band1_combo['values']=('black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue', 'violet', 'gray', 'white') band1_combo.bind('<<ComboboxSelected>>', self.combobox_handler) band1_combo.grid(row=0, column=1, padx=10) # Band 2 band2_label = tk.Label( self.parent, text="Band 2") band2_label.grid(row=2, column=0, pady=5) self.band2_var = tk.StringVar() band2_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.band2_var) band2_combo['values']=('black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue', 'violet', 'gray', 'white') band2_combo.bind('<<ComboboxSelected>>', self.combobox_handler) band2_combo.grid(row=2, column=1) # Band 3 band3_label = tk.Label( self.parent, text="Band 3" ) band3_label.grid(row=4, column=0, pady=5) self.band3_var = tk.StringVar() # Setting band3 to " " helps with modification of calculation formula based on this value self.band3_var.set(" ") band3_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.band3_var) band3_combo['values']=('black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue', 'violet', 'gray', 'white') band3_combo.bind('<<ComboboxSelected>>', self.combobox_handler) band3_combo.grid(row=4, column=1) # Multiplier multiplier_label = tk.Label( self.parent, text="Multiplier" ) multiplier_label.grid(row=6, column=0, pady=5) self.multiplier_var = tk.StringVar() multiplier_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.multiplier_var) multiplier_combo['values']=('black', 'brown', 'red', 'orange', 'yellow', 'green', 'blue', 'violet') multiplier_combo.bind('<<ComboboxSelected>>', self.combobox_handler) multiplier_combo.grid(row=6, column=1) # Tolerance tolerance_label = tk.Label( self.parent, text="Tolerance" ) tolerance_label.grid(row=8, column=0, pady=5) self.tolerance_var = tk.StringVar() tolerance_combo = Combobox(self.parent, state='readonly', height = '10', justify = 'center', textvariable=self.tolerance_var) tolerance_combo['values']=('brown', 'red', 'green', 'blue', 'violet', 'gray', 'gold', 'silver') tolerance_combo.bind('<<ComboboxSelected>>', self.combobox_handler) tolerance_combo.grid(row=8, column=1) # Calculate button self.calculate_button = tk.Button(self.parent, text ="Calculate", command = self.calculate_resistor) self.calculate_button.grid(row=9, column=1, pady=5, ipadx=40) # Results section result_label = tk.Message( self.parent, text="Result:") result_label.grid(row=12, column=0, pady=10) result_value = tk.Message( self.parent, textvariable=var_result, relief=tk.RAISED ) result_value.grid(row=12, column=1) max_result_label = tk.Message( self.parent, text="Max:") max_result_label.grid(row=13, column=0, pady=10, ipadx=20) max_result_value = tk.Message( self.parent, textvariable=var_max, relief=tk.RAISED) max_result_value.grid(row=13, column=1) min_result_label = tk.Message( self.parent, text="Min:") min_result_label.grid(row=14, column=0, pady=10) min_result_value = tk.Message( self.parent, textvariable=var_min, relief=tk.RAISED ) min_result_value.grid(row=14, column=1) # Author name, displayed at the bottom of a program author_name = tk.Label(self.parent, text="by Namax0r", relief=tk.SUNKEN, bd=1) 
author_name.place(x=window_width - 70, y=window_height - 20) if __name__ == '__main__': app = ResistorCalculator(root, "Resistor Calculator") root.mainloop()
mit
-1,463,508,674,641,083,400
43.259259
135
0.601255
false
3.593985
false
false
false
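A GUI-free sketch of the band arithmetic used by the record above, with the colour tables taken from its dictionary; the chosen colours in the call are illustrative.

# Digits, multipliers and tolerances as in the record's `d` dictionary
# (multiplier and tolerance tables abbreviated here).
band = {'black': '0', 'brown': '1', 'red': '2', 'orange': '3', 'yellow': '4',
        'green': '5', 'blue': '6', 'violet': '7', 'gray': '8', 'white': '9'}
multiplier = {'black': 1, 'brown': 10, 'red': 100, 'orange': 1000}
tolerance = {'gold': 0.05, 'silver': 0.10}

def resistance(colors, mult, tol):
    # Concatenate the band digits, then scale by the multiplier band.
    value = int(''.join(band[c] for c in colors)) * multiplier[mult]
    spread = value * tolerance[tol]
    return value - spread, value, value + spread

print(resistance(['yellow', 'violet'], 'red', 'gold'))  # (4465.0, 4700, 4935.0)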
mjasher/gac
GAC/flopy/modflow/mfdrn.py
1
7133
""" mfdrn module. Contains the ModflowDrn class. Note that the user can access the ModflowDrn class as `flopy.modflow.ModflowDrn`. Additional information for this MODFLOW package can be found at the `Online MODFLOW Guide <http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?drn.htm>`_. """ import sys import numpy as np from flopy.mbase import Package from flopy.utils.util_list import mflist class ModflowDrn(Package): """ MODFLOW Drain Package Class. Parameters ---------- model : model object The model object (of type :class:`flopy.modflow.mf.Modflow`) to which this package will be added. ipakcb : int is a flag and a unit number. (default is 0). stress_period_data : list of boundaries or recarray of boundaries or dictionary of boundaries Each drain cell is defined through definition of layer(int), row(int), column(int), elevation(float), conductance(float) The simplest form is a dictionary with a lists of boundaries for each stress period, where each list of boundaries itself is a list of boundaries. Indices of the dictionary are the numbers of the stress period. This gives the form of stress_period_data = {0: [ [lay, row, col, stage, cond], [lay, row, col, stage, cond], [lay, row, col, stage, cond], ], 1: [ [lay, row, col, stage, cond], [lay, row, col, stage, cond], [lay, row, col, stage, cond], ], ... kper: [ [lay, row, col, stage, cond], [lay, row, col, stage, cond], [lay, row, col, stage, cond], ] } Note that if no values are specified for a certain stress period, then the list of boundaries for the previous stress period for which values were defined is used. Full details of all options to specify stress_period_data can be found in the flopy3boundaries Notebook in the basic subdirectory of the examples directory dtype : dtype definition if data type is different from default options : list of strings Package options. (default is None). extension : string Filename extension (default is 'drn') unitnumber : int File unit number (default is 21). Attributes ---------- Methods ------- See Also -------- Notes ----- Parameters are not supported in FloPy. Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow() >>> lrcec = {0:[2, 3, 4, 10., 100.]} #this drain will be applied to all >>> #stress periods >>> drn = flopy.modflow.ModflowDrn(ml, stress_period_data=lrcec) """ def __init__(self, model, ipakcb=0, stress_period_data=None, dtype=None, extension='drn', unitnumber=21, options=None, **kwargs): """ Package constructor """ Package.__init__(self, model, extension, 'DRN', unitnumber) # Call ancestor's init to set self.parent, extension, name and unit number self.heading = '# DRN for MODFLOW, generated by Flopy.' 
self.url = 'drn.htm' self.ipakcb = ipakcb # 0: no cell by cell terms are written self.np = 0 if options is None: options = [] self.options = options if dtype is not None: self.dtype = dtype else: self.dtype = self.get_default_dtype(structured=self.parent.structured) self.stress_period_data = mflist(self, stress_period_data) self.parent.add_package(self) def __repr__(self): return 'Drain class' @staticmethod def get_default_dtype(structured=True): if structured: dtype = np.dtype([("k", np.int), ("i", np.int), ("j", np.int), ("elev", np.float32), ("cond", np.float32)]) else: dtype = np.dtype([("node", np.int), ("elev", np.float32), ("cond", np.float32)]) return dtype def ncells(self): # Returns the maximum number of cells that have drains (developed for MT3DMS SSM package) # print 'Function must be implemented properly for drn package' return self.stress_period_data.mxact def write_file(self): """ Write the file. """ f_drn = open(self.fn_path, 'w') f_drn.write('{0}\n'.format(self.heading)) # f_drn.write('%10i%10i\n' % (self.mxactd, self.idrncb)) line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact, self.ipakcb) for opt in self.options: line += ' ' + str(opt) line += '\n' f_drn.write(line) self.stress_period_data.write_transient(f_drn) f_drn.close() def add_record(self, kper, index, values): try: self.stress_period_data.add_record(kper, index, values) except Exception as e: raise Exception("mfdrn error adding record to list: " + str(e)) @staticmethod def get_empty(ncells=0, aux_names=None, structured=True): # get an empty recaray that correponds to dtype dtype = ModflowDrn.get_default_dtype(structured=structured) if aux_names is not None: dtype = Package.add_to_dtype(dtype, aux_names, np.float32) d = np.zeros((ncells, len(dtype)), dtype=dtype) d[:, :] = -1.0E+10 return np.core.records.fromarrays(d.transpose(), dtype=dtype) @staticmethod def load(f, model, nper=None, ext_unit_dict=None): """ Load an existing package. Parameters ---------- f : filename or file handle File to load. model : model object The model object (of type :class:`flopy.modflow.mf.Modflow`) to which this package will be added. ext_unit_dict : dictionary, optional If the arrays in the file are specified using EXTERNAL, or older style array control records, then `f` should be a file handle. In this case ext_unit_dict is required, which can be constructed using the function :class:`flopy.utils.mfreadnam.parsenamefile`. Returns ------- drn : ModflowDrn object ModflowDrn object. Examples -------- >>> import flopy >>> m = flopy.modflow.Modflow() >>> drn = flopy.modflow.ModflowDrn.load('test.drn', m) """ if model.verbose: sys.stdout.write('loading drn package file...\n') return Package.load(model, ModflowDrn, f, nper)
gpl-2.0
-3,510,670,181,342,770,700
33.311881
112
0.549418
false
4.025395
false
false
false
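The ModflowDrn docstring above already gives a one-liner; a slightly fuller sketch, assuming flopy is installed, using the docstring's illustrative layer/row/column, elevation and conductance values.

import flopy

ml = flopy.modflow.Modflow()
# One drain cell at layer 2, row 3, column 4 with elevation 10. and
# conductance 100., defined for stress period 0 and reused afterwards.
lrcec = {0: [[2, 3, 4, 10., 100.]]}
drn = flopy.modflow.ModflowDrn(ml, stress_period_data=lrcec)
print(drn.ncells())  # maximum number of drain cells in any stress period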
mosen/salt-osx
_modules/deprecated/mac_shadow.py
1
10388
# -*- coding: utf-8 -*- ''' Manage Mac OSX local directory passwords and policies. Note that it is usually better to apply password policies through the creation of a configuration profile. Tech Notes: Usually when a password is changed by the system, there's a responsibility to check the hash list and generate hashes for each. Many osx password changing scripts/modules only deal with the SHA-512 PBKDF2 hash when working with the local node. ''' # Authentication concepts reference: # https://developer.apple.com/library/mac/documentation/Networking/Conceptual/Open_Directory/openDirectoryConcepts/openDirectoryConcepts.html#//apple_ref/doc/uid/TP40000917-CH3-CIFCAIBB from __future__ import absolute_import import logging log = logging.getLogger(__name__) # Start logging import os import base64 import salt.utils import string import binascii import salt.exceptions try: from passlib.utils import pbkdf2, ab64_encode, ab64_decode HAS_PASSLIB = True except ImportError: HAS_PASSLIB = False def __virtual__(): if HAS_PASSLIB and salt.utils.platform.is_darwin(): return True else: return False def _pl_salted_sha512_pbkdf2_from_string(strvalue, salt_bin=None, iterations=1000): ''' Create a PBKDF2-SHA512 hash with a 128 byte key length. The standard passlib.hash.pbkdf2_sha512 functions assume a 64 byte key length which does not match OSX's implementation. :param strvalue: The string to derive the hash from :param salt: The (randomly generated) salt :param iterations: The number of iterations, for Mac OS X it's normally between 23000-25000? need to confirm. :return: (binary digest, binary salt, number of iterations used) ''' if salt_bin is None: salt_bin = os.urandom(32) key_length = 128 hmac_sha512, dsize = pbkdf2.get_prf("hmac-sha512") digest_bin = pbkdf2.pbkdf2(strvalue, salt_bin, iterations, key_length, hmac_sha512) return digest_bin, salt_bin, iterations def _extract_authdata(item): ''' Extract version, authority tag, and authority data from a single array item of AuthenticationAuthority item The NSString instance representing the authority string returns version (default 1.0.0), tag, data as a tuple ''' parts = string.split(item, ';', 2) if not parts[0]: parts[0] = '1.0.0' return { 'version': parts[0], 'tag': parts[1], 'data': parts[2] } def authorities(name): ''' Read the list of authentication authorities for the given user. name Short username of the local user. ''' authorities_plist = __salt__['cmd.run']('/usr/bin/dscl -plist . read /Users/{0} AuthenticationAuthority'.format(name)) plist = __salt__['plist.parse_string'](authorities_plist) authorities_list = [_extract_authdata(item) for item in plist.objectForKey_('dsAttrTypeStandard:AuthenticationAuthority')] return authorities_list def user_shadowhash(name): ''' Read the existing hash for the named user. Returns a dict with the ShadowHash content for the named user in the form: { 'HASH_TYPE': { 'entropy': <base64 hash>, 'salt': <base64 salt>, 'iterations': <n iterations> }} Hash types are hard coded to SALTED-SHA-PBKDF2, CRAM-MD5, NT, RECOVERABLE. In future releases the AuthenticationAuthority property should be checked for the hash list name The username associated with the local directory user. ''' # We have to strip the output string, convert hex back to binary data, read that plist and get our specific # key/value property to find the hash. I.E there's a lot of unwrapping to do. 
log.debug('Reading ShadowHashData') data = __salt__['dscl.read']('.', '/Users/{0}'.format(name), 'ShadowHashData') log.debug('Got ShadowHashData') log.debug(data) if data is None: log.debug('No such record/attribute found, returning None') return None if 'dsAttrTypeNative:ShadowHashData' not in data: raise salt.exceptions.SaltInvocationError( 'Expected to find ShadowHashData in user record: {0}'.format(name) ) plist_hex = string.replace(data['dsAttrTypeNative:ShadowHashData'], ' ', '') plist_bin = binascii.unhexlify(plist_hex) # plistlib is not used, because mavericks ships without binary plist support from plistlib. plist = __salt__['plist.parse_string'](plist_bin) log.debug(plist) pbkdf = plist.objectForKey_('SALTED-SHA512-PBKDF2') cram_md5 = plist.objectForKey_('CRAM-MD5') nt = plist.objectForKey_('NT') recoverable = plist.objectForKey_('RECOVERABLE') hashes = {} if pbkdf is not None: hashes['SALTED-SHA512-PBKDF2'] = { 'entropy': pbkdf.objectForKey_('entropy').base64EncodedStringWithOptions_(0), 'salt': pbkdf.objectForKey_('salt').base64EncodedStringWithOptions_(0), 'iterations': pbkdf.objectForKey_('iterations') } if cram_md5 is not None: hashes['CRAM-MD5'] = cram_md5.base64EncodedStringWithOptions_(0) if nt is not None: hashes['NT'] = nt.base64EncodedStringWithOptions_(0) if recoverable is not None: hashes['RECOVERABLE'] = recoverable.base64EncodedStringWithOptions_(0) return hashes def info(name): ''' Return information for the specified user CLI Example: .. code-block:: bash salt '*' mac_shadow.info admin ''' # dscl -plist . -read /Users/<User> ShadowHashData # Read out name from dscl # Read out passwd hash from decrypted ShadowHashData in dslocal # Read out lstchg/min/max/warn/inact/expire from PasswordPolicy pass def gen_password(password, salt=None, iterations=None): ''' Generate hashed (PBKDF2-SHA512) password Returns a dict containing values for 'entropy', 'salt' and 'iterations'. password Plaintext password to be hashed. salt Cryptographic salt (base64 encoded). If not given, a random 32-character salt will be generated. (32 bytes is the standard salt length for OSX) iterations Number of iterations for the key derivation function, default is 1000 CLI Example: .. code-block:: bash salt '*' mac_shadow.gen_password 'I_am_password' salt '*' mac_shadow.gen_password 'I_am_password' 'Ausrbk5COuB9V4ata6muoj+HPjA92pefPfbW9QPnv9M=' 23000 ''' if iterations is None: iterations = 1000 if salt is None: salt_bin = os.urandom(32) else: salt_bin = base64.b64decode(salt, '+/') entropy, used_salt, used_iterations = _pl_salted_sha512_pbkdf2_from_string(password, salt_bin, iterations) result = { 'entropy': base64.b64encode(entropy, '+/'), 'salt': base64.b64encode(used_salt, '+/'), 'iterations': used_iterations } return {'SALTED-SHA512-PBKDF2': result} def set_password_hash(name, hashtype, hash, salt=None, iterations=None): ''' Set the given hash as the shadow hash data for the named user. name The name of the local user, which is assumed to be in the local directory service. hashtype A valid hash type, one of: PBKDF2, CRAM-MD5, NT, RECOVERABLE hash The computed hash salt (optional) The salt to use, if applicable. iterations The number of iterations to use, if applicable. 
''' # current_hashes = user_shadowhash(name) # current_pbkdf2 = current_hashes['SALTED-SHA512-PBKDF2'] # # log.debug('Current ShadowHashdata follows') # log.debug(current_hashes) shd = {'SALTED-SHA512-PBKDF2': {'entropy': hash, 'salt': salt, 'iterations': iterations}} log.debug('Encoding following dict as bplist') log.debug(shd) # if shd['SALTED-SHA512-PBKDF2']['entropy'] == current_pbkdf2['entropy']: # log.debug('Entropy IS EQUAL!') shd_bplist = __salt__['plist.gen_string'](shd, 'binary') shd_bplist_b64 = base64.b64encode(shd_bplist, '+/') log.debug('Flushing directory services cache') __salt__['dscl.flushcache']() log.debug('Writing directly to dslocal') __salt__['plist.append_key']('/var/db/dslocal/nodes/Default/users/{0}.plist'.format(name), 'ShadowHashData', 'data', shd_bplist_b64) log.debug('Flushing directory services cache') __salt__['dscl.flushcache']() return True def set_password(name, password, salt=None, iterations=None): ''' Set the password for a named user (insecure). Use mac_shadow.set_password_hash to supply pre-computed hash values. For the moment this sets only the PBKDF2-SHA512 salted hash. To be a good citizen we should set every hash in the authority list. name The name of the local user, which is assumed to be in the local directory service. password The plaintext password to set (warning: insecure, used for testing) salt The salt to use, defaults to automatically generated. iterations The number of iterations to use, defaults to an automatically generated random number. CLI Example: .. code-block:: bash salt '*' mac_shadow.set_password macuser macpassword ''' #current_hashes = user_shadowhash(name) #current_pbkdf2 = current_hashes['SALTED-SHA512-PBKDF2'] # hash = gen_password(password, current_pbkdf2['salt'], current_pbkdf2['iterations']) hash = gen_password(password, salt, iterations) # # log.debug('Current ShadowHashData follows') # if current_hashes: # log.debug(current_hashes) # # if hash['SALTED-SHA512-PBKDF2']['entropy'] == current_pbkdf2['entropy']: # return False # No change required # else: # log.debug('No Shadow Hash Data exists for User: {0}'.format(name)) set_password_hash( name, 'PBKDF2', hash['SALTED-SHA512-PBKDF2']['entropy'], hash['SALTED-SHA512-PBKDF2']['salt'], hash['SALTED-SHA512-PBKDF2']['iterations'] ) return True def del_password(name): ''' Delete the password from name user CLI Example: .. code-block:: bash salt '*' shadow.del_password username ''' pass # Re-order authentication authority and remove ShadowHashData
mit
-2,658,986,317,874,695,000
30.383686
185
0.663939
false
3.712652
false
false
false
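A standalone sketch of the hash derivation performed by `_pl_salted_sha512_pbkdf2_from_string` above, assuming the same Python 2-era passlib.utils.pbkdf2 API that the module imports (removed in newer passlib releases); the plaintext is illustrative.

import os
import base64
from passlib.utils import pbkdf2

password = 'hunter2'       # illustrative plaintext
salt_bin = os.urandom(32)  # OS X uses a 32-byte random salt
iterations = 1000
hmac_sha512, dsize = pbkdf2.get_prf('hmac-sha512')
# 128-byte derived key, the length OS X expects for SALTED-SHA512-PBKDF2.
entropy = pbkdf2.pbkdf2(password, salt_bin, iterations, 128, hmac_sha512)
print(base64.b64encode(entropy)[:16], iterations)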
rodrigosurita/GDAd
sdaps/model/questionnaire.py
1
9008
# -*- coding: utf8 -*- # SDAPS - Scripts for data acquisition with paper based surveys # Copyright(C) 2008, Christoph Simon <[email protected]> # Copyright(C) 2008, Benjamin Berg <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. u''' Hinweis zu den Diamantstrukturen Bei Klassen mit mehreren Basisklassen definiert maximal eine Basisklasse eine eigene __init__ - Funktion. Die anderen Klassen sind "nur" Mixin - Klassen. Dadurch werden die Probleme der Diamantstruktur umgangen. ''' import buddy import data import struct class DataObject(object): u'''Mixin ''' def get_data(self): if not self.id in self.sheet.data: self.sheet.data[self.id] = getattr(data, self.__class__.__name__)(self) return self.sheet.data[self.id] data = property(get_data) class Questionnaire(buddy.Object): ''' Identification: There is only one. Reference: survey.questionnaire Parent: self.survey ''' def __init__(self): self.survey = None self.qobjects = list() self.last_id = (0, 0) self.init_attributes() def init_attributes(self): self.page_count = 0 def add_qobject(self, qobject, new_id=None): qobject.questionnaire = self # XXX: Is this any good? if new_id is not None: assert new_id > self.last_id self.last_id = new_id qobject.id = new_id else: self.last_id = qobject.init_id(self.last_id) self.qobjects.append(qobject) def get_sheet(self): return self.survey.sheet sheet = property(get_sheet) def __unicode__(self): return unicode().join( [u'%s\n' % self.__class__.__name__] + [unicode(qobject) for qobject in self.qobjects] ) class QObject(buddy.Object): ''' Identification: id ==(major, minor) Reference: survey.questionnaire.qobjects[i](i != id) Parent: self.questionnaire ''' def __init__(self): self.questionnaire = None self.boxes = list() self.last_id = -1 self.init_attributes() def init_attributes(self): pass def init_id(self, id): self.id = (id[0], id[1] + 1) return self.id def add_box(self, box): box.question = self self.last_id = box.init_id(self.last_id) self.boxes.append(box) def get_sheet(self): return self.questionnaire.sheet sheet = property(get_sheet) def calculate_survey_id(self, md5): pass def id_str(self): ids = [str(x) for x in self.id] return u'.'.join(ids) def id_csv(self, theid=None): if theid is None: theid = self.id ids = [str(x) for x in theid] return u'_'.join(ids) def id_filter(self): ids = [str(x) for x in self.id] return u'_' + u'_'.join(ids) def __unicode__(self): return u'(%s)\n' % ( self.__class__.__name__, ) class Head(QObject): def init_attributes(self): QObject.init_attributes(self) self.title = unicode() def init_id(self, id): self.id = (id[0] + 1, 0) return self.id def __unicode__(self): return u'%s(%s) %s\n' % ( self.id_str(), self.__class__.__name__, self.title, ) class Question(QObject): def init_attributes(self): QObject.init_attributes(self) self.page_number = 0 self.question = unicode() def calculate_survey_id(self, md5): for box in self.boxes: box.calculate_survey_id(md5) def __unicode__(self): 
        return u'%s(%s) %s {%i}\n' % (
            self.id_str(),
            self.__class__.__name__,
            self.question,
            self.page_number
        )


class Choice(Question):

    def __unicode__(self):
        return unicode().join(
            [Question.__unicode__(self)] +
            [unicode(box) for box in self.boxes]
        )

    def get_answer(self):
        '''it's a list containing all selected values
        '''
        answer = list()
        for box in self.boxes:
            if box.data.state:
                answer.append(box.value)
        return answer


class Mark(Question):

    def init_attributes(self):
        Question.init_attributes(self)
        self.answers = list()

    def __unicode__(self):
        if len(self.answers) == 2:
            return unicode().join(
                [Question.__unicode__(self)] +
                [u'\t%s - %s\n' % tuple(self.answers)] +
                [unicode(box) for box in self.boxes]
            )
        else:
            return unicode().join(
                [Question.__unicode__(self)] +
                [u'\t? - ?\n'] +
                [unicode(box) for box in self.boxes]
            )

    def get_answer(self):
        '''it's an integer between 0 and 5
        1 to 5 are valid marks, 0 is returned if there's something wrong
        '''
        # box.value is zero based, a mark is 1-based
        answer = list()
        for box in self.boxes:
            if box.data.state:
                answer.append(box.value)
        if len(answer) == 1:
            return answer[0] + 1
        else:
            return 0

    def set_answer(self, answer):
        for box in self.boxes:
            box.data.state = box.value == answer - 1


class Text(Question):

    def __unicode__(self):
        return unicode().join(
            [Question.__unicode__(self)] +
            [unicode(box) for box in self.boxes]
        )

    def get_answer(self):
        '''it's a bool, whether there is content in the textbox
        '''
        assert len(self.boxes) == 1
        return self.boxes[0].data.state


class Additional_Head(Head):
    pass


class Additional_Mark(Question, DataObject):

    def init_attributes(self):
        Question.init_attributes(self)
        self.answers = list()

    def __unicode__(self):
        return unicode().join(
            [Question.__unicode__(self)] +
            [u'\t%s - %s\n' % tuple(self.answers)]
        )

    def get_answer(self):
        return self.data.value

    def set_answer(self, answer):
        self.data.value = answer


class Additional_FilterHistogram(Question, DataObject):

    def init_attributes(self):
        Question.init_attributes(self)
        self.answers = list()
        self.filters = list()

    def __unicode__(self):
        result = []
        result.append(Question.__unicode__(self))
        for i in xrange(len(self.answers)):
            result.append(u'\t%s - %s\n' % (self.answers[i], self.filters[i]))
        return unicode().join(result)

    def get_answer(self):
        return self.data.value

    def set_answer(self, answer):
        # NotImplemented is a value, not an exception; raise the proper type.
        raise NotImplementedError()


class Box(buddy.Object, DataObject):
    '''
    Identification: id of the parent and value of the box ::

        id == (major, minor, value)

    Reference: survey.questionnaire.qobjects[i].boxes[j]
    Parent: self.question
    '''

    def __init__(self):
        self.question = None
        self.init_attributes()

    def init_attributes(self):
        self.page_number = 0
        self.x = 0
        self.y = 0
        self.width = 0
        self.height = 0
        self.text = unicode()

    def init_id(self, id):
        self.value = id + 1
        self.id = self.question.id + (self.value,)
        return self.value

    def id_str(self):
        ids = [str(x) for x in self.id]
        return u'.'.join(ids)

    def get_sheet(self):
        return self.question.sheet

    sheet = property(get_sheet)

    def calculate_survey_id(self, md5):
        tmp = struct.pack('!ffff', self.x, self.y, self.width, self.height)
        md5.update(tmp)

    def __unicode__(self):
        return u'\t%i(%s) %s %s %s %s %s\n' % (
            self.value,
            (self.__class__.__name__).ljust(8),
            (u'%.1f' % self.x).rjust(5),
            (u'%.1f' % self.y).rjust(5),
            (u'%.1f' % self.width).rjust(5),
            (u'%.1f' % self.height).rjust(5),
            self.text
        )


class Checkbox(Box):

    def init_attributes(self):
        Box.init_attributes(self)
        self.form = "box"

    def calculate_survey_id(self, md5):
        Box.calculate_survey_id(self, md5)
        md5.update(self.form)


class Textbox(Box):
    pass
gpl-3.0
2,867,286,513,674,983,400
24.232493
83
0.559614
false
3.594573
false
false
false
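The questionnaire record above builds its (major, minor, value) ids through three init_id methods; a sketch of that scheme in isolation.

# Head bumps the major number, other QObjects bump the minor,
# boxes append a zero-based value (QObject.last_id starts at -1).
last_id = (0, 0)
head_id = (last_id[0] + 1, 0)               # Head.init_id    -> (1, 0)
question_id = (head_id[0], head_id[1] + 1)  # QObject.init_id -> (1, 1)
box_value = -1 + 1                          # Box.init_id     -> value 0
box_id = question_id + (box_value,)         # (1, 1, 0)
print('.'.join(str(x) for x in box_id))     # "1.1.0", the format of Box.id_str()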
mornsun/javascratch
src/topcoder.py/LC_330_Patching_Array.py
1
1807
#!/usr/bin/env python #coding=utf8 ''' Given a sorted positive integer array nums and an integer n, add/patch elements to the array such that any number in range [1, n] inclusive can be formed by the sum of some elements in the array. Return the minimum number of patches required. Example 1: nums = [1, 3], n = 6 Return 1. Combinations of nums are [1], [3], [1,3], which form possible sums of: 1, 3, 4. Now if we add/patch 2 to nums, the combinations are: [1], [2], [3], [1,3], [2,3], [1,2,3]. Possible sums are 1, 2, 3, 4, 5, 6, which now covers the range [1, 6]. So we only need 1 patch. Example 2: nums = [1, 5, 10], n = 20 Return 2. The two patches can be [2, 4]. Example 3: nums = [1, 2, 2], n = 5 Return 0. @author: Chauncey beat 92.56% ''' import heapq import datetime import time import sys class Solution(object): def minPatches(self, nums, n): """ :type nums: List[int] :type n: int :rtype: int """ if n<=0: return 0 if nums is None: nums = [] miss = 1 index = 0 patch = 0 while miss<=n: if index>=len(nums) or miss<nums[index]: miss <<= 1 patch += 1 continue if miss>=nums[index]: miss += nums[index] index += 1 continue return patch if __name__ == '__main__': solution = Solution() start_time = datetime.datetime.now() print solution.minPatches([1, 3], 6) #1 print solution.minPatches([1, 5, 10], 20) #2 print solution.minPatches([1, 2, 2], 5) #0 print solution.minPatches([], 7) #3 elapsed = datetime.datetime.now() - start_time print 'elapsed: ', elapsed.total_seconds() #transactions = [buy, sell, cooldown, buy, sell]
gpl-2.0
8,682,041,023,751,641,000
24.111111
242
0.570559
false
3.333948
false
false
false
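The greedy invariant behind minPatches above: `miss` is the smallest sum not yet known to be reachable; every value in [1, miss) is already covered. Consuming nums[i] <= miss extends coverage by nums[i], while patching `miss` itself doubles it. A small trace of Example 1 under that reading:

# Trace of the greedy loop for nums=[1, 3], n=6.
nums, n = [1, 3], 6
miss, i, patches = 1, 0, 0
while miss <= n:
    if i < len(nums) and nums[i] <= miss:
        miss += nums[i]      # extend coverage to [1, miss + nums[i])
        i += 1
    else:
        miss <<= 1           # patch the value `miss`, doubling coverage
        patches += 1
print(patches)  # 1 (the single patch is the value 2)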
SaltusVita/ReoGrab
Spiders.py
1
6942
'''
Created on 2 Sep 2016

@author: garet
'''


import urllib.request
import urllib.error
import queue
import sqlite3
import re
import json
from urllib.parse import urlparse

from Parser import HtmlPage
import lxml.etree


class BaseSpider:

    def __init__(self):
        self.urls = QueueUrls()
        self.cache = SqliteCache('some_db')

    def add_urls(self, urls):
        self.urls.add_urls(urls)

    def add_urls_routed(self, urls):
        result = []
        for url in urls:
            if self.fetch_route(url) is not None:
                result.append(url)
        self.add_urls(result)

    def add_route(self, route):
        self.routes.append(route)

    def add_routes(self, routes):
        pass

    def fetch_route(self, url):
        if not hasattr(self, 'routes'):
            return
        for route in self.routes:
            part_url = re.match(route['re'], url)
            if part_url is not None and part_url.group(0) == url:
                if 'skip' in route and route['skip'] is True:
                    break
                return route
        return None

    def save_cache(self, url, data=None):
        pass

    def get_cache(self, url):
        pass

    def run(self):
        self.init()
        self.work()
        # self.clear()

    def init(self):
        if hasattr(self, 'start_urls'):
            self.add_urls(self.start_urls)
        if hasattr(self, 'routes'):
            self.add_routes(self.routes)

    def work(self):
        while not self.urls.empty():
            url = self.urls.get_url()
            response = self.get_page(url)
            route = self.fetch_route(url)
            if route is None:
                continue
            if 'type' in route and route['type'] == 'sitemap':
                urls = self.sitemap(response)
                self.add_urls_routed(urls)
                continue
            if 'name' in route and hasattr(self, route['name']):
                getattr(self, route['name'])(response)

    def sitemap(self, data):
        sitemap_text = data.text.replace('<?xml version="1.0" encoding="UTF-8"?>', '')
        doc = lxml.etree.XML(sitemap_text)
        ns = {"d": "http://www.sitemaps.org/schemas/sitemap/0.9"}
        return doc.xpath("//d:loc/text()", namespaces=ns)

    def charset(self, headers):
        encode = 'UTF-8'
        if hasattr(headers, 'Content-Type'):
            m = re.search('charset=([a-z 0-9\-\_]+)', headers, re.IGNORECASE)
            if m:
                encode = m.group(1)
        return encode

    def get_page(self, url):
        r = self.cache.get(url)
        if r is not None:
            print(r['url'])
            return Response(r)
        r = self.get_data(url)
        self.cache.set(r)
        print('{0} --- {1}'.format(url, r['url']))
        return Response(r)

    @staticmethod
    def get_data(url):
        try:
            r = urllib.request.urlopen(url)
            out = {
                'url': r.geturl(),
                'code': r.getcode(),
                'headers': json.dumps(r.getheaders()),
                'data': r.read()
            }
            return out
        except urllib.error.HTTPError as e:
            out = {
                'url': e.geturl(),
                'code': e.getcode(),
                'headers': json.dumps(e.getheaders()),
                'data': e.read()
            }
            return out


class QueueUrls:

    def __init__(self):
        self._urls_queue = queue.Queue()
        self._urls_set = set()

    def add_url(self, url):
        u = urlparse(url)
        url = u[0] + '://' + u[1] + u[2] + u[3]
        if u[4] != '':
            url += '?' + u[4]
        if url not in self._urls_set:
            self._urls_queue.put(url)
            self._urls_set.add(url)

    def add_urls(self, urls):
        urls_type = type(urls)
        if urls_type is str:
            self.add_url(urls)
            return
        for url in urls:
            self.add_url(url)

    def exist_url(self, url):
        if url in self._urls_set:
            return True
        return False

    def get_url(self):
        return self._urls_queue.get()

    def empty(self):
        return self._urls_queue.empty()


class SqliteCache:

    def __init__(self, db_name):
        self.db_name = db_name
        self.init_db()

    def init_db(self):
        file = self.db_name + '.sqlite'
        self._db = sqlite3.connect(file)
        self._cursor = self._db.cursor()
        # Create table
        sql = """
        CREATE TABLE IF NOT EXISTS tbl_urls(
        url TEXT primary key not null,
        code INTEGER,
        headers TEXT,
        data BLOB,
        time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
        );"""
        self._cursor.execute(sql)

    def get(self, url):
        if self._cursor is None:
            self.init_db()
        sql = "SELECT * FROM tbl_urls WHERE url=?;"
        self._cursor.execute(sql, (url,))
        row = self._cursor.fetchone()
        if row is not None:
            out = {
                'url': row[0],
                'code': row[1],
                'headers': json.loads(row[2]),
                'data': row[3]
            }
            return out
        return None

    def set(self, dat):
        if self._cursor is None:
            self.init_db()
        sql = "INSERT OR REPLACE INTO tbl_urls(url,code,headers,data) VALUES (?,?,?,?);"
        self._cursor.execute(sql, (dat['url'], dat['code'], dat['headers'], dat['data']))
        self._db.commit()


class Download:

    def __init__(self):
        self.method = 'GET'
        self.user_agent = self.random_user_agent()

    @staticmethod
    def random_user_agent(browser=None, os=None):
        return 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 8.0; WOW64; Trident/5.0; .NET CLR 2.7.40781; .NET4.0E; en-SG)'

    @staticmethod
    def get_page(url):
        r = urllib.request.urlopen(url)
        code = r.getcode()
        headers = r.getheaders()
        data = r.read()
        url = r.geturl()
        # return Response(r)


class Response:

    def __init__(self, res):
        self.code = res['code']
        self.headers = res['headers']
        self.data = res['data']
        self.url = res['url']

    def charset(self):
        encode = 'UTF-8'
        if hasattr(self.headers, 'Content-Type'):
            m = re.search('charset=([a-z 0-9\-\_]+)', self.headers, re.IGNORECASE)
            if m:
                encode = m.group(1)
        return encode

    @property
    def text(self):
        encode = self.charset()
        return self.data.decode(encode)

    def parser(self):
        # The decoded body lives in self.text; self.html did not exist.
        return HtmlPage(self.text, self.url)
bsd-3-clause
2,515,193,081,959,107,000
26.430328
124
0.486666
false
3.856031
false
false
false
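A hypothetical spider built on the BaseSpider record above: start_urls seeds the queue, and routes map URL patterns to handler method names, a sitemap expander, or a skip rule. The class name, URLs and handler are illustrative.

class ExampleSpider(BaseSpider):
    start_urls = ['http://example.com/sitemap.xml']
    routes = [
        {'re': r'http://example\.com/sitemap\.xml', 'type': 'sitemap'},
        {'re': r'http://example\.com/item/\d+', 'name': 'parse_item'},
        {'re': r'http://example\.com/login.*', 'skip': True},
    ]

    def parse_item(self, response):
        page = response.parser()  # wraps the body in Parser.HtmlPage
        print(response.url, response.code)

# run() seeds the queue, then work() drains it, routing each response.
ExampleSpider().run()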
rven/odoo
addons/l10n_ch/models/res_bank.py
1
16379
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import re from odoo import api, fields, models, _ from odoo.exceptions import ValidationError from odoo.tools.misc import mod10r from odoo.exceptions import UserError import werkzeug.urls ISR_SUBSCRIPTION_CODE = {'CHF': '01', 'EUR': '03'} CLEARING = "09000" _re_postal = re.compile('^[0-9]{2}-[0-9]{1,6}-[0-9]$') def _is_l10n_ch_postal(account_ref): """ Returns True if the string account_ref is a valid postal account number, i.e. it only contains ciphers and is last cipher is the result of a recursive modulo 10 operation ran over the rest of it. Shorten form with - is also accepted. """ if _re_postal.match(account_ref or ''): ref_subparts = account_ref.split('-') account_ref = ref_subparts[0] + ref_subparts[1].rjust(6, '0') + ref_subparts[2] if re.match('\d+$', account_ref or ''): account_ref_without_check = account_ref[:-1] return mod10r(account_ref_without_check) == account_ref return False def _is_l10n_ch_isr_issuer(account_ref, currency_code): """ Returns True if the string account_ref is a valid a valid ISR issuer An ISR issuer is postal account number that starts by 01 (CHF) or 03 (EUR), """ if (account_ref or '').startswith(ISR_SUBSCRIPTION_CODE[currency_code]): return _is_l10n_ch_postal(account_ref) return False class ResPartnerBank(models.Model): _inherit = 'res.partner.bank' l10n_ch_postal = fields.Char( string="Swiss Postal Account", readonly=False, store=True, compute='_compute_l10n_ch_postal', help="This field is used for the Swiss postal account number on a vendor account and for the client number on " "your own account. The client number is mostly 6 numbers without -, while the postal account number can " "be e.g. 01-162-8") # fields to configure ISR payment slip generation l10n_ch_isr_subscription_chf = fields.Char(string='CHF ISR Subscription Number', help='The subscription number provided by the bank or Postfinance to identify the bank, used to generate ISR in CHF. eg. 01-162-8') l10n_ch_isr_subscription_eur = fields.Char(string='EUR ISR Subscription Number', help='The subscription number provided by the bank or Postfinance to identify the bank, used to generate ISR in EUR. eg. 03-162-5') l10n_ch_show_subscription = fields.Boolean(compute='_compute_l10n_ch_show_subscription', default=lambda self: self.env.company.country_id.code == 'CH') def _is_isr_issuer(self): return (_is_l10n_ch_isr_issuer(self.l10n_ch_postal, 'CHF') or _is_l10n_ch_isr_issuer(self.l10n_ch_postal, 'EUR')) @api.constrains("l10n_ch_postal", "partner_id") def _check_postal_num(self): """Validate postal number format""" for rec in self: if rec.l10n_ch_postal and not _is_l10n_ch_postal(rec.l10n_ch_postal): # l10n_ch_postal is used for the purpose of Client Number on your own accounts, so don't do the check there if rec.partner_id and not rec.partner_id.ref_company_ids: raise ValidationError( _("The postal number {} is not valid.\n" "It must be a valid postal number format. eg. 
10-8060-7").format(rec.l10n_ch_postal)) return True @api.constrains("l10n_ch_isr_subscription_chf", "l10n_ch_isr_subscription_eur") def _check_subscription_num(self): """Validate ISR subscription number format Subscription number can only starts with 01 or 03 """ for rec in self: for currency in ["CHF", "EUR"]: subscrip = rec.l10n_ch_isr_subscription_chf if currency == "CHF" else rec.l10n_ch_isr_subscription_eur if subscrip and not _is_l10n_ch_isr_issuer(subscrip, currency): example = "01-162-8" if currency == "CHF" else "03-162-5" raise ValidationError( _("The ISR subcription {} for {} number is not valid.\n" "It must starts with {} and we a valid postal number format. eg. {}" ).format(subscrip, currency, ISR_SUBSCRIPTION_CODE[currency], example)) return True @api.depends('partner_id', 'company_id') def _compute_l10n_ch_show_subscription(self): for bank in self: if bank.partner_id: bank.l10n_ch_show_subscription = bank.partner_id.ref_company_ids.country_id.code =='CH' elif bank.company_id: bank.l10n_ch_show_subscription = bank.company_id.country_id.code == 'CH' else: bank.l10n_ch_show_subscription = self.env.company.country_id.code == 'CH' @api.depends('acc_number', 'acc_type') def _compute_sanitized_acc_number(self): #Only remove spaces in case it is not postal postal_banks = self.filtered(lambda b: b.acc_type == "postal") for bank in postal_banks: bank.sanitized_acc_number = bank.acc_number super(ResPartnerBank, self - postal_banks)._compute_sanitized_acc_number() @api.model def _get_supported_account_types(self): rslt = super(ResPartnerBank, self)._get_supported_account_types() rslt.append(('postal', _('Postal'))) return rslt @api.model def retrieve_acc_type(self, acc_number): """ Overridden method enabling the recognition of swiss postal bank account numbers. """ acc_number_split = "" # acc_number_split is needed to continue to recognize the account # as a postal account even if the difference if acc_number and " " in acc_number: acc_number_split = acc_number.split(" ")[0] if _is_l10n_ch_postal(acc_number) or (acc_number_split and _is_l10n_ch_postal(acc_number_split)): return 'postal' else: return super(ResPartnerBank, self).retrieve_acc_type(acc_number) @api.depends('acc_number', 'partner_id', 'acc_type') def _compute_l10n_ch_postal(self): for record in self: if record.acc_type == 'iban': record.l10n_ch_postal = self._retrieve_l10n_ch_postal(record.sanitized_acc_number) elif record.acc_type == 'postal': if record.acc_number and " " in record.acc_number: record.l10n_ch_postal = record.acc_number.split(" ")[0] else: record.l10n_ch_postal = record.acc_number # In case of ISR issuer, this number is not # unique and we fill acc_number with partner # name to give proper information to the user if record.partner_id and record.acc_number[:2] in ["01", "03"]: record.acc_number = ("{} {}").format(record.acc_number, record.partner_id.name) @api.model def _is_postfinance_iban(self, iban): """Postfinance IBAN have format CHXX 0900 0XXX XXXX XXXX K Where 09000 is the clearing number """ return iban.startswith('CH') and iban[4:9] == CLEARING @api.model def _pretty_postal_num(self, number): """format a postal account number or an ISR subscription number as per specifications with '-' separators. eg. 
010001628 -> 01-162-8 """ if re.match('^[0-9]{2}-[0-9]{1,6}-[0-9]$', number or ''): return number currency_code = number[:2] middle_part = number[2:-1] trailing_cipher = number[-1] middle_part = middle_part.lstrip("0") return currency_code + '-' + middle_part + '-' + trailing_cipher @api.model def _retrieve_l10n_ch_postal(self, iban): """Reads a swiss postal account number from a an IBAN and returns it as a string. Returns None if no valid postal account number was found, or the given iban was not from Swiss Postfinance. CH09 0900 0000 1000 8060 7 -> 10-8060-7 """ if self._is_postfinance_iban(iban): # the IBAN corresponds to a swiss account return self._pretty_postal_num(iban[-9:]) return None def _get_qr_code_url(self, qr_method, amount, currency, debtor_partner, free_communication, structured_communication): if qr_method == 'ch_qr': qr_code_vals = self._l10n_ch_get_qr_vals(amount, currency, debtor_partner, free_communication, structured_communication) return '/report/barcode/?type=%s&value=%s&width=%s&height=%s&quiet=1&mask=ch_cross' % ('QR', werkzeug.urls.url_quote_plus('\n'.join(qr_code_vals)), 256, 256) return super()._get_qr_code_url(qr_method, amount, currency, debtor_partner, free_communication, structured_communication) def _l10n_ch_get_qr_vals(self, amount, currency, debtor_partner, free_communication, structured_communication): comment = "" if free_communication: comment = (free_communication[:137] + '...') if len(free_communication) > 140 else free_communication creditor_addr_1, creditor_addr_2 = self._get_partner_address_lines(self.partner_id) debtor_addr_1, debtor_addr_2 = self._get_partner_address_lines(debtor_partner) # Compute reference type (empty by default, only mandatory for QR-IBAN, # and must then be 27 characters-long, with mod10r check digit as the 27th one, # just like ISR number for invoices) reference_type = 'NON' reference = '' if self._is_qr_iban(): # _check_for_qr_code_errors ensures we can't have a QR-IBAN without a QR-reference here reference_type = 'QRR' reference = structured_communication currency = currency or self.currency_id or self.company_id.currency_id return [ 'SPC', # QR Type '0200', # Version '1', # Coding Type self.sanitized_acc_number, # IBAN 'K', # Creditor Address Type (self.acc_holder_name or self.partner_id.name)[:70], # Creditor Name creditor_addr_1, # Creditor Address Line 1 creditor_addr_2, # Creditor Address Line 2 '', # Creditor Postal Code (empty, since we're using combined addres elements) '', # Creditor Town (empty, since we're using combined addres elements) self.partner_id.country_id.code, # Creditor Country '', # Ultimate Creditor Address Type '', # Name '', # Ultimate Creditor Address Line 1 '', # Ultimate Creditor Address Line 2 '', # Ultimate Creditor Postal Code '', # Ultimate Creditor Town '', # Ultimate Creditor Country '{:.2f}'.format(amount), # Amount currency.name, # Currency 'K', # Ultimate Debtor Address Type debtor_partner.commercial_partner_id.name[:70], # Ultimate Debtor Name debtor_addr_1, # Ultimate Debtor Address Line 1 debtor_addr_2, # Ultimate Debtor Address Line 2 '', # Ultimate Debtor Postal Code (not to be provided for address type K) '', # Ultimate Debtor Postal City (not to be provided for address type K) debtor_partner.country_id.code, # Ultimate Debtor Postal Country reference_type, # Reference Type reference, # Reference comment, # Unstructured Message 'EPD', # Mandatory trailer part ] def _get_partner_address_lines(self, partner): """ Returns a tuple of two elements containing the address lines to use 
for this partner. Line 1 contains the street and number, line 2 contains zip and city. Those two lines are limited to 70 characters """ streets = [partner.street, partner.street2] line_1 = ' '.join(filter(None, streets)) line_2 = partner.zip + ' ' + partner.city return line_1[:70], line_2[:70] def _check_qr_iban_range(self, iban): if not iban or len(iban) < 9: return False iid_start_index = 4 iid_end_index = 8 iid = iban[iid_start_index : iid_end_index+1] return re.match('\d+', iid) \ and 30000 <= int(iid) <= 31999 # Those values for iid are reserved for QR-IBANs only def _is_qr_iban(self): """ Tells whether or not this bank account has a QR-IBAN account number. QR-IBANs are specific identifiers used in Switzerland as references in QR-codes. They are formed like regular IBANs, but are actually something different. """ self.ensure_one() return self.acc_type == 'iban' \ and self._check_qr_iban_range(self.sanitized_acc_number) @api.model def _is_qr_reference(self, reference): """ Checks whether the given reference is a QR-reference, i.e. it is made of 27 digits, the 27th being a mod10r check on the 26 previous ones. """ return reference \ and len(reference) == 27 \ and re.match('\d+$', reference) \ and reference == mod10r(reference[:-1]) def _eligible_for_qr_code(self, qr_method, debtor_partner, currency): if qr_method == 'ch_qr': return self.acc_type == 'iban' and \ self.partner_id.country_id.code == 'CH' and \ (not debtor_partner or debtor_partner.country_id.code == 'CH') \ and currency.name in ('EUR', 'CHF') return super()._eligible_for_qr_code(qr_method, debtor_partner, currency) def _check_for_qr_code_errors(self, qr_method, amount, currency, debtor_partner, free_communication, structured_communication): def _partner_fields_set(partner): return partner.zip and \ partner.city and \ partner.country_id.code and \ (partner.street or partner.street2) if qr_method == 'ch_qr': if not _partner_fields_set(self.partner_id): return _("The partner set on the bank account meant to receive the payment (%s) must have a complete postal address (street, zip, city and country).", self.acc_number) if debtor_partner and not _partner_fields_set(debtor_partner): return _("The partner the QR-code must have a complete postal address (street, zip, city and country).") if self._is_qr_iban() and not self._is_qr_reference(structured_communication): return _("When using a QR-IBAN as the destination account of a QR-code, the payment reference must be a QR-reference.") return super()._check_for_qr_code_errors(qr_method, amount, currency, debtor_partner, free_communication, structured_communication) @api.model def _get_available_qr_methods(self): rslt = super()._get_available_qr_methods() rslt.append(('ch_qr', _("Swiss QR bill"), 10)) return rslt
agpl-3.0
1,126,025,373,065,044,900
51.16242
216
0.567251
false
3.88036
false
false
false
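A standalone sketch of the postal-number check performed by _is_l10n_ch_postal above, assuming an Odoo environment where mod10r (the recursive modulo-10 helper the record imports) is available; the sample number is the one from the record's own docstring.

import re
from odoo.tools.misc import mod10r

def is_ch_postal(ref):
    # The shortened 'XX-XXXXXX-X' form is padded back to 9 digits first.
    if re.match('^[0-9]{2}-[0-9]{1,6}-[0-9]$', ref):
        a, b, c = ref.split('-')
        ref = a + b.rjust(6, '0') + c
    # Valid when the last digit equals the mod10r check of the rest.
    return bool(re.match(r'\d+$', ref)) and mod10r(ref[:-1]) == ref

print(is_ch_postal('10-8060-7'))  # True for the docstring's example number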
googleapis/googleapis-gen
google/cloud/talent/v4beta1/talent-v4beta1-py/google/cloud/talent_v4beta1/services/completion/transports/grpc.py
1
11561
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import grpc_helpers # type: ignore from google.api_core import gapic_v1 # type: ignore import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from google.cloud.talent_v4beta1.types import completion_service from .base import CompletionTransport, DEFAULT_CLIENT_INFO class CompletionGrpcTransport(CompletionTransport): """gRPC backend transport for Completion. A service handles auto completion. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _stubs: Dict[str, Callable] def __init__(self, *, host: str = 'jobs.googleapis.com', credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional(Sequence[str])): A list of scopes. This argument is ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or applicatin default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. 
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, credentials=self._credentials, credentials_file=credentials_file, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @classmethod def create_channel(cls, host: str = 'jobs.googleapis.com', credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs) -> grpc.Channel: """Create and return a gRPC channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. 
This argument is mutually exclusive with credentials. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: grpc.Channel: A gRPC channel object. Raises: google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ return grpc_helpers.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs ) @property def grpc_channel(self) -> grpc.Channel: """Return the channel designed to connect to this service. """ return self._grpc_channel @property def complete_query(self) -> Callable[ [completion_service.CompleteQueryRequest], completion_service.CompleteQueryResponse]: r"""Return a callable for the complete query method over gRPC. Completes the specified prefix with keyword suggestions. Intended for use by a job search auto- complete search box. Returns: Callable[[~.CompleteQueryRequest], ~.CompleteQueryResponse]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if 'complete_query' not in self._stubs: self._stubs['complete_query'] = self.grpc_channel.unary_unary( '/google.cloud.talent.v4beta1.Completion/CompleteQuery', request_serializer=completion_service.CompleteQueryRequest.serialize, response_deserializer=completion_service.CompleteQueryResponse.deserialize, ) return self._stubs['complete_query'] __all__ = ( 'CompletionGrpcTransport', )
apache-2.0
3,560,645,474,204,908,500
44.515748
91
0.607992
false
4.817083
false
false
false
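The transport in the record above creates each RPC stub lazily and caches it in self._stubs, so repeated access to the complete_query property reuses a single callable per method path. A minimal, standard-library-only sketch of that lazy-cache pattern follows; LazyStubCache and make_stub are illustrative names (the real transport uses grpc_channel.unary_unary as the factory), not part of the client library:

from typing import Callable, Dict

class LazyStubCache:
    """Sketch of the per-method stub cache used by CompletionGrpcTransport."""

    def __init__(self, make_stub: Callable[[str], Callable]):
        self._make_stub = make_stub       # stands in for grpc_channel.unary_unary
        self._stubs: Dict[str, Callable] = {}

    def get(self, method_path: str) -> Callable:
        # Build the callable on first use only, then hand back the cached one.
        if method_path not in self._stubs:
            self._stubs[method_path] = self._make_stub(method_path)
        return self._stubs[method_path]

if __name__ == '__main__':
    cache = LazyStubCache(lambda path: (lambda request: '%s <- %r' % (path, request)))
    stub = cache.get('/google.cloud.talent.v4beta1.Completion/CompleteQuery')
    print(stub('software eng'))
    # The second lookup returns the very same callable, built only once.
    assert cache.get('/google.cloud.talent.v4beta1.Completion/CompleteQuery') is stub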
rldleblanc/ceph-tools
osd_hunter.py
1
6255
#!/usr/bin/python
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4

import argparse
import re
import datetime
import operator
import pprint
import glob
import gzip
import sys

slow_threshold = 10 #seconds

# Nothing to change past here
verbose = None
re_slow = re.compile(r'^(\d+-\d+-\d+\s+\d+:\d+:\d+\.\d+)\s+\w+\s+0.*slow.*(client\.\d+\.\d+:\d+).*from\s+(\d+(,\d+)*)')
re_io = re.compile(r'^(\d+-\d+-\d+\s+\d+:\d+:\d+\.\d+)\s+\w+\s+1.*<==.*(osd\.\d+|client).*(client\.\d+\.\d+:\d+).*')


def get_date(datestring):
    nofrag, frag = datestring.split(".")
    date = datetime.datetime.strptime(nofrag, "%Y-%m-%d %H:%M:%S")
    frag = frag[:6] #truncate to microseconds
    frag += (6 - len(frag)) * '0'
    date = date.replace(microsecond=int(frag))
    return date


def get_log_files(args):
    if args.all is True:
        if args.zip is True:
            return glob.glob(args.logdir + "ceph-osd.*.log*")
        else:
            return glob.glob(args.logdir + "ceph-osd.*.log")
    else:
        if args.zip is True:
            return glob.glob(args.logdir + "ceph-osd." + str(args.osd) + ".log*")
        else:
            return glob.glob(args.logdir + "ceph-osd." + str(args.osd) + ".log")


def find_blocked(args):
    slow_osds = {}
    if args.all is True:
        if verbose >= 1:
            print "Searching all OSDs."
        for logfile in get_log_files(args):
            result = search_logs(logfile)
            if result:
                slow_osds.update(result)
    else:
        if verbose >= 1:
            print "Going to search OSD " + str(args.osd) + "."
        slow_osds = search_logs(get_log_files(args)[0])
    if verbose >= 3:
        pprint.pprint(slow_osds)
    if len(slow_osds) > 0:
        print_output(slow_osds)
    else:
        print "Could not find any slow OSDs."


def print_output(slow_osds):
    # Tally up the slow OSDs
    # go through all arrays and create a new array of slow OSDs
    # with the OSD ID as the key and increment the value for each
    # Sort the list ascending and print out the OSDs.
    osd_report = {}
    for key in slow_osds.keys():
        if slow_osds[key].get('start', None):
            if slow_osds[key].get('slow', None):
                for i in slow_osds[key]['slow']:
                    if i not in osd_report.keys():
                        osd_report[i] = 1
                    else:
                        osd_report[i] += 1
    osd_report = sorted(osd_report.items(), key=operator.itemgetter(1))
    if len(osd_report) > 0:
        for i in osd_report:
            print "OSD " + str(i[0]) + ": " + str(i[1])
    else:
        print "Could not find any slow OSDs."


def search_logs(logfile):
    if verbose >= 1:
        print "Searching through " + logfile + "..."
    try:
        # Iterate through the file looking for slow messages so we know
        # which I/O are problematic
        if logfile.endswith('.gz'):
            with gzip.open(logfile, 'rb') as f:
                return scan_file(f)
        else:
            with open(logfile, 'rb') as f:
                return scan_file(f)
    except (IOError, OSError):
        print "Could not open " + logfile + " for reading."
        sys.exit(1)


def scan_file(fd):
    slow_osds = {}
    # If the line has slow, capture the date/time, the client id
    # and the secondary OSDs as slow clients
    for line in fd:
        matches = re_slow.match(line)
        if matches and matches.group(2) not in slow_osds:
            slow_osds[matches.group(2)] = {}
            #slow_osds[matches.group(2)]['start'] = get_date(matches.group(1))
            slow_osds[matches.group(2)]['slow'] = matches.group(3).split(",")
    # On the second iteration, look for lines that have the client id
    # 1. Get the date/time stamp from the request from the client,
    #    set as the start time for the I/O
    # 2. If it has ondisk status. Get the date/time. Compare with the
    #    start time and if less than slow_threshold seconds, move osd to the
    #    fast list.
    if len(slow_osds) > 0:
        # Jump back to the start of the file
        fd.seek(0)
        for line in fd:
            matches = re_io.match(line)
            if matches and matches.group(3) in slow_osds.keys():
                if 'client' in matches.group(2):
                    slow_osds[matches.group(3)]['start'] = get_date(matches.group(1))
                elif 'osd' in matches.group(2) and slow_osds[matches.group(3)].get('start', None):
                    latency = get_date(matches.group(1)) - slow_osds[matches.group(3)]['start']
                    osd = matches.group(2).split(".")[1]
                    if latency < datetime.timedelta(seconds=slow_threshold):
                        if osd in slow_osds[matches.group(3)]['slow']:
                            slow_osds[matches.group(3)]['slow'].remove(osd)
                        if not slow_osds[matches.group(3)].get('fast', None):
                            slow_osds[matches.group(3)]['fast'] = [osd]
                        elif osd not in slow_osds[matches.group(3)]['fast']:
                            slow_osds[matches.group(3)]['fast'] += [osd]
    return slow_osds


def main():
    # Main execution
    global verbose
    parser = argparse.ArgumentParser(description="Hunts for slow OSDs by looking through OSD logs.")
    osdgroup = parser.add_mutually_exclusive_group(required=True)
    osdgroup.add_argument('-o', '--osd', type=int,
                          help="an OSD on this host that is reporting slow I/O.")
    osdgroup.add_argument('-a', '--all', action="store_true", default=False,
                          help="Search logs of all OSDs in logdir.")
    parser.add_argument('-z', '--zip', action="store_true", default=False,
                        help="Also search through compressed logfiles.")
    parser.add_argument('-l', '--logdir', default="/var/log/ceph/",
                        help="Location of log files. Defaults to /var/log/ceph/.")
    parser.add_argument('-v', '--verbose', action="count", default=0,
                        help="Increase verbosity, more flags means more output.")
    args = parser.parse_args()

    verbose = args.verbose

    if verbose >= 3:
        pprint.pprint(args)

    if args.all or args.osd is not None:
        find_blocked(args)

if __name__ == "__main__":
    main()
lgpl-3.0
-2,655,373,338,628,918,300
37.850932
127
0.561311
false
3.431157
false
false
false
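The heart of osd_hunter's latency math is get_date(), which parses the Ceph log timestamp without its fraction and then right-pads the fraction to microseconds by hand. A self-contained check of that helper, using only the standard library (the timestamps are made up):

import datetime

def get_date(datestring):
    nofrag, frag = datestring.split(".")
    date = datetime.datetime.strptime(nofrag, "%Y-%m-%d %H:%M:%S")
    frag = frag[:6]                   # truncate to microseconds
    frag += (6 - len(frag)) * '0'     # right-pad short fractions
    return date.replace(microsecond=int(frag))

if __name__ == '__main__':
    start = get_date("2016-03-01 12:00:00.123")
    ack = get_date("2016-03-01 12:00:10.123456789")
    # Prints 0:00:10.000456 -- the timedelta scan_file compares to slow_threshold.
    print(ack - start)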
santisiri/popego
envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/distutils/cpuinfo.py
1
22466
#!/usr/bin/env python """ cpuinfo Copyright 2002 Pearu Peterson all rights reserved, Pearu Peterson <[email protected]> Permission to use, modify, and distribute this software is given under the terms of the NumPy (BSD style) license. See LICENSE.txt that came with this distribution for specifics. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. Pearu Peterson """ __all__ = ['cpu'] import sys, re, types import os import commands import warnings def getoutput(cmd, successful_status=(0,), stacklevel=1): try: status, output = commands.getstatusoutput(cmd) except EnvironmentError, e: warnings.warn(str(e), UserWarning, stacklevel=stacklevel) return False, output if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: return True, output return False, output def command_info(successful_status=(0,), stacklevel=1, **kw): info = {} for key in kw: ok, output = getoutput(kw[key], successful_status=successful_status, stacklevel=stacklevel+1) if ok: info[key] = output.strip() return info def command_by_line(cmd, successful_status=(0,), stacklevel=1): ok, output = getoutput(cmd, successful_status=successful_status, stacklevel=stacklevel+1) if not ok: return for line in output.splitlines(): yield line.strip() def key_value_from_command(cmd, sep, successful_status=(0,), stacklevel=1): d = {} for line in command_by_line(cmd, successful_status=successful_status, stacklevel=stacklevel+1): l = [s.strip() for s in line.split(sep, 1)] if len(l) == 2: d[l[0]] = l[1] return d class CPUInfoBase(object): """Holds CPU information and provides methods for requiring the availability of various CPU features. """ def _try_call(self,func): try: return func() except: pass def __getattr__(self,name): if not name.startswith('_'): if hasattr(self,'_'+name): attr = getattr(self,'_'+name) if type(attr) is types.MethodType: return lambda func=self._try_call,attr=attr : func(attr) else: return lambda : None raise AttributeError,name def _getNCPUs(self): return 1 def _is_32bit(self): return not self.is_64bit() class LinuxCPUInfo(CPUInfoBase): info = None def __init__(self): if self.info is not None: return info = [ {} ] ok, output = getoutput('uname -m') if ok: info[0]['uname_m'] = output.strip() try: fo = open('/proc/cpuinfo') except EnvironmentError, e: warnings.warn(str(e), UserWarning) else: for line in fo: name_value = [s.strip() for s in line.split(':', 1)] if len(name_value) != 2: continue name, value = name_value if not info or info[-1].has_key(name): # next processor info.append({}) info[-1][name] = value fo.close() self.__class__.info = info def _not_impl(self): pass # Athlon def _is_AMD(self): return self.info[0]['vendor_id']=='AuthenticAMD' def _is_AthlonK6_2(self): return self._is_AMD() and self.info[0]['model'] == '2' def _is_AthlonK6_3(self): return self._is_AMD() and self.info[0]['model'] == '3' def _is_AthlonK6(self): return re.match(r'.*?AMD-K6',self.info[0]['model name']) is not None def _is_AthlonK7(self): return re.match(r'.*?AMD-K7',self.info[0]['model name']) is not None def _is_AthlonMP(self): return re.match(r'.*?Athlon\(tm\) MP\b', self.info[0]['model name']) is not None def _is_AMD64(self): return self.is_AMD() and self.info[0]['family'] == '15' def _is_Athlon64(self): return re.match(r'.*?Athlon\(tm\) 64\b', self.info[0]['model name']) is not None def _is_AthlonHX(self): return re.match(r'.*?Athlon HX\b', self.info[0]['model name']) is not None def _is_Opteron(self): return re.match(r'.*?Opteron\b', self.info[0]['model name']) is not None def _is_Hammer(self): return 
re.match(r'.*?Hammer\b', self.info[0]['model name']) is not None # Alpha def _is_Alpha(self): return self.info[0]['cpu']=='Alpha' def _is_EV4(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' def _is_EV5(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' def _is_EV56(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' def _is_PCA56(self): return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' # Intel #XXX _is_i386 = _not_impl def _is_Intel(self): return self.info[0]['vendor_id']=='GenuineIntel' def _is_i486(self): return self.info[0]['cpu']=='i486' def _is_i586(self): return self.is_Intel() and self.info[0]['cpu family'] == '5' def _is_i686(self): return self.is_Intel() and self.info[0]['cpu family'] == '6' def _is_Celeron(self): return re.match(r'.*?Celeron', self.info[0]['model name']) is not None def _is_Pentium(self): return re.match(r'.*?Pentium', self.info[0]['model name']) is not None def _is_PentiumII(self): return re.match(r'.*?Pentium.*?II\b', self.info[0]['model name']) is not None def _is_PentiumPro(self): return re.match(r'.*?PentiumPro\b', self.info[0]['model name']) is not None def _is_PentiumMMX(self): return re.match(r'.*?Pentium.*?MMX\b', self.info[0]['model name']) is not None def _is_PentiumIII(self): return re.match(r'.*?Pentium.*?III\b', self.info[0]['model name']) is not None def _is_PentiumIV(self): return re.match(r'.*?Pentium.*?(IV|4)\b', self.info[0]['model name']) is not None def _is_PentiumM(self): return re.match(r'.*?Pentium.*?M\b', self.info[0]['model name']) is not None def _is_Prescott(self): return self.is_PentiumIV() and self.has_sse3() def _is_Nocona(self): return self.is_64bit() and self.is_PentiumIV() def _is_Core2(self): return self.is_64bit() and self.is_Intel() and \ re.match(r'.*?Core\(TM\)2\b', \ self.info[0]['model name']) is not None def _is_Itanium(self): return re.match(r'.*?Itanium\b', self.info[0]['family']) is not None def _is_XEON(self): return re.match(r'.*?XEON\b', self.info[0]['model name'],re.IGNORECASE) is not None _is_Xeon = _is_XEON # Varia def _is_singleCPU(self): return len(self.info) == 1 def _getNCPUs(self): return len(self.info) def _has_fdiv_bug(self): return self.info[0]['fdiv_bug']=='yes' def _has_f00f_bug(self): return self.info[0]['f00f_bug']=='yes' def _has_mmx(self): return re.match(r'.*?\bmmx\b',self.info[0]['flags']) is not None def _has_sse(self): return re.match(r'.*?\bsse\b',self.info[0]['flags']) is not None def _has_sse2(self): return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None def _has_sse3(self): return re.match(r'.*?\bsse3\b',self.info[0]['flags']) is not None def _has_3dnow(self): return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None def _has_3dnowext(self): return re.match(r'.*?\b3dnowext\b',self.info[0]['flags']) is not None def _is_64bit(self): if self.is_Alpha(): return True if self.info[0].get('clflush size','')=='64': return True if self.info[0].get('uname_m','')=='x86_64': return True if self.info[0].get('arch','')=='IA-64': return True return False def _is_32bit(self): return not self.is_64bit() class IRIXCPUInfo(CPUInfoBase): info = None def __init__(self): if self.info is not None: return info = key_value_from_command('sysconf', sep=' ', successful_status=(0,1)) self.__class__.info = info def _not_impl(self): pass def _is_singleCPU(self): return self.info.get('NUM_PROCESSORS') == '1' def _getNCPUs(self): return int(self.info.get('NUM_PROCESSORS', 1)) def __cputype(self,n): return 
self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) def _is_r2000(self): return self.__cputype(2000) def _is_r3000(self): return self.__cputype(3000) def _is_r3900(self): return self.__cputype(3900) def _is_r4000(self): return self.__cputype(4000) def _is_r4100(self): return self.__cputype(4100) def _is_r4300(self): return self.__cputype(4300) def _is_r4400(self): return self.__cputype(4400) def _is_r4600(self): return self.__cputype(4600) def _is_r4650(self): return self.__cputype(4650) def _is_r5000(self): return self.__cputype(5000) def _is_r6000(self): return self.__cputype(6000) def _is_r8000(self): return self.__cputype(8000) def _is_r10000(self): return self.__cputype(10000) def _is_r12000(self): return self.__cputype(12000) def _is_rorion(self): return self.__cputype('orion') def get_ip(self): try: return self.info.get('MACHINE') except: pass def __machine(self,n): return self.info.get('MACHINE').lower() == 'ip%s' % (n) def _is_IP19(self): return self.__machine(19) def _is_IP20(self): return self.__machine(20) def _is_IP21(self): return self.__machine(21) def _is_IP22(self): return self.__machine(22) def _is_IP22_4k(self): return self.__machine(22) and self._is_r4000() def _is_IP22_5k(self): return self.__machine(22) and self._is_r5000() def _is_IP24(self): return self.__machine(24) def _is_IP25(self): return self.__machine(25) def _is_IP26(self): return self.__machine(26) def _is_IP27(self): return self.__machine(27) def _is_IP28(self): return self.__machine(28) def _is_IP30(self): return self.__machine(30) def _is_IP32(self): return self.__machine(32) def _is_IP32_5k(self): return self.__machine(32) and self._is_r5000() def _is_IP32_10k(self): return self.__machine(32) and self._is_r10000() class DarwinCPUInfo(CPUInfoBase): info = None def __init__(self): if self.info is not None: return info = command_info(arch='arch', machine='machine') info['sysctl_hw'] = key_value_from_command('sysctl hw', sep='=') self.__class__.info = info def _not_impl(self): pass def _getNCPUs(self): return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) def _is_Power_Macintosh(self): return self.info['sysctl_hw']['hw.machine']=='Power Macintosh' def _is_i386(self): return self.info['arch']=='i386' def _is_ppc(self): return self.info['arch']=='ppc' def __machine(self,n): return self.info['machine'] == 'ppc%s'%n def _is_ppc601(self): return self.__machine(601) def _is_ppc602(self): return self.__machine(602) def _is_ppc603(self): return self.__machine(603) def _is_ppc603e(self): return self.__machine('603e') def _is_ppc604(self): return self.__machine(604) def _is_ppc604e(self): return self.__machine('604e') def _is_ppc620(self): return self.__machine(620) def _is_ppc630(self): return self.__machine(630) def _is_ppc740(self): return self.__machine(740) def _is_ppc7400(self): return self.__machine(7400) def _is_ppc7450(self): return self.__machine(7450) def _is_ppc750(self): return self.__machine(750) def _is_ppc403(self): return self.__machine(403) def _is_ppc505(self): return self.__machine(505) def _is_ppc801(self): return self.__machine(801) def _is_ppc821(self): return self.__machine(821) def _is_ppc823(self): return self.__machine(823) def _is_ppc860(self): return self.__machine(860) class SunOSCPUInfo(CPUInfoBase): info = None def __init__(self): if self.info is not None: return info = command_info(arch='arch', mach='mach', uname_i='uname_i', isainfo_b='isainfo -b', isainfo_n='isainfo -n', ) info['uname_X'] = key_value_from_command('uname -X', sep='=') for line in command_by_line('psrinfo -v 
0'): m = re.match(r'\s*The (?P<p>[\w\d]+) processor operates at', line) if m: info['processor'] = m.group('p') break self.__class__.info = info def _not_impl(self): pass def _is_32bit(self): return self.info['isainfo_b']=='32' def _is_64bit(self): return self.info['isainfo_b']=='64' def _is_i386(self): return self.info['isainfo_n']=='i386' def _is_sparc(self): return self.info['isainfo_n']=='sparc' def _is_sparcv9(self): return self.info['isainfo_n']=='sparcv9' def _getNCPUs(self): return int(self.info['uname_X'].get('NumCPU', 1)) def _is_sun4(self): return self.info['arch']=='sun4' def _is_SUNW(self): return re.match(r'SUNW',self.info['uname_i']) is not None def _is_sparcstation5(self): return re.match(r'.*SPARCstation-5',self.info['uname_i']) is not None def _is_ultra1(self): return re.match(r'.*Ultra-1',self.info['uname_i']) is not None def _is_ultra250(self): return re.match(r'.*Ultra-250',self.info['uname_i']) is not None def _is_ultra2(self): return re.match(r'.*Ultra-2',self.info['uname_i']) is not None def _is_ultra30(self): return re.match(r'.*Ultra-30',self.info['uname_i']) is not None def _is_ultra4(self): return re.match(r'.*Ultra-4',self.info['uname_i']) is not None def _is_ultra5_10(self): return re.match(r'.*Ultra-5_10',self.info['uname_i']) is not None def _is_ultra5(self): return re.match(r'.*Ultra-5',self.info['uname_i']) is not None def _is_ultra60(self): return re.match(r'.*Ultra-60',self.info['uname_i']) is not None def _is_ultra80(self): return re.match(r'.*Ultra-80',self.info['uname_i']) is not None def _is_ultraenterprice(self): return re.match(r'.*Ultra-Enterprise',self.info['uname_i']) is not None def _is_ultraenterprice10k(self): return re.match(r'.*Ultra-Enterprise-10000',self.info['uname_i']) is not None def _is_sunfire(self): return re.match(r'.*Sun-Fire',self.info['uname_i']) is not None def _is_ultra(self): return re.match(r'.*Ultra',self.info['uname_i']) is not None def _is_cpusparcv7(self): return self.info['processor']=='sparcv7' def _is_cpusparcv8(self): return self.info['processor']=='sparcv8' def _is_cpusparcv9(self): return self.info['processor']=='sparcv9' class Win32CPUInfo(CPUInfoBase): info = None pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" # XXX: what does the value of # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 # mean? def __init__(self): if self.info is not None: return info = [] try: #XXX: Bad style to use so long `try:...except:...`. Fix it! 
import _winreg prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)"\ "\s+stepping\s+(?P<STP>\d+)",re.IGNORECASE) chnd=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, self.pkey) pnum=0 while 1: try: proc=_winreg.EnumKey(chnd,pnum) except _winreg.error: break else: pnum+=1 info.append({"Processor":proc}) phnd=_winreg.OpenKey(chnd,proc) pidx=0 while True: try: name,value,vtpe=_winreg.EnumValue(phnd,pidx) except _winreg.error: break else: pidx=pidx+1 info[-1][name]=value if name=="Identifier": srch=prgx.search(value) if srch: info[-1]["Family"]=int(srch.group("FML")) info[-1]["Model"]=int(srch.group("MDL")) info[-1]["Stepping"]=int(srch.group("STP")) except: print sys.exc_value,'(ignoring)' self.__class__.info = info def _not_impl(self): pass # Athlon def _is_AMD(self): return self.info[0]['VendorIdentifier']=='AuthenticAMD' def _is_Am486(self): return self.is_AMD() and self.info[0]['Family']==4 def _is_Am5x86(self): return self.is_AMD() and self.info[0]['Family']==4 def _is_AMDK5(self): return self.is_AMD() and self.info[0]['Family']==5 \ and self.info[0]['Model'] in [0,1,2,3] def _is_AMDK6(self): return self.is_AMD() and self.info[0]['Family']==5 \ and self.info[0]['Model'] in [6,7] def _is_AMDK6_2(self): return self.is_AMD() and self.info[0]['Family']==5 \ and self.info[0]['Model']==8 def _is_AMDK6_3(self): return self.is_AMD() and self.info[0]['Family']==5 \ and self.info[0]['Model']==9 def _is_AMDK7(self): return self.is_AMD() and self.info[0]['Family'] == 6 # To reliably distinguish between the different types of AMD64 chips # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.) would # require looking at the 'brand' from cpuid def _is_AMD64(self): return self.is_AMD() and self.info[0]['Family'] == 15 # Intel def _is_Intel(self): return self.info[0]['VendorIdentifier']=='GenuineIntel' def _is_i386(self): return self.info[0]['Family']==3 def _is_i486(self): return self.info[0]['Family']==4 def _is_i586(self): return self.is_Intel() and self.info[0]['Family']==5 def _is_i686(self): return self.is_Intel() and self.info[0]['Family']==6 def _is_Pentium(self): return self.is_Intel() and self.info[0]['Family']==5 def _is_PentiumMMX(self): return self.is_Intel() and self.info[0]['Family']==5 \ and self.info[0]['Model']==4 def _is_PentiumPro(self): return self.is_Intel() and self.info[0]['Family']==6 \ and self.info[0]['Model']==1 def _is_PentiumII(self): return self.is_Intel() and self.info[0]['Family']==6 \ and self.info[0]['Model'] in [3,5,6] def _is_PentiumIII(self): return self.is_Intel() and self.info[0]['Family']==6 \ and self.info[0]['Model'] in [7,8,9,10,11] def _is_PentiumIV(self): return self.is_Intel() and self.info[0]['Family']==15 def _is_PentiumM(self): return self.is_Intel() and self.info[0]['Family'] == 6 \ and self.info[0]['Model'] in [9, 13, 14] def _is_Core2(self): return self.is_Intel() and self.info[0]['Family'] == 6 \ and self.info[0]['Model'] in [15, 16, 17] # Varia def _is_singleCPU(self): return len(self.info) == 1 def _getNCPUs(self): return len(self.info) def _has_mmx(self): if self.is_Intel(): return (self.info[0]['Family']==5 and self.info[0]['Model']==4) \ or (self.info[0]['Family'] in [6,15]) elif self.is_AMD(): return self.info[0]['Family'] in [5,6,15] else: return False def _has_sse(self): if self.is_Intel(): return (self.info[0]['Family']==6 and \ self.info[0]['Model'] in [7,8,9,10,11]) \ or self.info[0]['Family']==15 elif self.is_AMD(): return (self.info[0]['Family']==6 and \ self.info[0]['Model'] in [6,7,8,10]) \ or self.info[0]['Family']==15 
else: return False def _has_sse2(self): if self.is_Intel(): return self.is_Pentium4() or self.is_PentiumM() \ or self.is_Core2() elif self.is_AMD(): return self.is_AMD64() else: return False def _has_3dnow(self): return self.is_AMD() and self.info[0]['Family'] in [5,6,15] def _has_3dnowext(self): return self.is_AMD() and self.info[0]['Family'] in [6,15] if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) cpuinfo = LinuxCPUInfo elif sys.platform.startswith('irix'): cpuinfo = IRIXCPUInfo elif sys.platform == 'darwin': cpuinfo = DarwinCPUInfo elif sys.platform.startswith('sunos'): cpuinfo = SunOSCPUInfo elif sys.platform.startswith('win32'): cpuinfo = Win32CPUInfo elif sys.platform.startswith('cygwin'): cpuinfo = LinuxCPUInfo #XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. else: cpuinfo = CPUInfoBase cpu = cpuinfo() if __name__ == "__main__": cpu.is_blaa() cpu.is_Intel() cpu.is_Alpha() print 'CPU information:', for name in dir(cpuinfo): if name[0]=='_' and name[1]!='_': r = getattr(cpu,name[1:])() if r: if r!=1: print '%s=%s' %(name[1:],r), else: print name[1:], print
bsd-3-clause
-3,900,664,180,430,769,700
31.989721
85
0.54509
false
3.277794
false
false
false
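The most reusable idea in cpuinfo.py is CPUInfoBase.__getattr__: every public is_XXX()/has_XXX() query is routed to a private _is_XXX/_has_XXX probe wrapped in _try_call, and unknown queries come back as a callable returning None instead of raising, which is why the __main__ block can call cpu.is_blaa() safely. A reduced sketch of that dispatch (written in Python 3 syntax, unlike the Python 2 original; the class and probe names are illustrative):

class InfoBase:
    def _try_call(self, func):
        # Swallow any probe failure so feature checks can never raise.
        try:
            return func()
        except Exception:
            return None

    def __getattr__(self, name):
        # Only reached for attributes not found normally, i.e. the public queries.
        if not name.startswith('_'):
            probe = getattr(type(self), '_' + name, None)
            if probe is None:
                return lambda: None          # unknown query: quietly falsy
            return lambda: self._try_call(lambda: probe(self))
        raise AttributeError(name)

class FakeCPU(InfoBase):
    def _is_fast(self):
        return True
    def _is_broken(self):
        raise RuntimeError("probe blew up")

cpu = FakeCPU()
print(cpu.is_fast())     # True
print(cpu.is_broken())   # None -- the exception is swallowed
print(cpu.is_blaa())     # None -- unknown queries do not raise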
skim1420/spinnaker
spinbot/event/release_branch_pull_request_handler.py
1
2049
from .handler import Handler from .pull_request_event import GetBaseBranch, GetPullRequest, GetTitle, GetRepo from gh import ReleaseBranchFor, ParseCommitMessage format_message = ('Features cannot be merged into release branches. The following commits ' + 'are not tagged as one of "{}":\n\n{}\n\n' + 'Read more about [commit conventions](https://www.spinnaker.io/community/contributing/submitting/#commit-message-conventions) ' + 'and [patch releases](https://www.spinnaker.io/community/releases/release-cadence/#patching-the-release-candidate) here.') class ReleaseBranchPullRequestHandler(Handler): def __init__(self): super().__init__() self.omit_repos = self.config.get('omit_repos', []) self.allowed_types = self.config.get( 'allowed_types', ['fix', 'chore', 'docs', 'test'] ) def handles(self, event): return (event.type == 'PullRequestEvent' and event.payload.get('action') == 'opened' and ReleaseBranchFor(GetBaseBranch(event)) != None) def handle(self, g, event): repo = GetRepo(event) if repo in self.omit_repos: self.logging.info('Skipping {} because it\'s in omitted repo {}'.format(event, repo)) return pull_request = GetPullRequest(g, event) if pull_request is None: self.logging.warn('Unable to determine PR that created {}'.format(event)) return commits = pull_request.get_commits() bad_commits = [] for commit in commits: message = ParseCommitMessage(commit.commit.message) if message is None or message.get('type') not in self.allowed_types: bad_commits.append(commit.commit) if len(bad_commits) > 0: pull_request.create_issue_comment(format_message.format( ', '.join(self.allowed_types), '\n\n'.join(map(lambda c: '{}: {}'.format(c.sha, c.message), bad_commits)) )) ReleaseBranchPullRequestHandler()
apache-2.0
-3,984,927,411,745,407,000
40.816327
133
0.627135
false
3.96325
false
false
false
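ParseCommitMessage is imported from the bot's gh module and is not shown in this record. A plausible stand-in that recognizes the "type(scope): subject" convention the handler enforces could look like the following; the regex and function name are assumptions for illustration, not the bot's actual implementation:

import re

# Hypothetical stand-in for gh.ParseCommitMessage.
_COMMIT_RE = re.compile(r'^(?P<type>\w+)(?:\((?P<scope>[^)]*)\))?:\s+(?P<subject>.+)')

def parse_commit_message(message):
    m = _COMMIT_RE.match(message.splitlines()[0])
    return m.groupdict() if m else None

allowed = ['fix', 'chore', 'docs', 'test']
for msg in ['fix(core): null-check the PR body',
            'feat(ui): add a new dashboard',
            'merge branch master']:
    parsed = parse_commit_message(msg)
    ok = parsed is not None and parsed.get('type') in allowed
    # The handler collects the failures as bad_commits and comments on the PR.
    print(msg, '->', 'ok' if ok else 'would be flagged')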
wjwwood/open-robotics-platform
template.py
1
1949
#!/usr/bin/env python -OO # encoding: utf-8 ########### # ORP - Open Robotics Platform # # Copyright (c) 2010 John Harrison, William Woodall # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. ########## """ ${TM_NEW_FILE_BASENAME}.py - <PURPOSE> Created by ${TM_FULLNAME} on ${TM_DATE}. """ __author__ = "William Woodall" __copyright__ = "Copyright (c) 2010 John Harrison, William Woodall" ### Imports ### # Standard Python Libraries import sys import os try: # try to catch any missing dependancies # <PKG> for <PURPOSE> PKGNAME = '<EASY_INSTALL NAME>' import <LIBRARY NAME> del PKGNAME except ImportError as PKG_ERROR: # We are missing something, let them know... sys.stderr.write(str(PKG_ERROR)+"\nYou might not have the "+PKGNAME+" \ module, try 'easy_install "+PKGNAME+"', else consult google.") ### Class ### ### Functions ### def main(): pass ### IfMain ### if __name__ == '__main__': main()
mit
97,687,108,796,476,430
29.453125
79
0.709595
false
3.670433
false
false
false
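The template's try/except import guard only becomes valid Python once the <EASY_INSTALL NAME> and <LIBRARY NAME> placeholders are filled in. One concrete instantiation, with 'requests' chosen purely for illustration (it is not an ORP dependency):

import sys

try:  # try to catch any missing dependencies
    PKGNAME = 'requests'        # illustrative package, not an ORP requirement
    import requests             # noqa: F401
    del PKGNAME
except ImportError as pkg_error:
    # Point the user at the missing package instead of a bare traceback.
    sys.stderr.write(str(pkg_error) + "\nYou might not have the requests "
                     "module; try 'pip install requests', else consult google.\n")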
Ebag333/Pyfa
eos/effects/subsystembonusgallentedefensivearmoredwarfare.py
1
1528
# subSystemBonusGallenteDefensiveArmoredWarfare # # Used by: # Subsystem: Proteus Defensive - Warfare Processor type = "passive" def handler(fit, src, context): fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "buffDuration", src.getModifiedItemAttr("subsystemBonusGallenteDefensive"), skill="Gallente Defensive Systems") fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "warfareBuff3Value", src.getModifiedItemAttr("subsystemBonusGallenteDefensive"), skill="Gallente Defensive Systems") fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "warfareBuff4Value", src.getModifiedItemAttr("subsystemBonusGallenteDefensive"), skill="Gallente Defensive Systems") fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "warfareBuff2Value", src.getModifiedItemAttr("subsystemBonusGallenteDefensive"), skill="Gallente Defensive Systems") fit.modules.filteredItemBoost(lambda mod: mod.item.requiresSkill("Armored Command"), "warfareBuff1Value", src.getModifiedItemAttr("subsystemBonusGallenteDefensive"), skill="Gallente Defensive Systems")
gpl-3.0
-8,301,749,523,908,657,000
65.434783
109
0.632199
false
3.958549
false
false
false
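The five filteredItemBoost calls in this handler differ only in the boosted attribute name. A loop over those names is a behavior-preserving refactor, assuming getModifiedItemAttr is a pure lookup; this is a sketch against Pyfa's API as used in the record, not a change shipped in Pyfa:

# Loop form of subSystemBonusGallenteDefensiveArmoredWarfare (sketch only).
def handler(fit, src, context):
    bonus = src.getModifiedItemAttr("subsystemBonusGallenteDefensive")
    for attr in ("buffDuration", "warfareBuff1Value", "warfareBuff2Value",
                 "warfareBuff3Value", "warfareBuff4Value"):
        # Same skill filter and bonus applied to each buff attribute in turn.
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.requiresSkill("Armored Command"),
            attr, bonus, skill="Gallente Defensive Systems")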
seanbell/opensurfaces
server/normals/views.py
1
9087
import json from django.shortcuts import render, get_object_or_404 from django.db.models import F from django.http import HttpResponse from django.views.decorators.http import require_POST from django.core.urlresolvers import reverse from django.contrib.admin.views.decorators import staff_member_required from django.views.decorators.csrf import ensure_csrf_cookie from endless_pagination.decorators import page_template from common.utils import dict_union, prepare_votes_bar, \ json_success_response, json_error_response from normals.models import ShapeRectifiedNormalLabel def rectified_normal_detail(request, pk): entry = get_object_or_404(ShapeRectifiedNormalLabel, pk=pk) votes = [ prepare_votes_bar(entry, 'qualities', 'correct', 'correct', 'Quality'), ] data = { 'nav': 'browse/rectified-normal', 'entry': entry, 'votes': votes, } return render(request, 'rectified_normal_detail.html', data) @page_template('grid3_page.html') def rectified_normal_all(request, template='endless_list.html', extra_context=None): entries = ShapeRectifiedNormalLabel.objects.all().order_by('-id') if 'publishable' in request.GET: entries = entries.filter(shape__photo__license__publishable=True) context = dict_union({ 'nav': 'browse/rectified-normal', 'subnav': 'all', 'entries': entries, 'base_template': 'rectified_normal_base.html', 'thumb_template': 'rectified_normal_thumb.html', 'header': 'All submissions', 'header_small': 'sorted by submission time', #'enable_voting': False, }, extra_context) return render(request, template, context) @page_template('grid3_page.html') def rectified_normal_good(request, template='endless_list.html', extra_context=None): entries = ShapeRectifiedNormalLabel.objects \ .filter(shape__planar=True, correct=True, correct_score__isnull=False) \ .order_by('-correct_score') #.filter(admin_score__gt=0, shape__synthetic=False) \ #.order_by('-admin_score', '-shape__pixel_area') context = dict_union({ 'nav': 'browse/rectified-normal', 'subnav': 'good', 'entries': entries, 'base_template': 'rectified_normal_base.html', 'thumb_template': 'rectified_normal_thumb.html', 'header': 'High quality submissions' #'header_sub': 'These submissions were voted as high quality.' 
#'enable_voting': False, }, extra_context) return render(request, template, context) @page_template('grid3_page.html') def rectified_normal_bad(request, template='endless_list.html', extra_context=None): entries = ShapeRectifiedNormalLabel.objects \ .filter(shape__planar=True, correct=False, correct_score__isnull=False) \ .order_by('correct_score') #.filter(admin_score__lt=0, shape__synthetic=False) \ #.order_by('admin_score', 'shape__num_vertices') if 'publishable' in request.GET: entries = entries.filter(shape__photo__license__publishable=True) context = dict_union({ 'nav': 'browse/rectified-normal', 'subnav': 'bad', 'entries': entries, 'base_template': 'rectified_normal_base.html', 'thumb_template': 'rectified_normal_thumb.html', 'header': 'Low quality submissions', 'header_small': 'sorted by quality', #'enable_voting': False, }, extra_context) return render(request, template, context) @page_template('grid3_page.html') def rectified_normal_auto(request, template='endless_list.html', extra_context=None): entries = ShapeRectifiedNormalLabel.objects \ .filter(shape__planar=True, shape__correct=True, automatic=True) \ .order_by('-shape__num_vertices') if 'publishable' in request.GET: entries = entries.filter(shape__photo__license__publishable=True) context = dict_union({ 'nav': 'browse/rectified-normal', 'subnav': 'auto', 'entries': entries, 'base_template': 'rectified_normal_base.html', 'thumb_template': 'rectified_normal_thumb.html', 'header': 'Automatically rectified shapes', 'header_small': 'using vanishing points', }, extra_context) return render(request, template, context) @page_template('grid3_page.html') def rectified_normal_best(request, template='endless_list.html', extra_context=None): entries = ShapeRectifiedNormalLabel.objects \ .filter(shape__photo__inappropriate=False, shape__correct=True, shape__planar=True, shape__rectified_normal_id=F('id')) \ if 'by-id' in request.GET: header_small = 'sorted by id' entries = entries.order_by('-id') else: header_small = 'sorted by complexity' entries = entries.order_by('-shape__num_vertices') if 'publishable' in request.GET: entries = entries.filter(shape__photo__license__publishable=True) context = dict_union({ 'nav': 'browse/rectified-normal', 'subnav': 'best', 'entries': entries, 'base_template': 'rectified_normal_base.html', 'thumb_template': 'rectified_normal_thumb.html', 'header': 'High quality submissions', 'header_small': header_small, }, extra_context) return render(request, template, context) @staff_member_required @page_template('grid3_page.html') def rectified_normal_curate( request, template='endless_list_curate.html', extra_context=None): entries = ShapeRectifiedNormalLabel.objects \ .filter(shape__planar=True, correct=True) \ .order_by('-shape__num_vertices') if 'publishable' in request.GET: entries = entries.filter(shape__photo__license__publishable=True) context = dict_union({ 'nav': 'browse/rectified-normal', 'subnav': 'curate', 'entries': entries, 'base_template': 'rectified_normal_base.html', 'thumb_template': 'rectified_normal_thumb.html', 'header': 'Curate rectified textures', 'curate_post_url': reverse('rectified-normal-curate-post'), 'curate': True }, extra_context) return render(request, template, context) @require_POST @staff_member_required def rectified_normal_curate_post(request): if request.POST['model'] != "shapes/shaperectifiednormallabel": return json_error_response("invalid model") normal = ShapeRectifiedNormalLabel.objects.get(id=request.POST['id']) normal.quality_method = 'A' normal.correct = not 
normal.correct normal.save() normal.shape.update_entropy(save=True) return HttpResponse( json.dumps({'selected': not normal.correct}), mimetype='application/json') @ensure_csrf_cookie @page_template('grid3_page.html') def rectified_normal_voted_none(request, template='endless_list.html', extra_context=None): entries = ShapeRectifiedNormalLabel.objects \ .filter(admin_score=0, time_ms__gt=500, shape__dominant_delta__isnull=False) \ .order_by('-shape__synthetic', '?') context = dict_union({ 'nav': 'browse/rectified-normal', 'subnav': 'vote', 'entries': entries, 'base_template': 'rectified_normal_base.html', 'thumb_template': 'rectified_normal_thumb_vote.html', 'enable_voting': True, }, extra_context) return render(request, template, context) @ensure_csrf_cookie @page_template('grid3_page.html') def rectified_normal_voted_yes(request, template='endless_list.html', extra_context=None): entries = ShapeRectifiedNormalLabel.objects \ .filter(admin_score__gt=0) \ .order_by('-admin_score', '-shape__pixel_area') context = dict_union({ 'nav': 'browse/rectified-normal', 'subnav': 'voted-yes', 'entries': entries, 'base_template': 'rectified_normal_base.html', 'thumb_template': 'rectified_normal_thumb_vote.html', 'enable_voting': True, }, extra_context) return render(request, template, context) @ensure_csrf_cookie @page_template('grid3_page.html') def rectified_normal_voted_no(request, template='endless_list.html', extra_context=None): entries = ShapeRectifiedNormalLabel.objects \ .filter(admin_score__lt=0) \ .order_by('admin_score', '-shape__pixel_area') context = dict_union({ 'nav': 'browse/rectified-normal', 'subnav': 'voted-no', 'entries': entries, 'base_template': 'rectified_normal_base.html', 'thumb_template': 'rectified_normal_thumb_vote.html', 'enable_voting': True, }, extra_context) return render(request, template, context) @require_POST def rectified_normal_vote(request): id = request.POST['id'] score = request.POST['score'] ShapeRectifiedNormalLabel.objects.filter(id=id).update(admin_score=score) return json_success_response()
mit
-7,393,847,818,757,390,000
33.683206
86
0.646748
false
3.684915
false
false
false
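Almost every view above builds its template context with dict_union(base_dict, extra_context), where extra_context may be None when the endless-pagination decorator supplies nothing. The helper lives in common.utils and is not shown in this record; a plausible stand-in (an assumption, not the project's actual code) is:

# Plausible dict_union: merge dicts left to right, tolerating None arguments.
def dict_union(*dicts):
    out = {}
    for d in dicts:
        if d:
            out.update(d)
    return out

base = {'nav': 'browse/rectified-normal', 'subnav': 'all'}
print(dict_union(base, None))                  # extra_context=None is harmless
print(dict_union(base, {'subnav': 'good'}))    # later dicts win on key clashes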
a25kk/bfa
src/bfa.sitecontent/bfa/sitecontent/widgets/content/video.py
1
4222
# -*- coding: utf-8 -*- """Module providing event filter widget""" import uuid as uuid_tool from Acquisition import aq_inner from Products.Five import BrowserView from plone import api from plone.i18n.normalizer import IIDNormalizer from wildcard.media.behavior import IVideo from zope.component import queryUtility class WidgetContentVideoCard(BrowserView): """ Basic context content card """ def __call__(self, widget_data=None, widget_mode="view", **kw): self.params = {"widget_mode": widget_mode, "widget_data": widget_data} return self.render() def render(self): return self.index() @staticmethod def can_edit(): return not api.user.is_anonymous() @property def record(self): return self.params['widget_data'] def has_content(self): if self.widget_content(): return True return False def widget_uid(self): try: widget_id = self.record['id'] except (KeyError, TypeError): widget_id = str(uuid_tool.uuid4()) return widget_id @staticmethod def normalizer(): return queryUtility(IIDNormalizer) def card_subject_classes(self, item): context = item subjects = context.Subject() class_list = [ "c-card-tag--{0}".format(self.normalizer().normalize(keyword)) for keyword in subjects ] return class_list def card_css_classes(self, item): class_list = self.card_subject_classes(item) if class_list: return " ".join(class_list) else: return "c-card-tag--all" @staticmethod def has_image(context): try: lead_img = context.image except AttributeError: lead_img = None if lead_img is not None: return True return False @staticmethod def has_animated_cover(context): try: animated_lead_img = context.image_animated except AttributeError: animated_lead_img = None if animated_lead_img is not None: return True return False @staticmethod def get_standalone_image_caption(context): try: caption = context.image_caption except AttributeError: caption = None return caption def get_embed_url(self): """ Try to guess video id from a various case of possible youtube urls and returns the correct url for embed. For example: - 'https://youtu.be/VIDEO_ID' - 'https://www.youtube.com/watch?v=VIDEO_ID' - 'https://www.youtube.com/embed/2Lb2BiUC898' """ video_behavior = IVideo(self.context) if not video_behavior: return "" video_id = video_behavior.get_youtube_id_from_url() if not video_id: return "" return "https://www.youtube.com/embed/" + video_id def get_edit_url(self): """ If the user can edit the video, returns the edit url. """ if not api.user.has_permission( 'Modify portal content', obj=self.context): return "" from plone.protect.utils import addTokenToUrl url = "%s/@@edit" % self.context.absolute_url() return addTokenToUrl(url) def widget_content(self): context = aq_inner(self.context) widget_data = self.params["widget_data"] if widget_data and "uuid" in widget_data: context = api.content.get(UID=widget_data["uuid"]) details = { "title": context.Title(), "description": context.Description(), "url": context.absolute_url(), "timestamp": context.Date, "uuid": context.UID(), "has_image": self.has_image(context), "has_animated_cover": self.has_animated_cover(context), "image_caption": self.get_standalone_image_caption(context), "css_classes": "c-card--{0} {1}".format( context.UID(), self.card_css_classes(context) ), "content_item": context, } return details
mit
7,906,045,721,442,587,000
29.594203
78
0.578399
false
4.119024
false
false
false
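get_embed_url above delegates the ID extraction to wildcard.media's IVideo behavior. For the three URL shapes listed in its docstring, a self-contained extractor could look like this; the regex is an illustration, not wildcard.media's implementation:

import re

# Handles youtu.be/ID, youtube.com/watch?v=ID, and youtube.com/embed/ID.
_YT_RE = re.compile(
    r'(?:youtu\.be/|youtube\.com/(?:watch\?v=|embed/))(?P<id>[\w-]{11})')

def youtube_embed_url(url):
    m = _YT_RE.search(url)
    if not m:
        return ""
    return "https://www.youtube.com/embed/" + m.group('id')

for u in ("https://youtu.be/2Lb2BiUC898",
          "https://www.youtube.com/watch?v=2Lb2BiUC898",
          "https://www.youtube.com/embed/2Lb2BiUC898"):
    print(youtube_embed_url(u))   # all three normalize to the embed form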
lgarren/spack
var/spack/repos/builtin/packages/r-affycomp/package.py
1
1773
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RAffycomp(RPackage): """The package contains functions that can be used to compare expression measures for Affymetrix Oligonucleotide Arrays.""" homepage = "https://www.bioconductor.org/packages/affycomp/" url = "https://git.bioconductor.org/packages/affycomp" version('1.52.0', git='https://git.bioconductor.org/packages/affycomp', commit='1b97a1cb21ec93bf1e5c88d5d55b988059612790') depends_on('[email protected]:3.4.9', when='@1.52.0') depends_on('r-biobase', type=('build', 'run'))
lgpl-2.1
-6,339,694,925,005,970,000
45.657895
126
0.681331
false
3.788462
false
false
false
CoderDuan/mantaflow
scenes/simpleplume.py
2
1414
# # Simple example scene (hello world) # Simulation of a buoyant smoke density plume (with noise texture as smoke source) # #import pdb; pdb.set_trace() from manta import * # solver params res = 64 gs = vec3(res, int(1.5*res), res) s = FluidSolver(name='main', gridSize = gs) # prepare grids flags = s.create(FlagGrid) vel = s.create(MACGrid) density = s.create(RealGrid) pressure = s.create(RealGrid) # noise field, tweak a bit for smoke source noise = s.create(NoiseField, loadFromFile=True) noise.posScale = vec3(45) noise.clamp = True noise.clampNeg = 0 noise.clampPos = 1 noise.valOffset = 0.75 noise.timeAnim = 0.2 source = s.create(Cylinder, center=gs*vec3(0.5,0.1,0.5), radius=res*0.14, z=gs*vec3(0, 0.02, 0)) flags.initDomain() flags.fillGrid() if (GUI): gui = Gui() gui.show() #main loop for t in range(250): mantaMsg('\nFrame %i' % (s.frame)) if t<100: densityInflow(flags=flags, density=density, noise=noise, shape=source, scale=1, sigma=0.5) # optionally, enforce inflow velocity #source.applyToGrid(grid=vel, value=vec3(0.1,0,0)) advectSemiLagrange(flags=flags, vel=vel, grid=density, order=2) advectSemiLagrange(flags=flags, vel=vel, grid=vel , order=2, strength=1.0) setWallBcs(flags=flags, vel=vel) addBuoyancy(density=density, vel=vel, gravity=vec3(0,-6e-4,0), flags=flags) solvePressure( flags=flags, vel=vel, pressure=pressure ) s.step()
gpl-3.0
-5,257,533,783,658,418,000
24.25
96
0.701556
false
2.529517
false
false
false
BaseBot/Triangula
src/python/setup.py
1
1035
__author__ = 'tom' from setuptools import setup # Makes use of the sphinx and sphinx-pypi-upload packages. To build for local development # use 'python setup.py develop'. To upload a version to pypi use 'python setup.py clean sdist upload'. # To build docs use 'python setup.py build_sphinx' and to upload docs to pythonhosted.org use # 'python setup.py upload_sphinx'. Both uploads require 'python setup.py register' to be run, and will # only work for Tom as they need the pypi account credentials. setup( name='triangula', version='0.3.1', description='Code for Triangula', classifiers=['Programming Language :: Python :: 2.7'], url='https://github.com/tomoinn/triangula/', author='Tom Oinn', author_email='[email protected]', license='ASL2.0', packages=['triangula'], install_requires=['evdev==0.5.0', 'euclid==0.1', 'pyserial==2.7', 'numpy==1.10.1'], include_package_data=True, test_suite='nose.collector', tests_require=['nose'], dependency_links=[], zip_safe=False)
apache-2.0
2,400,253,366,044,438,500
40.4
102
0.689855
false
3.415842
false
false
false
libAtoms/matscipy
scripts/fracture_mechanics/run_crack_thin_strip.py
1
4618
#! /usr/bin/env python # ====================================================================== # matscipy - Python materials science tools # https://github.com/libAtoms/matscipy # # Copyright (2014) James Kermode, King's College London # Lars Pastewka, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ====================================================================== """ Script to run classical molecular dynamics for a crack slab, incrementing the load in small steps until fracture starts. James Kermode <[email protected]> August 2013 """ import numpy as np import ase.io import ase.units as units from ase.constraints import FixAtoms from ase.md.verlet import VelocityVerlet from ase.md.velocitydistribution import MaxwellBoltzmannDistribution from ase.io.netcdftrajectory import NetCDFTrajectory from matscipy.fracture_mechanics.crack import (get_strain, get_energy_release_rate, ConstantStrainRate, find_tip_stress_field) import sys sys.path.insert(0, '.') import params # ********** Read input file ************ print 'Loading atoms from file "crack.xyz"' atoms = ase.io.read('crack.xyz') orig_height = atoms.info['OrigHeight'] orig_crack_pos = atoms.info['CrackPos'].copy() # ***** Setup constraints ******* top = atoms.positions[:, 1].max() bottom = atoms.positions[:, 1].min() left = atoms.positions[:, 0].min() right = atoms.positions[:, 0].max() # fix atoms in the top and bottom rows fixed_mask = ((abs(atoms.positions[:, 1] - top) < 1.0) | (abs(atoms.positions[:, 1] - bottom) < 1.0)) fix_atoms = FixAtoms(mask=fixed_mask) print('Fixed %d atoms\n' % fixed_mask.sum()) # Increase epsilon_yy applied to all atoms at constant strain rate strain_atoms = ConstantStrainRate(orig_height, params.strain_rate*params.timestep) atoms.set_constraint(fix_atoms) atoms.set_calculator(params.calc) # ********* Setup and run MD *********** # Set the initial temperature to 2*simT: it will then equilibriate to # simT, by the virial theorem MaxwellBoltzmannDistribution(atoms, 2.0*params.sim_T) # Initialise the dynamical system dynamics = VelocityVerlet(atoms, params.timestep) # Print some information every time step def printstatus(): if dynamics.nsteps == 1: print """ State Time/fs Temp/K Strain G/(J/m^2) CrackPos/A D(CrackPos)/A ---------------------------------------------------------------------------------""" log_format = ('%(label)-4s%(time)12.1f%(temperature)12.6f'+ '%(strain)12.5f%(G)12.4f%(crack_pos_x)12.2f (%(d_crack_pos_x)+5.2f)') atoms.info['label'] = 'D' # Label for the status line atoms.info['time'] = dynamics.get_time()/units.fs atoms.info['temperature'] = (atoms.get_kinetic_energy() / (1.5*units.kB*len(atoms))) atoms.info['strain'] = get_strain(atoms) atoms.info['G'] = get_energy_release_rate(atoms)/(units.J/units.m**2) crack_pos = find_tip_stress_field(atoms) atoms.info['crack_pos_x'] = crack_pos[0] atoms.info['d_crack_pos_x'] = crack_pos[0] - orig_crack_pos[0] print log_format % atoms.info 
dynamics.attach(printstatus) # Check if the crack has advanced enough and apply strain if it has not def check_if_crack_advanced(atoms): crack_pos = find_tip_stress_field(atoms) # strain if crack has not advanced more than tip_move_tol if crack_pos[0] - orig_crack_pos[0] < params.tip_move_tol: strain_atoms.apply_strain(atoms) dynamics.attach(check_if_crack_advanced, 1, atoms) # Save frames to the trajectory every `traj_interval` time steps trajectory = NetCDFTrajectory(params.traj_file, mode='w') def write_frame(atoms): trajectory.write(atoms) dynamics.attach(write_frame, params.traj_interval, atoms) # Start running! dynamics.run(params.nsteps)
gpl-2.0
7,947,069,792,221,883,000
33.721805
90
0.638372
false
3.38315
false
false
false
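Everything in the MD script above is wired through dynamics.attach(callback, interval, ...): status printing every step, the crack-advance check every step, and trajectory writes every traj_interval steps. A distilled, ASE-free sketch of that observer pattern (the names mirror ASE's VelocityVerlet but the code below is illustrative):

# Observer pattern: callbacks attached with an interval fire every N steps.
class Dynamics:
    def __init__(self):
        self.nsteps = 0
        self._observers = []              # (callback, interval, args) triples

    def attach(self, callback, interval=1, *args):
        self._observers.append((callback, interval, args))

    def run(self, steps):
        for _ in range(steps):
            self.nsteps += 1              # one MD integration step would go here
            for cb, interval, args in self._observers:
                if self.nsteps % interval == 0:
                    cb(*args)

dyn = Dynamics()
dyn.attach(lambda: print("observer fired at step", dyn.nsteps), 2)
dyn.run(4)                                # fires at steps 2 and 4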
mvaled/sentry
src/sentry/api/endpoints/group_integration_details.py
1
11884
from __future__ import absolute_import

from django.db import IntegrityError, transaction
from rest_framework.response import Response

from sentry import features
from sentry.api.bases import GroupEndpoint
from sentry.api.serializers import serialize
from sentry.api.serializers.models.integration import IntegrationIssueConfigSerializer
from sentry.integrations import IntegrationFeatures
from sentry.integrations.exceptions import IntegrationError, IntegrationFormError
from sentry.models import Activity, ExternalIssue, GroupLink, Integration
from sentry.signals import integration_issue_created, integration_issue_linked

MISSING_FEATURE_MESSAGE = "Your organization does not have access to this feature."


class GroupIntegrationDetailsEndpoint(GroupEndpoint):
    def _has_issue_feature(self, organization, user):
        has_issue_basic = features.has(
            "organizations:integrations-issue-basic", organization, actor=user
        )

        has_issue_sync = features.has(
            "organizations:integrations-issue-sync", organization, actor=user
        )

        return has_issue_sync or has_issue_basic

    def create_issue_activity(self, request, group, installation, external_issue):
        issue_information = {
            "title": external_issue.title,
            "provider": installation.model.get_provider().name,
            "location": installation.get_issue_url(external_issue.key),
            "label": installation.get_issue_display_name(external_issue) or external_issue.key,
        }
        Activity.objects.create(
            project=group.project,
            group=group,
            type=Activity.CREATE_ISSUE,
            user=request.user,
            data=issue_information,
        )

    def get(self, request, group, integration_id):
        if not self._has_issue_feature(group.organization, request.user):
            return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)

        # Keep link/create separate since create will likely require
        # many external API calls that aren't necessary if the user is
        # just linking
        action = request.GET.get("action")
        if action not in {"link", "create"}:
            return Response({"detail": "Action is required and should be either link or create"})

        organization_id = group.project.organization_id
        try:
            integration = Integration.objects.get(id=integration_id, organizations=organization_id)
        except Integration.DoesNotExist:
            return Response(status=404)

        if not (
            integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
            or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
        ):
            return Response(
                {"detail": "This feature is not supported for this integration."}, status=400
            )

        try:
            return Response(
                serialize(
                    integration,
                    request.user,
                    IntegrationIssueConfigSerializer(group, action, params=request.GET),
                    organization_id=organization_id,
                )
            )
        except IntegrationError as exc:
            return Response({"detail": exc.message}, status=400)

    # was thinking put for link an existing issue, post for create new issue?
    def put(self, request, group, integration_id):
        if not self._has_issue_feature(group.organization, request.user):
            return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)

        external_issue_id = request.data.get("externalIssue")

        if not external_issue_id:
            return Response({"externalIssue": ["Issue ID is required"]}, status=400)

        organization_id = group.project.organization_id
        try:
            integration = Integration.objects.get(id=integration_id, organizations=organization_id)
        except Integration.DoesNotExist:
            return Response(status=404)

        if not (
            integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
            or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
        ):
            return Response(
                {"detail": "This feature is not supported for this integration."}, status=400
            )

        installation = integration.get_installation(organization_id)

        try:
            data = installation.get_issue(external_issue_id, data=request.data)
        except IntegrationFormError as exc:
            return Response(exc.field_errors, status=400)
        except IntegrationError as exc:
            return Response({"non_field_errors": [exc.message]}, status=400)

        defaults = {
            "title": data.get("title"),
            "description": data.get("description"),
            "metadata": data.get("metadata"),
        }

        external_issue_key = installation.make_external_key(data)
        external_issue, created = ExternalIssue.objects.get_or_create(
            organization_id=organization_id,
            integration_id=integration.id,
            key=external_issue_key,
            defaults=defaults,
        )

        if created:
            integration_issue_linked.send_robust(
                integration=integration,
                organization=group.project.organization,
                user=request.user,
                sender=self.__class__,
            )
        else:
            external_issue.update(**defaults)

        installation.store_issue_last_defaults(group.project_id, request.data)
        try:
            installation.after_link_issue(external_issue, data=request.data)
        except IntegrationFormError as exc:
            return Response(exc.field_errors, status=400)
        except IntegrationError as exc:
            return Response({"non_field_errors": [exc.message]}, status=400)

        try:
            with transaction.atomic():
                GroupLink.objects.create(
                    group_id=group.id,
                    project_id=group.project_id,
                    linked_type=GroupLink.LinkedType.issue,
                    linked_id=external_issue.id,
                    relationship=GroupLink.Relationship.references,
                )
        except IntegrityError:
            return Response({"non_field_errors": ["That issue is already linked"]}, status=400)

        self.create_issue_activity(request, group, installation, external_issue)

        # TODO(jess): would be helpful to return serialized external issue
        # once we have description, title, etc
        url = data.get("url") or installation.get_issue_url(external_issue.key)
        context = {
            "id": external_issue.id,
            "key": external_issue.key,
            "url": url,
            "integrationId": external_issue.integration_id,
            "displayName": installation.get_issue_display_name(external_issue),
        }
        return Response(context, status=201)

    def post(self, request, group, integration_id):
        if not self._has_issue_feature(group.organization, request.user):
            return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)

        organization_id = group.project.organization_id
        try:
            integration = Integration.objects.get(id=integration_id, organizations=organization_id)
        except Integration.DoesNotExist:
            return Response(status=404)

        if not (
            integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
            or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
        ):
            return Response(
                {"detail": "This feature is not supported for this integration."}, status=400
            )

        installation = integration.get_installation(organization_id)

        try:
            data = installation.create_issue(request.data)
        except IntegrationFormError as exc:
            return Response(exc.field_errors, status=400)
        except IntegrationError as exc:
            return Response({"non_field_errors": [exc.message]}, status=400)

        external_issue_key = installation.make_external_key(data)
        external_issue, created = ExternalIssue.objects.get_or_create(
            organization_id=organization_id,
            integration_id=integration.id,
            key=external_issue_key,
            defaults={
                "title": data.get("title"),
                "description": data.get("description"),
                "metadata": data.get("metadata"),
            },
        )

        try:
            with transaction.atomic():
                GroupLink.objects.create(
                    group_id=group.id,
                    project_id=group.project_id,
                    linked_type=GroupLink.LinkedType.issue,
                    linked_id=external_issue.id,
                    relationship=GroupLink.Relationship.references,
                )
        except IntegrityError:
            return Response({"detail": "That issue is already linked"}, status=400)

        if created:
            integration_issue_created.send_robust(
                integration=integration,
                organization=group.project.organization,
                user=request.user,
                sender=self.__class__,
            )

        installation.store_issue_last_defaults(group.project_id, request.data)

        self.create_issue_activity(request, group, installation, external_issue)

        # TODO(jess): return serialized issue
        url = data.get("url") or installation.get_issue_url(external_issue.key)
        context = {
            "id": external_issue.id,
            "key": external_issue.key,
            "url": url,
            "integrationId": external_issue.integration_id,
            "displayName": installation.get_issue_display_name(external_issue),
        }
        return Response(context, status=201)

    def delete(self, request, group, integration_id):
        if not self._has_issue_feature(group.organization, request.user):
            return Response({"detail": MISSING_FEATURE_MESSAGE}, status=400)

        # note here externalIssue refers to `ExternalIssue.id` whereas above
        # it refers to the id from the provider
        external_issue_id = request.GET.get("externalIssue")

        if not external_issue_id:
            return Response({"detail": "External ID required"}, status=400)

        organization_id = group.project.organization_id
        try:
            integration = Integration.objects.get(id=integration_id, organizations=organization_id)
        except Integration.DoesNotExist:
            return Response(status=404)

        if not (
            integration.has_feature(IntegrationFeatures.ISSUE_BASIC)
            or integration.has_feature(IntegrationFeatures.ISSUE_SYNC)
        ):
            return Response(
                {"detail": "This feature is not supported for this integration."}, status=400
            )

        try:
            external_issue = ExternalIssue.objects.get(
                organization_id=organization_id, integration_id=integration.id, id=external_issue_id
            )
        except ExternalIssue.DoesNotExist:
            return Response(status=404)

        with transaction.atomic():
            GroupLink.objects.filter(
                group_id=group.id,
                project_id=group.project_id,
                linked_type=GroupLink.LinkedType.issue,
                linked_id=external_issue_id,
                relationship=GroupLink.Relationship.references,
            ).delete()

            # check if other groups reference this external issue
            # and delete if not
            if not GroupLink.objects.filter(
                linked_type=GroupLink.LinkedType.issue, linked_id=external_issue_id
            ).exists():
                external_issue.delete()

        return Response(status=204)
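# A sketch of how the three verbs above map onto client calls, following the
# inline comment in get(): PUT links an existing issue, POST creates one,
# DELETE unlinks. The URL shape is assumed for illustration, not taken from
# this file:
#
#   PUT    /api/0/issues/{issue_id}/integrations/{integration_id}/
#          {"externalIssue": "PROJ-123"}
#   POST   /api/0/issues/{issue_id}/integrations/{integration_id}/
#          {"title": "...", "description": "..."}
#   DELETE /api/0/issues/{issue_id}/integrations/{integration_id}/?externalIssue=42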
bsd-3-clause
-9,031,003,722,667,048,000
39.838488
100
0.61688
false
4.654916
false
false
false
madmatah/lapurge
lapurge/types.py
1
3448
# Copyright (c) 2013 Matthieu Huguet
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

from collections import OrderedDict
from datetime import datetime
import os
import sys


class Backup:
    """ A Backup represents a file in the backup directory """

    def __init__(self, mtime, filepath):
        self.mtime = mtime
        self.filepath = filepath

    def remove(self, simulate=True):
        if (simulate):
            print ("REMOVE " + str(self))
            return True
        else:
            try:
                os.remove(self.filepath)
                return True
            except OSError as info:
                sys.stderr.write("ERROR : %s\n" % info)
                return False

    def __key(self):
        return (self.mtime, self.filepath)

    def __eq__(x, y):
        return x.__key() == y.__key()

    def __hash__(self):
        return hash(self.__key())

    def __str__(self):
        return self.filepath + " (" + str(self.mtime.date().isoformat()) + ")"

    @classmethod
    def from_path(cls, filepath):
        stats = os.lstat(filepath)
        mtime = datetime.utcfromtimestamp(stats.st_mtime)
        return cls(mtime, filepath)


class BackupCollection:
    """ Collection of Backup elements grouped by date """

    def __init__(self, backups={}):
        self.backups = dict(backups)

    def add(self, backup):
        """ add a backup to the collection """
        date = backup.mtime.date()
        if date not in self.backups:
            s = set()
            s.add(backup)
            self.backups[date] = s
        else:
            self.backups[date].add(backup)

    def days(self, recent_first=True):
        """ returns the list of days having backups, ordered by
        modification date (most recent backups first by default)
        """
        return sorted(self.backups.keys(), reverse=recent_first)

    def except_days(self, days):
        """ returns a copy of the BackupCollection without the specified days """
        filtered_backups = {day: self.backups[day]
                            for day in self.days() if day not in days}
        return BackupCollection(filtered_backups)

    def remove_all(self, simulate=True):
        """ remove every backups of this collection """
        errors = False
        for days in self.days(recent_first=False):
            for backup in self.backups[days]:
                if not backup.remove(simulate):
                    errors = True
        return not errors
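# A minimal usage sketch; the backup directory and 7-day retention policy
# below are hypothetical, not something this module defines:
if __name__ == "__main__":
    backup_dir = "/var/backups"          # hypothetical location
    collection = BackupCollection()
    for name in os.listdir(backup_dir):
        collection.add(Backup.from_path(os.path.join(backup_dir, name)))
    # Keep the 7 most recent days; dry-run delete everything else.
    recent = collection.days()[:7]
    collection.except_days(recent).remove_all(simulate=True)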
mit
2,537,618,906,637,552,000
33.48
93
0.640371
false
4.386768
false
false
false
NMGRL/pychron
pychron/ml/tasks/actions.py
1
1114
# ===============================================================================
# Copyright 2019 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

# ============= enthought library imports =======================
from traits.api import List, Int, HasTraits, Str, Bool
from traitsui.api import View, UItem, Item, HGroup, VGroup

# ============= standard library imports ========================
# ============= local library imports  ==========================

# ============= EOF =============================================
apache-2.0
6,036,008,776,714,166,000
45.416667
81
0.531418
false
5.063636
false
false
false
rlutz/xorn
src/backend/gnet_bae.py
1
1626
# gaf.netlist - gEDA Netlist Extraction and Generation
# Copyright (C) 1998-2010 Ales Hvezda
# Copyright (C) 1998-2010 gEDA Contributors (see ChangeLog for details)
# Copyright (C) 2013-2019 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

# Bartels Format
# Layout board;
# PARTS
#     part : footprint;
# CONNECT
#     /net1/ uref.pin=uref.pin=uref.pin=...uref.pin;
#     /net2/ PRIORITY(1..100) MINDIST(mm) ROUTWIDTH(mm) uref.pin(width_mm)=...;
# END.

def run(f, netlist):
    f.write('LAYOUT board;\n')
    f.write('PARTS\n')
    for package in reversed(netlist.packages):
        f.write('    %s : %s;\n' % (
            package.refdes,
            package.get_attribute('footprint', 'unknown')))
    f.write('CONNECT\n')
    for net in reversed(netlist.nets):
        f.write("    /'%s'/ %s;\n" % (
            net.name, '='.join('%s.%s' % (pin.package.refdes, pin.number)
                               for pin in reversed(net.connections))))
    f.write('END.\n')
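# A sketch of the output run() would emit for a hypothetical two-resistor
# netlist (refdes and footprint names are made up), useful as a quick sanity
# check against the Bartels syntax documented above:
#
#   LAYOUT board;
#   PARTS
#       R1 : 0805;
#       R2 : 0805;
#   CONNECT
#       /'net1'/ R1.1=R2.1;
#   END.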
gpl-2.0
-7,487,250,061,490,648,000
39.65
77
0.674662
false
3.338809
false
false
false
EvilCult/Video-Downloader
Library/toolClass.py
1
3025
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import pycurl
import StringIO
import random


class Tools:

    def __init__(self):
        pass

    def getPage(self, url, requestHeader=[]):
        resultFormate = StringIO.StringIO()

        fakeIp = self.fakeIp()
        requestHeader.append('CLIENT-IP:' + fakeIp)
        requestHeader.append('X-FORWARDED-FOR:' + fakeIp)

        try:
            curl = pycurl.Curl()
            curl.setopt(pycurl.URL, url.strip())
            curl.setopt(pycurl.ENCODING, 'gzip,deflate')
            curl.setopt(pycurl.HEADER, 1)
            curl.setopt(pycurl.TIMEOUT, 120)
            curl.setopt(pycurl.SSL_VERIFYPEER, 0)
            curl.setopt(pycurl.SSL_VERIFYHOST, 0)
            curl.setopt(pycurl.HTTPHEADER, requestHeader)
            curl.setopt(pycurl.WRITEFUNCTION, resultFormate.write)
            curl.perform()

            headerSize = curl.getinfo(pycurl.HEADER_SIZE)
            curl.close()

            header = resultFormate.getvalue()[0:headerSize].split('\r\n')
            body = resultFormate.getvalue()[headerSize:]
        except Exception, e:
            header = ''
            body = ''

        return header, body

    def fakeIp(self):
        fakeIpList = []
        for x in xrange(0, 4):
            fakeIpList.append(str(int(random.uniform(0, 255))))
        fakeIp = '.'.join(fakeIpList)
        return fakeIp

    def xor(self, x, y, base=32):
        stat = True
        if x >= 0:
            x = str(bin(int(str(x), 10)))[2:]
            for i in xrange(0, base - len(x)):
                x = '0' + x
        else:
            x = str(bin(int(str(x + 1), 10)))[3:]
            for i in xrange(0, base - len(x)):
                x = '0' + x
            t = ''
            for i in xrange(0, len(x)):
                if x[i] == '1':
                    t = t + '0'
                else:
                    t = t + '1'
            x = t
        if y >= 0:
            y = str(bin(int(str(y), 10)))[2:]
            for i in xrange(0, base - len(y)):
                y = '0' + y
        else:
            y = str(bin(int(str(y + 1), 10)))[3:]
            for i in xrange(0, base - len(y)):
                y = '0' + y
            t = ''
            for i in xrange(0, len(y)):
                if y[i] == '1':
                    t = t + '0'
                else:
                    t = t + '1'
            y = t
        t = ''
        for i in xrange(0, base):
            if x[i] == y[i]:
                t = t + '0'
            else:
                t = t + '1'
        x = t
        if x[0] == '1':
            stat = False
            t = ''
            for i in xrange(0, len(x)):
                if x[i] == '1':
                    t = t + '0'
                else:
                    t = t + '1'
            x = t
        r = int(str(x), 2)
        if stat == False:
            r = 0 - r - 1
        return r

    def rotate(self, x, y, w, base=32):
        stat = True
        if x >= 0:
            x = str(bin(int(str(x), 10)))[2:]
            for i in xrange(0, base - len(x)):
                x = '0' + x
        else:
            x = str(bin(int(str(x + 1), 10)))[3:]
            for i in xrange(0, base - len(x)):
                x = '0' + x
            t = ''
            for i in xrange(0, len(x)):
                if x[i] == '1':
                    t = t + '0'
                else:
                    t = t + '1'
            x = t
        if y >= base:
            y = y % base
        for i in xrange(0, y):
            if w != 'r+':
                x = x[0] + x + '0'
            else:
                x = '0' + x + '0'
        if w == 'r' or w == 'r+':
            x = x[0:base]
        else:
            x = x[(len(x) - base):]
        if x[0] == '1':
            stat = False
            t = ''
            for i in xrange(0, len(x)):
                if x[i] == '1':
                    t = t + '0'
                else:
                    t = t + '1'
            x = t
        r = int(str(x), 2)
        if stat == False:
            r = 0 - r - 1
        return r
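# A quick equivalence sketch (assuming the 32-bit default): Tools.xor emulates
# Python's native ^ on 32-bit two's-complement integers, so it can be checked
# against ctypes.c_int32. The operands below are arbitrary examples.
if __name__ == '__main__':
    import ctypes
    t = Tools()
    assert t.xor(-5, 3) == ctypes.c_int32(-5 ^ 3).value
    print t.xor(-5, 3)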
gpl-2.0
-4,087,712,925,453,522,000
19.585034
66
0.495207
false
2.33952
false
false
false
bpetering/python-pattern-recognition
pattern_recognition.py
1
2300
def constant(diffs):
    val = diffs.pop()
    for d in diffs:
        if d != val:
            return False
    return val


def pat1(seq):
    # consider two elements at a time
    diffs = []
    for i in xrange(1, len(seq)):
        diffs.append(seq[i] - seq[i-1])    # implicit directionality - factor out
    return constant(diffs)

# representation of the pattern for pat1 was easy. how can we represent
# more complex patterns?


class Pattern(object):
    (PAT_INT_ADD,
     PAT_INT_MULT,
     PAT_INT_POW) = range(3)    # TODO how does panda3d get constants?

    def __init__(self, pat_type, pat_vals, prev_data, over=2, *args, **kwargs):
        self.pat_type = pat_type
        self.over = over
        self.prev_data = prev_data
        self.pat_vals = pat_vals

    def next(self):
        if self.pat_type == Pattern.PAT_INT_ADD:
            tmp = self.prev_data[-1] + self.pat_vals[0]
            # TODO how much prev_data to keep?
            self.prev_data.append(tmp)
            return tmp


class PatternSeq(object):
    def __init__(self, *args, **kwargs):
        self.pattern = None

    def have_pattern(self):
        return self.pattern is not None

    def infer(self, seq):
        v = pat1(seq)
        if v is not False:
            self.pattern = Pattern(pat_type=Pattern.PAT_INT_ADD,
                                   pat_vals=[v], prev_data=seq)    # TODO generalize
        else:
            raise Exception("NYI")

    def extend(self, n):
        if self.have_pattern():
            x = []
            for i in xrange(n):
                x.append(self.pattern.next())
            return x
        else:
            raise Exception("ALSDKJLASKJD")

# def pat2(seq):
#     # consider three elements at a time
#     diffs = []
#     for i in xrange(1, len(seq)):
#         diffs.append(seq[i] - seq[i-1])    # implicit directionality - factor out
#     val = constant(diffs)
#     if val is False:
#         print 'no pattern'
#     else:
#         print val

# TODO look at sympy interface, requests interface
# TODO detect pattern with certain number of anomalous values:
#     e.g. 2,4,6,8,11

ps = PatternSeq()
ps.infer([2,4,6,8,10])
print "have pattern:", ps.have_pattern()
print "next 10 vals:", ps.extend(10)
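# The PAT_INT_MULT constant is declared above but never handled. A minimal
# sketch of how Pattern.next() could cover it, mirroring the additive branch
# (a hypothetical extension in the spirit of the "TODO generalize" note):
#
#   def next(self):
#       if self.pat_type == Pattern.PAT_INT_ADD:
#           tmp = self.prev_data[-1] + self.pat_vals[0]
#       elif self.pat_type == Pattern.PAT_INT_MULT:
#           tmp = self.prev_data[-1] * self.pat_vals[0]
#       self.prev_data.append(tmp)
#       return tmp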
mit
4,782,913,297,461,526,000
28.263158
118
0.553478
false
3.437967
false
false
false
okuraoy/mywork
mtlearn/datasets.py
1
2037
#!/usr/bin/python
# -*- coding: utf-8 -*-

import numpy as np
import pandas as pd

from sklearn.datasets.base import Bunch
from os.path import join

PATH = "d:\\data"


# class Bunch(dict):
#     """Container object for datasets
#     Dictionary-like object that exposes its keys as attributes.
#
#     See: sklearn.datasets.base.py Bunch
#     """
#
#     def __init__(self, **kwargs):
#         super(Bunch, self).__init__(kwargs)
#
#     def __setattr__(self, key, value):
#         self[key] = value
#
#     def __dir__(self):
#         return self.keys()
#
#     def __getattr__(self, key):
#         try:
#             return self[key]
#         except KeyError:
#             raise AttributeError(key)
#
#     def __setstate__(self, state):
#         # Bunch pickles generated with scikit-learn 0.16.* have an non
#         # empty __dict__. This causes a surprising behaviour when
#         # loading these pickles scikit-learn 0.17: reading bunch.key
#         # uses __dict__ but assigning to bunch.key use __setattr__ and
#         # only changes bunch['key']. More details can be found at:
#         # https://github.com/scikit-learn/scikit-learn/issues/6196.
#         # Overriding __setstate__ to be a noop has the effect of
#         # ignoring the pickled __dict__
#         pass


def parse_date(x):
    return pd.datetime.strptime(x, '%Y-%m-%d')


def load_pcs_data():
    # column: date,pcs,f1,f2,...
    # sep='\001',
    df = pd.read_csv(join(PATH, 'spu_pcs_20170721.csv'), sep='\001',
                     parse_dates=['date'], date_parser=parse_date)
    df = df.sort_values(by='date')
    columns = np.array(df.columns.values)
    feature_name = columns[2:]
    tmp_data = np.array(df)
    inx_data = tmp_data[:, 0]
    target = tmp_data[:, 1]
    data = tmp_data[:, 2:]

    # print shape
    print data.shape
    print feature_name
    return Bunch(data=data, target=target, feature_names=feature_name, inx=inx_data)


if __name__ == '__main__':
    load_pcs_data()
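# The loader above expects a '\001'-separated file whose first two columns are
# date and pcs, followed by the feature columns. A hypothetical layout
# (rows invented for illustration):
#
#   date\001pcs\001f1\001f2
#   2017-07-01\001120\0010.5\0011.2
#   2017-07-02\001133\0010.6\0011.1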
apache-2.0
4,101,028,020,952,745,000
27.1
114
0.569956
false
3.124233
false
false
false
lum4chi/mygensim
models/qlmodel.py
1
1822
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Francesco Lumachi <[email protected]>

from __future__ import division
from gensim import models, utils
import math


class QLModel(models.TfidfModel):
    """ Use of models.TfidfModel as base to build Query Likelihood Model (12.9)
        appeared in "An introduction to Information Retrieval" by Manning,
        Raghavan and Schütze
    """

    def __init__(self, *args, **kwargs):
        super(QLModel, self).__init__(*args, normalize=False, **kwargs)

    def __str__(self):
        return "QueryLikelihoodModel(num_docs=%s, num_nnz=%s)" % (self.num_docs, self.num_nnz)

    def __getitem__(self, bog, eps=1e-12):
        """ Overwrite weight calculus with estimation of a Model of d, based on
            its own "gram" (we can see bag-of-word as bag-of-gram based upon
            what tokenize policy to adopt):
                P(q|d) ≈ prod( P(g|d) for g in q )  # product of only the gram present in query
                P(g|d) ≈ tf(g,d) / len(d)           # compute prob of every gram
        """
        # if the input vector is in fact a corpus, return a transformed corpus as a result
        is_corpus, bog = utils.is_corpus(bog)
        if is_corpus:
            return self._apply(bog)

        # --- only vector component calculation has changed from original method ---
        # unknown (new) terms will be given zero weight
        # 0 < P(g|d) <= 1, then -1 * log() to avoid negative
        vector = [(gramid, -math.log(tf / len(bog)))
                  for gramid, tf in bog
                  if self.idfs.get(gramid, 0.0) != 0.0]

        # --- no need to normalize ---

        # make sure there are no explicit zeroes in the vector (must be sparse)
        vector = [(termid, weight) for termid, weight in vector if abs(weight) > eps]
        return vector
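# A minimal usage sketch with a toy gensim corpus; the documents below are
# made-up examples, not part of the original module:
if __name__ == '__main__':
    from gensim import corpora
    texts = [["human", "interface", "computer"],
             ["survey", "user", "computer", "system"]]
    dictionary = corpora.Dictionary(texts)
    corpus = [dictionary.doc2bow(t) for t in texts]
    ql = QLModel(corpus)
    print(ql[corpus[0]])  # per-gram -log P(g|d) weights for the first document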
gpl-3.0
2,304,769,948,544,169,700
43.341463
94
0.614199
false
3.421846
false
false
false
lightbase/LBConverter
lbconverter/config.py
1
4423
def set_config():
    import ConfigParser
    config = ConfigParser.ConfigParser()
    config.read('development.ini')

    global REST_URL
    global OUTPATH
    global DEFAULT_OPENOFFICE_PORT
    global PIDFILE_PATH
    global LOGFILE_PATH
    global SUPPORTED_FILES

    #---------------------#
    # Configuration Start #
    #---------------------#

    REST_URL = config.get('LBConverter', 'rest_url')
    OUTPATH = config.get('LBConverter', 'outpath')
    DEFAULT_OPENOFFICE_PORT = int(config.get('LBConverter', 'default_openoffice_port'))
    PIDFILE_PATH = config.get('Daemon', 'pidfile_path')
    LOGFILE_PATH = config.get('Daemon', 'logfile_path')

    SUPPORTED_FILES = [
        'doc',
        'docx',
        'odt',
        'rtf',
        'txt',
        'html',
        'pdf',
        'xml',
        #'ods',
        #'xls',
        #'xlsx',
        #'ppt',
        #'pptx',
        #'pps',
        #'ppsx',
        #'odp'
    ]

    #-------------------#
    # Configuration End #
    #-------------------#

    global FAMILY_TEXT
    global FAMILY_WEB
    global FAMILY_SPREADSHEET
    global FAMILY_PRESENTATION
    global FAMILY_DRAWING

    FAMILY_TEXT = "Text"
    FAMILY_WEB = "Web"
    FAMILY_SPREADSHEET = "Spreadsheet"
    FAMILY_PRESENTATION = "Presentation"
    FAMILY_DRAWING = "Drawing"

    # see http://wiki.services.openoffice.org/wiki/Framework/Article/Filter
    # most formats are auto-detected; only those requiring options are defined here
    global IMPORT_FILTER_MAP
    IMPORT_FILTER_MAP = {
        "txt": {
            "FilterName": "Text (encoded)",
            "FilterOptions": "utf8"
        },
        "csv": {
            "FilterName": "Text - txt - csv (StarCalc)",
            "FilterOptions": "44,34,0"
        },
        'default': {
            'Hidden': True,
            'RepairPackage': True,
            'Silent': True,
        }
    }

    global EXPORT_FILTER_MAP
    EXPORT_FILTER_MAP = {
        "pdf": {
            FAMILY_TEXT: {"FilterName": "writer_pdf_Export"},
            FAMILY_WEB: {"FilterName": "writer_web_pdf_Export"},
            FAMILY_SPREADSHEET: {"FilterName": "calc_pdf_Export"},
            FAMILY_PRESENTATION: {"FilterName": "impress_pdf_Export"},
            FAMILY_DRAWING: {"FilterName": "draw_pdf_Export"}
        },
        "html": {
            FAMILY_TEXT: {"FilterName": "HTML (StarWriter)"},
            FAMILY_SPREADSHEET: {"FilterName": "HTML (StarCalc)"},
            FAMILY_PRESENTATION: {"FilterName": "impress_html_Export"}
        },
        "odt": {
            FAMILY_TEXT: {"FilterName": "writer8"},
            FAMILY_WEB: {"FilterName": "writerweb8_writer"}
        },
        "doc": {
            FAMILY_TEXT: {"FilterName": "MS Word 97"}
        },
        "docx": {
            FAMILY_TEXT: {"FilterName": "MS Word 2007 XML"}
        },
        "rtf": {
            FAMILY_TEXT: {"FilterName": "Rich Text Format"}
        },
        "txt": {
            FAMILY_TEXT: {
                "FilterName": "Text",
                "FilterOptions": "utf8"
            }
        },
        "ods": {
            FAMILY_SPREADSHEET: {"FilterName": "calc8"}
        },
        "xls": {
            FAMILY_SPREADSHEET: {"FilterName": "MS Excel 97"}
        },
        "csv": {
            FAMILY_SPREADSHEET: {
                "FilterName": "Text - txt - csv (StarCalc)",
                "FilterOptions": "44,34,0"
            }
        },
        "odp": {
            FAMILY_PRESENTATION: {"FilterName": "impress8"}
        },
        "ppt": {
            FAMILY_PRESENTATION: {"FilterName": "MS PowerPoint 97"}
        },
        "swf": {
            FAMILY_DRAWING: {"FilterName": "draw_flash_Export"},
            FAMILY_PRESENTATION: {"FilterName": "impress_flash_Export"}
        }
    }

    global PAGE_STYLE_OVERRIDE_PROPERTIES
    PAGE_STYLE_OVERRIDE_PROPERTIES = {
        FAMILY_SPREADSHEET: {
            #--- Scale options: uncomment 1 of the 3 ---
            # a) 'Reduce / enlarge printout': 'Scaling factor'
            "PageScale": 100,
            # b) 'Fit print range(s) to width / height': 'Width in pages' and 'Height in pages'
            #"ScaleToPagesX": 1, "ScaleToPagesY": 1000,
            # c) 'Fit print range(s) on number of pages': 'Fit print range(s) on number of pages'
            #"ScaleToPages": 1,
            "PrintGrid": False
        }
    }
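# set_config() reads a development.ini from the working directory, with the
# sections and keys used by the config.get() calls above. A hypothetical
# minimal file (the values are placeholders):
#
#   [LBConverter]
#   rest_url = http://localhost:8080/api
#   outpath = /tmp/lbconverter/out
#   default_openoffice_port = 8100
#
#   [Daemon]
#   pidfile_path = /var/run/lbconverter.pid
#   logfile_path = /var/log/lbconverter.log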
gpl-2.0
-7,174,447,507,429,265,000
28.098684
97
0.496496
false
3.471743
true
false
false
jlengrand/Ivolution
ivolution/util/Notifier.py
1
2002
""" .. module:: Notifier :platform: Unix, Windows :synopsis: Implements a simple Observer/Observable pattern for communication between between Facemovie thread and Ivolution GUI .. moduleauthor:: Julien Lengrand-Lambert <[email protected]> """ class Observer(): """ Implements a simple Observer from the Observer pattern """ def __init__(self, name="Observer"): """ """ self.name = name def update(self, message): """ """ if message is not None: #print "%s received %s" %(self.name, message) pass def __str__(self): return self.name class Observable(): """ Implements a simple Observable from the Observer pattern """ def __init__(self): """ """ self.val = 1 self.obs_collection = [] def subscribe(self, observer): """ """ try: if not(observer in self.obs_collection): self.obs_collection.append(observer) #print "%s added to collection" %(str(observer)) else: #print "%s already in collection" %(str(observer)) pass except TypeError: #print "Failed to add %s" %(str(observer)) pass def unsubscribe(self, observer): """ """ try: if observer in self.obs_collection: self.obs_collection.remove(observer) #print "%s removed from collection" %(str(observer)) else: #print "%s not in collection" %(str(observer)) pass except TypeError: #print "Failed to remove %s" %(str(observer)) pass def notify(self, message): """ """ for observer in self.obs_collection: #print "sent %s to %s" %(message, str(observer)) if message[0] == observer.name: observer.update(message[1])
bsd-3-clause
-6,127,289,947,069,954,000
24.341772
130
0.51998
false
4.478747
false
false
false
zcoinofficial/zcoin
src/tor/scripts/codegen/makedesc.py
1
10850
#!/usr/bin/python
# Copyright 2014-2019, The Tor Project, Inc.
# See LICENSE for license information

# This is a kludgey python script that uses ctypes and openssl to sign
# router descriptors and extrainfo documents and put all the keys in
# the right places.
#
# There are examples at the end of the file.
#
# I've used this to make inputs for unit tests.  I wouldn't suggest
# using it for anything else.

import base64
import binascii
import ctypes
import ctypes.util
import hashlib
import optparse
import os
import re
import struct
import time
import UserDict

import slow_ed25519
import slownacl_curve25519
import ed25519_exts_ref

# Pull in the openssl stuff we need.

crypt = ctypes.CDLL(ctypes.util.find_library('crypto'))
BIO_s_mem = crypt.BIO_s_mem
BIO_s_mem.argtypes = []
BIO_s_mem.restype = ctypes.c_void_p

BIO_new = crypt.BIO_new
BIO_new.argtypes = [ctypes.c_void_p]
BIO_new.restype = ctypes.c_void_p

crypt.BIO_free.argtypes = [ctypes.c_void_p]
crypt.BIO_free.restype = ctypes.c_int

crypt.BIO_ctrl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_long, ctypes.c_void_p ]
crypt.BIO_ctrl.restype = ctypes.c_long

crypt.PEM_write_bio_RSAPublicKey.argtypes = [ ctypes.c_void_p, ctypes.c_void_p ]
crypt.PEM_write_bio_RSAPublicKey.restype = ctypes.c_int

RSA_generate_key = crypt.RSA_generate_key
RSA_generate_key.argtypes = [ctypes.c_int, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_void_p]
RSA_generate_key.restype = ctypes.c_void_p

RSA_private_encrypt = crypt.RSA_private_encrypt
RSA_private_encrypt.argtypes = [
    ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int ]
RSA_private_encrypt.restype = ctypes.c_int

i2d_RSAPublicKey = crypt.i2d_RSAPublicKey
i2d_RSAPublicKey.argtypes = [
    ctypes.c_void_p, ctypes.POINTER(ctypes.c_char_p) ]
i2d_RSAPublicKey.restype = ctypes.c_int


def rsa_sign(msg, rsa):
    buf = ctypes.create_string_buffer(1024)
    n = RSA_private_encrypt(len(msg), msg, buf, rsa, 1)
    if n <= 0:
        raise Exception()
    return buf.raw[:n]

def b64(x):
    x = base64.b64encode(x)
    res = []
    for i in xrange(0, len(x), 64):
        res.append(x[i:i+64]+"\n")
    return "".join(res)

def bio_extract(bio):
    buf = ctypes.c_char_p()
    length = crypt.BIO_ctrl(bio, 3, 0, ctypes.byref(buf))
    return ctypes.string_at(buf, length)

def make_rsa_key(e=65537):
    rsa = crypt.RSA_generate_key(1024, e, None, None)
    bio = BIO_new(BIO_s_mem())
    crypt.PEM_write_bio_RSAPublicKey(bio, rsa)
    pem = bio_extract(bio).rstrip()
    crypt.BIO_free(bio)
    buf = ctypes.create_string_buffer(1024)
    pBuf = ctypes.c_char_p(ctypes.addressof(buf))
    n = crypt.i2d_RSAPublicKey(rsa, ctypes.byref(pBuf))
    s = buf.raw[:n]
    digest = hashlib.sha1(s).digest()
    return (rsa, pem, digest)

def makeEdSigningKeyCert(sk_master, pk_master, pk_signing, date,
                         includeSigning=False, certType=1):
    assert len(pk_signing) == len(pk_master) == 32
    expiration = struct.pack("!L", date//3600)
    if includeSigning:
        extensions = "\x01\x00\x20\x04\x00%s"%(pk_master)
    else:
        extensions = "\x00"
    signed = "\x01%s%s\x01%s%s" % (
        chr(certType), expiration, pk_signing, extensions)
    signature = ed25519_exts_ref.signatureWithESK(signed, sk_master, pk_master)
    assert len(signature) == 64
    return signed+signature

def objwrap(identifier, body):
    return ("-----BEGIN {0}-----\n"
            "{1}"
            "-----END {0}-----").format(identifier, body)

MAGIC1 = "<<<<<<MAGIC>>>>>>"
MAGIC2 = "<<<<<!#!#!#XYZZY#!#!#!>>>>>"

class OnDemandKeys(object):
    def __init__(self, certDate=None):
        if certDate is None:
            certDate = time.time() + 86400
        self.certDate = certDate
        self.rsa_id = None
        self.rsa_onion_key = None
        self.ed_id_sk = None
        self.ntor_sk = None
        self.ntor_crosscert = None
        self.rsa_crosscert_ed = None
        self.rsa_crosscert_noed = None

    @property
    def RSA_IDENTITY(self):
        if self.rsa_id is None:
            self.rsa_id, self.rsa_ident_pem, self.rsa_id_digest = make_rsa_key()
        return self.rsa_ident_pem

    @property
    def RSA_ID_DIGEST(self):
        self.RSA_IDENTITY
        return self.rsa_id_digest

    @property
    def RSA_FINGERPRINT_NOSPACE(self):
        return binascii.b2a_hex(self.RSA_ID_DIGEST).upper()

    @property
    def RSA_ONION_KEY(self):
        if self.rsa_onion_key is None:
            self.rsa_onion_key, self.rsa_onion_pem, _ = make_rsa_key()
        return self.rsa_onion_pem

    @property
    def RSA_FINGERPRINT(self):
        hexdigest = self.RSA_FINGERPRINT_NOSPACE
        return " ".join(hexdigest[i:i+4] for i in range(0,len(hexdigest),4))

    @property
    def RSA_SIGNATURE(self):
        return MAGIC1

    @property
    def ED_SIGNATURE(self):
        return MAGIC2

    @property
    def NTOR_ONION_KEY(self):
        if self.ntor_sk is None:
            self.ntor_sk = slownacl_curve25519.Private()
            self.ntor_pk = self.ntor_sk.get_public()
        return base64.b64encode(self.ntor_pk.serialize())

    @property
    def ED_CERT(self):
        if self.ed_id_sk is None:
            self.ed_id_sk = ed25519_exts_ref.expandSK(os.urandom(32))
            self.ed_signing_sk = ed25519_exts_ref.expandSK(os.urandom(32))
            self.ed_id_pk = ed25519_exts_ref.publickeyFromESK(self.ed_id_sk)
            self.ed_signing_pk = ed25519_exts_ref.publickeyFromESK(self.ed_signing_sk)
            self.ed_cert = makeEdSigningKeyCert(self.ed_id_sk, self.ed_id_pk,
                                                self.ed_signing_pk, self.certDate,
                                                includeSigning=True, certType=4)
        return objwrap('ED25519 CERT', b64(self.ed_cert))

    @property
    def NTOR_CROSSCERT(self):
        if self.ntor_crosscert is None:
            self.ED_CERT
            self.NTOR_ONION_KEY

            ed_privkey = self.ntor_sk.serialize() + os.urandom(32)
            ed_pub0 = ed25519_exts_ref.publickeyFromESK(ed_privkey)
            sign = (ord(ed_pub0[31]) & 255) >> 7

            self.ntor_crosscert = makeEdSigningKeyCert(
                self.ntor_sk.serialize() + os.urandom(32),
                ed_pub0, self.ed_id_pk, self.certDate, certType=10)
            self.ntor_crosscert_sign = sign
        return objwrap('ED25519 CERT', b64(self.ntor_crosscert))

    @property
    def NTOR_CROSSCERT_SIGN(self):
        self.NTOR_CROSSCERT
        return self.ntor_crosscert_sign

    @property
    def RSA_CROSSCERT_NOED(self):
        if self.rsa_crosscert_noed is None:
            self.RSA_ONION_KEY
            signed = self.RSA_ID_DIGEST
            self.rsa_crosscert_noed = rsa_sign(signed, self.rsa_onion_key)
        return objwrap("CROSSCERT", b64(self.rsa_crosscert_noed))

    @property
    def RSA_CROSSCERT_ED(self):
        if self.rsa_crosscert_ed is None:
            self.RSA_ONION_KEY
            self.ED_CERT
            signed = self.RSA_ID_DIGEST + self.ed_id_pk
            self.rsa_crosscert_ed = rsa_sign(signed, self.rsa_onion_key)
        return objwrap("CROSSCERT", b64(self.rsa_crosscert_ed))

    def sign_desc(self, body):
        idx = body.rfind("\nrouter-sig-ed25519 ")
        if idx >= 0:
            self.ED_CERT
            signed_part = body[:idx+len("\nrouter-sig-ed25519 ")]
            signed_part = "Tor router descriptor signature v1" + signed_part
            digest = hashlib.sha256(signed_part).digest()
            ed_sig = ed25519_exts_ref.signatureWithESK(digest, self.ed_signing_sk,
                                                       self.ed_signing_pk)
            body = body.replace(MAGIC2, base64.b64encode(ed_sig).replace("=",""))

        idx = body.rindex("\nrouter-signature")
        end_of_sig = body.index("\n", idx+1)
        signed_part = body[:end_of_sig+1]
        digest = hashlib.sha1(signed_part).digest()
        assert len(digest) == 20

        rsasig = rsa_sign(digest, self.rsa_id)
        body = body.replace(MAGIC1, objwrap("SIGNATURE", b64(rsasig)))
        return body


def signdesc(body, args_out=None):
    rsa, ident_pem, id_digest = make_key()
    _, onion_pem, _ = make_key()

    need_ed = '{ED25519-CERT}' in body or '{ED25519-SIGNATURE}' in body
    if need_ed:
        sk_master = os.urandom(32)
        sk_signing = os.urandom(32)
        pk_master = slow_ed25519.pubkey(sk_master)
        pk_signing = slow_ed25519.pubkey(sk_signing)

    hexdigest = binascii.b2a_hex(id_digest).upper()
    fingerprint = " ".join(hexdigest[i:i+4] for i in range(0,len(hexdigest),4))

    MAGIC = "<<<<<<MAGIC>>>>>>"
    MORE_MAGIC = "<<<<<!#!#!#XYZZY#!#!#!>>>>>"
    args = {
        "RSA-IDENTITY" : ident_pem,
        "ONION-KEY" : onion_pem,
        "FINGERPRINT" : fingerprint,
        "FINGERPRINT-NOSPACE" : hexdigest,
        "RSA-SIGNATURE" : MAGIC }
    if need_ed:
        args['ED25519-CERT'] = makeEdSigningKeyCert(
            sk_master, pk_master, pk_signing)
        args['ED25519-SIGNATURE'] = MORE_MAGIC

    if args_out:
        args_out.update(args)
    body = body.format(**args)

    idx = body.rindex("\nrouter-signature")
    end_of_sig = body.index("\n", idx+1)

    signed_part = body[:end_of_sig+1]

    digest = hashlib.sha1(signed_part).digest()
    assert len(digest) == 20

    buf = ctypes.create_string_buffer(1024)
    n = RSA_private_encrypt(20, digest, buf, rsa, 1)
    sig = buf.raw[:n]

    sig = """-----BEGIN SIGNATURE-----
%s
-----END SIGNATURE-----""" % b64(sig).rstrip()
    body = body.replace(MAGIC, sig)

    return body.rstrip()

def print_c_string(ident, body):
    print "static const char %s[] =" % ident
    for line in body.split("\n"):
        print '  "%s\\n"' %(line)
    print "  ;"

def emit_ri(name, body):
    info = OnDemandKeys()
    body = body.format(d=info)
    body = info.sign_desc(body)
    print_c_string("EX_RI_%s"%name.upper(), body)

def emit_ei(name, body):
    info = OnDemandKeys()
    body = body.format(d=info)
    body = info.sign_desc(body)
    print_c_string("EX_EI_%s"%name.upper(), body)

    print 'const char EX_EI_{NAME}_FP[] = "{d.RSA_FINGERPRINT_NOSPACE}";'.format(
        d=info, NAME=name.upper())
    print_c_string("EX_EI_%s_KEY"%name.upper(), info.RSA_IDENTITY)

def analyze(s):
    fields = {}
    while s.startswith(":::"):
        first,s=s.split("\n", 1)
        m = re.match(r'^:::(\w+)=(.*)',first)
        if not m:
            raise ValueError(first)
        k,v = m.groups()
        fields[k] = v
    return fields, s

def process_file(s):
    fields, s = analyze(s)
    try:
        name = fields['name']
        tp = fields['type']
    except KeyError:
        raise ValueError("missing required field")

    if tp == 'ei':
        emit_ei(name, s)
    elif tp == 'ri':
        emit_ri(name, s)
    else:
        raise ValueError("unrecognized type")

if __name__ == '__main__':
    import sys

    for fn in sys.argv[1:]:
        process_file(open(fn).read())
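# process_file() expects ":::key=value" header lines before the descriptor
# template, and emit_ri()/emit_ei() fill {d.*} placeholders from OnDemandKeys.
# A hypothetical input file sketch (the descriptor body is abbreviated):
#
#   :::name=example
#   :::type=ri
#   router test 127.0.0.1 9001 0 0
#   ...template body using {d.RSA_IDENTITY}, {d.RSA_SIGNATURE}, etc...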
mit
6,792,913,120,685,563,000
29.911681
149
0.614194
false
3.030726
false
false
false
cheral/orange3
Orange/widgets/utils/plot/owplot.py
4
69148
''' ################# Plot (``owplot``) ################# .. autoclass:: OrangeWidgets.plot.OWPlot ''' from AnyQt.QtWidgets import \ QGraphicsView, QGraphicsScene, QGraphicsRectItem, QGraphicsTextItem,\ QToolTip, QApplication from AnyQt.QtGui import QPen, QBrush, QColor, QPainter, QTransform, QPolygonF from AnyQt.QtCore import \ QPointF, QRectF, QLineF, QPoint, QRect, QPropertyAnimation, Qt, QEvent, \ pyqtProperty from Orange.widgets.gui import OWComponent from Orange.widgets.settings import Setting LeftLegend = 0 RightLegend = 1 BottomLegend = 2 TopLegend = 3 ExternalLegend = 4 UNUSED_ATTRIBUTES_STR = 'unused attributes' from .owaxis import * from .owcurve import * from .owlegend import * from .owplotgui import OWPlotGUI from .owtools import * from ..colorpalette import ColorPaletteGenerator ## Color values copied from orngView.SchemaView for consistency SelectionPen = QPen(QBrush(QColor(51, 153, 255, 192)), 1, Qt.SolidLine, Qt.RoundCap) SelectionBrush = QBrush(QColor(168, 202, 236, 192)) #from OWDlgs import OWChooseImageSizeDlg #from OWColorPalette import * # color palletes, ... #from Orange.utils import deprecated_members, deprecated_attribute import orangeqt def n_min(*args): lst = args[0] if len(args) == 1 else args a = [i for i in lst if i is not None] return min(a) if a else None def n_max(*args): lst = args[0] if len(args) == 1 else args a = [i for i in lst if i is not None] return max(a) if a else None name_map = { "saveToFileDirect": "save_to_file_direct", "saveToFile" : "save_to_file", "addCurve" : "add_curve", "addMarker" : "add_marker", "updateLayout" : "update_layout", "activateZooming" : "activate_zooming", "activateSelection" : "activate_selection", "activateRectangleSelection" : "activate_rectangle_selection", "activatePolygonSelection" : "activate_polygon_selection", "activatePanning" : "activate_panning", "getSelectedPoints" : "get_selected_points", "setAxisScale" : "set_axis_scale", "setAxisLabels" : "set_axis_labels", "setAxisAutoScale" : "set_axis_autoscale", "setTickLength" : "set_axis_tick_length", "updateCurves" : "update_curves", "itemList" : "plot_items", "setShowMainTitle" : "set_show_main_title", "setMainTitle" : "set_main_title", "invTransform" : "inv_transform", "setAxisTitle" : "set_axis_title", "setShowAxisTitle" : "set_show_axis_title" } #@deprecated_members(name_map, wrap_methods=list(name_map.keys())) class OWPlot(orangeqt.Plot, OWComponent): """ The base class for all plots in Orange. It uses the Qt Graphics View Framework to draw elements on a graph. **Plot layout** .. attribute:: show_legend A boolean controlling whether the legend is displayed or not .. attribute:: show_main_title Controls whether or not the main plot title is displayed .. attribute:: main_title The plot title, usually show on top of the plot .. automethod:: set_main_title .. automethod:: set_show_main_title .. attribute:: axis_margin How much space (in pixels) should be left on each side for the axis, its label and its title. .. attribute:: title_margin How much space (in pixels) should be left at the top of the plot for the title, if the title is shown. .. seealso:: attribute :attr:`show_main_title` .. attribute:: plot_margin How much space (in pixels) should be left at each side of the plot as whitespace. **Coordinate transformation** There are several coordinate systems used by OWPlot: * `widget` coordinates. This is the coordinate system of the position returned by :meth:`.QEvent.pos()`. 
No calculations or positions is done with this coordinates, they must first be converted to scene coordinates with :meth:`mapToScene`. * `data` coordinates. The value used internally in Orange to specify the values of attributes. For example, this can be age in years, the number of legs, or any other numeric value. * `plot` coordinates. These coordinates specify where the plot items are placed on the graph, but doesn't account for zoom. They can be retrieved for a particular plot item with :meth:`.PlotItem.pos()`. * `scene` or `zoom` coordinates. Like plot coordinates, except that they take the :attr:`zoom_transform` into account. They represent the actual position of an item on the scene. These are the coordinates returned by :meth:`.PlotItem.scenePos()` and :meth:`mapToScene`. For example, they can be used to determine what is under the cursor. In most cases, you will use data coordinates for interacting with the actual data, and scene coordinates for interacting with the plot items. The other two sets are mostly used for converting. .. automethod:: map_to_graph .. automethod:: map_from_graph .. automethod:: transform .. automethod:: inv_transform .. method:: nearest_point(pos) Returns the point nearest to ``pos``, or ``None`` if no point is close enough. :param pos: The position in scene coordinates :type pos: QPointF :rtype: :obj:`.OWPoint` .. method:: point_at(pos) If there is a point with data coordinates equal to ``pos``, if is returned. Otherwise, this function returns None. :param pos: The position in data coordinates :type pos: tuple of float float :rtype: :obj:`.OWPoint` **Data curves** The preferred method for showing a series of data points is :meth:`set_main_curve_data`. It allows you to specify point positions, colors, labels, sizes and shapes. .. automethod:: set_main_curve_data .. automethod:: add_curve .. automethod:: add_custom_curve .. automethod:: add_marker .. method:: add_item(item) Adds any PlotItem ``item`` to this plot. Calling this function directly is useful for adding a :obj:`.Marker` or another object that does not have to appear in the legend. For data curves, consider using :meth:`add_custom_curve` instead. .. method:: plot_items() Returns the list of all plot items added to this graph with :meth:`add_item` or :meth:`.PlotItem.attach`. **Axes** .. automethod:: add_axis .. automethod:: add_custom_axis .. automethod:: set_axis_enabled .. automethod:: set_axis_labels .. automethod:: set_axis_scale **Settings** .. attribute:: gui An :obj:`.OWPlotGUI` object associated with this graph **Point Selection and Marking** There are four possible selection behaviors used for selecting or marking points in OWPlot. They are used in :meth:`select_points` and :meth:`mark_points` and are the same for both operations. .. data:: AddSelection The points are added to the selection, without affected the currently selected points .. data:: RemoveSelection The points are removed from the selection, without affected the currently selected points .. data:: ToggleSelection The points' selection state is toggled .. data:: ReplaceSelection The current selection is replaced with the new one .. note:: There are exactly the same functions for point selection and marking. For simplicity, they are only documented once. .. method:: select_points(area, behavior) .. 
method:: mark_points(area, behavior) Selects or marks all points inside the ``area`` :param area: The newly selected/marked area :type area: QRectF or QPolygonF :param behavior: :data:`AddSelection`, :data:`RemoveSelection`, :data:`ToggleSelection` or :data:`ReplaceSelection` :type behavior: int .. method:: unselect_all_points() .. method:: unmark_all_points() Unselects or unmarks all the points in the plot .. method:: selected_points() .. method:: marked_points() Returns a list of all selected or marked points :rtype: list of OWPoint .. method:: selected_points(xData, yData) For each of the point specified by ``xData`` and ``yData``, the point's selection state is returned. :param xData: The list of x coordinates :type xData: list of float :param yData: The list of y coordinates :type yData: list of float :rtype: list of int **Color schemes** By default, OWPlot uses the application's system palette for drawing everything except data curves and points. This way, it maintains consistency with other application with regards to the user interface. If data is plotted with no color specified, it will use a system color as well, so that a good contrast with the background in guaranteed. OWPlot uses the :meth:`.OWidget.palette` to determine its color scheme, so it can be changed using :meth:`.QWidget.setPalette`. There are also two predefined color schemes: ``OWPalette.Dark`` and ``OWPalette.Light``, which provides a dark and a light scheme respectively. .. attribute:: theme_name A string attribute with three possible values: ============== =========================== Value Meaning -------------- --------------------------- "default" The system palette is used "dark" The dark theme is used "light" The light theme is used ============== =========================== To apply the settings, first set this attribute's value, and then call :meth:`update_theme` .. automethod:: update_theme On the other hand, curves with a specified color will use colors from Orange's palette, which can be configured within Orange. Each plot contains two separate palettes: one for continuous attributes, and one for discrete ones. Both are created by :obj:`.OWColorPalette.ColorPaletteGenerator` .. attribute:: continuous_palette The palette used when point color represents a continuous attribute .. attribute:: discrete_palette The palette used when point color represents a discrete attribute """ point_settings = ["point_width", "alpha_value"] plot_settings = ["show_legend", "show_grid"] alpha_value = Setting(255) show_legend = Setting(False) show_grid = Setting(False) appearance_settings = ["antialias_plot", "animate_plot", "animate_points", "disable_animations_threshold", "auto_adjust_performance"] def settings_list(self, graph_name, settings): return [graph_name + '.' + setting for setting in settings] def __init__(self, parent = None, name = "None", show_legend = 1, axes = [xBottom, yLeft], widget = None): """ Creates a new graph If your visualization uses axes other than ``xBottom`` and ``yLeft``, specify them in the ``axes`` parameter. 
To use non-cartesian axes, set ``axes`` to an empty list and add custom axes with :meth:`add_axis` or :meth:`add_custom_axis` """ orangeqt.Plot.__init__(self, parent) OWComponent.__init__(self, widget) self.widget = widget self.parent_name = name self.title_item = None self.setRenderHints(QPainter.Antialiasing | QPainter.TextAntialiasing) self._legend = OWLegend(self, self.scene()) self._legend.setZValue(LegendZValue) self._legend_margin = QRectF(0, 0, 100, 0) self._legend_moved = False self.axes = dict() self.axis_margin = 50 self.y_axis_extra_margin = 30 self.title_margin = 40 self.graph_margin = 10 self.mainTitle = None self.showMainTitle = False self.XaxisTitle = None self.YLaxisTitle = None self.YRaxisTitle = None # Method aliases, because there are some methods with different names but same functions self.setCanvasBackground = self.setCanvasColor self.map_from_widget = self.mapToScene # OWScatterPlot needs these: self.point_width = 5 self.show_filled_symbols = True self.show_grid = True self.curveSymbols = list(range(13)) self.tips = TooltipManager(self) self.setMouseTracking(True) self.grabGesture(Qt.PinchGesture) self.grabGesture(Qt.PanGesture) self.state = NOTHING self._pressed_mouse_button = Qt.NoButton self._pressed_point = None self.selection_items = [] self._current_rs_item = None self._current_ps_item = None self.polygon_close_treshold = 10 self.sendSelectionOnUpdate = False self.auto_send_selection_callback = None self.data_range = {} self.map_transform = QTransform() self.graph_area = QRectF() ## Performance optimization self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate) self.scene().setItemIndexMethod(QGraphicsScene.NoIndex) self.animate_plot = True self.animate_points = True self.antialias_plot = True self.antialias_points = True self.antialias_lines = True self.auto_adjust_performance = True self.disable_animations_threshold = 5000 # self.setInteractive(False) self.warn_unused_attributes = False self._bounds_cache = {} self._transform_cache = {} self.block_update = False self.use_animations = True self._animations = [] ## Mouse event handlers self.mousePressEventHandler = None self.mouseMoveEventHandler = None self.mouseReleaseEventHandler = None self.mouseStaticClickHandler = self.mouseStaticClick self.static_click = False self._marker_items = [] self.grid_curve = PlotGrid(self) self._zoom_rect = None self._zoom_transform = QTransform() self.zoom_stack = [] self.old_legend_margin = None self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) ## Add specified axes: for key in axes: if key in [yLeft, xTop]: self.add_axis(key, title_above=1) else: self.add_axis(key) self.continuous_palette = ColorPaletteGenerator(number_of_colors= -1) self.discrete_palette = ColorPaletteGenerator() self.gui = OWPlotGUI(self) """ An :obj:`.OWPlotGUI` object associated with this plot """ self.activate_zooming() self.selection_behavior = self.AddSelection self.main_curve = None self.replot() # selectionCurveList = deprecated_attribute("selectionCurveList", "selection_items") # autoSendSelectionCallback = deprecated_attribute("autoSendSelectionCallback", "auto_send_selection_callback") # showLegend = deprecated_attribute("showLegend", "show_legend") # pointWidth = deprecated_attribute("pointWidth", "point_width") # alphaValue = deprecated_attribute("alphaValue", "alpha_value") # useAntialiasing = deprecated_attribute("useAntialiasing", "use_antialiasing") # showFilledSymbols = deprecated_attribute("showFilledSymbols", 
"show_filled_symbols") # mainTitle = deprecated_attribute("mainTitle", "main_title") # showMainTitle = deprecated_attribute("showMainTitle", "show_main_title") # gridCurve = deprecated_attribute("gridCurve", "grid_curve") # contPalette = deprecated_attribute("contPalette", "continuous_palette") # discPalette = deprecated_attribute("discPalette", "discrete_palette") def scrollContentsBy(self, dx, dy): # This is overriden here to prevent scrolling with mouse and keyboard # Instead of moving the contents, we simply do nothing pass def graph_area_rect(self): return self.graph_area def map_to_graph(self, point, axes = None, zoom = False): ''' Maps ``point``, which can be ether a tuple of (x,y), a QPoint or a QPointF, from data coordinates to plot coordinates. :param point: The point in data coordinates :type point: tuple or QPointF :param axes: The pair of axes along which to transform the point. If none are specified, (xBottom, yLeft) will be used. :type axes: tuple of float float :param zoom: if ``True``, the current :attr:`zoom_transform` will be considered in the transformation, and the result will be in scene coordinates instead. :type zoom: int :return: The transformed point in scene coordinates :type: tuple of float float ''' if type(point) == tuple: (x, y) = point point = QPointF(x, y) if axes: x_id, y_id = axes point = point * self.transform_for_axes(x_id, y_id) else: point = point * self.map_transform if zoom: point = point * self._zoom_transform return (point.x(), point.y()) def map_from_graph(self, point, axes = None, zoom = False): ''' Maps ``point``, which can be ether a tuple of (x,y), a QPoint or a QPointF, from plot coordinates to data coordinates. :param point: The point in data coordinates :type point: tuple or QPointF :param axes: The pair of axes along which to transform the point. If none are specified, (xBottom, yLeft) will be used. :type axes: tuple of float float :param zoom: if ``True``, the current :attr:`zoom_transform` will be considered in the transformation, and the ``point`` should be in scene coordinates instead. 
:type zoom: int :returns: The transformed point in data coordinates :rtype: tuple of float float ''' if type(point) == tuple: (x, y) = point point = QPointF(x,y) if zoom: t, ok = self._zoom_transform.inverted() point = point * t if axes: x_id, y_id = axes t, ok = self.transform_for_axes(x_id, y_id).inverted() else: t, ok = self.map_transform.inverted() ret = point * t return (ret.x(), ret.y()) def save_to_file(self, extraButtons = []): sizeDlg = OWChooseImageSizeDlg(self, extraButtons, parent=self) sizeDlg.exec_() def save_to_file_direct(self, fileName, size = None): sizeDlg = OWChooseImageSizeDlg(self) sizeDlg.saveImage(fileName, size) def activate_zooming(self): ''' Activates the zooming mode, where the user can zoom in and out with a single mouse click or by dragging the mouse to form a rectangular area ''' self.state = ZOOMING def activate_rectangle_selection(self): ''' Activates the rectangle selection mode, where the user can select points in a rectangular area by dragging the mouse over them ''' self.state = SELECT_RECTANGLE def activate_selection(self): ''' Activates the point selection mode, where the user can select points by clicking on them ''' self.state = SELECT def activate_polygon_selection(self): ''' Activates the polygon selection mode, where the user can select points by drawing a polygon around them ''' self.state = SELECT_POLYGON def activate_panning(self): ''' Activates the panning mode, where the user can move the zoom projection by dragging the mouse ''' self.state = PANNING def set_show_main_title(self, b): ''' Shows the main title if ``b`` is ``True``, and hides it otherwise. ''' self.showMainTitle = b self.replot() def set_main_title(self, t): ''' Sets the main title to ``t`` ''' self.mainTitle = t self.replot() def setShowXaxisTitle(self, b = -1): if b == -1 and hasattr(self, 'showXaxisTitle'): b = self.showXaxisTitle self.set_show_axis_title(xBottom, b) def setXaxisTitle(self, title): self.set_axis_title(xBottom, title) def setShowYLaxisTitle(self, b = -1): if b == -1 and hasattr(self, 'showYLaxisTitle'): b = self.showYLaxisTitle self.set_show_axis_title(yLeft, b) def setYLaxisTitle(self, title): self.set_axis_title(yLeft, title) def setShowYRaxisTitle(self, b = -1): if b == -1 and hasattr(self, 'showYRaxisTitle'): b = self.showYRaxisTitle self.set_show_axis_title(yRight, b) def setYRaxisTitle(self, title): self.set_axis_title(yRight, title) def enableGridXB(self, b): self.grid_curve.set_x_enabled(b) self.replot() def enableGridYL(self, b): self.grid_curve.set_y_enabled(b) self.replot() def setGridColor(self, c): self.grid_curve.set_pen(QPen(c)) self.replot() def setCanvasColor(self, c): p = self.palette() p.setColor(OWPalette.Canvas, c) self.set_palette(p) def setData(self, data): self.clear() self.replot() def setXlabels(self, labels): if xBottom in self.axes: self.set_axis_labels(xBottom, labels) elif xTop in self.axes: self.set_axis_labels(xTop, labels) def set_axis_autoscale(self, axis_id): if axis_id in self.axes: self.axes[axis_id].auto_scale = True elif axis_id in self.data_range: del self.data_range[axis_id] def set_axis_labels(self, axis_id, labels, values=None): ''' Sets the labels of axis ``axis_id`` to ``labels``. This is used for axes displaying a discrete data type. :param labels: The ID of the axis to change :type labels: int :param labels: The list of labels to be displayed along the axis :type labels: A list of strings .. note:: This changes the axis scale and removes any previous scale set with :meth:`set_axis_scale`. 
''' if axis_id in self._bounds_cache: del self._bounds_cache[axis_id] self._transform_cache = {} self.axes[axis_id].set_labels(labels, values) def set_axis_scale(self, axis_id, min, max, step_size=0): ''' Sets the scale of axis ``axis_id`` to show an interval between ``min`` and ``max``. If ``step`` is specified and non-zero, it determines the steps between label on the axis. Otherwise, they are calculated automatically. .. note:: This changes the axis scale and removes any previous labels set with :meth:`set_axis_labels`. ''' if axis_id in self._bounds_cache: del self._bounds_cache[axis_id] self._transform_cache = {} if axis_id in self.axes: self.axes[axis_id].set_scale(min, max, step_size) else: self.data_range[axis_id] = (min, max) def set_axis_title(self, axis_id, title): if axis_id in self.axes: self.axes[axis_id].set_title(title) def set_show_axis_title(self, axis_id, b): if axis_id in self.axes: if b == -1: b = not self.axes[axis_id].show_title self.axes[axis_id].set_show_title(b) self.replot() def set_axis_tick_length(self, axis_id, minor, medium, major): if axis_id in self.axes: self.axes[axis_id].set_tick_legth(minor, medium, major) def setYLlabels(self, labels): self.set_axis_labels(yLeft, labels) def setYRlabels(self, labels): self.set_axis_labels(yRight, labels) def add_custom_curve(self, curve, enableLegend = False): ''' Adds a custom PlotItem ``curve`` to the plot. If ``enableLegend`` is ``True``, a curve symbol defined by :meth:`.OWCurve.point_item` and the ``curve``'s name :obj:`.OWCurve.name` is added to the legend. This function recalculates axis bounds and replots the plot if needed. :param curve: The curve to add :type curve: :obj:`.OWCurve` ''' self.add_item(curve) if enableLegend: self.legend().add_curve(curve) for key in [curve.axes()]: if key in self._bounds_cache: del self._bounds_cache[key] self._transform_cache = {} if hasattr(curve, 'tooltip'): curve.setToolTip(curve.tooltip) x,y = curve.axes() if curve.is_auto_scale() and (self.is_axis_auto_scale(x) or self.is_axis_auto_scale(y)): self.set_dirty() self.replot() else: curve.set_graph_transform(self.transform_for_axes(x,y)) curve.update_properties() return curve def add_curve(self, name, brushColor = None, penColor = None, size = 5, style = Qt.NoPen, symbol = OWPoint.Ellipse, enableLegend = False, xData = [], yData = [], showFilledSymbols = None, lineWidth = 1, pen = None, autoScale = 0, antiAlias = None, penAlpha = 255, brushAlpha = 255, x_axis_key = xBottom, y_axis_key = yLeft): ''' Creates a new :obj:`.OWCurve` with the specified parameters and adds it to the graph. If ``enableLegend`` is ``True``, a curve symbol is added to the legend. ''' c = OWCurve(xData, yData, x_axis_key, y_axis_key, tooltip=name) c.set_zoom_transform(self._zoom_transform) c.name = name c.set_style(style) if not brushColor: brushColor = self.color(OWPalette.Data) if not penColor: penColor = self.color(OWPalette.Data) c.set_color(penColor) if pen: p = pen else: p = QPen() p.setColor(penColor) p.setWidth(lineWidth) c.set_pen(p) c.set_brush(brushColor) c.set_symbol(symbol) c.set_point_size(size) c.set_data(xData, yData) c.set_auto_scale(autoScale) return self.add_custom_curve(c, enableLegend) def set_main_curve_data(self, x_data, y_data, color_data, label_data, size_data, shape_data, marked_data = [], valid_data = [], x_axis_key=xBottom, y_axis_key=yLeft): """ Creates a single curve that can have points of different colors, shapes and sizes. This is the preferred method for visualization that show a series of different points. 

        :param x_data: The list of X coordinates of the points
        :type x_data: list of float

        :param y_data: The list of Y coordinates of the points
        :type y_data: list of float

        :param color_data: The list of point colors
        :type color_data: list of QColor

        :param label_data: The list of point labels
        :type label_data: list of str

        :param size_data: The list of point sizes
        :type size_data: list of int

        :param shape_data: The list of point symbols
        :type shape_data: list of int

        The number of points in the curve will be equal to
        min(len(x_data), len(y_data)).
        The other four lists can be empty, in which case a default value
        will be used. If they contain only one element, its value will be
        used for all points.

        .. note:: This function does not add items to the legend
                  automatically. You will have to add them yourself with
                  :meth:`.OWLegend.add_item`.

        .. seealso:: :obj:`.OWMultiCurve`, :obj:`.OWPoint`
        """
        if not self.main_curve:
            self.main_curve = OWMultiCurve([], [])
            self.add_item(self.main_curve)

        self.update_performance(len(x_data))

        if len(valid_data):
            import numpy
            x_data = numpy.compress(valid_data, x_data)
            y_data = numpy.compress(valid_data, y_data)
            if len(color_data) > 1:
                color_data = numpy.compress(valid_data, color_data)
            if len(size_data) > 1:
                size_data = numpy.compress(valid_data, size_data)
            if len(shape_data) > 1:
                shape_data = numpy.compress(valid_data, shape_data)
            if len(label_data) > 1:
                label_data = numpy.compress(valid_data, label_data)
            if len(marked_data) > 1:
                marked_data = numpy.compress(valid_data, marked_data).tolist()

        c = self.main_curve
        c.set_data(x_data, y_data)
        c.set_axes(x_axis_key, y_axis_key)
        c.set_point_colors(color_data)
        c.set_point_labels(label_data)
        c.set_point_sizes(size_data)
        c.set_point_symbols(shape_data)
        if len(marked_data):
            c.set_points_marked(marked_data)
            self.marked_points_changed.emit()
        c.name = 'Main Curve'

        self.replot()

    def remove_curve(self, item):
        '''
        Removes ``item`` from the plot.
        '''
        self.remove_item(item)
        self.legend().remove_curve(item)

    def plot_data(self, xData, yData, colors, labels, shapes, sizes):
        pass

    def add_axis(self, axis_id, title='', title_above=False,
                 title_location=AxisMiddle, line=None, arrows=0,
                 zoomable=False, bounds=None):
        '''
        Creates an :obj:`OrangeWidgets.plot.OWAxis` with the specified
        ``axis_id`` and ``title``.
        '''
        a = OWAxis(axis_id, title, title_above, title_location, line, arrows, self, bounds=bounds)
        self.scene().addItem(a)
        a.zoomable = zoomable
        a.update_callback = self.replot
        if axis_id in self._bounds_cache:
            del self._bounds_cache[axis_id]
        self._transform_cache = {}
        self.axes[axis_id] = a
        if not axis_id in CartesianAxes:
            self.set_show_axis_title(axis_id, True)
        return a

    def remove_all_axes(self, user_only = True):
        '''
        Removes all axes from the plot.
        '''
        ids = []
        for id, item in self.axes.items():
            if not user_only or id >= UserAxis:
                ids.append(id)
                self.scene().removeItem(item)
        for id in ids:
            del self.axes[id]

    def add_custom_axis(self, axis_id, axis):
        '''
        Adds a custom ``axis`` with id ``axis_id`` to the plot.
        '''
        self.axes[axis_id] = axis
        self.replot()

    def add_marker(self, name, x, y, alignment = -1, bold = 0, color = None,
                   brushColor = None, size=None, antiAlias = None,
                   x_axis_key = xBottom, y_axis_key = yLeft):
        m = Marker(name, x, y, alignment, bold, color, brushColor)
        self._marker_items.append((m, x, y, x_axis_key, y_axis_key))
        self.add_custom_curve(m)
        return m

    def removeAllSelections(self):
        ## TODO
        pass

    def clear(self):
        """
        Clears the plot, removing all curves, markers and tooltips.
Axes and the grid are not removed """ for i in self.plot_items(): if i is not self.grid_curve: self.remove_item(i) self.main_curve = None self._bounds_cache = {} self._transform_cache = {} self.clear_markers() self.tips.removeAll() self.legend().clear() self.old_legend_margin = None self.update_grid() def clear_markers(self): """ Removes all markers added with :meth:`add_marker` from the plot """ for item,x,y,x_axis,y_axis in self._marker_items: item.detach() self._marker_items = [] def update_layout(self): ''' Updates the plot layout. This function recalculates the position of titles, axes, the legend and the main plot area. It does not update the curve or the other plot items. ''' if not self.isVisible(): # No point in updating the graph if it's still hidden return graph_rect = QRectF(self.contentsRect()) self.centerOn(graph_rect.center()) m = self.graph_margin graph_rect.adjust(m, m, -m, -m) if self.showMainTitle and self.mainTitle: if self.title_item: self.scene().remove_item(self.title_item) del self.title_item self.title_item = QGraphicsTextItem(self.mainTitle, scene=self.scene()) title_size = self.title_item.boundingRect().size() ## TODO: Check if the title is too big self.title_item.setPos( graph_rect.width()/2 - title_size.width()/2, self.title_margin/2 - title_size.height()/2 ) graph_rect.setTop(graph_rect.top() + self.title_margin) if self.show_legend: self._legend_outside_area = QRectF(graph_rect) self._legend.max_size = self._legend_outside_area.size() r = self._legend_margin graph_rect.adjust(r.left(), r.top(), -r.right(), -r.bottom()) self._legend.update_items() axis_rects = dict() base_margin = min(self.axis_margin, graph_rect.height()/4, graph_rect.height()/4) if xBottom in self.axes and self.axes[xBottom].isVisible(): margin = base_margin if self.axes[xBottom].should_be_expanded(): margin += min(20, graph_rect.height()/8, graph_rect.width() / 8) bottom_rect = QRectF(graph_rect) bottom_rect.setTop( bottom_rect.bottom() - margin) axis_rects[xBottom] = bottom_rect graph_rect.setBottom( graph_rect.bottom() - margin) if xTop in self.axes and self.axes[xTop].isVisible(): margin = base_margin if self.axes[xTop].should_be_expanded(): margin += min(20, graph_rect.height()/8, graph_rect.width() / 8) top_rect = QRectF(graph_rect) top_rect.setBottom(top_rect.top() + margin) axis_rects[xTop] = top_rect graph_rect.setTop(graph_rect.top() + margin) if yLeft in self.axes and self.axes[yLeft].isVisible(): margin = base_margin if self.axes[yLeft].should_be_expanded(): margin += min(20, graph_rect.height()/8, graph_rect.width() / 8) left_rect = QRectF(graph_rect) left = graph_rect.left() + margin + self.y_axis_extra_margin left_rect.setRight(left) graph_rect.setLeft(left) axis_rects[yLeft] = left_rect if xBottom in axis_rects: axis_rects[xBottom].setLeft(left) if xTop in axis_rects: axis_rects[xTop].setLeft(left) if yRight in self.axes and self.axes[yRight].isVisible(): margin = base_margin if self.axes[yRight].should_be_expanded(): margin += min(20, graph_rect.height()/8, graph_rect.width() / 8) right_rect = QRectF(graph_rect) right = graph_rect.right() - margin - self.y_axis_extra_margin right_rect.setLeft(right) graph_rect.setRight(right) axis_rects[yRight] = right_rect if xBottom in axis_rects: axis_rects[xBottom].setRight(right) if xTop in axis_rects: axis_rects[xTop].setRight(right) if self.graph_area != graph_rect: self.graph_area = QRectF(graph_rect) self.set_graph_rect(self.graph_area) self._transform_cache = {} if self._zoom_rect: data_zoom_rect = 
self.map_transform.inverted()[0].mapRect(self._zoom_rect) self.map_transform = self.transform_for_axes() self.set_zoom_rect(self.map_transform.mapRect(data_zoom_rect)) self.map_transform = self.transform_for_axes() for c in self.plot_items(): x,y = c.axes() c.set_graph_transform(self.transform_for_axes(x,y)) c.update_properties() def update_zoom(self): ''' Updates the zoom transformation of the plot items. ''' zt = self.zoom_transform() self._zoom_transform = zt self.set_zoom_transform(zt) self.update_axes(zoom_only=True) self.viewport().update() def update_axes(self, zoom_only=False): """ Updates the axes. If ``zoom_only`` is ``True``, only the positions of the axes and their labels are recalculated. Otherwise, all their labels are updated. """ if self.warn_unused_attributes and not zoom_only: self._legend.remove_category(UNUSED_ATTRIBUTES_STR) for id, item in self.axes.items(): if item.scale is None and item.labels is None: item.auto_range = self.bounds_for_axis(id) if id in XAxes: (x,y) = (id, yLeft) elif id in YAxes: (x,y) = (xBottom, id) else: (x,y) = (xBottom, yLeft) if id in CartesianAxes: ## This class only sets the lines for these four axes, widgets are responsible for the rest if x in self.axes and y in self.axes: item.data_line = self.axis_line(self.data_rect_for_axes(x,y), id) if id in CartesianAxes: item.graph_line = self.axis_line(self.graph_area, id, invert_y = True) elif item.data_line: t = self.transform_for_axes(x, y) item.graph_line = t.map(item.data_line) if item.graph_line and item.zoomable: item.graph_line = self._zoom_transform.map(item.graph_line) if not zoom_only: if item.graph_line: item.show() else: item.hide() if self.warn_unused_attributes: self._legend.add_item(UNUSED_ATTRIBUTES_STR, item.title, None) item.zoom_transform = self._zoom_transform item.update(zoom_only) def replot(self): ''' Replot the entire graph. 
This functions redraws everything on the graph, so it can be very slow ''' #self.setBackgroundBrush(self.color(OWPalette.Canvas)) self._bounds_cache = {} self._transform_cache = {} self.set_clean() self.update_antialiasing() self.update_legend() self.update_layout() self.update_zoom() self.update_axes() self.update_grid() self.update_filled_symbols() self.setSceneRect(QRectF(self.contentsRect())) self.viewport().update() def update_legend(self): if self.show_legend and not self._legend_moved: ## If the legend hasn't been moved it, we set it outside, in the top right corner m = self.graph_margin r = QRectF(self.contentsRect()) r.adjust(m, m, -m, -m) self._legend.max_size = r.size() self._legend.update_items() w = self._legend.boundingRect().width() self._legend_margin = QRectF(0, 0, w, 0) self._legend.set_floating(False) self._legend.set_orientation(Qt.Vertical) self._legend.setPos(QRectF(self.contentsRect()).topRight() + QPointF(-w, 0)) if (self._legend.isVisible() == self.show_legend): return self._legend.setVisible(self.show_legend) if self.show_legend: if self.old_legend_margin is not None: self.animate(self, 'legend_margin', self.old_legend_margin, duration = 100) else: r = self.legend_rect() self.ensure_inside(r, self.contentsRect()) self._legend.setPos(r.topLeft()) self.notify_legend_moved(r.topLeft()) else: self.old_legend_margin = self.legend_margin self.animate(self, 'legend_margin', QRectF(), duration=100) def update_filled_symbols(self): ## TODO: Implement this in Curve.cpp pass def update_grid(self): self.grid_curve.set_x_enabled(self.show_grid) self.grid_curve.set_y_enabled(self.show_grid) self.grid_curve.update_properties() def legend(self): ''' Returns the plot's legend, which is a :obj:`OrangeWidgets.plot.OWLegend` ''' return self._legend def legend_rect(self): if self.show_legend: return self._legend.mapRectToScene(self._legend.boundingRect()) else: return QRectF() def isLegendEvent(self, event, function): if self.show_legend and self.legend_rect().contains(self.mapToScene(event.pos())): function(self, event) return True else: return False def mouse_action(self, event): b = event.buttons() | event.button() m = event.modifiers() if b == Qt.LeftButton | Qt.RightButton: b = Qt.MidButton if m & Qt.AltModifier and b == Qt.LeftButton: m = m & ~Qt.AltModifier b = Qt.MidButton if b == Qt.LeftButton and not m: return self.state if b == Qt.RightButton and not m and self.state == SELECT: return SELECT_RIGHTCLICK if b == Qt.MidButton: return PANNING if b in [Qt.LeftButton, Qt.RightButton] and (self.state == ZOOMING or m == Qt.ControlModifier): return ZOOMING if b == Qt.LeftButton and m == Qt.ShiftModifier: return SELECT ## Event handling def event(self, event): if event.type() == QEvent.Gesture: return self.gestureEvent(event) else: return orangeqt.Plot.event(self, event) def gestureEvent(self, event): for gesture in event.gestures(): if gesture.state() == Qt.GestureStarted: self.current_gesture_scale = 1. 
event.accept(gesture) continue elif gesture.gestureType() == Qt.PinchGesture: old_animate_plot = self.animate_plot self.animate_plot = False self.zoom(gesture.centerPoint(), gesture.scaleFactor()/self.current_gesture_scale ) self.current_gesture_scale = gesture.scaleFactor() self.animate_plot = old_animate_plot elif gesture.gestureType() == Qt.PanGesture: self.pan(gesture.delta()) return True def resizeEvent(self, event): self.replot() s = event.size() - event.oldSize() if self.legend_margin.right() > 0: self._legend.setPos(self._legend.pos() + QPointF(s.width(), 0)) if self.legend_margin.bottom() > 0: self._legend.setPos(self._legend.pos() + QPointF(0, s.height())) def showEvent(self, event): self.replot() def mousePressEvent(self, event): self.static_click = True self._pressed_mouse_button = event.button() self._pressed_mouse_pos = event.pos() if self.mousePressEventHandler and self.mousePressEventHandler(event): event.accept() return if self.isLegendEvent(event, QGraphicsView.mousePressEvent): return point = self.mapToScene(event.pos()) a = self.mouse_action(event) if a == SELECT and hasattr(self, 'move_selected_points'): self._pressed_point = self.nearest_point(point) self._pressed_point_coor = None if self._pressed_point is not None: self._pressed_point_coor = self._pressed_point.coordinates() if a == PANNING: self._last_pan_pos = point event.accept() else: orangeqt.Plot.mousePressEvent(self, event) def mouseMoveEvent(self, event): if event.buttons() and (self._pressed_mouse_pos - event.pos()).manhattanLength() > QApplication.instance().startDragDistance(): self.static_click = False if self.mouseMoveEventHandler and self.mouseMoveEventHandler(event): event.accept() return if self.isLegendEvent(event, QGraphicsView.mouseMoveEvent): return point = self.mapToScene(event.pos()) if not self._pressed_mouse_button: if self.receivers(self.point_hovered) > 0: self.point_hovered.emit(self.nearest_point(point)) ## We implement a workaround here, because sometimes mouseMoveEvents are not fast enough ## so the moving legend gets left behind while dragging, and it's left in a pressed state if self._legend.mouse_down: QGraphicsView.mouseMoveEvent(self, event) return a = self.mouse_action(event) if a == SELECT and self._pressed_point is not None and self._pressed_point.is_selected() and hasattr(self, 'move_selected_points'): animate_points = self.animate_points self.animate_points = False x1, y1 = self._pressed_point_coor x2, y2 = self.map_from_graph(point, zoom=True) self.move_selected_points((x2 - x1, y2 - y1)) self.replot() if self._pressed_point is not None: self._pressed_point_coor = self._pressed_point.coordinates() self.animate_points = animate_points elif a in [SELECT, ZOOMING] and self.graph_area.contains(point): if not self._current_rs_item: self._selection_start_point = self.mapToScene(self._pressed_mouse_pos) self._current_rs_item = QGraphicsRectItem(scene=self.scene()) self._current_rs_item.setPen(SelectionPen) self._current_rs_item.setBrush(SelectionBrush) self._current_rs_item.setZValue(SelectionZValue) self._current_rs_item.setRect(QRectF(self._selection_start_point, point).normalized()) elif a == PANNING: if not self._last_pan_pos: self._last_pan_pos = self.mapToScene(self._pressed_mouse_pos) self.pan(point - self._last_pan_pos) self._last_pan_pos = point else: x, y = self.map_from_graph(point, zoom=True) text, x, y = self.tips.maybeTip(x, y) if type(text) == int: text = self.buildTooltip(text) if text and x is not None and y is not None: tp = self.mapFromScene(QPointF(x,y) * 
self.map_transform * self._zoom_transform) self.showTip(tp.x(), tp.y(), text) else: orangeqt.Plot.mouseMoveEvent(self, event) def mouseReleaseEvent(self, event): self._pressed_mouse_button = Qt.NoButton if self.mouseReleaseEventHandler and self.mouseReleaseEventHandler(event): event.accept() return if self.static_click and self.mouseStaticClickHandler and self.mouseStaticClickHandler(event): event.accept() return if self.isLegendEvent(event, QGraphicsView.mouseReleaseEvent): return a = self.mouse_action(event) if a == SELECT and self._pressed_point is not None: self._pressed_point = None if a in [ZOOMING, SELECT] and self._current_rs_item: rect = self._current_rs_item.rect() if a == ZOOMING: self.zoom_to_rect(self._zoom_transform.inverted()[0].mapRect(rect)) else: self.add_selection(rect) self.scene().removeItem(self._current_rs_item) self._current_rs_item = None return orangeqt.Plot.mouseReleaseEvent(self, event) def mouseStaticClick(self, event): point = self.mapToScene(event.pos()) if point not in self.graph_area: return False a = self.mouse_action(event) b = event.buttons() | event.button() if a == ZOOMING: if event.button() == Qt.LeftButton: self.zoom_in(point) elif event.button() == Qt.RightButton: self.zoom_back() else: return False return True elif a == SELECT and b == Qt.LeftButton: point_item = self.nearest_point(point) b = self.selection_behavior if b == self.ReplaceSelection: self.unselect_all_points() b = self.AddSelection if point_item: point_item.set_selected(b == self.AddSelection or (b == self.ToggleSelection and not point_item.is_selected())) self.selection_changed.emit() elif a == SELECT and b == Qt.RightButton: point_item = self.nearest_point(point) if point_item: self.point_rightclicked.emit(self.nearest_point(point)) else: self.unselect_all_points() else: return False def wheelEvent(self, event): point = self.mapToScene(event.pos()) d = event.delta() / 120.0 self.zoom(point, pow(2,d)) @staticmethod def transform_from_rects(r1, r2): """ Returns a QTransform that maps from rectangle ``r1`` to ``r2``. """ if r1 is None or r2 is None: return QTransform() if r1.width() == 0 or r1.height() == 0 or r2.width() == 0 or r2.height() == 0: return QTransform() tr1 = QTransform().translate(-r1.left(), -r1.top()) ts = QTransform().scale(r2.width()/r1.width(), r2.height()/r1.height()) tr2 = QTransform().translate(r2.left(), r2.top()) return tr1 * ts * tr2 def transform_for_zoom(self, factor, point, rect): if factor == 1: return QTransform() dp = point t = QTransform() t.translate(dp.x(), dp.y()) t.scale(factor, factor) t.translate(-dp.x(), -dp.y()) return t def rect_for_zoom(self, point, old_rect, scale = 2): r = QRectF() r.setWidth(old_rect.width() / scale) r.setHeight(old_rect.height() / scale) r.moveCenter(point) self.ensure_inside(r, self.graph_area) return r def set_state(self, state): self.state = state if state != SELECT_RECTANGLE: self._current_rs_item = None if state != SELECT_POLYGON: self._current_ps_item = None def get_selected_points(self, xData, yData, validData): if self.main_curve: selected = [] points = self.main_curve.points() i = 0 for d in validData: if d: selected.append(points[i].is_selected()) i += 1 else: selected.append(False) else: selected = self.selected_points(xData, yData) unselected = [not i for i in selected] return selected, unselected def add_selection(self, reg): """ Selects all points in the region ``reg`` using the current :attr: `selection_behavior`. 
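
        Illustrative example (added sketch, not from the original source;
        the rectangle below is made up)::

            plot.selection_behavior = plot.AddSelection
            plot.add_selection(QRectF(0, 0, 100, 100))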
""" self.select_points(reg, self.selection_behavior) self.viewport().update() if self.auto_send_selection_callback: self.auto_send_selection_callback() def points_equal(self, p1, p2): if type(p1) == tuple: (x, y) = p1 p1 = QPointF(x, y) if type(p2) == tuple: (x, y) = p2 p2 = QPointF(x, y) return (QPointF(p1)-QPointF(p2)).manhattanLength() < self.polygon_close_treshold def data_rect_for_axes(self, x_axis = xBottom, y_axis = yLeft): """ Calculates the bounding rectangle in data coordinates for the axes ``x_axis`` and ``y_axis``. """ if x_axis in self.axes and y_axis in self.axes: x_min, x_max = self.bounds_for_axis(x_axis, try_auto_scale=True) y_min, y_max = self.bounds_for_axis(y_axis, try_auto_scale=True) if (x_min or x_max) and (y_min or y_max): r = QRectF(x_min, y_min, x_max-x_min, y_max-y_min) return r r = orangeqt.Plot.data_rect_for_axes(self, x_axis, y_axis) for id, axis in self.axes.items(): if id not in CartesianAxes and axis.data_line: r |= QRectF(axis.data_line.p1(), axis.data_line.p2()) ## We leave a 5% margin on each side so the graph doesn't look overcrowded ## TODO: Perhaps change this from a fixed percentage to always round to a round number dx = r.width() / 20.0 dy = r.height() / 20.0 r.adjust(-dx, -dy, dx, dy) return r def transform_for_axes(self, x_axis = xBottom, y_axis = yLeft): """ Returns the graph transform that maps from data to scene coordinates using axes ``x_axis`` and ``y_axis``. """ if not (x_axis, y_axis) in self._transform_cache: # We must flip the graph area, becase Qt coordinates start from top left, while graph coordinates start from bottom left a = QRectF(self.graph_area) t = a.top() a.setTop(a.bottom()) a.setBottom(t) self._transform_cache[(x_axis, y_axis)] = self.transform_from_rects(self.data_rect_for_axes(x_axis, y_axis), a) return self._transform_cache[(x_axis, y_axis)] def transform(self, axis_id, value): """ Transforms the ``value`` from data to plot coordinates along the axis ``axis_id``. This function always ignores zoom. If you need to account for zooming, use :meth:`map_to_graph`. """ if axis_id in XAxes: size = self.graph_area.width() margin = self.graph_area.left() else: size = self.graph_area.height() margin = self.graph_area.top() m, M = self.bounds_for_axis(axis_id) if m is None or M is None or M == m: return 0 else: return margin + (value-m)/(M-m) * size def inv_transform(self, axis_id, value): """ Transforms the ``value`` from plot to data coordinates along the axis ``axis_id``. This function always ignores zoom. If you need to account for zooming, use :meth:`map_from_graph`. 
""" if axis_id in XAxes: size = self.graph_area.width() margin = self.graph_area.left() else: size = self.graph_area.height() margin = self.graph_area.top() m, M = self.bounds_for_axis(axis_id) if m is not None and M is not None: return m + (value-margin)/size * (M-m) else: return 0 def bounds_for_axis(self, axis_id, try_auto_scale=True): if axis_id in self.axes and not self.axes[axis_id].auto_scale: return self.axes[axis_id].bounds() if try_auto_scale: lower, upper = orangeqt.Plot.bounds_for_axis(self, axis_id) if lower != upper: lower = lower - (upper-lower)/20.0 upper = upper + (upper-lower)/20.0 return lower, upper else: return None, None def enableYRaxis(self, enable=1): self.set_axis_enabled(yRight, enable) def enableLRaxis(self, enable=1): self.set_axis_enabled(yLeft, enable) def enableXaxis(self, enable=1): self.set_axis_enabled(xBottom, enable) def set_axis_enabled(self, axis, enable): if axis not in self.axes: self.add_axis(axis) self.axes[axis].setVisible(enable) self.replot() @staticmethod def axis_coordinate(point, axis_id): if axis_id in XAxes: return point.x() elif axis_id in YAxes: return point.y() else: return None # #################################################################### # return string with attribute names and their values for example example def getExampleTooltipText(self, example, indices=None, maxIndices=20): if indices and type(indices[0]) == str: indices = [self.attributeNameIndex[i] for i in indices] if not indices: indices = list(range(len(self.dataDomain.attributes))) # don't show the class value twice if example.domain.classVar: classIndex = self.attributeNameIndex[example.domain.classVar.name] while classIndex in indices: indices.remove(classIndex) text = "<b>Attributes:</b><br>" for index in indices[:maxIndices]: attr = self.attributeNames[index] if attr not in example.domain: text += "&nbsp;"*4 + "%s = ?<br>" % (Qt.escape(attr)) elif example[attr].isSpecial(): text += "&nbsp;"*4 + "%s = ?<br>" % (Qt.escape(attr)) else: text += "&nbsp;"*4 + "%s = %s<br>" % (Qt.escape(attr), Qt.escape(str(example[attr]))) if len(indices) > maxIndices: text += "&nbsp;"*4 + " ... <br>" if example.domain.classVar: text = text[:-4] text += "<hr><b>Class:</b><br>" if example.getclass().isSpecial(): text += "&nbsp;"*4 + "%s = ?<br>" % (Qt.escape(example.domain.classVar.name)) else: text += "&nbsp;"*4 + "%s = %s<br>" % (Qt.escape(example.domain.classVar.name), Qt.escape(str(example.getclass()))) if len(example.domain.getmetas()) != 0: text = text[:-4] text += "<hr><b>Meta attributes:</b><br>" # show values of meta attributes for key in example.domain.getmetas(): try: text += "&nbsp;"*4 + "%s = %s<br>" % (Qt.escape(example.domain[key].name), Qt.escape(str(example[key]))) except: pass return text[:-4] # remove the last <br> # show a tooltip at x,y with text. 
if the mouse will move for more than 2 pixels it will be removed def showTip(self, x, y, text): QToolTip.showText(self.mapToGlobal(QPoint(x, y)), text, self, QRect(x-3,y-3,6,6)) def notify_legend_moved(self, pos): self._legend_moved = True l = self.legend_rect() g = getattr(self, '_legend_outside_area', QRectF()) p = QPointF() rect = QRectF() offset = 20 if pos.x() > g.right() - offset: self._legend.set_orientation(Qt.Vertical) rect.setRight(self._legend.boundingRect().width()) p = g.topRight() - self._legend.boundingRect().topRight() elif pos.x() < g.left() + offset: self._legend.set_orientation(Qt.Vertical) rect.setLeft(self._legend.boundingRect().width()) p = g.topLeft() elif pos.y() < g.top() + offset: self._legend.set_orientation(Qt.Horizontal) rect.setTop(self._legend.boundingRect().height()) p = g.topLeft() elif pos.y() > g.bottom() - offset: self._legend.set_orientation(Qt.Horizontal) rect.setBottom(self._legend.boundingRect().height()) p = g.bottomLeft() - self._legend.boundingRect().bottomLeft() if p.isNull(): self._legend.set_floating(True, pos) else: self._legend.set_floating(False, p) if rect != self._legend_margin: orientation = Qt.Horizontal if rect.top() or rect.bottom() else Qt.Vertical self._legend.set_orientation(orientation) self.animate(self, 'legend_margin', rect, duration=100) def get_legend_margin(self): return self._legend_margin def set_legend_margin(self, value): self._legend_margin = value self.update_layout() self.update_axes() legend_margin = pyqtProperty(QRectF, get_legend_margin, set_legend_margin) def update_curves(self): if self.main_curve: self.main_curve.set_alpha_value(self.alpha_value) else: for c in self.plot_items(): if isinstance(c, orangeqt.Curve) and not getattr(c, 'ignore_alpha', False): au = c.auto_update() c.set_auto_update(False) c.set_point_size(self.point_width) color = c.color() color.setAlpha(self.alpha_value) c.set_color(color) c.set_auto_update(au) c.update_properties() self.viewport().update() update_point_size = update_curves update_alpha_value = update_curves def update_antialiasing(self, use_antialiasing=None): if use_antialiasing is not None: self.antialias_plot = use_antialiasing self.setRenderHint(QPainter.Antialiasing, self.antialias_plot) def update_animations(self, use_animations=None): if use_animations is not None: self.animate_plot = use_animations self.animate_points = use_animations def update_performance(self, num_points = None): if self.auto_adjust_performance: if not num_points: if self.main_curve: num_points = len(self.main_curve.points()) else: num_points = sum( len(c.points()) for c in self.curves ) if num_points > self.disable_animations_threshold: self.disabled_animate_points = self.animate_points self.animate_points = False self.disabled_animate_plot = self.animate_plot self.animate_plot = False self.disabled_antialias_lines = self.animate_points self.antialias_lines = True elif hasattr(self, 'disabled_animate_points'): self.animate_points = self.disabled_animate_points del self.disabled_animate_points self.animate_plot = self.disabled_animate_plot del self.disabled_animate_plot self.antialias_lines = True # self.disabled_antialias_lines del self.disabled_antialias_lines def animate(self, target, prop_name, end_val, duration = None, start_val = None): for a in self._animations: if a.state() == QPropertyAnimation.Stopped: self._animations.remove(a) if self.animate_plot: a = QPropertyAnimation(target, prop_name) a.setEndValue(end_val) if start_val is not None: a.setStartValue(start_val) if duration: 
a.setDuration(duration) self._animations.append(a) a.start(QPropertyAnimation.KeepWhenStopped) else: target.setProperty(prop_name, end_val) def clear_selection(self): self.unselect_all_points() def send_selection(self): if self.auto_send_selection_callback: self.auto_send_selection_callback() def pan(self, delta): if type(delta) == tuple: x, y = delta else: x, y = delta.x(), delta.y() t = self.zoom_transform() x = x / t.m11() y = y / t.m22() r = QRectF(self.zoom_rect) r.translate(-QPointF(x,y)) self.ensure_inside(r, self.graph_area) self.zoom_rect = r def zoom_to_rect(self, rect): self.ensure_inside(rect, self.graph_area) # add to zoom_stack if zoom_rect is larger if self.zoom_rect.width() > rect.width() or self.zoom_rect.height() > rect.height(): self.zoom_stack.append(self.zoom_rect) self.animate(self, 'zoom_rect', rect, start_val = self.get_zoom_rect()) def zoom_back(self): if self.zoom_stack: rect = self.zoom_stack.pop() self.animate(self, 'zoom_rect', rect, start_val = self.get_zoom_rect()) def reset_zoom(self): self._zoom_rect = None self.update_zoom() def zoom_transform(self): return self.transform_from_rects(self.zoom_rect, self.graph_area) def zoom_in(self, point): self.zoom(point, scale = 2) def zoom_out(self, point): self.zoom(point, scale = 0.5) def zoom(self, point, scale): print(len(self.zoom_stack)) t, ok = self._zoom_transform.inverted() point = point * t r = QRectF(self.zoom_rect) i = 1.0/scale r.setTopLeft(point*(1-i) + r.topLeft()*i) r.setBottomRight(point*(1-i) + r.bottomRight()*i) self.ensure_inside(r, self.graph_area) # remove smaller zoom rects from stack while len(self.zoom_stack) > 0 and r.width() >= self.zoom_stack[-1].width() and r.height() >= self.zoom_stack[-1].height(): self.zoom_stack.pop() self.zoom_to_rect(r) def get_zoom_rect(self): if self._zoom_rect: return self._zoom_rect else: return self.graph_area def set_zoom_rect(self, rect): self._zoom_rect = rect self._zoom_transform = self.transform_from_rects(rect, self.graph_area) self.update_zoom() zoom_rect = pyqtProperty(QRectF, get_zoom_rect, set_zoom_rect) @staticmethod def ensure_inside(small_rect, big_rect): if small_rect.width() > big_rect.width(): small_rect.setWidth(big_rect.width()) if small_rect.height() > big_rect.height(): small_rect.setHeight(big_rect.height()) if small_rect.right() > big_rect.right(): small_rect.moveRight(big_rect.right()) elif small_rect.left() < big_rect.left(): small_rect.moveLeft(big_rect.left()) if small_rect.bottom() > big_rect.bottom(): small_rect.moveBottom(big_rect.bottom()) elif small_rect.top() < big_rect.top(): small_rect.moveTop(big_rect.top()) def shuffle_points(self): if self.main_curve: self.main_curve.shuffle_points() def set_progress(self, done, total): if not self.widget: return if done == total: self.widget.progressBarFinished() else: self.widget.progressBarSet(100.0 * done / total) def start_progress(self): if self.widget: self.widget.progressBarInit() def end_progress(self): if self.widget: self.widget.progressBarFinished() def is_axis_auto_scale(self, axis_id): if axis_id not in self.axes: return axis_id not in self.data_range return self.axes[axis_id].auto_scale def axis_line(self, rect, id, invert_y = False): if invert_y: r = QRectF(rect) r.setTop(rect.bottom()) r.setBottom(rect.top()) rect = r if id == xBottom: line = QLineF(rect.topLeft(), rect.topRight()) elif id == xTop: line = QLineF(rect.bottomLeft(), rect.bottomRight()) elif id == yLeft: line = QLineF(rect.topLeft(), rect.bottomLeft()) elif id == yRight: line = QLineF(rect.topRight(), 
rect.bottomRight()) else: line = None return line def color(self, role, group = None): if group: return self.palette().color(group, role) else: return self.palette().color(role) def set_palette(self, p): ''' Sets the plot palette to ``p``. :param p: The new color palette :type p: :obj:`.QPalette` ''' self.setPalette(p) self.replot() def update_theme(self): ''' Updates the current color theme, depending on the value of :attr:`theme_name`. ''' if self.theme_name.lower() == 'default': self.set_palette(OWPalette.System) elif self.theme_name.lower() == 'light': self.set_palette(OWPalette.Light) elif self.theme_name.lower() == 'dark': self.set_palette(OWPalette.Dark)
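
# ---------------------------------------------------------------------
# Illustrative usage sketch (added commentary; not part of the original
# module). It assumes a running QApplication and a concrete plot widget
# built from the class above, and it only calls methods defined in this
# file:
#
#     plot.set_main_title("Example")
#     plot.set_axis_title(xBottom, "x")
#     plot.set_axis_title(yLeft, "y")
#     plot.add_curve("demo", xData=[0, 1, 2], yData=[0, 1, 4],
#                    enableLegend=True)
#     plot.activate_zooming()
#     plot.replot()
# ---------------------------------------------------------------------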
bsd-2-clause
-1,525,150,586,387,520,000
35.820021
172
0.580668
false
3.870152
false
false
false
lamondlab/sipify
CppHeaderParser-2.7/CppHeaderParser/CppHeaderParser.py
1
114661
#!/usr/bin/python # # Author: Jashua R. Cloutier (contact via https://bitbucket.org/senex) # Project: http://senexcanis.com/open-source/cppheaderparser/ # # Copyright (C) 2011, Jashua R. Cloutier # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of Jashua R. Cloutier nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. Stories, # blog entries etc making reference to this project may mention the # name Jashua R. Cloutier in terms of project originator/creator etc. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # # The CppHeaderParser.py script is written in Python 2.4 and released to # the open source community for continuous improvements under the BSD # 2.0 new license, which can be found at: # # http://www.opensource.org/licenses/bsd-license.php # """Parse C++ header files and generate a data structure representing the class """ import ply.lex as lex import os import sys import re import inspect def lineno(): """Returns the current line number in our program.""" return inspect.currentframe().f_back.f_lineno version = __version__ = "2.7" tokens = [ 'NUMBER', 'FLOAT_NUMBER', 'TEMPLATE_NAME', 'NAME', 'OPEN_PAREN', 'CLOSE_PAREN', 'OPEN_BRACE', 'CLOSE_BRACE', 'OPEN_SQUARE_BRACKET', 'CLOSE_SQUARE_BRACKET', 'COLON', 'SEMI_COLON', 'COMMA', 'TAB', 'BACKSLASH', 'PIPE', 'PERCENT', 'EXCLAMATION', 'CARET', 'COMMENT_SINGLELINE', 'COMMENT_MULTILINE', 'PRECOMP_MACRO', 'PRECOMP_MACRO_CONT', 'ASTERISK', 'AMPERSTAND', 'EQUALS', 'MINUS', 'PLUS', 'DIVIDE', 'CHAR_LITERAL', 'STRING_LITERAL', 'NEW_LINE', 'SQUOTE', ] t_ignore = " \r.?@\f" t_NUMBER = r'[0-9][0-9XxA-Fa-f]*' t_FLOAT_NUMBER = r'[-+]?[0-9]*\.[0-9]+([eE][-+]?[0-9]+)?' t_TEMPLATE_NAME = r'CppHeaderParser_template_[0-9]+' t_NAME = r'[<>A-Za-z_~][A-Za-z0-9_]*' t_OPEN_PAREN = r'\(' t_CLOSE_PAREN = r'\)' t_OPEN_BRACE = r'{' t_CLOSE_BRACE = r'}' t_OPEN_SQUARE_BRACKET = r'\[' t_CLOSE_SQUARE_BRACKET = r'\]' t_SEMI_COLON = r';' t_COLON = r':' t_COMMA = r',' t_TAB = r'\t' t_BACKSLASH = r'\\' t_PIPE = r'\|' t_PERCENT = r'%' t_CARET = r'\^' t_EXCLAMATION = r'!' 
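
# Added note (illustrative, not part of the original file): each simple t_*
# rule above is a plain regular expression handed to ply.lex, so the token
# patterns can be sanity-checked directly with Python's re module, e.g.:
#
#     import re
#     assert re.match(t_NAME, "~MyClass")       # destructor-style names
#     assert re.match(t_NUMBER, "0xFF")         # hex literals
#     assert re.match(t_FLOAT_NUMBER, "-1.5e3") # scientific notation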
t_PRECOMP_MACRO = r'\#.*'
t_PRECOMP_MACRO_CONT = r'.*\\\n'

def t_COMMENT_SINGLELINE(t):
    r'\/\/.*\n'
    global doxygenCommentCache
    if t.value.startswith("///") or t.value.startswith("//!"):
        if doxygenCommentCache:
            doxygenCommentCache += "\n"
        if t.value.endswith("\n"):
            doxygenCommentCache += t.value[:-1]
        else:
            doxygenCommentCache += t.value
    t.lexer.lineno += len([a for a in t.value if a=="\n"])

t_ASTERISK = r'\*'
t_MINUS = r'\-'
t_PLUS = r'\+'
t_DIVIDE = r'/(?!/)'
t_AMPERSTAND = r'&'
t_EQUALS = r'='
t_CHAR_LITERAL = "'.'"
t_SQUOTE = "'"

#found at http://wordaligned.org/articles/string-literals-and-regular-expressions
#TODO: This does not work with the string "bla \" bla"
t_STRING_LITERAL = r'"([^"\\]|\\.)*"'

#Found at http://ostermiller.org/findcomment.html
def t_COMMENT_MULTILINE(t):
    r'/\*([^*]|[\r\n]|(\*+([^*/]|[\r\n])))*\*+/'
    global doxygenCommentCache
    if t.value.startswith("/**") or t.value.startswith("/*!"):
        #not sure why, but get double new lines
        v = t.value.replace("\n\n", "\n")
        #strip prefixing whitespace
        v = re.sub("\n[\s]+\*", "\n*", v)
        doxygenCommentCache += v
    t.lexer.lineno += len([a for a in t.value if a=="\n"])

def t_NEWLINE(t):
    r'\n+'
    t.lexer.lineno += len(t.value)

def t_error(v):
    print(( "Lex error: ", v ))

lex.lex()

# Controls error_print
print_errors = 1
# Controls warning_print
print_warnings = 1
# Controls debug_print
debug = 0
# Controls trace_print
debug_trace = 0

def error_print(arg):
    if print_errors:
        print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))

def warning_print(arg):
    if print_warnings:
        print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))

def debug_print(arg):
    global debug
    if debug:
        print(("[%4d] %s"%(inspect.currentframe().f_back.f_lineno, arg)))

def trace_print(*arg):
    global debug_trace
    if debug_trace:
        sys.stdout.write("[%s] "%(inspect.currentframe().f_back.f_lineno))
        for a in arg: sys.stdout.write("%s "%a)
        sys.stdout.write("\n")

supportedAccessSpecifier = [
    'public',
    'protected',
    'private',
    'public slots',
    'protected slots',
    'private slots',
    'public Q_SLOTS',
    'protected Q_SLOTS',
    'private Q_SLOTS',
    'signals',
    'Q_SIGNALS',
]

#Symbols to ignore, usually special macros
ignoreSymbols = [
    'Q_OBJECT',
    'Q_PROPERTY()',
    'Q_DECLARE_FLAGS()',
    'Q_INVOKABLE',
]

doxygenCommentCache = ""

#Track what was added in what order and at what depth
parseHistory = []

def is_namespace(nameStack):
    """Determines if a namespace is being specified"""
    if len(nameStack) == 0:
        return False
    if nameStack[0] == "namespace":
        return True
    return False

def is_enum_namestack(nameStack):
    """Determines if a namestack is an enum namestack"""
    if len(nameStack) == 0:
        return False
    if nameStack[0] == "enum":
        return True
    if len(nameStack) > 1 and nameStack[0] == "typedef" and nameStack[1] == "enum":
        return True
    return False

def is_fundamental(s):
    for a in s.split():
        if a not in ["size_t", "struct", "union", "unsigned", "signed", "bool", "char", "short", "int", "float", "double", "long", "void", "*"]:
            return False
    return True

def is_function_pointer_stack(stack):
    """Count how many non-nested parentheses are in the stack.
Useful for determining if a stack is a function pointer""" paren_depth = 0 paren_count = 0 star_after_first_paren = False last_e = None for e in stack: if e == "(": paren_depth += 1 elif e == ")" and paren_depth > 0: paren_depth -= 1 if paren_depth == 0: paren_count += 1 elif e == "*" and last_e == "(" and paren_count == 0 and paren_depth == 1: star_after_first_paren = True last_e = e if star_after_first_paren and paren_count == 2: return True else: return False def is_method_namestack(stack): r = False if '(' not in stack: r = False elif stack[0] == 'typedef': r = False # TODO deal with typedef function prototypes #elif '=' in stack and stack.index('=') < stack.index('(') and stack[stack.index('=')-1] != 'operator': r = False #disabled July6th - allow all operators elif 'operator' in stack: r = True # allow all operators elif '{' in stack and stack.index('{') < stack.index('('): r = False # struct that looks like a method/class elif '(' in stack and ')' in stack: if '{' in stack and '}' in stack: r = True elif stack[-1] == ';': if is_function_pointer_stack(stack): r = False else: r = True elif '{' in stack: r = True # ideally we catch both braces... TODO else: r = False #Test for case of property set to something with parens such as "static const int CONST_A = (1 << 7) - 1;" if r and "(" in stack and "=" in stack and 'operator' not in stack: if stack.index("=") < stack.index("("): r = False return r def is_property_namestack(nameStack): r = False if '(' not in nameStack and ')' not in nameStack: r = True elif "(" in nameStack and "=" in nameStack and nameStack.index("=") < nameStack.index("("): r = True #See if we are a function pointer if not r and is_function_pointer_stack(nameStack): r = True return r def detect_lineno(s): """Detect the line number for a given token string""" try: rtn = s.lineno() if rtn != -1: return rtn except: pass global curLine return curLine def filter_out_attribute_keyword(stack): """Strips __attribute__ and its parenthetical expression from the stack""" if "__attribute__" not in stack: return stack try: debug_print("Stripping __attribute__ from %s"% stack) attr_index = stack.index("__attribute__") attr_end = attr_index + 1 #Assuming not followed by parenthetical expression which wont happen #Find final paren if stack[attr_index + 1] == '(': paren_count = 1 for i in range(attr_index + 2, len(stack)): elm = stack[i] if elm == '(': paren_count += 1 elif elm == ')': paren_count -= 1 if paren_count == 0: attr_end = i + 1 break new_stack = stack[0:attr_index] + stack[attr_end:] debug_print("stripped stack is %s"% new_stack) return new_stack except: return stack class TagStr(str): """Wrapper for a string that allows us to store the line number associated with it""" lineno_reg = {} def __new__(cls,*args,**kw): new_obj = str.__new__(cls,*args) if "lineno" in kw: TagStr.lineno_reg[id(new_obj)] = kw["lineno"] return new_obj def __del__(self): try: del TagStr.lineno_reg[id(self)] except: pass def lineno(self): return TagStr.lineno_reg.get(id(self), -1) class CppParseError(Exception): pass class CppClass(dict): """Takes a name stack and turns it into a class Contains the following Keys: self['name'] - Name of the class self['doxygen'] - Doxygen comments associated with the class if they exist self['inherits'] - List of Classes that this one inherits where the values are of the form {"access": Anything in supportedAccessSpecifier "class": Name of the class self['methods'] - Dictionary where keys are from supportedAccessSpecifier and values are a lists of CppMethod's 
self['properties'] - Dictionary where keys are from supportedAccessSpecifier and values are lists of CppVariable's self['enums'] - Dictionary where keys are from supportedAccessSpecifier and values are lists of CppEnum's self['structs'] - Dictionary where keys are from supportedAccessSpecifier and values are lists of nested Struct's An example of how this could look is as follows: #self = { 'name': "" 'inherits':[] 'methods': { 'public':[], 'protected':[], 'private':[] }, 'properties': { 'public':[], 'protected':[], 'private':[] }, 'enums': { 'public':[], 'protected':[], 'private':[] } } """ def get_all_methods(self): r = [] for typ in supportedAccessSpecifier: r += self['methods'][typ] return r def get_all_method_names( self ): r = [] for typ in supportedAccessSpecifier: r += self.get_method_names(typ) # returns list return r def get_all_pure_virtual_methods( self ): r = {} for typ in supportedAccessSpecifier: r.update(self.get_pure_virtual_methods(typ)) # returns dict return r def get_method_names( self, type='public' ): return [ meth['name'] for meth in self['methods'][ type ] ] def get_pure_virtual_methods( self, type='public' ): r = {} for meth in self['methods'][ type ]: if meth['pure_virtual']: r[ meth['name'] ] = meth return r def __init__(self, nameStack, curTemplate): self['nested_classes'] = [] self['parent'] = None self['abstract'] = False self._public_enums = {} self._public_structs = {} self._public_typedefs = {} self._public_forward_declares = [] self['namespace'] = "" debug_print( "Class: %s"%nameStack ) debug_print( "Template: %s"%curTemplate) if (len(nameStack) < 2): nameStack.insert(1, "")#anonymous struct global doxygenCommentCache if len(doxygenCommentCache): self["doxygen"] = doxygenCommentCache doxygenCommentCache = "" if "::" in "".join(nameStack): #Re-Join class paths (ex ['class', 'Bar', ':', ':', 'Foo'] -> ['class', 'Bar::Foo'] try: new_nameStack = [] for name in nameStack: if len(new_nameStack) == 0: new_nameStack.append(name) elif name == ":" and new_nameStack[-1].endswith(":"): new_nameStack[-1] += name elif new_nameStack[-1].endswith("::"): new_nameStack[-2] += new_nameStack[-1] + name del new_nameStack[-1] else: new_nameStack.append(name) trace_print("Convert from namestack\n %s\nto\n%s"%(nameStack, new_nameStack)) nameStack = new_nameStack except: pass # Handle final specifier self["final"] = False try: final_index = nameStack.index("final") # Dont trip up the rest of the logic del nameStack[final_index] self["final"] = True trace_print("final") except: pass self["name"] = nameStack[1] self["line_number"] = detect_lineno(nameStack[0]) #Handle template classes if len(nameStack) > 3 and nameStack[2].startswith("<"): open_template_count = 0 param_separator = 0 found_first = False i = 0 for elm in nameStack: if '<' in elm : open_template_count += 1 found_first = True elif '>' in elm: open_template_count -= 1 if found_first and open_template_count == 0: self["name"] = "".join(nameStack[1:i + 1]) break; i += 1 elif ":" in nameStack: self['name'] = nameStack[ nameStack.index(':') - 1 ] inheritList = [] if nameStack.count(':') == 1: nameStack = nameStack[nameStack.index(":") + 1:] while len(nameStack): tmpStack = [] tmpInheritClass = {"access":"private", "virtual": False} if "," in nameStack: tmpStack = nameStack[:nameStack.index(",")] nameStack = nameStack[nameStack.index(",") + 1:] else: tmpStack = nameStack nameStack = [] # Convert template classes to one name in the last index for i in range(0, len(tmpStack)): if '<' in tmpStack[i]: tmpStack2 = tmpStack[:i-1] 
tmpStack2.append("".join(tmpStack[i-1:])) tmpStack = tmpStack2 break if len(tmpStack) == 0: break; elif len(tmpStack) == 1: tmpInheritClass["class"] = tmpStack[0] elif len(tmpStack) == 2: tmpInheritClass["access"] = tmpStack[0] tmpInheritClass["class"] = tmpStack[1] elif len(tmpStack) == 3 and "virtual" in tmpStack: tmpInheritClass["access"] = tmpStack[1] if tmpStack[1] != "virtual" else tmpStack[0] tmpInheritClass["class"] = tmpStack[2] tmpInheritClass["virtual"] = True else: warning_print( "Warning: can not parse inheriting class %s"%(" ".join(tmpStack))) if '>' in tmpStack: pass # allow skip templates for now else: raise NotImplemented if 'class' in tmpInheritClass: inheritList.append(tmpInheritClass) elif nameStack.count(':') == 2: self['parent'] = self['name']; self['name'] = nameStack[-1] elif nameStack.count(':') > 2 and nameStack[0] in ("class", "struct"): tmpStack = nameStack[nameStack.index(":") + 1:] superTmpStack = [[]] for tok in tmpStack: if tok == ',': superTmpStack.append([]) else: superTmpStack[-1].append(tok) for tmpStack in superTmpStack: tmpInheritClass = {"access":"private"} if len(tmpStack) and tmpStack[0] in supportedAccessSpecifier: tmpInheritClass["access"] = tmpStack[0] tmpStack = tmpStack[1:] inheritNSStack = [] while len(tmpStack) > 3: if tmpStack[0] == ':': break; if tmpStack[1] != ':': break; if tmpStack[2] != ':': break; inheritNSStack.append(tmpStack[0]) tmpStack = tmpStack[3:] if len(tmpStack) == 1 and tmpStack[0] != ':': inheritNSStack.append(tmpStack[0]) tmpInheritClass["class"] = "::".join(inheritNSStack) inheritList.append(tmpInheritClass) self['inherits'] = inheritList if curTemplate: self["template"] = curTemplate trace_print("Setting template to '%s'"%self["template"]) methodAccessSpecificList = {} propertyAccessSpecificList = {} enumAccessSpecificList = {} structAccessSpecificList = {} typedefAccessSpecificList = {} forwardAccessSpecificList = {} for accessSpecifier in supportedAccessSpecifier: methodAccessSpecificList[accessSpecifier] = [] propertyAccessSpecificList[accessSpecifier] = [] enumAccessSpecificList[accessSpecifier] = [] structAccessSpecificList[accessSpecifier] = [] typedefAccessSpecificList[accessSpecifier] = [] forwardAccessSpecificList[accessSpecifier] = [] self['methods'] = methodAccessSpecificList self['properties'] = propertyAccessSpecificList self['enums'] = enumAccessSpecificList self['structs'] = structAccessSpecificList self['typedefs'] = typedefAccessSpecificList self['forward_declares'] = forwardAccessSpecificList def show(self): """Convert class to a string""" namespace_prefix = "" if self["namespace"]: namespace_prefix = self["namespace"] + "::" rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"]) if self["final"]: rtn += " final" if self['abstract']: rtn += ' (abstract)\n' else: rtn += '\n' if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n' if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n' if "inherits" in list(self.keys()): rtn += " Inherits: " for inheritClass in self["inherits"]: if inheritClass["virtual"]: rtn += "virtual " rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"]) rtn += "\n" rtn += " {\n" for accessSpecifier in supportedAccessSpecifier: rtn += " %s\n"%(accessSpecifier) #Enums if (len(self["enums"][accessSpecifier])): rtn += " <Enums>\n" for enum in self["enums"][accessSpecifier]: rtn += " %s\n"%(repr(enum)) #Properties if (len(self["properties"][accessSpecifier])): rtn += " <Properties>\n" for property in 
self["properties"][accessSpecifier]: rtn += " %s\n"%(repr(property)) #Methods if (len(self["methods"][accessSpecifier])): rtn += " <Methods>\n" for method in self["methods"][accessSpecifier]: rtn += "\t\t" + method.show() + '\n' rtn += " }\n" print(rtn) def __str__(self): """Convert class to a string""" namespace_prefix = "" if self["namespace"]: namespace_prefix = self["namespace"] + "::" rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"]) if self["final"]: rtn += " final" if self['abstract']: rtn += ' (abstract)\n' else: rtn += '\n' if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n' if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n' if "inherits" in list(self.keys()) and len(self["inherits"]): rtn += "Inherits: " for inheritClass in self["inherits"]: if inheritClass.get("virtual", False): rtn += "virtual " rtn += "%s %s, "%(inheritClass["access"], inheritClass["class"]) rtn += "\n" rtn += "{\n" for accessSpecifier in supportedAccessSpecifier: rtn += "%s\n"%(accessSpecifier) #Enums if (len(self["enums"][accessSpecifier])): rtn += " // Enums\n" for enum in self["enums"][accessSpecifier]: rtn += " %s\n"%(repr(enum)) #Properties if (len(self["properties"][accessSpecifier])): rtn += " // Properties\n" for property in self["properties"][accessSpecifier]: rtn += " %s\n"%(repr(property)) #Methods if (len(self["methods"][accessSpecifier])): rtn += " // Methods\n" for method in self["methods"][accessSpecifier]: rtn += " %s\n"%(repr(method)) rtn += "}\n" return rtn class CppUnion( CppClass ): """Takes a name stack and turns it into a union Contains the following Keys: self['name'] - Name of the union self['doxygen'] - Doxygen comments associated with the union if they exist self['members'] - List of members the union has An example of how this could look is as follows: #self = { 'name': "" 'members': [] } """ def __init__(self, nameStack): CppClass.__init__(self, nameStack, None) self["name"] = "union " + self["name"] self["members"] = self["properties"]["public"] def transform_to_union_keys(self): print("union keys: %s"%list(self.keys())) for key in ['inherits', 'parent', 'abstract', 'namespace', 'typedefs', 'methods']: del self[key] def show(self): """Convert class to a string""" print(self) def __str__(self): """Convert class to a string""" namespace_prefix = "" if self["namespace"]: namespace_prefix = self["namespace"] + "::" rtn = "%s %s"%(self["declaration_method"], namespace_prefix + self["name"]) if self['abstract']: rtn += ' (abstract)\n' else: rtn += '\n' if 'doxygen' in list(self.keys()): rtn += self["doxygen"] + '\n' if 'parent' in list(self.keys()) and self['parent']: rtn += 'parent class: ' + self['parent'] + '\n' rtn += "{\n" for member in self["members"]: rtn += " %s\n"%(repr(member)) rtn += "}\n" return rtn class _CppMethod( dict ): def _params_helper1( self, stack ): # deal with "throw" keyword if 'throw' in stack: stack = stack[ : stack.index('throw') ] ## remove GCC keyword __attribute__(...) 
and preserve returns ## cleaned = [] hit = False; hitOpen = 0; hitClose = 0 for a in stack: if a == '__attribute__': hit = True if hit: if a == '(': hitOpen += 1 elif a == ')': hitClose += 1 if a==')' and hitOpen == hitClose: hit = False else: cleaned.append( a ) stack = cleaned # also deal with attribute((const)) function prefix # # TODO this needs to be better # if len(stack) > 5: a = ''.join(stack) if a.startswith('((__const__))'): stack = stack[ 5 : ] elif a.startswith('__attribute__((__const__))'): stack = stack[ 6 : ] stack = stack[stack.index('(') + 1: ] if not stack: return [] if len(stack)>=3 and stack[0]==')' and stack[1]==':': # is this always a constructor? self['constructor'] = True return [] stack.reverse(); _end_ = stack.index(')'); stack.reverse() stack = stack[ : len(stack)-(_end_+1) ] if '(' not in stack: return stack # safe to return, no defaults that init a class # transforms ['someclass', '(', '0', '0', '0', ')'] into "someclass(0,0,0)'" r = []; hit=False for a in stack: if a == '(': hit=True elif a == ')': hit=False if hit or a == ')': r[-1] = r[-1] + a else: r.append( a ) return r def _params_helper2( self, params ): for p in params: p['method'] = self # save reference in variable to parent method if '::' in p['type']: ns = p['type'].split('::')[0] if ns not in Resolver.NAMESPACES and ns in Resolver.CLASSES: p['type'] = self['namespace'] + p['type'] else: p['namespace'] = self[ 'namespace' ] class CppMethod( _CppMethod ): """Takes a name stack and turns it into a method Contains the following Keys: self['rtnType'] - Return type of the method (ex. "int") self['name'] - Name of the method (ex. "getSize") self['doxygen'] - Doxygen comments associated with the method if they exist self['parameters'] - List of CppVariables """ def show(self): r = ['method name: %s (%s)' %(self['name'],self['debug']) ] if self['returns']: r.append( 'returns: %s'%self['returns'] ) if self['parameters']: r.append( 'number arguments: %s' %len(self['parameters'])) if self['pure_virtual']: r.append( 'pure virtual: %s'%self['pure_virtual'] ) if self['constructor']: r.append( 'constructor' ) if self['destructor']: r.append( 'destructor' ) return '\n\t\t '.join( r ) def __init__(self, nameStack, curClass, methinfo, curTemplate): debug_print( "Method: %s"%nameStack ) debug_print( "Template: %s"%curTemplate ) global doxygenCommentCache if len(doxygenCommentCache): self["doxygen"] = doxygenCommentCache doxygenCommentCache = "" if "operator" in nameStack: self["rtnType"] = " ".join(nameStack[:nameStack.index('operator')]) self["name"] = "".join(nameStack[nameStack.index('operator'):nameStack.index('(')]) else: self["rtnType"] = " ".join(nameStack[:nameStack.index('(') - 1]) self["name"] = " ".join(nameStack[nameStack.index('(') - 1:nameStack.index('(')]) if self["rtnType"].startswith("virtual"): self["rtnType"] = self["rtnType"][len("virtual"):].strip() if len(self["rtnType"]) == 0 or self["name"] == curClass: self["rtnType"] = "void" self["rtnType"] = self["rtnType"].replace(' : : ', '::' ) self["rtnType"] = self["rtnType"].replace(" <","<") self["rtnType"] = self["rtnType"].replace(" >",">").replace(">>", "> >").replace(">>", "> >") self["rtnType"] = self["rtnType"].replace(" ,",",") for spec in ["const", "final", "override"]: self[spec] = False for i in reversed(nameStack): if i == spec: self[spec] = True break elif i == ")": break self.update( methinfo ) self["line_number"] = detect_lineno(nameStack[0]) #Filter out initializer lists used in constructors try: paren_depth_counter = 0 for i in 
range(0, len(nameStack)):
                elm = nameStack[i]
                if elm == "(":
                    paren_depth_counter += 1
                if elm == ")":
                    paren_depth_counter -= 1
                    if paren_depth_counter == 0 and nameStack[i+1] == ':':
                        debug_print("Stripping out initializer list")
                        nameStack = nameStack[:i+1]
                        break
        except:
            pass

        paramsStack = self._params_helper1( nameStack )

        debug_print( "curTemplate: %s"%curTemplate)
        if curTemplate:
            self["template"] = curTemplate
            debug_print( "SET self['template'] to `%s`"%self["template"])

        params = []
        #See if there is a doxygen comment for the variable
        doxyVarDesc = {}

        if "doxygen" in self:
            doxyLines = self["doxygen"].split("\n")
            lastParamDesc = ""
            for doxyLine in doxyLines:
                if " @param " in doxyLine or " \param " in doxyLine:
                    try:
                        #Strip out the param
                        doxyLine = doxyLine[doxyLine.find("param ") + 6:]
                        (var, desc) = doxyLine.split(" ", 1)
                        doxyVarDesc[var] = desc.strip()
                        lastParamDesc = var
                    except: pass
                elif " @return " in doxyLine or " \return " in doxyLine:
                    lastParamDesc = ""
                    # not handled for now
                elif lastParamDesc:
                    try:
                        doxyLine = doxyLine.strip()
                        if " " not in doxyLine:
                            lastParamDesc = ""
                            continue
                        doxyLine = doxyLine[doxyLine.find(" ") + 1:]
                        doxyVarDesc[lastParamDesc] += " " + doxyLine
                    except: pass

        #Create the variable now
        while (len(paramsStack)):
            # Find commas that are not nested in <>'s like template types
            open_template_count = 0
            param_separator = 0
            i = 0
            for elm in paramsStack:
                if '<' in elm:
                    open_template_count += 1
                elif '>' in elm:
                    open_template_count -= 1
                elif elm == ',' and open_template_count == 0:
                    param_separator = i
                    break
                i += 1

            if param_separator:
                param = CppVariable(paramsStack[0:param_separator], doxyVarDesc=doxyVarDesc)
                if len(list(param.keys())):
                    params.append(param)
                paramsStack = paramsStack[param_separator + 1:]
            else:
                param = CppVariable(paramsStack, doxyVarDesc=doxyVarDesc)
                if len(list(param.keys())):
                    params.append(param)
                break

        self["parameters"] = params
        #self._params_helper2( params )    # mods params inplace

    def __str__(self):
        filter_keys = ("parent", "defined", "operator", "returns_reference")
        cpy = dict((k,v) for (k,v) in list(self.items()) if k not in filter_keys)
        return "%s"%cpy


class _CppVariable(dict):

    def _name_stack_helper( self, stack ):
        stack = list(stack)
        if '=' not in stack:        # TODO refactor me
            # check for array[n] and deal with funny array syntax: "int myvar:99"
            array = []
            while stack and stack[-1].isdigit():
                array.append( stack.pop() )
            if array:
                array.reverse()
                self['array'] = int(''.join(array))
            if stack and stack[-1].endswith(':'):
                stack[-1] = stack[-1][:-1]

        while stack and not stack[-1]:
            stack.pop()      # can be empty
        return stack

    def init(self):
        #assert self['name']    # allow unnamed variables, methods like this: "void func(void);"
        a = []
        self['aliases'] = []; self['parent'] = None; self['typedef'] = None
        for key in 'constant reference pointer static typedefs class fundamental unresolved'.split():
            self[ key ] = 0
        for b in self['type'].split():
            if b == '__const__': b = 'const'
            a.append( b )
        self['type'] = ' '.join( a )


class CppVariable( _CppVariable ):
    """Takes a name stack and turns it into a variable

    Contains the following Keys:
    self['type'] - Type of the variable (ex. "const string &")
    self['name'] - Name of the variable (ex. "numItems")
"numItems") self['namespace'] - Namespace containing the enum self['desc'] - Description of the variable if part of a method (optional) self['doxygen'] - Doxygen comments associated with the method if they exist self['defaultValue'] - Default value of the variable, this key will only exist if there is a default value self['extern'] - True if its an extern, false if not """ Vars = [] def __init__(self, nameStack, **kwargs): debug_print("trace %s"%nameStack) if len(nameStack) and nameStack[0] == "extern": self['extern'] = True del nameStack[0] else: self['extern'] = False _stack_ = nameStack if "[" in nameStack: #strip off array informatin arrayStack = nameStack[nameStack.index("["):] if nameStack.count("[") > 1: debug_print("Multi dimensional array") debug_print("arrayStack=%s"%arrayStack) nums = filter(lambda x: x.isdigit(), arrayStack) # Calculate size by multiplying all dimensions p = 1 for n in nums: p *= int(n) #Multi dimensional array self["array_size"] = p self["multi_dimensional_array"] = 1 self["multi_dimensional_array_size"] = "x".join(nums) else: debug_print("Array") if len(arrayStack) == 3: self["array_size"] = arrayStack[1] nameStack = nameStack[:nameStack.index("[")] self["array"] = 1 else: self["array"] = 0 nameStack = self._name_stack_helper( nameStack ) global doxygenCommentCache if len(doxygenCommentCache): self["doxygen"] = doxygenCommentCache doxygenCommentCache = "" debug_print( "Variable: %s"%nameStack ) self["line_number"] = detect_lineno(nameStack[0]) self["function_pointer"] = 0 if (len(nameStack) < 2): # +++ if len(nameStack) == 1: self['type'] = nameStack[0]; self['name'] = '' else: error_print(_stack_); assert 0 elif is_function_pointer_stack(nameStack): #function pointer self["type"] = " ".join(nameStack[:nameStack.index("(") + 2] + nameStack[nameStack.index(")") :]) self["name"] = " ".join(nameStack[nameStack.index("(") + 2 : nameStack.index(")")]) self["function_pointer"] = 1 elif ("=" in nameStack): self["type"] = " ".join(nameStack[:nameStack.index("=") - 1]) self["name"] = nameStack[nameStack.index("=") - 1] self["defaultValue"] = " ".join(nameStack[nameStack.index("=") + 1:]) # deprecate camelCase in dicts self['default'] = " ".join(nameStack[nameStack.index("=") + 1:]) elif is_fundamental(nameStack[-1]) or nameStack[-1] in ['>', '<' , ':', '.']: #Un named parameter self["type"] = " ".join(nameStack) self["name"] = "" else: # common case self["type"] = " ".join(nameStack[:-1]) self["name"] = nameStack[-1] self["type"] = self["type"].replace(" :",":") self["type"] = self["type"].replace(": ",":") self["type"] = self["type"].replace(" <","<") self["type"] = self["type"].replace(" >",">").replace(">>", "> >").replace(">>", "> >") self["type"] = self["type"].replace(" ,",",") #Optional doxygen description try: self["desc"] = kwargs["doxyVarDesc"][self["name"]] except: pass self.init() CppVariable.Vars.append( self ) # save and resolve later def __str__(self): keys_white_list = ['constant','name','reference','type','static','pointer','desc', 'line_number', 'extern'] cpy = dict((k,v) for (k,v) in list(self.items()) if k in keys_white_list) if "array_size" in self: cpy["array_size"] = self["array_size"] return "%s"%cpy class _CppEnum(dict): def resolve_enum_values( self, values ): """Evaluates the values list of dictionaries passed in and figures out what the enum value for each enum is editing in place: Example: From: [{'name': 'ORANGE'}, {'name': 'RED'}, {'name': 'GREEN', 'value': '8'}] To: [{'name': 'ORANGE', 'value': 0}, {'name': 'RED', 'value': 1}, {'name': 
'GREEN', 'value': 8}] """ t = int; i = 0 names = [ v['name'] for v in values ] for v in values: if 'value' in v: a = v['value'].strip() # Remove single quotes from single quoted chars (unless part of some expression if len(a) == 3 and a[0] == "'" and a[2] == "'": a = v['value'] = a[1] if a.lower().startswith("0x"): try: i = a = int(a , 16) except:pass elif a.isdigit(): i = a = int( a ) elif a in names: for other in values: if other['name'] == a: v['value'] = other['value'] break elif '"' in a or "'" in a: t = str # only if there are quotes it this a string enum else: try: a = i = ord(a) except: pass #Allow access of what is in the file pre-convert if converted if v['value'] != str(a): v['raw_value'] = v['value'] v['value'] = a else: v['value'] = i try: v['value'] = v['value'].replace(" < < ", " << ").replace(" >> ", " >> ") except: pass i += 1 return t class CppEnum(_CppEnum): """Takes a name stack and turns it into an Enum Contains the following Keys: self['name'] - Name of the enum (ex. "ItemState") self['namespace'] - Namespace containing the enum self['values'] - List of values where the values are a dictionary of the form {"name": name of the key (ex. "PARSING_HEADER"), "value": Specified value of the enum, this key will only exist if a value for a given enum value was defined } """ def __init__(self, nameStack): global doxygenCommentCache if len(doxygenCommentCache): self["doxygen"] = doxygenCommentCache doxygenCommentCache = "" if len(nameStack) == 3 and nameStack[0] == "enum": debug_print("Created enum as just name/value") self["name"] = nameStack[1] self["instances"]=[nameStack[2]] if len(nameStack) < 4 or "{" not in nameStack or "}" not in nameStack: #Not enough stuff for an enum debug_print("Bad enum") return valueList = [] self["line_number"] = detect_lineno(nameStack[0]) #Figure out what values it has valueStack = nameStack[nameStack.index('{') + 1: nameStack.index('}')] while len(valueStack): tmpStack = [] if "," in valueStack: tmpStack = valueStack[:valueStack.index(",")] valueStack = valueStack[valueStack.index(",") + 1:] else: tmpStack = valueStack valueStack = [] d = {} if len(tmpStack) == 1: d["name"] = tmpStack[0] elif len(tmpStack) >= 3 and tmpStack[1] == "=": d["name"] = tmpStack[0]; d["value"] = " ".join(tmpStack[2:]) elif len(tmpStack) == 2 and tmpStack[1] == "=": debug_print( "WARN-enum: parser missed value for %s"%tmpStack[0] ) d["name"] = tmpStack[0] if d: valueList.append( d ) if len(valueList): self['type'] = self.resolve_enum_values( valueList ) # returns int for standard enum self["values"] = valueList else: warning_print( 'WARN-enum: empty enum %s'%nameStack ) return #Figure out if it has a name preBraceStack = nameStack[:nameStack.index("{")] postBraceStack = nameStack[nameStack.index("}") + 1:] self["typedef"] = False if (len(preBraceStack) == 2 and "typedef" not in nameStack): self["name"] = preBraceStack[1] elif len(postBraceStack) and "typedef" in nameStack: self["name"] = " ".join(postBraceStack) self["typedef"] = True else: warning_print( 'WARN-enum: nameless enum %s'%nameStack ) #See if there are instances of this if "typedef" not in nameStack and len(postBraceStack): self["instances"] = [] for var in postBraceStack: if "," in var: continue self["instances"].append(var) self["namespace"] = "" class CppStruct(dict): Structs = [] def __init__(self, nameStack): if len(nameStack) >= 2: self['type'] = nameStack[1] else: self['type'] = None self['fields'] = [] self.Structs.append( self ) global curLine self["line_number"] = curLine C99_NONSTANDARD = { 
'int8' : 'signed char', 'int16' : 'short int', 'int32' : 'int', 'int64' : 'int64_t', # this can be: long int (64bit), or long long int (32bit) 'uint' : 'unsigned int', 'uint8' : 'unsigned char', 'uint16' : 'unsigned short int', 'uint32' : 'unsigned int', 'uint64' : 'uint64_t', # depends on host bits } def standardize_fundamental( s ): if s in C99_NONSTANDARD: return C99_NONSTANDARD[ s ] else: return s class Resolver(object): C_FUNDAMENTAL = 'size_t unsigned signed bool char wchar short int float double long void'.split() C_FUNDAMENTAL += 'struct union enum'.split() SubTypedefs = {} # TODO deprecate? NAMESPACES = [] CLASSES = {} STRUCTS = {} def initextra(self): self.typedefs = {} self.typedefs_order = [] self.classes_order = [] self.structs = Resolver.STRUCTS self.structs_order = [] self.namespaces = Resolver.NAMESPACES # save all namespaces self.curStruct = None self.stack = [] # full name stack, good idea to keep both stacks? (simple stack and full stack) self._classes_brace_level = {} # class name : level self._structs_brace_level = {} # struct type : level self._method_body = None self._forward_decls = [] self._template_typenames = [] # template<typename XXX> def current_namespace(self): return self.cur_namespace(True) def cur_namespace(self, add_double_colon=False): rtn = "" i = 0 while i < len(self.nameSpaces): rtn += self.nameSpaces[i] if add_double_colon or i < len(self.nameSpaces) - 1: rtn += "::" i+=1 return rtn def guess_ctypes_type( self, string ): pointers = string.count('*') string = string.replace('*','') a = string.split() if 'unsigned' in a: u = 'u' else: u = '' if 'long' in a and 'double' in a: b = 'longdouble' # there is no ctypes.c_ulongdouble (this is a 64bit float?) elif a.count('long') == 2 and 'int' in a: b = '%sint64' %u elif a.count('long') == 2: b = '%slonglong' %u elif 'long' in a: b = '%slong' %u elif 'double' in a: b = 'double' # no udouble in ctypes elif 'short' in a: b = '%sshort' %u elif 'char' in a: b = '%schar' %u elif 'wchar' in a: b = 'wchar' elif 'bool' in a: b = 'bool' elif 'float' in a: b = 'float' elif 'int' in a: b = '%sint' %u elif 'int8' in a: b = 'int8' elif 'int16' in a: b = 'int16' elif 'int32' in a: b = 'int32' elif 'int64' in a: b = 'int64' elif 'uint' in a: b = 'uint' elif 'uint8' in a: b = 'uint8' elif 'uint16' in a: b = 'uint16' elif 'uint32' in a: b = 'uint32' elif 'uint64' in a: b = 'uint64' elif 'size_t' in a: b = 'size_t' elif 'void' in a: b = 'void_p' elif string in 'struct union'.split(): b = 'void_p' # what should be done here? don't trust struct, it could be a class, no need to expose via ctypes else: b = 'void_p' if not pointers: return 'ctypes.c_%s' %b else: x = '' for i in range(pointers): x += 'ctypes.POINTER(' x += 'ctypes.c_%s' %b x += ')' * pointers return x def resolve_type( self, string, result ): # recursive ''' keeps track of useful things like: how many pointers, number of typedefs, is fundamental or a class, etc... 
''' ## be careful with templates, what is inside <something*> can be a pointer but the overall type is not a pointer ## these come before a template s = string.split('<')[0] result[ 'constant' ] += s.split().count('const') result[ 'static' ] += s.split().count('static') result[ 'mutable' ] = 'mutable' in s.split() ## these come after a template s = string.split('>')[-1] result[ 'pointer' ] += s.count('*') result[ 'reference' ] += s.count('&') x = string; alias = False for a in '* & const static mutable'.split(): x = x.replace(a,'') for y in x.split(): if y not in self.C_FUNDAMENTAL: alias = y; break #if alias == 'class': # result['class'] = result['name'] # forward decl of class # result['forward_decl'] = True if alias == '__extension__': result['fundamental_extension'] = True elif alias: result['aliases'].append( alias ) if alias in C99_NONSTANDARD: result['type'] = C99_NONSTANDARD[ alias ] result['typedef'] = alias result['typedefs'] += 1 elif alias in self.typedefs: result['typedefs'] += 1 result['typedef'] = alias self.resolve_type( self.typedefs[alias], result ) elif alias in self.classes: klass = self.classes[alias]; result['fundamental'] = False result['class'] = klass result['unresolved'] = False else: result['unresolved'] = True else: result['fundamental'] = True result['unresolved'] = False def finalize_vars(self): for s in CppStruct.Structs: # vars within structs can be ignored if they do not resolve for var in s['fields']: var['parent'] = s['type'] #for c in self.classes.values(): # for var in c.get_all_properties(): var['parent'] = c['name'] ## RESOLVE ## for var in CppVariable.Vars: self.resolve_type( var['type'], var ) #if 'method' in var and var['method']['name'] == '_notifyCurrentCamera': print(var); assert 0 # then find concrete type and best guess ctypes type # for var in CppVariable.Vars: if not var['aliases']: #var['fundamental']: var['ctypes_type'] = self.guess_ctypes_type( var['type'] ) else: var['unresolved'] = False # below may test to True if var['class']: var['ctypes_type'] = 'ctypes.c_void_p' else: assert var['aliases'] tag = var['aliases'][0] klass = None nestedEnum = None nestedStruct = None nestedTypedef = None if 'method' in var and 'parent' in list(var['method'].keys()): klass = var['method']['parent'] if tag in var['method']['parent']._public_enums: nestedEnum = var['method']['parent']._public_enums[ tag ] elif tag in var['method']['parent']._public_structs: nestedStruct = var['method']['parent']._public_structs[ tag ] elif tag in var['method']['parent']._public_typedefs: nestedTypedef = var['method']['parent']._public_typedefs[ tag ] if '<' in tag: # should also contain '>' var['template'] = tag # do not resolve templates var['ctypes_type'] = 'ctypes.c_void_p' var['unresolved'] = True elif nestedEnum: enum = nestedEnum if enum['type'] is int: var['ctypes_type'] = 'ctypes.c_int' var['raw_type'] = 'int' elif enum['type'] is str: var['ctypes_type'] = 'ctypes.c_char_p' var['raw_type'] = 'char*' var['enum'] = var['method']['path'] + '::' + enum['name'] var['fundamental'] = True elif nestedStruct: var['ctypes_type'] = 'ctypes.c_void_p' var['raw_type'] = var['method']['path'] + '::' + nestedStruct['type'] var['fundamental'] = False elif nestedTypedef: var['fundamental'] = is_fundamental( nestedTypedef ) if not var['fundamental']: var['raw_type'] = var['method']['path'] + '::' + tag else: _tag = tag if '::' in tag and tag.split('::')[0] in self.namespaces: tag = tag.split('::')[-1] con = self.concrete_typedef( _tag ) if con: var['concrete_type'] = con 
var['ctypes_type'] = self.guess_ctypes_type( var['concrete_type'] ) elif tag in self.structs: trace_print( 'STRUCT', var ) var['struct'] = tag var['ctypes_type'] = 'ctypes.c_void_p' var['raw_type'] = self.structs[tag]['namespace'] + '::' + tag elif tag in self._forward_decls: var['forward_declared'] = tag var['ctypes_type'] = 'ctypes.c_void_p' elif tag in self.global_enums: enum = self.global_enums[ tag ] if enum['type'] is int: var['ctypes_type'] = 'ctypes.c_int' var['raw_type'] = 'int' elif enum['type'] is str: var['ctypes_type'] = 'ctypes.c_char_p' var['raw_type'] = 'char*' var['enum'] = enum['namespace'] + enum['name'] var['fundamental'] = True elif var['parent']: warning_print( 'WARN unresolved %s'%_tag) var['ctypes_type'] = 'ctypes.c_void_p' var['unresolved'] = True elif tag.count('::')==1: trace_print( 'trying to find nested something in', tag ) a = tag.split('::')[0] b = tag.split('::')[-1] if a in self.classes: # a::b is most likely something nested in a class klass = self.classes[ a ] if b in klass._public_enums: trace_print( '...found nested enum', b ) enum = klass._public_enums[ b ] if enum['type'] is int: var['ctypes_type'] = 'ctypes.c_int' var['raw_type'] = 'int' elif enum['type'] is str: var['ctypes_type'] = 'ctypes.c_char_p' var['raw_type'] = 'char*' try: if 'method' in var: var['enum'] = var['method']['path'] + '::' + enum['name'] else: # class property var['unresolved'] = True except: var['unresolved'] = True var['fundamental'] = True else: var['unresolved'] = True # TODO klass._public_xxx elif a in self.namespaces: # a::b can also be a nested namespace if b in self.global_enums: enum = self.global_enums[ b ] trace_print(enum) trace_print(var) assert 0 elif b in self.global_enums: # falling back, this is a big ugly enum = self.global_enums[ b ] assert a in enum['namespace'].split('::') if enum['type'] is int: var['ctypes_type'] = 'ctypes.c_int' var['raw_type'] = 'int' elif enum['type'] is str: var['ctypes_type'] = 'ctypes.c_char_p' var['raw_type'] = 'char*' var['fundamental'] = True else: # boost::gets::crazy trace_print('NAMESPACES', self.namespaces) trace_print( a, b ) trace_print( '---- boost gets crazy ----' ) var['ctypes_type'] = 'ctypes.c_void_p' var['unresolved'] = True elif 'namespace' in var and self.concrete_typedef(var['namespace']+tag): #print( 'TRYING WITH NS', var['namespace'] ) con = self.concrete_typedef( var['namespace']+tag ) if con: var['typedef'] = var['namespace']+tag var['type'] = con if 'struct' in con.split(): var['raw_type'] = var['typedef'] var['ctypes_type'] = 'ctypes.c_void_p' else: self.resolve_type( var['type'], var ) var['ctypes_type'] = self.guess_ctypes_type( var['type'] ) elif '::' in var: var['ctypes_type'] = 'ctypes.c_void_p' var['unresolved'] = True elif tag in self.SubTypedefs: # TODO remove SubTypedefs if 'property_of_class' in var or 'property_of_struct' in var: trace_print( 'class:', self.SubTypedefs[ tag ], 'tag:', tag ) var['typedef'] = self.SubTypedefs[ tag ] # class name var['ctypes_type'] = 'ctypes.c_void_p' else: trace_print( "WARN-this should almost never happen!" ) trace_print( var ); trace_print('-'*80) var['unresolved'] = True elif tag in self._template_typenames: var['typename'] = tag var['ctypes_type'] = 'ctypes.c_void_p' var['unresolved'] = True # TODO, how to deal with templates? 
elif tag.startswith('_'): # assume starting with underscore is not important for wrapping warning_print( 'WARN unresolved %s'%_tag) var['ctypes_type'] = 'ctypes.c_void_p' var['unresolved'] = True else: trace_print( 'WARN: unknown type', var ) assert 'property_of_class' in var or 'property_of_struct' # only allow this case var['unresolved'] = True ## if not resolved and is a method param, not going to wrap these methods ## if var['unresolved'] and 'method' in var: var['method']['unresolved_parameters'] = True # create stripped raw_type # p = '* & const static mutable'.split() # +++ new July7: "mutable" for var in CppVariable.Vars: if 'raw_type' not in var: raw = [] for x in var['type'].split(): if x not in p: raw.append( x ) var['raw_type'] = ' '.join( raw ) #if 'AutoConstantEntry' in var['raw_type']: print(var); assert 0 if var['class']: if '::' not in var['raw_type']: if not var['class']['parent']: var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type'] elif var['class']['parent'] in self.classes: parent = self.classes[ var['class']['parent'] ] var['raw_type'] = parent['namespace'] + '::' + var['class']['name'] + '::' + var['raw_type'] else: var['unresolved'] = True elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] not in self.namespaces: var['raw_type'] = var['class']['namespace'] + '::' + var['raw_type'] else: var['unresolved'] = True elif 'forward_declared' in var and 'namespace' in var: if '::' not in var['raw_type']: var['raw_type'] = var['namespace'] + var['raw_type'] elif '::' in var['raw_type'] and var['raw_type'].split('::')[0] in self.namespaces: pass else: trace_print('-'*80); trace_print(var); raise NotImplemented ## need full name space for classes in raw type ## if var['raw_type'].startswith( '::' ): #print(var) #print('NAMESPACE', var['class']['namespace']) #print( 'PARENT NS', var['class']['parent']['namespace'] ) #assert 0 var['unresolved'] = True if 'method' in var: var['method']['unresolved_parameters'] = True #var['raw_type'] = var['raw_type'][2:] # Take care of #defines and #pragmas etc trace_print("Processing precomp_macro_buf: %s"%self._precomp_macro_buf) for m in self._precomp_macro_buf: macro = m.replace("<CppHeaderParser_newline_temp_replacement>\\n", "\n") try: if macro.lower().startswith("#define"): trace_print("Adding #define %s"%macro) self.defines.append(macro.split(" ", 1)[1].strip()) elif macro.lower().startswith("#if") or macro.lower().startswith("#endif") or macro.lower().startswith("#else"): self.conditionals.append(macro) elif macro.lower().startswith("#pragma"): trace_print("Adding #pragma %s"%macro) self.pragmas.append(macro.split(" ", 1)[1].strip()) elif macro.lower().startswith("#include"): trace_print("Adding #include %s"%macro) self.includes.append(macro.split(" ", 1)[1].strip()) else: debug_print("Cant detect what to do with precomp macro '%s'"%macro) except: pass self._precomp_macro_buf = None def concrete_typedef( self, key ): if key not in self.typedefs: #print( 'FAILED typedef', key ) return None while key in self.typedefs: prev = key key = self.typedefs[ key ] if '<' in key or '>' in key: return prev # stop at template if key.startswith('std::'): return key # stop at std lib return key class _CppHeader( Resolver ): def finalize(self): self.finalize_vars() # finalize classes and method returns types for cls in list(self.classes.values()): for meth in cls.get_all_methods(): if meth['pure_virtual']: cls['abstract'] = True if not meth['returns_fundamental'] and meth['returns'] in C99_NONSTANDARD: meth['returns'] = 
C99_NONSTANDARD[meth['returns']] meth['returns_fundamental'] = True elif not meth['returns_fundamental']: # describe the return type con = None if cls['namespace'] and '::' not in meth['returns']: con = self.concrete_typedef( cls['namespace'] + '::' + meth['returns'] ) else: con = self.concrete_typedef( meth['returns'] ) if con: meth['returns_concrete'] = con meth['returns_fundamental'] = is_fundamental( con ) elif meth['returns'] in self.classes: trace_print( 'meth returns class:', meth['returns'] ) meth['returns_class'] = True elif meth['returns'] in self.SubTypedefs: meth['returns_class'] = True meth['returns_nested'] = self.SubTypedefs[ meth['returns'] ] elif meth['returns'] in cls._public_enums: enum = cls._public_enums[ meth['returns'] ] meth['returns_enum'] = enum['type'] meth['returns_fundamental'] = True if enum['type'] == int: meth['returns'] = 'int' else: meth['returns'] = 'char*' elif meth['returns'] in self.global_enums: enum = self.global_enums[ meth['returns'] ] meth['returns_enum'] = enum['type'] meth['returns_fundamental'] = True if enum['type'] == int: meth['returns'] = 'int' else: meth['returns'] = 'char*' elif meth['returns'].count('::')==1: trace_print( meth ) a,b = meth['returns'].split('::') if a in self.namespaces: if b in self.classes: klass = self.classes[ b ] meth['returns_class'] = a + '::' + b elif '<' in b and '>' in b: warning_print( 'WARN-can not return template: %s'%b ) meth['returns_unknown'] = True elif b in self.global_enums: enum = self.global_enums[ b ] meth['returns_enum'] = enum['type'] meth['returns_fundamental'] = True if enum['type'] == int: meth['returns'] = 'int' else: meth['returns'] = 'char*' else: trace_print( a, b); trace_print( meth); meth['returns_unknown'] = True # +++ elif a in self.classes: klass = self.classes[ a ] if b in klass._public_enums: trace_print( '...found nested enum', b ) enum = klass._public_enums[ b ] meth['returns_enum'] = enum['type'] meth['returns_fundamental'] = True if enum['type'] == int: meth['returns'] = 'int' else: meth['returns'] = 'char*' elif b in klass._public_forward_declares: meth['returns_class'] = True elif b in klass._public_typedefs: typedef = klass._public_typedefs[ b ] meth['returns_fundamental'] = is_fundamental( typedef ) else: trace_print( meth ) # should be a nested class, TODO fix me. 
meth['returns_unknown'] = True elif '::' in meth['returns']: trace_print('TODO namespace or extra nested return:', meth) meth['returns_unknown'] = True else: trace_print( 'WARN: UNKNOWN RETURN', meth['name'], meth['returns']) meth['returns_unknown'] = True if meth["returns"].startswith(": : "): meth["returns"] = meth["returns"].replace(": : ", "::") for cls in list(self.classes.values()): methnames = cls.get_all_method_names() pvm = cls.get_all_pure_virtual_methods() for d in cls['inherits']: c = d['class'] a = d['access'] # do not depend on this to be 'public' trace_print( 'PARENT CLASS:', c ) if c not in self.classes: trace_print('WARN: parent class not found') if c in self.classes and self.classes[c]['abstract']: p = self.classes[ c ] for meth in p.get_all_methods(): #p["methods"]["public"]: trace_print( '\t\tmeth', meth['name'], 'pure virtual', meth['pure_virtual'] ) if meth['pure_virtual'] and meth['name'] not in methnames: cls['abstract'] = True; break def evaluate_struct_stack(self): """Create a Struct out of the name stack (but not its parts)""" #print( 'eval struct stack', self.nameStack ) #if self.braceDepth != len(self.nameSpaces): return struct = CppStruct(self.nameStack) struct["namespace"] = self.cur_namespace() self.structs[ struct['type'] ] = struct self.structs_order.append( struct ) if self.curClass: struct['parent'] = self.curClass klass = self.classes[ self.curClass ] klass['structs'][self.curAccessSpecifier].append( struct ) if self.curAccessSpecifier == 'public': klass._public_structs[ struct['type'] ] = struct self.curStruct = struct self._structs_brace_level[ struct['type'] ] = self.braceDepth def parse_method_type( self, stack ): trace_print( 'meth type info', stack ) if stack[0] in ':;' and stack[1] != ':': stack = stack[1:] info = { 'debug': ' '.join(stack).replace(' : : ', '::' ).replace(' < ', '<' ).replace(' > ', '> ' ).replace(" >",">").replace(">>", "> >").replace(">>", "> >"), 'class':None, 'namespace':self.cur_namespace(add_double_colon=True), } for tag in 'defined pure_virtual operator constructor destructor extern template virtual static explicit inline friend returns returns_pointer returns_fundamental returns_class'.split(): info[tag]=False header = stack[ : stack.index('(') ] header = ' '.join( header ) header = header.replace(' : : ', '::' ) header = header.replace(' < ', '<' ) header = header.replace(' > ', '> ' ) header = header.strip() if '{' in stack: info['defined'] = True self._method_body = self.braceDepth + 1 trace_print( 'NEW METHOD WITH BODY', self.braceDepth ) elif stack[-1] == ';': info['defined'] = False self._method_body = None # not a great idea to be clearing here else: assert 0 if len(stack) > 3 and stack[-1] == ';' and stack[-2] == '0' and stack[-3] == '=': info['pure_virtual'] = True r = header.split() name = None if 'operator' in stack: # rare case op overload defined outside of class op = stack[ stack.index('operator')+1 : stack.index('(') ] op = ''.join(op) if not op: if " ".join(['operator', '(', ')', '(']) in " ".join(stack): op = "()" else: trace_print( 'Error parsing operator') return None info['operator'] = op name = 'operator' + op a = stack[ : stack.index('operator') ] elif r: name = r[-1] a = r[ : -1 ] # strip name if name is None: return None #if name.startswith('~'): name = name[1:] while a and a[0] == '}': # strip - can have multiple } } a = a[1:] if '::' in name: #klass,name = name.split('::') # methods can be defined outside of class klass = name[ : name.rindex('::') ] name = name.split('::')[-1] info['class'] = 
klass if klass in self.classes and not self.curClass: #Class function defined outside the class return None # info['name'] = name #else: info['name'] = name if name.startswith('~'): info['destructor'] = True name = name[1:] elif not a or (name == self.curClass and len(self.curClass)): info['constructor'] = True info['name'] = name for tag in 'extern virtual static explicit inline friend'.split(): if tag in a: info[ tag ] = True; a.remove( tag ) # inplace if 'template' in a: a.remove('template') b = ' '.join( a ) if '>' in b: info['template'] = b[ : b.index('>')+1 ] info['returns'] = b[ b.index('>')+1 : ] # find return type, could be incorrect... TODO if '<typename' in info['template'].split(): typname = info['template'].split()[-1] typname = typname[ : -1 ] # strip '>' if typname not in self._template_typenames: self._template_typenames.append( typname ) else: info['returns'] = ' '.join( a ) else: info['returns'] = ' '.join( a ) info['returns'] = info['returns'].replace(' <', '<').strip() ## be careful with templates, do not count pointers inside template info['returns_pointer'] = info['returns'].split('>')[-1].count('*') if info['returns_pointer']: info['returns'] = info['returns'].replace('*','').strip() info['returns_reference'] = '&' in info['returns'] if info['returns']: info['returns'] = info['returns'].replace('&','').strip() a = [] for b in info['returns'].split(): if b == '__const__': info['returns_const'] = True elif b == 'const': info['returns_const'] = True else: a.append( b ) info['returns'] = ' '.join( a ) info['returns_fundamental'] = is_fundamental( info['returns'] ) return info def evaluate_method_stack(self): """Create a method out of the name stack""" if self.curStruct: trace_print( 'WARN - struct contains methods - skipping' ) trace_print( self.stack ) assert 0 info = self.parse_method_type( self.stack ) if info: if info[ 'class' ] and info['class'] in self.classes: # case where methods are defined outside of class newMethod = CppMethod(self.nameStack, info['name'], info, self.curTemplate) klass = self.classes[ info['class'] ] klass[ 'methods' ][ 'public' ].append( newMethod ) newMethod['parent'] = klass if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name'] else: newMethod['path'] = klass['name'] elif self.curClass: # normal case newMethod = CppMethod(self.nameStack, self.curClass, info, self.curTemplate) klass = self.classes[self.curClass] klass['methods'][self.curAccessSpecifier].append(newMethod) newMethod['parent'] = klass if klass['namespace']: newMethod['path'] = klass['namespace'] + '::' + klass['name'] else: newMethod['path'] = klass['name'] else: #non class functions debug_print("FREE FUNCTION") newMethod = CppMethod(self.nameStack, None, info, self.curTemplate) self.functions.append(newMethod) global parseHistory parseHistory.append({"braceDepth": self.braceDepth, "item_type": "method", "item": newMethod}) else: trace_print( 'free function?', self.nameStack ) self.stack = [] def _parse_typedef( self, stack, namespace='' ): if not stack or 'typedef' not in stack: return stack = list( stack ) # copy just to be safe if stack[-1] == ';': stack.pop() while stack and stack[-1].isdigit(): stack.pop() # throw away array size for now idx = stack.index('typedef') if stack[-1] == "]": try: name = namespace + "".join(stack[-4:]) # Strip off the array part so the rest of the parsing is better stack = stack[:-3] except: name = namespace + stack[-1] else: name = namespace + stack[-1] s = '' for a in stack[idx+1:-1]: if a == '{': break if not s 
or s[-1] in ':<>' or a in ':<>': s += a # keep compact else: s += ' ' + a # spacing r = {'name':name, 'raw':s, 'type':s} if not is_fundamental(s): if 'struct' in s.split(): pass # TODO is this right? "struct ns::something" elif '::' not in s: s = namespace + s # only add the current name space if no namespace given r['type'] = s if s: return r def evaluate_typedef(self): ns = self.cur_namespace(add_double_colon=True) res = self._parse_typedef( self.stack, ns ) if res: name = res['name'] self.typedefs[ name ] = res['type'] if name not in self.typedefs_order: self.typedefs_order.append( name ) def evaluate_property_stack(self): """Create a Property out of the name stack""" global parseHistory assert self.stack[-1] == ';' debug_print( "trace" ) if self.nameStack[0] == 'typedef': if self.curClass: typedef = self._parse_typedef( self.stack ) name = typedef['name'] klass = self.classes[ self.curClass ] klass[ 'typedefs' ][ self.curAccessSpecifier ].append( name ) if self.curAccessSpecifier == 'public': klass._public_typedefs[ name ] = typedef['type'] Resolver.SubTypedefs[ name ] = self.curClass else: assert 0 elif self.curStruct or self.curClass: if len(self.nameStack) == 1: #See if we can de anonymize the type filteredParseHistory = [h for h in parseHistory if h["braceDepth"] == self.braceDepth] if len(filteredParseHistory) and filteredParseHistory[-1]["item_type"] == "class": self.nameStack.insert(0, filteredParseHistory[-1]["item"]["name"]) debug_print("DEANONYMOIZING %s to type '%s'"%(self.nameStack[1], self.nameStack[0])) if "," in self.nameStack: #Maybe we have a variable list #Figure out what part is the variable separator but remember templates of function pointer #First find left most comma outside of a > and ) leftMostComma = 0; for i in range(0, len(self.nameStack)): name = self.nameStack[i] if name in (">", ")"): leftMostComma = 0 if leftMostComma == 0 and name == ",": leftMostComma = i # Is it really a list of variables? if leftMostComma != 0: trace_print("Multiple variables for namestack in %s. 
Separating processing"%self.nameStack) orig_nameStack = self.nameStack[:] orig_stack = self.stack[:] type_nameStack = orig_nameStack[:leftMostComma-1] for name in orig_nameStack[leftMostComma - 1::2]: self.nameStack = type_nameStack + [name] self.stack = orig_stack[:] # Not maintained for mucking, but this path it doesnt matter self.evaluate_property_stack() return newVar = CppVariable(self.nameStack) newVar['namespace'] = self.current_namespace() if self.curStruct: self.curStruct[ 'fields' ].append( newVar ) newVar['property_of_struct'] = self.curStruct elif self.curClass: klass = self.classes[self.curClass] klass["properties"][self.curAccessSpecifier].append(newVar) newVar['property_of_class'] = klass['name'] parseHistory.append({"braceDepth": self.braceDepth, "item_type": "variable", "item": newVar}) else: debug_print( "Found Global variable" ) newVar = CppVariable(self.nameStack) self.variables.append(newVar) self.stack = [] # CLEAR STACK def evaluate_class_stack(self): """Create a Class out of the name stack (but not its parts)""" #dont support sub classes today #print( 'eval class stack', self.nameStack ) parent = self.curClass if self.braceDepth > len( self.nameSpaces) and parent: trace_print( 'HIT NESTED SUBCLASS' ) self.accessSpecifierStack.append(self.curAccessSpecifier) elif self.braceDepth != len(self.nameSpaces): error_print( 'ERROR: WRONG BRACE DEPTH' ) return # When dealing with typedefed structs, get rid of typedef keyword to handle later on if self.nameStack[0] == "typedef": del self.nameStack[0] if len(self.nameStack) == 1: self.anon_struct_counter += 1 # We cant handle more than 1 anonymous struct, so name them uniquely self.nameStack.append("<anon-struct-%d>"%self.anon_struct_counter) if self.nameStack[0] == "class": self.curAccessSpecifier = 'private' else:#struct self.curAccessSpecifier = 'public' debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier) if self.nameStack[0] == "union": newClass = CppUnion(self.nameStack) self.anon_union_counter = [self.braceDepth, 2] trace_print( 'NEW UNION', newClass['name'] ) else: newClass = CppClass(self.nameStack, self.curTemplate) trace_print( 'NEW CLASS', newClass['name'] ) newClass["declaration_method"] = self.nameStack[0] self.classes_order.append( newClass ) # good idea to save ordering self.stack = [] # fixes if class declared with ';' in closing brace if parent: newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent newClass['parent'] = parent self.classes[ parent ]['nested_classes'].append( newClass ) ## supports nested classes with the same name ## self.curClass = key = parent+'::'+newClass['name'] self._classes_brace_level[ key ] = self.braceDepth elif newClass['parent']: # nested class defined outside of parent. 
A::B {...} parent = newClass['parent'] newClass["namespace"] = self.classes[ parent ]['namespace'] + '::' + parent self.classes[ parent ]['nested_classes'].append( newClass ) ## supports nested classes with the same name ## self.curClass = key = parent+'::'+newClass['name'] self._classes_brace_level[ key ] = self.braceDepth else: newClass["namespace"] = self.cur_namespace() key = newClass['name'] self.curClass = newClass["name"] self._classes_brace_level[ newClass['name'] ] = self.braceDepth if not key.endswith("::") and not key.endswith(" ") and len(key) != 0: if key in self.classes: trace_print( 'ERROR name collision:', key ) self.classes[key].show() trace_print('-'*80) newClass.show() assert key not in self.classes # namespace collision self.classes[ key ] = newClass global parseHistory parseHistory.append({"braceDepth": self.braceDepth, "item_type": "class", "item": newClass}) def evalute_forward_decl(self): trace_print( 'FORWARD DECL', self.nameStack ) assert self.nameStack[0] in ('class', 'struct') name = self.nameStack[-1] if self.curClass: klass = self.classes[ self.curClass ] klass['forward_declares'][self.curAccessSpecifier].append( name ) if self.curAccessSpecifier == 'public': klass._public_forward_declares.append( name ) else: self._forward_decls.append( name ) class CppHeader( _CppHeader ): """Parsed C++ class header Variables produced: self.classes - Dictionary of classes found in a given header file where the key is the name of the class """ IGNORE_NAMES = '__extension__'.split() def show(self): for className in list(self.classes.keys()):self.classes[className].show() def __init__(self, headerFileName, argType="file", **kwargs): """Create the parsed C++ header file parse tree headerFileName - Name of the file to parse OR actual file contents (depends on argType) argType - Indicates how to interpret headerFileName as a file string or file name kwargs - Supports the following keywords """ ## reset global state ## global doxygenCommentCache doxygenCommentCache = "" CppVariable.Vars = [] CppStruct.Structs = [] if (argType == "file"): self.headerFileName = os.path.expandvars(headerFileName) self.mainClass = os.path.split(self.headerFileName)[1][:-2] headerFileStr = "" elif argType == "string": self.headerFileName = "" self.mainClass = "???" headerFileStr = headerFileName else: raise Exception("Arg type must be either file or string") self.curClass = "" # nested classes have parent::nested, but no extra namespace, # this keeps the API compatible, TODO proper namespace for everything. 
Resolver.CLASSES = {} self.classes = Resolver.CLASSES #Functions that are not part of a class self.functions = [] self.pragmas = [] self.defines = [] self.includes = [] self.conditionals = [] self._precomp_macro_buf = [] #for internal purposes, will end up filling out pragmras and defines at the end self.enums = [] self.variables = [] self.global_enums = {} self.nameStack = [] self.nameSpaces = [] self.curAccessSpecifier = 'private' # private is default self.curTemplate = None self.accessSpecifierStack = [] self.accessSpecifierScratch = [] debug_print("curAccessSpecifier changed/defaulted to %s"%self.curAccessSpecifier) self.initextra() # Old namestacks for a given level self.nameStackHistory = [] self.anon_struct_counter = 0 self.anon_union_counter = [-1, 0] self.templateRegistry = [] if (len(self.headerFileName)): fd = open(self.headerFileName) headerFileStr = "".join(fd.readlines()) fd.close() # Make sure supportedAccessSpecifier are sane for i in range(0, len(supportedAccessSpecifier)): if " " not in supportedAccessSpecifier[i]: continue supportedAccessSpecifier[i] = re.sub("[ ]+", " ", supportedAccessSpecifier[i]).strip() # Strip out template declarations templateSectionsToSliceOut = [] try: for m in re.finditer("template[\t ]*<[^>]*>", headerFileStr): start = m.start() # Search for the final '>' which may or may not be caught in the case of nexted <>'s for i in range(start, len(headerFileStr)): if headerFileStr[i] == '<': firstBracket = i break ltgtStackCount = 1 #Now look for fianl '>' for i in range(firstBracket + 1, len(headerFileStr)): if headerFileStr[i] == '<': ltgtStackCount += 1 elif headerFileStr[i] == '>': ltgtStackCount -= 1 if ltgtStackCount == 0: end = i break templateSectionsToSliceOut.append((start, end)) # Now strip out all instances of the template templateSectionsToSliceOut.reverse() for tslice in templateSectionsToSliceOut: # Replace the template symbol with a single symbol template_symbol="CppHeaderParser_template_%d"%len(self.templateRegistry) self.templateRegistry.append(headerFileStr[tslice[0]: tslice[1]+1]) newlines = headerFileStr[tslice[0]: tslice[1]].count("\n") * "\n" #Keep line numbers the same headerFileStr = headerFileStr[:tslice[0]] + newlines + " " + template_symbol + " " + headerFileStr[tslice[1] + 1:] except: pass # Change multi line #defines and expressions to single lines maintaining line nubmers # Based from http://stackoverflow.com/questions/2424458/regular-expression-to-match-cs-multiline-preprocessor-statements matches = re.findall(r'(?m)^(?:.*\\\r?\n)+.*$', headerFileStr) is_define = re.compile(r'[ \t\v]*#[Dd][Ee][Ff][Ii][Nn][Ee]') for m in matches: #Keep the newlines so that linecount doesnt break num_newlines = len([a for a in m if a=="\n"]) if is_define.match(m): new_m = m.replace("\n", "<CppHeaderParser_newline_temp_replacement>\\n") else: # Just expression taking up multiple lines, make it take 1 line for easier parsing new_m = m.replace("\\\n", " ") if (num_newlines > 0): new_m += "\n"*(num_newlines) headerFileStr = headerFileStr.replace(m, new_m) #Filter out Extern "C" statements. 
These are order dependent matches = re.findall(re.compile(r'extern[\t ]+"[Cc]"[\t \n\r]*{', re.DOTALL), headerFileStr) for m in matches: #Keep the newlines so that linecount doesnt break num_newlines = len([a for a in m if a=="\n"]) headerFileStr = headerFileStr.replace(m, "\n" * num_newlines) headerFileStr = re.sub(r'extern[ ]+"[Cc]"[ ]*', "", headerFileStr) #Filter out any ignore symbols that end with "()" to account for #define magic functions for ignore in ignoreSymbols: if not ignore.endswith("()"): continue while True: locStart = headerFileStr.find(ignore[:-1]) if locStart == -1: break; locEnd = None #Now walk till we find the last paren and account for sub parens parenCount = 1 inQuotes = False for i in range(locStart + len(ignore) - 1, len(headerFileStr)): c = headerFileStr[i] if not inQuotes: if c == "(": parenCount += 1 elif c == ")": parenCount -= 1 elif c == '"': inQuotes = True if parenCount == 0: locEnd = i + 1 break; else: if c == '"' and headerFileStr[i-1] != '\\': inQuotes = False if locEnd: #Strip it out but keep the linecount the same so line numbers are right match_str = headerFileStr[locStart:locEnd] debug_print("Striping out '%s'"%match_str) num_newlines = len([a for a in match_str if a=="\n"]) headerFileStr = headerFileStr.replace(headerFileStr[locStart:locEnd], "\n"*num_newlines) self.braceDepth = 0 lex.lex() lex.input(headerFileStr) global curLine global curChar curLine = 0 curChar = 0 try: while True: tok = lex.token() if not tok: break if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]: self.anon_union_counter[1] -= 1 tok.value = TagStr(tok.value, lineno=tok.lineno) #debug_print("TOK: %s"%tok) if tok.type == 'NAME' and tok.value in self.IGNORE_NAMES: continue if tok.type != 'TEMPLATE_NAME': self.stack.append( tok.value ) curLine = tok.lineno curChar = tok.lexpos if (tok.type in ('PRECOMP_MACRO', 'PRECOMP_MACRO_CONT')): debug_print("PRECOMP: %s"%tok) self._precomp_macro_buf.append(tok.value) self.stack = [] self.nameStack = [] continue if tok.type == 'TEMPLATE_NAME': try: templateId = int(tok.value.replace("CppHeaderParser_template_","")) self.curTemplate = self.templateRegistry[templateId] except: pass if (tok.type == 'OPEN_BRACE'): if len(self.nameStack) >= 2 and is_namespace(self.nameStack): # namespace {} with no name used in boost, this sets default? if self.nameStack[1] == "__IGNORED_NAMESPACE__CppHeaderParser__":#Used in filtering extern "C" self.nameStack[1] = "" self.nameSpaces.append(self.nameStack[1]) ns = self.cur_namespace(); self.stack = [] if ns not in self.namespaces: self.namespaces.append( ns ) # Detect special condition of macro magic before class declaration so we # can filter it out if 'class' in self.nameStack and self.nameStack[0] != 'class': classLocationNS = self.nameStack.index("class") classLocationS = self.stack.index("class") if "(" not in self.nameStack[classLocationNS:]: debug_print("keyword 'class' found in unexpected location in nameStack, must be following #define magic. Process that before moving on") origNameStack = self.nameStack origStack = self.stack #Process first part of stack which is probably #define macro magic and may cause issues self.nameStack = self.nameStack[:classLocationNS] self.stack = self.stack[:classLocationS] try: self.evaluate_stack() except: debug_print("Error processing #define magic... 
Oh well") #Process rest of stack self.nameStack = origNameStack[classLocationNS:] self.stack = origStack[classLocationS:] if len(self.nameStack) and not is_enum_namestack(self.nameStack): self.evaluate_stack() else: self.nameStack.append(tok.value) if self.stack and self.stack[0] == 'class': self.stack = [] self.braceDepth += 1 elif (tok.type == 'CLOSE_BRACE'): if self.braceDepth == 0: continue if (self.braceDepth == len(self.nameSpaces)): tmp = self.nameSpaces.pop() self.stack = [] # clear stack when namespace ends? if len(self.nameStack) and is_enum_namestack(self.nameStack): self.nameStack.append(tok.value) elif self.braceDepth < 10: self.evaluate_stack() else: self.nameStack = [] self.braceDepth -= 1 #self.stack = []; print 'BRACE DEPTH', self.braceDepth, 'NS', len(self.nameSpaces) if self.curClass: debug_print( 'CURBD %s'%self._classes_brace_level[ self.curClass ] ) if (self.braceDepth == 0) or (self.curClass and self._classes_brace_level[self.curClass]==self.braceDepth): trace_print( 'END OF CLASS DEF' ) if self.accessSpecifierStack: self.curAccessSpecifier = self.accessSpecifierStack[-1] self.accessSpecifierStack = self.accessSpecifierStack[:-1] if self.curClass and self.classes[ self.curClass ]['parent']: self.curClass = self.classes[ self.curClass ]['parent'] else: self.curClass = ""; #self.curStruct = None self.stack = [] #if self.curStruct: self.curStruct = None if self.braceDepth == 0 or (self.curStruct and self._structs_brace_level[self.curStruct['type']]==self.braceDepth): trace_print( 'END OF STRUCT DEF' ) self.curStruct = None if self._method_body and (self.braceDepth + 1) <= self._method_body: self._method_body = None; self.stack = []; self.nameStack = []; trace_print( 'FORCE CLEAR METHBODY' ) if (tok.type == 'OPEN_PAREN'): self.nameStack.append(tok.value) elif (tok.type == 'CLOSE_PAREN'): self.nameStack.append(tok.value) elif (tok.type == 'OPEN_SQUARE_BRACKET'): self.nameStack.append(tok.value) elif (tok.type == 'CLOSE_SQUARE_BRACKET'): self.nameStack.append(tok.value) elif (tok.type == 'TAB'): pass elif (tok.type == 'EQUALS'): self.nameStack.append(tok.value) elif (tok.type == 'COMMA'): self.nameStack.append(tok.value) elif (tok.type == 'BACKSLASH'): self.nameStack.append(tok.value) elif (tok.type == 'DIVIDE'): self.nameStack.append(tok.value) elif (tok.type == 'PIPE'): self.nameStack.append(tok.value) elif (tok.type == 'PERCENT'): self.nameStack.append(tok.value) elif (tok.type == 'CARET'): self.nameStack.append(tok.value) elif (tok.type == 'EXCLAMATION'): self.nameStack.append(tok.value) elif (tok.type == 'SQUOTE'): pass elif (tok.type == 'NUMBER' or tok.type == 'FLOAT_NUMBER'): self.nameStack.append(tok.value) elif (tok.type == 'MINUS'): self.nameStack.append(tok.value) elif (tok.type == 'PLUS'): self.nameStack.append(tok.value) elif (tok.type == 'STRING_LITERAL'): self.nameStack.append(tok.value) elif (tok.type == 'NAME' or tok.type == 'AMPERSTAND' or tok.type == 'ASTERISK' or tok.type == 'CHAR_LITERAL'): if tok.value in ignoreSymbols: debug_print("Ignore symbol %s"%tok.value) elif (tok.value == 'class'): self.nameStack.append(tok.value) elif tok.value in supportedAccessSpecifier: if len(self.nameStack) and self.nameStack[0] in ("class", "struct", "union"): self.nameStack.append(tok.value) elif self.braceDepth == len(self.nameSpaces) + 1 or self.braceDepth == (len(self.nameSpaces) + len(self.curClass.split("::"))): self.curAccessSpecifier = tok.value; self.accessSpecifierScratch.append(tok.value) debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier) 
self.stack = [] else: self.nameStack.append(tok.value) if self.anon_union_counter[0] == self.braceDepth: self.anon_union_counter = [-1, 0] elif (tok.type == 'COLON'): #Dont want colon to be first in stack if len(self.nameStack) == 0: self.accessSpecifierScratch = [] continue # Handle situation where access specifiers can be multi words such as "public slots" jns = " ".join(self.accessSpecifierScratch + self.nameStack) if jns in supportedAccessSpecifier: self.curAccessSpecifier = jns; debug_print("curAccessSpecifier updated to %s"%self.curAccessSpecifier) self.stack = [] self.nameStack = [] else: self.nameStack.append(tok.value) self.accessSpecifierScratch = [] elif (tok.type == 'SEMI_COLON'): if self.anon_union_counter[0] == self.braceDepth and self.anon_union_counter[1]: debug_print("Creating anonymous union") #Force the processing of an anonymous union saved_namestack = self.nameStack[:] saved_stack = self.stack[:] self.nameStack = [""] self.stack = self.nameStack + [";"] self.nameStack = self.nameStack[0:1] debug_print("pre eval anon stack") self.evaluate_stack( tok.type ) debug_print("post eval anon stack") self.nameStack = saved_namestack self.stack = saved_stack self.anon_union_counter = [-1, 0]; if (self.braceDepth < 10): self.evaluate_stack( tok.type ) self.stack = [] self.nameStack = [] except: if (debug): raise raise CppParseError("Not able to parse %s on line %d evaluating \"%s\"\nError around: %s" % (self.headerFileName, tok.lineno, tok.value, " ".join(self.nameStack))) self.finalize() global parseHistory parseHistory = [] # Delete some temporary variables for key in ["_precomp_macro_buf", "nameStack", "nameSpaces", "curAccessSpecifier", "accessSpecifierStack", "accessSpecifierScratch", "nameStackHistory", "anon_struct_counter", "anon_union_counter", "_classes_brace_level", "_forward_decls", "stack", "mainClass", "curStruct", "_template_typenames", "_method_body", "braceDepth", "_structs_brace_level", "typedefs_order", "curTemplate", "templateRegistry"]: del self.__dict__[key] def evaluate_stack(self, token=None): """Evaluates the current name stack""" global doxygenCommentCache self.nameStack = filter_out_attribute_keyword(self.nameStack) self.stack = filter_out_attribute_keyword(self.stack) nameStackCopy = self.nameStack[:] debug_print( "Evaluating stack %s\n BraceDepth: %s (called from %d)" %(self.nameStack,self.braceDepth, inspect.currentframe().f_back.f_lineno)) #Handle special case of overloading operator () if "operator()(" in "".join(self.nameStack): operator_index = self.nameStack.index("operator") self.nameStack.pop(operator_index + 2) self.nameStack.pop(operator_index + 1) self.nameStack[operator_index] = "operator()" if (len(self.curClass)): debug_print( "%s (%s) "%(self.curClass, self.curAccessSpecifier)) else: debug_print( "<anonymous> (%s) "%self.curAccessSpecifier) #Filter special case of array with casting in it try: bracePos = self.nameStack.index("[") parenPos = self.nameStack.index("(") if bracePos == parenPos - 1: endParen = self.nameStack.index(")") self.nameStack = self.nameStack[:bracePos + 1] + self.nameStack[endParen + 1:] debug_print("Filtered namestack to=%s"%self.nameStack) except: pass #if 'typedef' in self.nameStack: self.evaluate_typedef() # allows nested typedefs, probably a bad idea if (not self.curClass and 'typedef' in self.nameStack and (('struct' not in self.nameStack and 'union' not in self.nameStack) or self.stack[-1] == ";") and not is_enum_namestack(self.nameStack)): trace_print('STACK', self.stack) self.evaluate_typedef() return elif 
(len(self.nameStack) == 0): debug_print( "trace" ) debug_print( "(Empty Stack)" ) return elif (self.nameStack[0] == "namespace"): #Taken care of outside of here pass elif len(self.nameStack) == 2 and self.nameStack[0] == "friend":#friend class declaration pass elif len(self.nameStack) >= 2 and self.nameStack[0] == 'using' and self.nameStack[1] == 'namespace': pass # TODO elif is_enum_namestack(self.nameStack): debug_print( "trace" ) self.evaluate_enum_stack() elif self._method_body and (self.braceDepth + 1) > self._method_body: trace_print( 'INSIDE METHOD DEF' ) elif is_method_namestack(self.stack) and not self.curStruct and '(' in self.nameStack: debug_print( "trace" ) if self.braceDepth > 0: if "{" in self.stack and self.stack[0] != '{' and self.stack[-1] == ';' and self.braceDepth == 1: #Special case of a method defined outside a class that has a body pass else: self.evaluate_method_stack() else: #Free function self.evaluate_method_stack() elif (len(self.nameStack) == 1 and len(self.nameStackHistory) > self.braceDepth and (self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "struct"] or self.nameStackHistory[self.braceDepth][0][0:2] == ["typedef", "union"])): # Look for the name of a typedef struct: struct typedef {...] StructName; or unions to get renamed debug_print("found the naming of a union") type_name_to_rename = self.nameStackHistory[self.braceDepth][1] new_name = self.nameStack[0] type_to_rename = self.classes[type_name_to_rename] type_to_rename["name"] = self.nameStack[0] #Now re install it in its new location self.classes[new_name] = type_to_rename del self.classes[type_name_to_rename] elif is_property_namestack(self.nameStack) and self.stack[-1] == ';': debug_print( "trace" ) if self.nameStack[0] in ('class', 'struct') and len(self.stack) == 3: self.evalute_forward_decl() elif len(self.nameStack) >= 2 and (self.nameStack[0]=='friend' and self.nameStack[1]=='class'): pass else: self.evaluate_property_stack() # catches class props and structs in a namespace elif self.nameStack[0] in ("class", "struct", "union") or self.nameStack[0] == 'typedef' and self.nameStack[1] in ('struct', 'union'): #Parsing a union can reuse much of the class parsing debug_print( "trace" ) self.evaluate_class_stack() elif not self.curClass: debug_print( "trace" ) if is_enum_namestack(self.nameStack): self.evaluate_enum_stack() elif self.curStruct and self.stack[-1] == ';': self.evaluate_property_stack() # this catches fields of global structs self.nameStack = [] doxygenCommentCache = "" elif (self.braceDepth < 1): debug_print( "trace" ) #Ignore global stuff for now debug_print( "Global stuff: %s"%self.nameStack ) self.nameStack = [] doxygenCommentCache = "" elif (self.braceDepth > len(self.nameSpaces) + 1): debug_print( "trace" ) self.nameStack = [] doxygenCommentCache = "" try: self.nameStackHistory[self.braceDepth] = (nameStackCopy, self.curClass) except: self.nameStackHistory.append((nameStackCopy, self.curClass)) self.nameStack = [] # its a little confusing to have some if/else above return and others not, and then clearning the nameStack down here doxygenCommentCache = "" self.curTemplate = None def evaluate_enum_stack(self): """Create an Enum out of the name stack""" debug_print( "evaluating enum" ) newEnum = CppEnum(self.nameStack) if len(list(newEnum.keys())): if len(self.curClass): newEnum["namespace"] = self.cur_namespace(False) klass = self.classes[self.curClass] klass["enums"][self.curAccessSpecifier].append(newEnum) if self.curAccessSpecifier == 'public' and 'name' in newEnum: 
klass._public_enums[ newEnum['name'] ] = newEnum else: newEnum["namespace"] = self.cur_namespace(True) self.enums.append(newEnum) if 'name' in newEnum and newEnum['name']: self.global_enums[ newEnum['name'] ] = newEnum #This enum has instances, turn them into properties if "instances" in newEnum: instanceType = "enum" if "name" in newEnum: instanceType = newEnum["name"] for instance in newEnum["instances"]: self.nameStack = [instanceType, instance] self.evaluate_property_stack() del newEnum["instances"] def strip_parent_keys(self): """Strip all parent keys to prevent loops""" obj_queue = [self] while len(obj_queue): obj = obj_queue.pop() trace_print("pop %s type %s"%(obj, type(obj))) try: if "parent" in obj.keys(): del obj["parent"] trace_print("Stripped parent from %s"%obj.keys()) except: pass # Figure out what sub types are one of ours try: if not hasattr(obj, 'keys'): obj = obj.__dict__ for k in obj.keys(): trace_print("-Try key %s"%(k)) trace_print("-type %s"%(type(obj[k]))) if k in ["nameStackHistory", "parent", "_public_typedefs"]: continue if type(obj[k]) == list: for i in obj[k]: trace_print("push l %s"%i) obj_queue.append(i) elif type(obj[k]) == dict: if len(obj): trace_print("push d %s"%obj[k]) obj_queue.append(obj[k]) elif type(obj[k]) == type(type(0)): if type(obj[k]) == int: obj[k] = "int" elif type(obj[k]) == str: obj[k] = "string" else: obj[k] = "???" trace_print("next key\n") except: trace_print("Exception") def toJSON(self, indent=4): """Converts a parsed structure to JSON""" import json self.strip_parent_keys() try: del self.__dict__["classes_order"] except: pass return json.dumps(self.__dict__, indent=indent) def __repr__(self): rtn = { "classes": self.classes, "functions": self.functions, "enums": self.enums, "variables": self.variables, } return repr(rtn) def __str__(self): rtn = "" for className in list(self.classes.keys()): rtn += "%s\n"%self.classes[className] if self.functions: rtn += "// functions\n" for f in self.functions: rtn += "%s\n"%f if self.variables: rtn += "// variables\n" for f in self.variables: rtn += "%s\n"%f if self.enums: rtn += "// enums\n" for f in self.enums: rtn += "%s\n"%f return rtn
apache-2.0
-2,464,328,562,798,681,000
42.713687
210
0.502629
false
4.225109
false
false
false
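
A minimal usage sketch for the CppHeaderParser module above. It relies only on what the code itself defines: the CppHeader constructor with argType="string", the classes dictionary, the per-access "methods" lists, and the rtnType/name keys of CppMethod. The sample header string and the import name are assumptions for illustration, not part of the dataset row.

import CppHeaderParser  # assumption: the module above is importable under this name

# A made-up header, parsed in-memory via argType="string" (see CppHeader.__init__)
sample = """
class Shape {
public:
    Shape(int sides);
    int area() const;
private:
    int sides_;
};
"""

hdr = CppHeaderParser.CppHeader(sample, argType="string")

# hdr.classes maps class names to parsed class dictionaries
shape = hdr.classes["Shape"]
for meth in shape["methods"]["public"]:
    # constructors get rtnType "void" per the parser's default
    print(meth["rtnType"], meth["name"])
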
nschaetti/EchoTorch
echotorch/nn/ICACell.py
1
2909
# -*- coding: utf-8 -*-
#
# File : echotorch/nn/ICACell.py
# Description : An Independent Component Analysis module.
# Date : 26th of January, 2018
#
# This file is part of EchoTorch.  EchoTorch is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Nils Schaetti <[email protected]>

"""
Created on 26 January 2018
@author: Nils Schaetti
"""

# Imports
import torch.sparse
import torch
import torch.nn as nn
from torch.autograd import Variable


# Independent Component Analysis layer
class ICACell(nn.Module):
    """
    Independent Component Analysis layer. It can be used to handle different
    batch-mode algorithms for ICA.
    """

    # Constructor
    def __init__(self, input_dim, output_dim, with_bias=False):
        """
        Constructor
        :param input_dim: Inputs dimension.
        :param output_dim: Output dimension (number of components).
        :param with_bias: Append a constant bias column to the input.
        """
        super(ICACell, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        # forward() reads this flag, so it must be set here
        self.with_bias = with_bias
    # end __init__

    ###############################################
    # PROPERTIES
    ###############################################

    ###############################################
    # PUBLIC
    ###############################################

    # Reset learning
    def reset(self):
        """
        Reset learning
        :return:
        """
        # Training mode again
        self.train(True)
    # end reset

    # Forward
    def forward(self, x, y=None):
        """
        Forward
        :param x: Input signal.
        :param y: Target outputs
        :return: Output or hidden states
        """
        # Batch size
        batch_size = x.size()[0]

        # Time length
        time_length = x.size()[1]

        # Add bias
        if self.with_bias:
            x = self._add_constant(x)
        # end if

        # Placeholder: the ICA transform is not implemented yet,
        # so the (possibly bias-augmented) input is passed through.
        return x
    # end forward

    # Finish training
    def finalize(self):
        """
        Finalize training with LU factorization or Pseudo-inverse
        """
        pass
    # end finalize

    ###############################################
    # PRIVATE
    ###############################################

    # Add constant
    def _add_constant(self, x):
        """
        Add constant
        :param x:
        :return:
        """
        bias = Variable(torch.ones((x.size()[0], x.size()[1], 1)), requires_grad=False)
        return torch.cat((bias, x), dim=2)
    # end _add_constant
# end ICACell
gpl-3.0
4,296,446,302,283,977,700
24.973214
104
0.546236
false
4.259151
false
false
false
hehaichi/django-imagemanagement
imageserver/settings.py
1
3326
""" Django settings for imageserver project. Generated by 'django-admin startproject' using Django 1.10.5. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'fvmacrow6pe#wtxg01(9_m01inqisms+255x%uvj0eftaft0xm' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'imagemanagement', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'imageserver.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'imageserver.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True #Data Max upload size DATA_UPLOAD_MAX_MEMORY_SIZE=2621440*10 # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') MEDIA_ROOT = os.path.join(BASE_DIR, 'imagemanagement/media') MEDIA_URL = '/media/'
mit
2,671,746,408,004,011,500
25.396825
91
0.693025
false
3.490031
false
false
false
pkariz/nnsearch
nnsearch/approx/Annoy.py
1
6165
from ..baseindex import Index import numpy as np import math from annoy import AnnoyIndex class Annoy(Index): """ AnnoyIndex from annoy package. """ def __init__(self): self.algorithm = "AnnoyIndex" self.idx_to_vector = {} self.valid_types = [np.uint8, np.uint16, np.uint32, np.uint64, np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64] def build(self, data=None, dimensions=None, distance="angular", trees=-1): """ Builds AnnoyIndex on data or creates an empty one. If both dimensions and data are given then their dimensions must match. At least one of those two attributes must be given to define number of dimensions which is required to create AnnoyIndex. After the trees are built you cannot add additional vectors. :param data: Dataset instance representing vectors which are inserted before trees are built (optional, you can insert data one by one with insert method before building trees) :param dimensions: number of dimensions :param distance: can be "angular" (default) or "euclidean" :param trees: number of binary trees. Default (-1) means that this parameter is determined automatically in a way, that memory usage <= 2 * memory(vectors) """ #check dimensions if data is None and dimensions is None: raise ValueError("Number of dimensions is missing!") if data is not None and dimensions is not None and dimensions != len(data.data[0]): raise ValueError("Dimensions from constructor parameter 'dimensions' and derived dimensions from 'data' are different!") #build index if data is not None: dimensions = len(data.data[0]) self.index = AnnoyIndex(dimensions, distance) self.d = dimensions self._size = 0 self.metric = 0 #angular if distance != "angular": self.metric = 1 #euclidean #fill data if data is not None: if type(data.data) is np.ndarray and data.data.dtype not in self.valid_types: raise ValueError("Invalid dtype of numpy array, check valid_types parameter of index!") for v in data.data: self._insert(v) #build trees self.index.build(trees) def _insert(self, vector): """ Inserts vector in AnnoyIndex. :param vector: 1d numpy array, list or tuple representing vector """ if type(vector) is np.ndarray: vector = vector.tolist() else: vector = list(vector) self.index.add_item(self._size, vector) self._size += 1 def get_dist(self, v1, v2, dist=None): """ Calculates distance (euclidean or angular) between two vectors. By default distance is set to metric of index. :param v1: first vector (list or numpy array) :param v2: second vector :param dist: distance can be 0 (angular) or 1 (euclidean) :return: distance between given vectors """ if dist is None: dist = self.metric if dist == 0: #angular v1_sum, v2_sum, mix_sum = 0.0, 0.0, 0.0 for i in range(self.d): v1_sum += v1[i] * v1[i] v2_sum += v2[i] * v2[i] mix_sum += v1[i] * v2[i] a = v1_sum * v2_sum if a > 0.0: return 2.0 - (2.0 * mix_sum / (math.sqrt(a))) else: return 2.0 else: #euclidean d = 0.0 if self.d != len(v1) or self.d != len(v2): raise ValueError("Length of vectors is not the same as d!") for i in range(self.d): d += (v1[i] - v2[i]) * (v1[i] - v2[i]) return math.sqrt(d) def query(self, queries, k=1): """ Returns k nearest neighbors. 
        :param queries: 1d or 2d numpy array or list
        :param k: number of nearest neighbors to return

        :return: tuple (a, b) where a is an array with the k nearest neighbors
        and b is an array of the same shape containing their distances
        """
        dists = []
        if (isinstance(queries, np.ndarray) and len(queries.shape) == 1) or \
                (isinstance(queries, list) and not isinstance(queries[0], list)):
            # single query: normalize it to a plain list once, so we never
            # call .tolist() on something that is already a list
            if isinstance(queries, np.ndarray):
                query_list = queries.tolist()
            else:
                query_list = queries
            neighbors = self.index.get_nns_by_vector(query_list, k)
            # calculate distances
            dists = [self.get_dist(query_list, self.index.get_item_vector(x))
                     for x in neighbors]
        else:
            # more queries
            neighbors = []
            for query in queries:
                if isinstance(query, np.ndarray):
                    cur_neighbors = self.index.get_nns_by_vector(query.tolist(), k)
                else:
                    cur_neighbors = self.index.get_nns_by_vector(query, k)
                neighbors.append(cur_neighbors)
                # calculate distances from cur_neighbors to query point
                dists.append([self.get_dist(query, self.index.get_item_vector(x))
                              for x in cur_neighbors])
        return np.array(neighbors), np.array(dists)

    def save(self, filename):
        """Saves index to file."""
        self.index.save(filename)

    def load(self, filename, dimensions=None, distance=None):
        """
        Loads index from file.

        :param filename: path to file
        :param dimensions: number of dimensions of index
        :param distance: distance used
        """
        if dimensions is None or distance is None:
            raise ValueError("Dimensions and distance are needed!")
        self.index = AnnoyIndex(dimensions, distance)
        self.d = dimensions
        self.metric = 0
        if distance == "euclidean":
            self.metric = 1
        self.index.load(filename)
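
# --- Hedged usage sketch (not part of the module): build an index over random
# vectors and query it. Runnable via `python -m nnsearch.approx.Annoy`;
# DummyDataset only mimics the package's Dataset type (.data attribute assumed).
if __name__ == "__main__":
    class DummyDataset(object):
        def __init__(self, data):
            self.data = data

    vectors = np.random.rand(100, 8).astype(np.float32)
    index = Annoy()
    index.build(data=DummyDataset(vectors), distance="euclidean", trees=10)
    neighbors, dists = index.query(vectors[0], k=5)
    print(neighbors, dists)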
gpl-3.0
-3,607,731,220,281,790,000
39.827815
132
0.579238
false
4.151515
false
false
false
HERA-Team/pyuvdata
pyuvdata/uvbeam/cst_beam.py
1
13336
# -*- mode: python; coding: utf-8 -*- # Copyright (c) 2018 Radio Astronomy Software Group # Licensed under the 2-clause BSD License """Class for reading beam CST files.""" import re import warnings import numpy as np from .uvbeam import UVBeam from .. import utils as uvutils __all__ = ["CSTBeam"] class CSTBeam(UVBeam): """ Defines a CST-specific subclass of UVBeam for reading CST text files. This class should not be interacted with directly, instead use the read_cst_beam method on the UVBeam class. """ def name2freq(self, fname): """ Extract frequency from the filename. Assumes the file name contains a substring with the frequency channel in MHz that the data represents. e.g. "HERA_Sim_120.87MHz.txt" should yield 120.87e6 Parameters ---------- fname : str Filename to parse. Returns ------- float Frequency extracted from filename in Hz. """ fi = fname.rfind("Hz") frequency = float(re.findall(r"\d*\.\d+|\d+", fname[:fi])[-1]) si_prefix = fname[fi - 1] si_dict = {"k": 1e3, "M": 1e6, "G": 1e9} if si_prefix in si_dict.keys(): frequency = frequency * si_dict[si_prefix] return frequency def read_cst_beam( self, filename, beam_type="power", feed_pol="x", rotate_pol=True, frequency=None, telescope_name=None, feed_name=None, feed_version=None, model_name=None, model_version=None, history="", x_orientation=None, reference_impedance=None, extra_keywords=None, run_check=True, check_extra=True, run_check_acceptability=True, ): """ Read in data from a cst file. Parameters ---------- filename : str The cst file to read from. beam_type : str What beam_type to read in ('power' or 'efield'). feed_pol : str The feed or polarization or list of feeds or polarizations the files correspond to. Defaults to 'x' (meaning x for efield or xx for power beams). rotate_pol : bool If True, assume the structure in the simulation is symmetric under 90 degree rotations about the z-axis (so that the y polarization can be constructed by rotating the x polarization or vice versa). Default: True if feed_pol is a single value or a list with all the same values in it, False if it is a list with varying values. frequency : float or list of float The frequency or list of frequencies corresponding to the filename(s). This is assumed to be in the same order as the files. If not passed, the code attempts to parse it from the filenames. telescope_name : str The name of the telescope corresponding to the filename(s). feed_name : str The name of the feed corresponding to the filename(s). feed_version : str The version of the feed corresponding to the filename(s). model_name : str The name of the model corresponding to the filename(s). model_version : str The version of the model corresponding to the filename(s). history : str A string detailing the history of the filename(s). x_orientation : str, optional Orientation of the physical dipole corresponding to what is labelled as the x polarization. Options are "east" (indicating east/west orientation) and "north" (indicating north/south orientation) reference_impedance : float, optional The reference impedance of the model(s). extra_keywords : dict, optional A dictionary containing any extra_keywords. run_check : bool Option to check for the existence and proper shapes of required parameters after reading in the file. check_extra : bool Option to check optional parameters as well as required ones. run_check_acceptability : bool Option to check acceptable range of the values of required parameters after reading in the file. 
""" self.telescope_name = telescope_name self.feed_name = feed_name self.feed_version = feed_version self.model_name = model_name self.model_version = model_version self.history = history if not uvutils._check_history_version(self.history, self.pyuvdata_version_str): self.history += self.pyuvdata_version_str if x_orientation is not None: self.x_orientation = x_orientation if reference_impedance is not None: self.reference_impedance = float(reference_impedance) if extra_keywords is not None: self.extra_keywords = extra_keywords if beam_type == "power": self.Naxes_vec = 1 if feed_pol == "x": feed_pol = "xx" elif feed_pol == "y": feed_pol = "yy" if rotate_pol: rot_pol_dict = {"xx": "yy", "yy": "xx", "xy": "yx", "yx": "xy"} pol2 = rot_pol_dict[feed_pol] self.polarization_array = np.array( [uvutils.polstr2num(feed_pol), uvutils.polstr2num(pol2)] ) else: self.polarization_array = np.array([uvutils.polstr2num(feed_pol)]) self.Npols = len(self.polarization_array) self._set_power() else: self.Naxes_vec = 2 self.Ncomponents_vec = 2 if rotate_pol: if feed_pol == "x": self.feed_array = np.array(["x", "y"]) else: self.feed_array = np.array(["y", "x"]) else: if feed_pol == "x": self.feed_array = np.array(["x"]) else: self.feed_array = np.array(["y"]) self.Nfeeds = self.feed_array.size self._set_efield() self.data_normalization = "physical" self.antenna_type = "simple" self.Nfreqs = 1 self.Nspws = 1 self.freq_array = np.zeros((self.Nspws, self.Nfreqs)) self.bandpass_array = np.zeros((self.Nspws, self.Nfreqs)) self.spw_array = np.array([0]) self.pixel_coordinate_system = "az_za" self._set_cs_params() out_file = open(filename, "r") line = out_file.readline().strip() # Get the first line out_file.close() raw_names = line.split("]") raw_names = [raw_name for raw_name in raw_names if not raw_name == ""] column_names = [] units = [] for raw_name in raw_names: column_name, unit = tuple(raw_name.split("[")) column_names.append("".join(column_name.lower().split(" "))) units.append(unit.lower().strip()) data = np.loadtxt(filename, skiprows=2) theta_col = np.where(np.array(column_names) == "theta")[0][0] phi_col = np.where(np.array(column_names) == "phi")[0][0] if "deg" in units[theta_col]: theta_data = np.radians(data[:, theta_col]) else: theta_data = data[:, theta_col] if "deg" in units[phi_col]: phi_data = np.radians(data[:, phi_col]) else: phi_data = data[:, phi_col] theta_axis = np.sort(np.unique(theta_data)) phi_axis = np.sort(np.unique(phi_data)) if not theta_axis.size * phi_axis.size == theta_data.size: raise ValueError("Data does not appear to be on a grid") theta_data = theta_data.reshape((theta_axis.size, phi_axis.size), order="F") phi_data = phi_data.reshape((theta_axis.size, phi_axis.size), order="F") delta_theta = np.diff(theta_axis) if not np.isclose(np.max(delta_theta), np.min(delta_theta)): raise ValueError( "Data does not appear to be regularly gridded in zenith angle" ) delta_theta = delta_theta[0] delta_phi = np.diff(phi_axis) if not np.isclose(np.max(delta_phi), np.min(delta_phi)): raise ValueError( "Data does not appear to be regularly gridded in azimuth angle" ) delta_phi = delta_phi[0] self.axis1_array = phi_axis self.Naxes1 = self.axis1_array.size self.axis2_array = theta_axis self.Naxes2 = self.axis2_array.size if self.beam_type == "power": # type depends on whether cross pols are present # (if so, complex, else float) self.data_array = np.zeros( self._data_array.expected_shape(self), dtype=self._data_array.expected_type, ) else: self.data_array = np.zeros( 
self._data_array.expected_shape(self), dtype=np.complex ) if frequency is not None: self.freq_array[0] = frequency else: self.freq_array[0] = self.name2freq(filename) if rotate_pol: # for second polarization, rotate by pi/2 rot_phi = phi_data + np.pi / 2 rot_phi[np.where(rot_phi >= 2 * np.pi)] -= 2 * np.pi roll_rot_phi = np.roll(rot_phi, int((np.pi / 2) / delta_phi), axis=1) if not np.allclose(roll_rot_phi, phi_data): raise ValueError("Rotating by pi/2 failed") # theta is not affected by the rotation # get beam if self.beam_type == "power": data_col_enum = ["abs(e)", "abs(v)"] data_col = [] for name in data_col_enum: this_col = np.where(np.array(column_names) == name)[0] if this_col.size > 0: data_col = data_col + this_col.tolist() if len(data_col) == 0: raise ValueError("No power column found in file: {}".format(filename)) elif len(data_col) > 1: raise ValueError( "Multiple possible power columns found in file: {}".format(filename) ) data_col = data_col[0] power_beam1 = ( data[:, data_col].reshape((theta_axis.size, phi_axis.size), order="F") ** 2.0 ) self.data_array[0, 0, 0, 0, :, :] = power_beam1 if rotate_pol: # rotate by pi/2 for second polarization power_beam2 = np.roll(power_beam1, int((np.pi / 2) / delta_phi), axis=1) self.data_array[0, 0, 1, 0, :, :] = power_beam2 else: self.basis_vector_array = np.zeros( (self.Naxes_vec, self.Ncomponents_vec, self.Naxes2, self.Naxes1) ) self.basis_vector_array[0, 0, :, :] = 1.0 self.basis_vector_array[1, 1, :, :] = 1.0 theta_mag_col = np.where(np.array(column_names) == "abs(theta)")[0][0] theta_phase_col = np.where(np.array(column_names) == "phase(theta)")[0][0] phi_mag_col = np.where(np.array(column_names) == "abs(phi)")[0][0] phi_phase_col = np.where(np.array(column_names) == "phase(phi)")[0][0] theta_mag = data[:, theta_mag_col].reshape( (theta_axis.size, phi_axis.size), order="F" ) phi_mag = data[:, phi_mag_col].reshape( (theta_axis.size, phi_axis.size), order="F" ) if "deg" in units[theta_phase_col]: theta_phase = np.radians(data[:, theta_phase_col]) else: theta_phase = data[:, theta_phase_col] if "deg" in units[phi_phase_col]: phi_phase = np.radians(data[:, phi_phase_col]) else: phi_phase = data[:, phi_phase_col] theta_phase = theta_phase.reshape( (theta_axis.size, phi_axis.size), order="F" ) phi_phase = phi_phase.reshape((theta_axis.size, phi_axis.size), order="F") theta_beam = theta_mag * np.exp(1j * theta_phase) phi_beam = phi_mag * np.exp(1j * phi_phase) self.data_array[0, 0, 0, 0, :, :] = phi_beam self.data_array[1, 0, 0, 0, :, :] = theta_beam if rotate_pol: # rotate by pi/2 for second polarization theta_beam2 = np.roll(theta_beam, int((np.pi / 2) / delta_phi), axis=1) phi_beam2 = np.roll(phi_beam, int((np.pi / 2) / delta_phi), axis=1) self.data_array[0, 0, 1, 0, :, :] = phi_beam2 self.data_array[1, 0, 1, 0, :, :] = theta_beam2 self.bandpass_array[0] = 1 if frequency is None: warnings.warn( "No frequency provided. Detected frequency is: " "{freqs} Hz".format(freqs=self.freq_array) ) if run_check: self.check( check_extra=check_extra, run_check_acceptability=run_check_acceptability )
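
# --- Hedged usage sketch (not part of the module): as the class docstring
# notes, this reader is normally driven through UVBeam.read_cst_beam. The
# file name and metadata below are made up; try it with
# `python -m pyuvdata.uvbeam.cst_beam` (the relative imports above prevent
# running this file directly as a script).
if __name__ == "__main__":
    beam = UVBeam()
    beam.read_cst_beam(
        "HERA_Sim_120.87MHz.txt",  # hypothetical file; frequency parsed from name
        beam_type="power",
        telescope_name="HERA",
        feed_name="Sim feed",
        feed_version="1.0",
        model_name="Sim model",
        model_version="1.0",
    )
    print(beam.freq_array)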
bsd-2-clause
7,236,658,128,043,490,000
36.886364
88
0.540492
false
3.811375
false
false
false
joaquinlpereyra/ludema
ludema/abstract/actions.py
1
11361
import random
from functools import wraps
from ludema.abstract.utils import Direction
from ludema.exceptions import (PieceIsNotOnATileError,
                               PieceIsNotOnThisBoardError,
                               TileIsEmptyError,
                               NotGrabbableError)

class Action:
    def __init__(self, piece, action_functions):
        self.possible_actions = []
        self.piece = piece
        if action_functions is None:
            action_functions = self._default_actions()
        self._set_actions(action_functions)
        self.history = []

    def __getattribute__(self, name):
        attr = object.__getattribute__(self, name)
        if attr in object.__getattribute__(self, 'possible_actions'):
            attr = self._history_appender(attr)
        return attr

    @property
    def is_implemented(self):
        """Return True if the action is implemented, False if it isn't."""
        return True if self.possible_actions else False

    def _history_appender(self, func):
        @wraps(func)
        def history_wrapper(*args, **kwargs):
            self.history.append(func.__name__)
            return func(*args, **kwargs)
        return history_wrapper

    def _normal_default_actions(self):
        """Just a collection of four extremely normal sets of default actions:
        the ones that apply the action to the tile up, right, left and down
        of the piece.
        """
        def up():
            return self.do(self.piece.surroundings[Direction.UP])

        def right():
            return self.do(self.piece.surroundings[Direction.RIGHT])

        def down():
            return self.do(self.piece.surroundings[Direction.DOWN])

        def left():
            return self.do(self.piece.surroundings[Direction.LEFT])

        return [up, right, down, left]

    def _set_actions(self, action_functions):
        """Set the action_functions as methods of the class and append them
        to the possible_actions list.
        """
        for action_function in action_functions:
            self.possible_actions.append(action_function)
            setattr(self, action_function.__name__, action_function)

    def _default_actions(self):
        """These will be the default action functions. Every action should
        implement them, but the _normal_default_actions method gives you four
        extremely common default action functions: the ones which apply the
        action to the tiles above, at right, below and at left of the piece.
        """
        return self._normal_default_actions()

    def _unsafe_do(self, tile):
        """Intended to actually perform the action. Should check all
        action conditions and raise an appropriate error if they are not met.
        Doesn't need to return anything. Shouldn't be used for I/O, instead
        use the do method for that.

        Note:
            Every action should implement this method.
        """
        raise NotImplementedError("The Action class shouldn't be used directly!")

    def do(self, tile, dont_pass_turn=False):
        """Intended as a safe wrapper for _unsafe_do. Should take a tile
        indicating where the action must be performed. Should return a
        boolean indicating if the action could be performed or not. Should
        be capable of handling I/O without raising any exceptions.

        Useful for one-off uses of the actions, if you want to
        extraordinarily perform an action on a tile. For ordinary uses, use
        the actions in the possible_actions list. For example, if a piece
        always moves up, down, left and right, set those as action functions.
        If a magician teleports the piece somewhere, you can use this
        function to move it there.

        All the action functions should ultimately use this method.

        Note:
            Every action should implement this method.
        """
        raise NotImplementedError("The Action class shouldn't be used directly!")

    def random(self):
        """Call a random function from the possible actions list.
        Keep in mind that the action may or may not be performed, depending
        on the current position of the piece and what the action tries to do.
Returns: bool: True if action was performed, False if not """ surprise_action = random.choice(self.possible_actions) was_action_valid = surprise_action() return was_action_valid def random_and_valid(self): """Call a random function from the possible actions, making sure that the action is actually possible for the piece. If no actions from the list of possible actions, it will just return False. Returns: bool: True if there was a valid action to be made by the piece, False if the piece couldn't move anywhere """ tries = 0 random_action_performed = self.random() while not random_action_performed: random_action_performed = self.random() tries += 1 if tries >= len(self.possible_actions): return False return True def all(self): """Call all possible actions from the list. The actions may or may not be performed depending on the action conditions. Returns: dict: looks like {action_function_name, boolean} key-value pairs, indicating which actions where actually performed (True) and which not (False). """ successes = {} for action_function in self.possible_actions: success = action_function() successes[action_function.__name__] = success return successes def until_success(self): """Call all possible actions from the list of possible actions, but stop once it can perform one successfully. Returns: bool: True if there was a valid action performed by the piece, False if no valid action was found. """ for action_function in self.possible_actions: success = action_function() if success: return True else: return False class Moving(Action): def __init__(self, piece, movement_functions): """ Args: piece (Piece): the movable piece to which the movements refer movement_functions ([nullary functions]): a list of valid functions which as a side effect move the piece. """ Action.__init__(self, piece, movement_functions) self.possible_movements = self.possible_actions def _unsafe_do(self, tile): """Move the object if it can. That means: unlink the piece from its current tile and link it to the new tile; unless there's a piece in the destiny tile already. Args: tile (Tile): the tile to which the piece will try to move Returns: bool: False if there was a piece on tile and it wasn't walkable, True if movement could be completed Raises: PieceIsNotOnATileError: if the piece hasn't been put on a tile before trying to move PieceIsNotOnThisBoardError: if the piece you're trying to move is in fact on another board """ if not self.piece.home_tile: raise PieceIsNotOnATileError if self.piece.home_tile.board is not tile.board: raise PieceIsNotOnThisBoardError if tile.piece is not None: tile.piece.on_touch_do(touching_piece=self.piece) # what if tile.piece.on_touch_do actually moved the touched piece? # it could have, so we need to check if tile.piece still has # a piece... if tile.piece and not tile.piece.walkable: return False self.piece.home_tile.piece = None tile.piece = self.piece return True def do(self, tile): """Move the object, if it can. Args: tile (Tile): the tile to which the piece will try to move. Returns: bool: True if piece could be moved, False if not """ if tile: try: return self._unsafe_do(tile) except (PieceIsNotOnATileError, PieceIsNotOnThisBoardError): return False else: return False class Attacking(Action): def __init__(self, piece, attack_functions): Action.__init__(self, piece, attack_functions) self.possible_attacks = self.possible_actions def _unsafe_do(self, tile): """Attack a piece on tile passed as argument. If tile has no piece, raise a TileIsEmptyError. 
        Args:
            tile (Tile): the tile which the piece will try to attack
        """
        if tile.piece is None:
            raise TileIsEmptyError(self.piece, tile)
        attacked_piece = tile.piece
        attacked_piece.health -= self.piece.attack_damage

    def do(self, tile):
        """Attack a tile passed as argument.
        Safe to use for I/O, should never raise an error.

        Args:
            tile (Tile): the tile which the piece will try to attack

        Returns:
            bool: True if attack could be performed, False if attack
                failed (because the tile didn't have a piece associated
                or it was None)
        """
        if tile:
            try:
                self._unsafe_do(tile)
                return True
            except TileIsEmptyError:
                return False
        else:
            return False


class Grabbing(Action):
    def __init__(self, piece, grab_functions):
        Action.__init__(self, piece, grab_functions)
        self.possible_grabs = self.possible_actions

    def _unsafe_do(self, tile):
        """Grabs from the tile passed as argument.

        Args:
            tile (Tile): the tile which the piece will try to grab from

        Raises:
            NotGrabbableError if the piece on the tile can't be grabbed
        """
        if not callable(tile.piece.grab):
            raise NotGrabbableError(tile.piece)

        grabbable = tile.piece
        grabbable.owner = self.piece
        self.piece.items.append(grabbable)
        tile.piece = None  # POPS!

    def do(self, tile):
        """Grabs from the tile passed as argument.
        Safe to use for I/O, should never raise an error.

        Args:
            tile (Tile): the tile which the piece will try to grab from

        Returns:
            bool: True if something could be grabbed, False if the grab
                failed
        """
        if not tile:
            return False
        try:
            self._unsafe_do(tile)
            return True
        # _unsafe_do raises NotGrabbableError for pieces that can't be
        # grabbed and AttributeError when the tile is empty or its piece
        # has no grab attribute; TileIsEmptyError was never raised here.
        except (NotGrabbableError, AttributeError):
            return False

    def from_surroundings(self):
        """Grabs an item from the surroundings of the Character.
        Stops at first item grabbed. Items look-up goes clockwise.

        Returns:
            bool: True if item found and grabbed, False otherwise.
        """
        for tile in self.piece.surroundings.values():
            item_grabbed = self.do(tile)
            if item_grabbed:
                return True
        else:
            return False
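
# --- Hedged, self-contained sketch (not part of the module): exercise
# Attacking with throwaway stub objects; the real package wires Action
# subclasses to its own Piece/Tile instances instead.
if __name__ == "__main__":
    class _Stub(object):
        pass

    attacker = _Stub()
    attacker.attack_damage = 3
    target = _Stub()
    target.health = 10
    tile = _Stub()
    tile.piece = target

    attack = Attacking(attacker, attack_functions=[])
    print(attack.do(tile), target.health)  # True 7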
gpl-3.0
6,046,794,451,876,565,000
35.066667
93
0.602588
false
4.54986
false
false
false
lycantropos/cetus
cetus/queries/filters.py
1
2007
from typing import Optional, Tuple, Any from cetus.types import (FiltersType, FilterType) from cetus.utils import join_str from .utils import normalize_value LOGICAL_OPERATORS = {'AND', 'OR'} INCLUSION_OPERATORS = {'IN', 'NOT IN'} RANGE_OPERATORS = {'BETWEEN'} COMPARISON_OPERATORS = {'=', '!=', '<', '>', '<=', '>=', 'IS', 'IS NOT', 'LIKE', 'NOT LIKE'} PREDICATES = (INCLUSION_OPERATORS | RANGE_OPERATORS | COMPARISON_OPERATORS) def add_filters(query: str, *, filters: Optional[Tuple[str, Any]] ) -> str: if filters: filters = filters_to_str(filters) query += f'WHERE {filters} ' return query def filters_to_str(filters: FiltersType) -> str: operator, filter_ = filters if operator in LOGICAL_OPERATORS: sub_filters = [filters_to_str(sub_filter) for sub_filter in filter_] return operator.join(f'({sub_filter})' for sub_filter in sub_filters) elif operator in PREDICATES: res = predicate_to_str(predicate_name=operator, filter_=filter_) return res else: err_msg = ('Invalid filters operator: ' f'"{operator}" is not found ' f'in logical operators ' f'and predicates lists.') raise ValueError(err_msg) def predicate_to_str( *, predicate_name: str, filter_: FilterType) -> str: column_name, value = filter_ if predicate_name in INCLUSION_OPERATORS: value = map(normalize_value, value) value = f'({join_str(value)})' elif predicate_name in RANGE_OPERATORS: value = map(normalize_value, value) value = ' AND '.join(value) else: value = normalize_value(value) return f'{column_name} {predicate_name} {value}'
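
# --- Hedged usage sketch (not part of the module): render a small filter tree
# into a WHERE clause. Exact quoting depends on normalize_value/join_str from
# cetus.utils, so the expected output below is approximate. Runnable via
# `python -m cetus.queries.filters`.
if __name__ == '__main__':
    filters = ('AND', [('>', ('age', 21)),
                       ('IN', ('name', ['Alice', 'Bob']))])
    print(add_filters('SELECT * FROM users ', filters=filters))
    # roughly: SELECT * FROM users WHERE (age > 21)AND(name IN ('Alice', 'Bob'))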
mit
-6,637,450,050,062,195,000
30.857143
59
0.539113
false
3.99006
false
false
false
RandallDW/Aruba_plugin
plugins/org.python.pydev/pysrc/_pydevd_bundle/pydevd_referrers.py
1
8832
from _pydevd_bundle.pydevd_constants import dict_contains import sys from _pydevd_bundle import pydevd_xml from os.path import basename import traceback try: from urllib import quote, quote_plus, unquote, unquote_plus except: from urllib.parse import quote, quote_plus, unquote, unquote_plus #@Reimport @UnresolvedImport #=================================================================================================== # print_var_node #=================================================================================================== def print_var_node(xml_node, stream): name = xml_node.getAttribute('name') value = xml_node.getAttribute('value') val_type = xml_node.getAttribute('type') found_as = xml_node.getAttribute('found_as') stream.write('Name: ') stream.write(unquote_plus(name)) stream.write(', Value: ') stream.write(unquote_plus(value)) stream.write(', Type: ') stream.write(unquote_plus(val_type)) if found_as: stream.write(', Found as: %s' % (unquote_plus(found_as),)) stream.write('\n') #=================================================================================================== # print_referrers #=================================================================================================== def print_referrers(obj, stream=None): if stream is None: stream = sys.stdout result = get_referrer_info(obj) from xml.dom.minidom import parseString dom = parseString(result) xml = dom.getElementsByTagName('xml')[0] for node in xml.childNodes: if node.nodeType == node.TEXT_NODE: continue if node.localName == 'for': stream.write('Searching references for: ') for child in node.childNodes: if child.nodeType == node.TEXT_NODE: continue print_var_node(child, stream) elif node.localName == 'var': stream.write('Referrer found: ') print_var_node(node, stream) else: sys.stderr.write('Unhandled node: %s\n' % (node,)) return result #=================================================================================================== # get_referrer_info #=================================================================================================== def get_referrer_info(searched_obj): DEBUG = 0 if DEBUG: sys.stderr.write('Getting referrers info.\n') try: try: if searched_obj is None: ret = ['<xml>\n'] ret.append('<for>\n') ret.append(pydevd_xml.var_to_xml( searched_obj, 'Skipping getting referrers for None', additional_in_xml=' id="%s"' % (id(searched_obj),))) ret.append('</for>\n') ret.append('</xml>') ret = ''.join(ret) return ret obj_id = id(searched_obj) try: if DEBUG: sys.stderr.write('Getting referrers...\n') import gc referrers = gc.get_referrers(searched_obj) except: traceback.print_exc() ret = ['<xml>\n'] ret.append('<for>\n') ret.append(pydevd_xml.var_to_xml( searched_obj, 'Exception raised while trying to get_referrers.', additional_in_xml=' id="%s"' % (id(searched_obj),))) ret.append('</for>\n') ret.append('</xml>') ret = ''.join(ret) return ret if DEBUG: sys.stderr.write('Found %s referrers.\n' % (len(referrers),)) curr_frame = sys._getframe() frame_type = type(curr_frame) #Ignore this frame and any caller frame of this frame ignore_frames = {} #Should be a set, but it's not available on all python versions. 
while curr_frame is not None: if basename(curr_frame.f_code.co_filename).startswith('pydev'): ignore_frames[curr_frame] = 1 curr_frame = curr_frame.f_back ret = ['<xml>\n'] ret.append('<for>\n') if DEBUG: sys.stderr.write('Searching Referrers of obj with id="%s"\n' % (obj_id,)) ret.append(pydevd_xml.var_to_xml( searched_obj, 'Referrers of obj with id="%s"' % (obj_id,))) ret.append('</for>\n') all_objects = None for r in referrers: try: if dict_contains(ignore_frames, r): continue #Skip the references we may add ourselves except: pass #Ok: unhashable type checked... if r is referrers: continue r_type = type(r) r_id = str(id(r)) representation = str(r_type) found_as = '' if r_type == frame_type: if DEBUG: sys.stderr.write('Found frame referrer: %r\n' % (r,)) for key, val in r.f_locals.items(): if val is searched_obj: found_as = key break elif r_type == dict: if DEBUG: sys.stderr.write('Found dict referrer: %r\n' % (r,)) # Try to check if it's a value in the dict (and under which key it was found) for key, val in r.items(): if val is searched_obj: found_as = key if DEBUG: sys.stderr.write(' Found as %r in dict\n' % (found_as,)) break #Ok, there's one annoying thing: many times we find it in a dict from an instance, #but with this we don't directly have the class, only the dict, so, to workaround that #we iterate over all reachable objects ad check if one of those has the given dict. if all_objects is None: all_objects = gc.get_objects() for x in all_objects: try: if getattr(x, '__dict__', None) is r: r = x r_type = type(x) r_id = str(id(r)) representation = str(r_type) break except: pass #Just ignore any error here (i.e.: ReferenceError, etc.) elif r_type in (tuple, list): if DEBUG: sys.stderr.write('Found tuple referrer: %r\n' % (r,)) #Don't use enumerate() because not all Python versions have it. i = 0 for x in r: if x is searched_obj: found_as = '%s[%s]' % (r_type.__name__, i) if DEBUG: sys.stderr.write(' Found as %s in tuple: \n' % (found_as,)) break i += 1 if found_as: if not isinstance(found_as, str): found_as = str(found_as) found_as = ' found_as="%s"' % (pydevd_xml.make_valid_xml_value(found_as),) ret.append(pydevd_xml.var_to_xml( r, representation, additional_in_xml=' id="%s"%s' % (r_id, found_as))) finally: if DEBUG: sys.stderr.write('Done searching for references.\n') #If we have any exceptions, don't keep dangling references from this frame to any of our objects. all_objects = None referrers = None searched_obj = None r = None x = None key = None val = None curr_frame = None ignore_frames = None except: traceback.print_exc() ret = ['<xml>\n'] ret.append('<for>\n') ret.append(pydevd_xml.var_to_xml( searched_obj, 'Error getting referrers for:', additional_in_xml=' id="%s"' % (id(searched_obj),))) ret.append('</for>\n') ret.append('</xml>') ret = ''.join(ret) return ret ret.append('</xml>') ret = ''.join(ret) return ret
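
# --- Hedged usage sketch (not part of the module): ask the module what is
# holding a reference to an object; the container below is illustrative.
if __name__ == '__main__':
    holder = {'my_key': object()}
    print_referrers(holder['my_key'])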
epl-1.0
8,083,347,202,527,346,000
35.8
109
0.434783
false
4.660686
false
false
false
adamcaudill/yawast
yawast/external/spinner.py
1
1596
# From: https://stackoverflow.com/a/39504463 # License: Creative Commons Attribution-Share Alike # Copyright: Victor Moyseenko import sys import threading import time class Spinner: running = False busy = False delay = 0.1 @staticmethod def spinning_cursor(): while 1: for cursor in "|/-\\": yield cursor def __init__(self, delay=None): self.spinner_generator = self.spinning_cursor() if delay and float(delay): self.delay = delay def spinner_task(self): while self.busy: try: if sys.stdout.isatty(): sys.stdout.write(next(self.spinner_generator)) sys.stdout.flush() time.sleep(self.delay) sys.stdout.write("\b") sys.stdout.flush() except Exception: # we don't care what happens here pass self.running = False def start(self): self.running = True self.busy = True threading.Thread(target=self.spinner_task).start() def stop(self, exception=None): self.busy = False time.sleep(self.delay) while self.running: pass sys.stdout.write(" ") sys.stdout.flush() sys.stdout.write("\b") sys.stdout.flush() if exception is not None: return False def __enter__(self): self.start() return self def __exit__(self, exception, value, tb): return self.stop(exception)
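
# --- Hedged usage sketch (not part of the original snippet): the class is a
# context manager, so a long-running step can be wrapped directly.
if __name__ == "__main__":
    with Spinner(delay=0.05):
        time.sleep(2)  # stand-in for real work
    print("done")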
mit
-1,195,580,193,516,973,800
23.553846
66
0.537594
false
4.26738
false
false
false
szaydel/psutil
psutil/_pslinux.py
1
40630
#!/usr/bin/env python # Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Linux platform implementation.""" from __future__ import division import os import errno import socket import struct import sys import base64 import re import warnings import _psutil_posix import _psutil_linux from psutil import _psposix from psutil._error import AccessDenied, NoSuchProcess, TimeoutExpired from psutil._common import * from psutil._compat import PY3, xrange, long, namedtuple, wraps from _psutil_linux import RLIM_INFINITY from _psutil_linux import (RLIMIT_AS, RLIMIT_CORE, RLIMIT_CPU, RLIMIT_DATA, RLIMIT_FSIZE, RLIMIT_LOCKS, RLIMIT_MEMLOCK, RLIMIT_MSGQUEUE, RLIMIT_NICE, RLIMIT_NOFILE, RLIMIT_NPROC, RLIMIT_RSS, RLIMIT_RTPRIO, RLIMIT_RTTIME, RLIMIT_SIGPENDING, RLIMIT_STACK) __extra__all__ = [ # io prio constants "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE", "IOPRIO_CLASS_IDLE", # connection status constants "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1", "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT", "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", # process resources constants "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA", "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_MSGQUEUE", "RLIMIT_NICE", "RLIMIT_NOFILE", "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING", "RLIMIT_STACK", # other "phymem_buffers", "cached_phymem"] def get_system_boot_time(): """Return the system boot time expressed in seconds since the epoch.""" f = open('/proc/stat', 'r') try: for line in f: if line.startswith('btime'): return float(line.strip().split()[1]) raise RuntimeError("line 'btime' not found") finally: f.close() def _get_num_cpus(): """Return the number of CPUs on the system""" try: return os.sysconf("SC_NPROCESSORS_ONLN") except ValueError: # as a second fallback we try to parse /proc/cpuinfo num = 0 f = open('/proc/cpuinfo', 'r') try: lines = f.readlines() finally: f.close() for line in lines: if line.lower().startswith('processor'): num += 1 # unknown format (e.g. 
amrel/sparc architectures), see: # http://code.google.com/p/psutil/issues/detail?id=200 # try to parse /proc/stat as a last resort if num == 0: f = open('/proc/stat', 'r') try: lines = f.readlines() finally: f.close() search = re.compile('cpu\d') for line in lines: line = line.split(' ')[0] if search.match(line): num += 1 if num == 0: raise RuntimeError("couldn't determine platform's NUM_CPUS") return num # Number of clock ticks per second _CLOCK_TICKS = os.sysconf("SC_CLK_TCK") _PAGESIZE = os.sysconf("SC_PAGE_SIZE") # Since these constants get determined at import time we do not want to # crash immediately; instead we'll set them to None and most likely # we'll crash later as they're used for determining process CPU stats # and creation_time try: BOOT_TIME = get_system_boot_time() except Exception: BOOT_TIME = None warnings.warn("couldn't determine platform's BOOT_TIME", RuntimeWarning) try: NUM_CPUS = _get_num_cpus() except Exception: NUM_CPUS = None warnings.warn("couldn't determine platform's NUM_CPUS", RuntimeWarning) try: TOTAL_PHYMEM = _psutil_linux.get_sysinfo()[0] except Exception: TOTAL_PHYMEM = None warnings.warn("couldn't determine platform's TOTAL_PHYMEM", RuntimeWarning) # ioprio_* constants http://linux.die.net/man/2/ioprio_get IOPRIO_CLASS_NONE = 0 IOPRIO_CLASS_RT = 1 IOPRIO_CLASS_BE = 2 IOPRIO_CLASS_IDLE = 3 # http://students.mimuw.edu.pl/lxr/source/include/net/tcp_states.h _TCP_STATES_TABLE = {"01" : CONN_ESTABLISHED, "02" : CONN_SYN_SENT, "03" : CONN_SYN_RECV, "04" : CONN_FIN_WAIT1, "05" : CONN_FIN_WAIT2, "06" : CONN_TIME_WAIT, "07" : CONN_CLOSE, "08" : CONN_CLOSE_WAIT, "09" : CONN_LAST_ACK, "0A" : CONN_LISTEN, "0B" : CONN_CLOSING } # --- system memory functions nt_virtmem_info = namedtuple('vmem', ' '.join([ # all platforms 'total', 'available', 'percent', 'used', 'free', # linux specific 'active', 'inactive', 'buffers', 'cached'])) def virtual_memory(): total, free, buffers, shared, _, _ = _psutil_linux.get_sysinfo() cached = active = inactive = None f = open('/proc/meminfo', 'r') try: for line in f: if line.startswith('Cached:'): cached = int(line.split()[1]) * 1024 elif line.startswith('Active:'): active = int(line.split()[1]) * 1024 elif line.startswith('Inactive:'): inactive = int(line.split()[1]) * 1024 if cached is not None \ and active is not None \ and inactive is not None: break else: # we might get here when dealing with exotic Linux flavors, see: # http://code.google.com/p/psutil/issues/detail?id=313 msg = "'cached', 'active' and 'inactive' memory stats couldn't " \ "be determined and were set to 0" warnings.warn(msg, RuntimeWarning) cached = active = inactive = 0 finally: f.close() avail = free + buffers + cached used = total - free percent = usage_percent((total - avail), total, _round=1) return nt_virtmem_info(total, avail, percent, used, free, active, inactive, buffers, cached) def swap_memory(): _, _, _, _, total, free = _psutil_linux.get_sysinfo() used = total - free percent = usage_percent(used, total, _round=1) # get pgin/pgouts f = open("/proc/vmstat", "r") sin = sout = None try: for line in f: # values are expressed in 4 kilo bytes, we want bytes instead if line.startswith('pswpin'): sin = int(line.split(' ')[1]) * 4 * 1024 elif line.startswith('pswpout'): sout = int(line.split(' ')[1]) * 4 * 1024 if sin is not None and sout is not None: break else: # we might get here when dealing with exotic Linux flavors, see: # http://code.google.com/p/psutil/issues/detail?id=313 msg = "'sin' and 'sout' swap memory stats couldn't " \ "be determined and were set 
to 0" warnings.warn(msg, RuntimeWarning) sin = sout = 0 finally: f.close() return nt_swapmeminfo(total, used, free, percent, sin, sout) # --- XXX deprecated memory functions @deprecated('psutil.virtual_memory().cached') def cached_phymem(): return virtual_memory().cached @deprecated('psutil.virtual_memory().buffers') def phymem_buffers(): return virtual_memory().buffers # --- system CPU functions @memoize def _get_cputimes_ntuple(): """ Return a (nt, rindex) tuple depending on the CPU times available on this Linux kernel version which may be: user, nice, system, idle, iowait, irq, softirq [steal, [guest, [guest_nice]]] """ f = open('/proc/stat', 'r') try: values = f.readline().split()[1:] finally: f.close() fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'] rindex = 8 vlen = len(values) if vlen >= 8: # Linux >= 2.6.11 fields.append('steal') rindex += 1 if vlen >= 9: # Linux >= 2.6.24 fields.append('guest') rindex += 1 if vlen >= 10: # Linux >= 3.2.0 fields.append('guest_nice') rindex += 1 return (namedtuple('cputimes', ' '.join(fields)), rindex) def get_system_cpu_times(): """Return a named tuple representing the following system-wide CPU times: user, nice, system, idle, iowait, irq, softirq [steal, [guest, [guest_nice]]] Last 3 fields may not be available on all Linux kernel versions. """ f = open('/proc/stat', 'r') try: values = f.readline().split() finally: f.close() nt, rindex = _get_cputimes_ntuple() fields = values[1:rindex] fields = [float(x) / _CLOCK_TICKS for x in fields] return nt(*fields) def get_system_per_cpu_times(): """Return a list of namedtuple representing the CPU times for every CPU available on the system. """ nt, rindex = _get_cputimes_ntuple() cpus = [] f = open('/proc/stat', 'r') try: # get rid of the first line which refers to system wide CPU stats f.readline() for line in f: if line.startswith('cpu'): fields = line.split()[1:rindex] fields = [float(x) / _CLOCK_TICKS for x in fields] entry = nt(*fields) cpus.append(entry) return cpus finally: f.close() # --- system disk functions def disk_partitions(all=False): """Return mounted disk partitions as a list of nameduples""" phydevs = [] f = open("/proc/filesystems", "r") try: for line in f: if not line.startswith("nodev"): phydevs.append(line.strip()) finally: f.close() retlist = [] partitions = _psutil_linux.get_disk_partitions() for partition in partitions: device, mountpoint, fstype, opts = partition if device == 'none': device = '' if not all: if device == '' or fstype not in phydevs: continue ntuple = nt_partition(device, mountpoint, fstype, opts) retlist.append(ntuple) return retlist get_disk_usage = _psposix.get_disk_usage # --- other sysetm functions def get_system_users(): """Return currently connected users as a list of namedtuples.""" retlist = [] rawlist = _psutil_linux.get_system_users() for item in rawlist: user, tty, hostname, tstamp, user_process = item # note: the underlying C function includes entries about # system boot, run level and others. We might want # to use them in the future. 
        if not user_process:
            continue
        if hostname == ':0.0':
            hostname = 'localhost'
        nt = nt_user(user, tty or None, hostname, tstamp)
        retlist.append(nt)
    return retlist

# --- process functions

def get_pid_list():
    """Returns a list of PIDs currently running on the system."""
    pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]
    return pids

def pid_exists(pid):
    """Check for the existence of a unix pid."""
    return _psposix.pid_exists(pid)

def net_io_counters():
    """Return network I/O statistics for every network interface
    installed on the system as a dict of raw tuples.
    """
    f = open("/proc/net/dev", "r")
    try:
        lines = f.readlines()
    finally:
        f.close()

    retdict = {}
    for line in lines[2:]:
        colon = line.find(':')
        assert colon > 0, line
        name = line[:colon].strip()
        fields = line[colon+1:].strip().split()
        bytes_recv = int(fields[0])
        packets_recv = int(fields[1])
        errin = int(fields[2])
        # dropped-in packets are the fourth column; the original read
        # fields[2] twice, duplicating errin
        dropin = int(fields[3])
        bytes_sent = int(fields[8])
        packets_sent = int(fields[9])
        errout = int(fields[10])
        dropout = int(fields[11])
        retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
                         errin, errout, dropin, dropout)
    return retdict

def disk_io_counters():
    """Return disk I/O statistics for every disk installed on the
    system as a dict of raw tuples.
    """
    # man iostat states that sectors are equivalent with blocks and
    # have a size of 512 bytes since 2.4 kernels. This value is
    # needed to calculate the amount of disk I/O in bytes.
    SECTOR_SIZE = 512

    # determine partitions we want to look for
    partitions = []
    f = open("/proc/partitions", "r")
    try:
        lines = f.readlines()[2:]
    finally:
        f.close()
    for line in reversed(lines):
        _, _, _, name = line.split()
        if name[-1].isdigit():
            # we're dealing with a partition (e.g. 'sda1'); 'sda' will
            # also be around but we want to omit it
            partitions.append(name)
        else:
            if not partitions or not partitions[-1].startswith(name):
                # we're dealing with a disk entity for which no
                # partitions have been defined (e.g. 'sda' but
                # 'sda1' was not around), see:
                # http://code.google.com/p/psutil/issues/detail?id=338
                partitions.append(name)
    #
    retdict = {}
    f = open("/proc/diskstats", "r")
    try:
        lines = f.readlines()
    finally:
        f.close()
    for line in lines:
        # http://www.mjmwired.net/kernel/Documentation/iostats.txt
        _, _, name, reads, _, rbytes, rtime, writes, _, wbytes, wtime = \
            line.split()[:11]
        if name in partitions:
            rbytes = int(rbytes) * SECTOR_SIZE
            wbytes = int(wbytes) * SECTOR_SIZE
            reads = int(reads)
            writes = int(writes)
            rtime = int(rtime)
            wtime = int(wtime)
            retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime)
    return retdict

# taken from /fs/proc/array.c
_status_map = {"R" : STATUS_RUNNING,
               "S" : STATUS_SLEEPING,
               "D" : STATUS_DISK_SLEEP,
               "T" : STATUS_STOPPED,
               "t" : STATUS_TRACING_STOP,
               "Z" : STATUS_ZOMBIE,
               "X" : STATUS_DEAD,
               "x" : STATUS_DEAD,
               "K" : STATUS_WAKE_KILL,
               "W" : STATUS_WAKING}

# --- decorators

def wrap_exceptions(fun):
    """Decorator which translates bare OSError and IOError exceptions
    into NoSuchProcess and AccessDenied.
    """
    @wraps(fun)
    def wrapper(self, *args, **kwargs):
        try:
            return fun(self, *args, **kwargs)
        except EnvironmentError:
            # ENOENT (no such file or directory) gets raised on open().
            # ESRCH (no such process) can get raised on read() if
            # process is gone in meantime.
err = sys.exc_info()[1] if err.errno in (errno.ENOENT, errno.ESRCH): raise NoSuchProcess(self.pid, self._process_name) if err.errno in (errno.EPERM, errno.EACCES): raise AccessDenied(self.pid, self._process_name) raise return wrapper class Process(object): """Linux process implementation.""" __slots__ = ["pid", "_process_name"] def __init__(self, pid): self.pid = pid self._process_name = None @wrap_exceptions def get_process_name(self): f = open("/proc/%s/stat" % self.pid) try: name = f.read().split(' ')[1].replace('(', '').replace(')', '') finally: f.close() # XXX - gets changed later and probably needs refactoring return name def get_process_exe(self): try: exe = os.readlink("/proc/%s/exe" % self.pid) except (OSError, IOError): err = sys.exc_info()[1] if err.errno == errno.ENOENT: # no such file error; might be raised also if the # path actually exists for system processes with # low pids (about 0-20) if os.path.lexists("/proc/%s/exe" % self.pid): return "" else: # ok, it is a process which has gone away raise NoSuchProcess(self.pid, self._process_name) if err.errno in (errno.EPERM, errno.EACCES): raise AccessDenied(self.pid, self._process_name) raise # readlink() might return paths containing null bytes causing # problems when used with other fs-related functions (os.*, # open(), ...) exe = exe.replace('\x00', '') # Certain names have ' (deleted)' appended. Usually this is # bogus as the file actually exists. Either way that's not # important as we don't want to discriminate executables which # have been deleted. if exe.endswith(" (deleted)") and not os.path.exists(exe): exe = exe[:-10] return exe @wrap_exceptions def get_process_cmdline(self): f = open("/proc/%s/cmdline" % self.pid) try: # return the args as a list return [x for x in f.read().split('\x00') if x] finally: f.close() @wrap_exceptions def get_process_terminal(self): tmap = _psposix._get_terminal_map() f = open("/proc/%s/stat" % self.pid) try: tty_nr = int(f.read().split(' ')[6]) finally: f.close() try: return tmap[tty_nr] except KeyError: return None @wrap_exceptions def get_process_io_counters(self): f = open("/proc/%s/io" % self.pid) try: for line in f: if line.startswith("rchar"): read_count = int(line.split()[1]) elif line.startswith("wchar"): write_count = int(line.split()[1]) elif line.startswith("read_bytes"): read_bytes = int(line.split()[1]) elif line.startswith("write_bytes"): write_bytes = int(line.split()[1]) return nt_io(read_count, write_count, read_bytes, write_bytes) finally: f.close() if not os.path.exists('/proc/%s/io' % os.getpid()): def get_process_io_counters(self): raise NotImplementedError("couldn't find /proc/%s/io (kernel " \ "too old?)" % self.pid) @wrap_exceptions def get_cpu_times(self): f = open("/proc/%s/stat" % self.pid) try: st = f.read().strip() finally: f.close() # ignore the first two values ("pid (exe)") st = st[st.find(')') + 2:] values = st.split(' ') utime = float(values[11]) / _CLOCK_TICKS stime = float(values[12]) / _CLOCK_TICKS return nt_cputimes(utime, stime) @wrap_exceptions def process_wait(self, timeout=None): try: return _psposix.wait_pid(self.pid, timeout) except TimeoutExpired: raise TimeoutExpired(self.pid, self._process_name) @wrap_exceptions def get_process_create_time(self): f = open("/proc/%s/stat" % self.pid) try: st = f.read().strip() finally: f.close() # ignore the first two values ("pid (exe)") st = st[st.rfind(')') + 2:] values = st.split(' ') # According to documentation, starttime is in field 21 and the # unit is jiffies (clock ticks). 
# We first divide it for clock ticks and then add uptime returning # seconds since the epoch, in UTC. starttime = (float(values[19]) / _CLOCK_TICKS) + BOOT_TIME return starttime @wrap_exceptions def get_memory_info(self): f = open("/proc/%s/statm" % self.pid) try: vms, rss = f.readline().split()[:2] return nt_meminfo(int(rss) * _PAGESIZE, int(vms) * _PAGESIZE) finally: f.close() _nt_ext_mem = namedtuple('meminfo', 'rss vms shared text lib data dirty') @wrap_exceptions def get_ext_memory_info(self): # ============================================================ # | FIELD | DESCRIPTION | AKA | TOP | # ============================================================ # | rss | resident set size | | RES | # | vms | total program size | size | VIRT | # | shared | shared pages (from shared mappings) | | SHR | # | text | text ('code') | trs | CODE | # | lib | library (unused in Linux 2.6) | lrs | | # | data | data + stack | drs | DATA | # | dirty | dirty pages (unused in Linux 2.6) | dt | | # ============================================================ f = open("/proc/%s/statm" % self.pid) try: vms, rss, shared, text, lib, data, dirty = \ [int(x) * _PAGESIZE for x in f.readline().split()[:7]] finally: f.close() return self._nt_ext_mem(rss, vms, shared, text, lib, data, dirty) _mmap_base_fields = ['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty', 'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap',] nt_mmap_grouped = namedtuple('mmap', ' '.join(_mmap_base_fields)) nt_mmap_ext = namedtuple('mmap', 'addr perms ' + ' '.join(_mmap_base_fields)) def get_memory_maps(self): """Return process's mapped memory regions as a list of nameduples. Fields are explained in 'man proc'; here is an updated (Apr 2012) version: http://goo.gl/fmebo """ f = None try: f = open("/proc/%s/smaps" % self.pid) first_line = f.readline() current_block = [first_line] def get_blocks(): data = {} for line in f: fields = line.split(None, 5) if not fields[0].endswith(':'): # new block section yield (current_block.pop(), data) current_block.append(line) else: try: data[fields[0]] = int(fields[1]) * 1024 except ValueError: if fields[0].startswith('VmFlags:'): # see issue #369 continue else: raise ValueError("don't know how to interpret" \ " line %r" % line) yield (current_block.pop(), data) if first_line: # smaps file can be empty for header, data in get_blocks(): hfields = header.split(None, 5) try: addr, perms, offset, dev, inode, path = hfields except ValueError: addr, perms, offset, dev, inode, path = hfields + [''] if not path: path = '[anon]' else: path = path.strip() yield (addr, perms, path, data['Rss:'], data.get('Size:', 0), data.get('Pss:', 0), data.get('Shared_Clean:', 0), data.get('Shared_Dirty:', 0), data.get('Private_Clean:', 0), data.get('Private_Dirty:', 0), data.get('Referenced:', 0), data.get('Anonymous:', 0), data.get('Swap:', 0)) f.close() except EnvironmentError: # XXX - Can't use wrap_exceptions decorator as we're # returning a generator; this probably needs some # refactoring in order to avoid this code duplication. 
if f is not None: f.close() err = sys.exc_info()[1] if err.errno in (errno.ENOENT, errno.ESRCH): raise NoSuchProcess(self.pid, self._process_name) if err.errno in (errno.EPERM, errno.EACCES): raise AccessDenied(self.pid, self._process_name) raise except: if f is not None: f.close() raise f.close() if not os.path.exists('/proc/%s/smaps' % os.getpid()): def get_memory_maps(self, ext): msg = "couldn't find /proc/%s/smaps; kernel < 2.6.14 or CONFIG_MMU " \ "kernel configuration option is not enabled" % self.pid raise NotImplementedError(msg) @wrap_exceptions def get_process_cwd(self): # readlink() might return paths containing null bytes causing # problems when used with other fs-related functions (os.*, # open(), ...) path = os.readlink("/proc/%s/cwd" % self.pid) return path.replace('\x00', '') @wrap_exceptions def get_num_ctx_switches(self): vol = unvol = None f = open("/proc/%s/status" % self.pid) try: for line in f: if line.startswith("voluntary_ctxt_switches"): vol = int(line.split()[1]) elif line.startswith("nonvoluntary_ctxt_switches"): unvol = int(line.split()[1]) if vol is not None and unvol is not None: return nt_ctxsw(vol, unvol) raise NotImplementedError("the 'voluntary_ctxt_switches' and " \ "'nonvoluntary_ctxt_switches' fields were not found in " \ "/proc/%s/status; the kernel is probably older than 2.6.23" \ % self.pid) finally: f.close() @wrap_exceptions def get_process_num_threads(self): f = open("/proc/%s/status" % self.pid) try: for line in f: if line.startswith("Threads:"): return int(line.split()[1]) raise NotImplementedError("line not found") finally: f.close() @wrap_exceptions def get_process_threads(self): thread_ids = os.listdir("/proc/%s/task" % self.pid) thread_ids.sort() retlist = [] hit_enoent = False for thread_id in thread_ids: try: f = open("/proc/%s/task/%s/stat" % (self.pid, thread_id)) except EnvironmentError: err = sys.exc_info()[1] if err.errno == errno.ENOENT: # no such file or directory; it means thread # disappeared on us hit_enoent = True continue raise try: st = f.read().strip() finally: f.close() # ignore the first two values ("pid (exe)") st = st[st.find(')') + 2:] values = st.split(' ') utime = float(values[11]) / _CLOCK_TICKS stime = float(values[12]) / _CLOCK_TICKS ntuple = nt_thread(int(thread_id), utime, stime) retlist.append(ntuple) if hit_enoent: # raise NSP if the process disappeared on us os.stat('/proc/%s' % self.pid) return retlist @wrap_exceptions def get_process_nice(self): #f = open('/proc/%s/stat' % self.pid, 'r') #try: # data = f.read() # return int(data.split()[18]) #finally: # f.close() # Use C implementation return _psutil_posix.getpriority(self.pid) @wrap_exceptions def set_process_nice(self, value): return _psutil_posix.setpriority(self.pid, value) @wrap_exceptions def get_process_cpu_affinity(self): from_bitmask = lambda x: [i for i in xrange(64) if (1 << i) & x] bitmask = _psutil_linux.get_process_cpu_affinity(self.pid) return from_bitmask(bitmask) @wrap_exceptions def set_process_cpu_affinity(self, value): def to_bitmask(l): if not l: raise ValueError("invalid argument %r" % l) out = 0 for b in l: if not isinstance(b, (int, long)) or b < 0: raise ValueError("invalid argument %r" % b) out |= 2**b return out bitmask = to_bitmask(value) try: _psutil_linux.set_process_cpu_affinity(self.pid, bitmask) except OSError: err = sys.exc_info()[1] if err.errno == errno.EINVAL: allcpus = list(range(len(get_system_per_cpu_times()))) for cpu in value: if cpu not in allcpus: raise ValueError("invalid CPU %i" % cpu) raise # only starting from 
kernel 2.6.13
    if hasattr(_psutil_linux, "ioprio_get"):

        @wrap_exceptions
        def get_process_ionice(self):
            ioclass, value = _psutil_linux.ioprio_get(self.pid)
            return nt_ionice(ioclass, value)

        @wrap_exceptions
        def set_process_ionice(self, ioclass, value):
            if ioclass in (IOPRIO_CLASS_NONE, None):
                if value:
                    raise ValueError("can't specify value with IOPRIO_CLASS_NONE")
                ioclass = IOPRIO_CLASS_NONE
                value = 0
            if ioclass in (IOPRIO_CLASS_RT, IOPRIO_CLASS_BE):
                if value is None:
                    value = 4
            elif ioclass == IOPRIO_CLASS_IDLE:
                if value:
                    raise ValueError("can't specify value with IOPRIO_CLASS_IDLE")
                value = 0
            else:
                value = 0
            if not 0 <= value <= 8:
                raise ValueError("value argument range expected is between 0 and 8")
            return _psutil_linux.ioprio_set(self.pid, ioclass, value)

    @wrap_exceptions
    def process_rlimit(self, resource, limits=None):
        if limits is None:
            # get
            return _psutil_linux.prlimit(self.pid, resource)
        else:
            # set
            if len(limits) != 2:
                raise ValueError("second argument must be a (soft, hard) tuple")
            soft, hard = limits
            _psutil_linux.prlimit(self.pid, resource, soft, hard)

    @wrap_exceptions
    def get_process_status(self):
        f = open("/proc/%s/status" % self.pid)
        try:
            for line in f:
                if line.startswith("State:"):
                    letter = line.split()[1]
                    if letter in _status_map:
                        return _status_map[letter]
                    return constant(-1, '?')
        finally:
            f.close()

    @wrap_exceptions
    def get_open_files(self):
        retlist = []
        files = os.listdir("/proc/%s/fd" % self.pid)
        hit_enoent = False
        for fd in files:
            file = "/proc/%s/fd/%s" % (self.pid, fd)
            if os.path.islink(file):
                try:
                    file = os.readlink(file)
                except OSError:
                    # ENOENT == file which is gone in the meantime
                    err = sys.exc_info()[1]
                    if err.errno == errno.ENOENT:
                        hit_enoent = True
                        continue
                    raise
                else:
                    # If file is not an absolute path there's no way
                    # to tell whether it's a regular file or not,
                    # so we skip it. A regular file is always supposed
                    # to be absolutized though.
                    if file.startswith('/') and isfile_strict(file):
                        ntuple = nt_openfile(file, int(fd))
                        retlist.append(ntuple)
        if hit_enoent:
            # raise NSP if the process disappeared on us
            os.stat('/proc/%s' % self.pid)
        return retlist

    @wrap_exceptions
    def get_connections(self, kind='inet'):
        """Return connections opened by process as a list of namedtuples.
        The kind parameter filters for connections that fit the following
        criteria:

        Kind Value      Number of connections using
        inet            IPv4 and IPv6
        inet4           IPv4
        inet6           IPv6
        tcp             TCP
        tcp4            TCP over IPv4
        tcp6            TCP over IPv6
        udp             UDP
        udp4            UDP over IPv4
        udp6            UDP over IPv6
        all             the sum of all the possible families and protocols
        """
        # Note: in case of UNIX sockets we're only able to determine the
        # local bound path while the remote endpoint is not retrievable:
        # http://goo.gl/R3GHM
        inodes = {}
        # os.listdir() is gonna raise a lot of access denied
        # exceptions in case of unprivileged user; that's fine:
        # lsof does the same so it's unlikely that we can do better.
for fd in os.listdir("/proc/%s/fd" % self.pid): try: inode = os.readlink("/proc/%s/fd/%s" % (self.pid, fd)) except OSError: continue if inode.startswith('socket:['): # the process is using a socket inode = inode[8:][:-1] inodes[inode] = fd if not inodes: # no connections for this process return [] def process(file, family, type_): retlist = [] try: f = open(file, 'r') except IOError: # IPv6 not supported on this platform err = sys.exc_info()[1] if err.errno == errno.ENOENT and file.endswith('6'): return [] else: raise try: f.readline() # skip the first line for line in f: # IPv4 / IPv6 if family in (socket.AF_INET, socket.AF_INET6): _, laddr, raddr, status, _, _, _, _, _, inode = \ line.split()[:10] if inode in inodes: laddr = self._decode_address(laddr, family) raddr = self._decode_address(raddr, family) if type_ == socket.SOCK_STREAM: status = _TCP_STATES_TABLE[status] else: status = CONN_NONE fd = int(inodes[inode]) conn = nt_connection(fd, family, type_, laddr, raddr, status) retlist.append(conn) elif family == socket.AF_UNIX: tokens = line.split() _, _, _, _, type_, _, inode = tokens[0:7] if inode in inodes: if len(tokens) == 8: path = tokens[-1] else: path = "" fd = int(inodes[inode]) type_ = int(type_) conn = nt_connection(fd, family, type_, path, None, CONN_NONE) retlist.append(conn) else: raise ValueError(family) return retlist finally: f.close() tcp4 = ("tcp" , socket.AF_INET , socket.SOCK_STREAM) tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM) udp4 = ("udp" , socket.AF_INET , socket.SOCK_DGRAM) udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM) unix = ("unix", socket.AF_UNIX, None) tmap = { "all" : (tcp4, tcp6, udp4, udp6, unix), "tcp" : (tcp4, tcp6), "tcp4" : (tcp4,), "tcp6" : (tcp6,), "udp" : (udp4, udp6), "udp4" : (udp4,), "udp6" : (udp6,), "unix" : (unix,), "inet" : (tcp4, tcp6, udp4, udp6), "inet4": (tcp4, udp4), "inet6": (tcp6, udp6), } if kind not in tmap: raise ValueError("invalid %r kind argument; choose between %s" % (kind, ', '.join([repr(x) for x in tmap]))) ret = [] for f, family, type_ in tmap[kind]: ret += process("/proc/net/%s" % f, family, type_) # raise NSP if the process disappeared on us os.stat('/proc/%s' % self.pid) return ret @wrap_exceptions def get_num_fds(self): return len(os.listdir("/proc/%s/fd" % self.pid)) @wrap_exceptions def get_process_ppid(self): f = open("/proc/%s/status" % self.pid) try: for line in f: if line.startswith("PPid:"): # PPid: nnnn return int(line.split()[1]) raise NotImplementedError("line not found") finally: f.close() @wrap_exceptions def get_process_uids(self): f = open("/proc/%s/status" % self.pid) try: for line in f: if line.startswith('Uid:'): _, real, effective, saved, fs = line.split() return nt_uids(int(real), int(effective), int(saved)) raise NotImplementedError("line not found") finally: f.close() @wrap_exceptions def get_process_gids(self): f = open("/proc/%s/status" % self.pid) try: for line in f: if line.startswith('Gid:'): _, real, effective, saved, fs = line.split() return nt_gids(int(real), int(effective), int(saved)) raise NotImplementedError("line not found") finally: f.close() @staticmethod def _decode_address(addr, family): """Accept an "ip:port" address as displayed in /proc/net/* and convert it into a human readable form, like: "0500000A:0016" -> ("10.0.0.5", 22) "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521) The IP address portion is a little or big endian four-byte hexadecimal number; that is, the least significant byte is listed first, so we need to reverse the order of the 
bytes to convert it to an IP address. The port is represented as a two-byte hexadecimal number. Reference: http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html """ ip, port = addr.split(':') port = int(port, 16) if PY3: ip = ip.encode('ascii') # this usually refers to a local socket in listen mode with # no end-points connected if not port: return () if family == socket.AF_INET: # see: http://code.google.com/p/psutil/issues/detail?id=201 if sys.byteorder == 'little': ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1]) else: ip = socket.inet_ntop(family, base64.b16decode(ip)) else: # IPv6 # old version - let's keep it, just in case... #ip = ip.decode('hex') #return socket.inet_ntop(socket.AF_INET6, # ''.join(ip[i:i+4][::-1] for i in xrange(0, 16, 4))) ip = base64.b16decode(ip) # see: http://code.google.com/p/psutil/issues/detail?id=201 if sys.byteorder == 'little': ip = socket.inet_ntop(socket.AF_INET6, struct.pack('>4I', *struct.unpack('<4I', ip))) else: ip = socket.inet_ntop(socket.AF_INET6, struct.pack('<4I', *struct.unpack('<4I', ip))) return (ip, port)
bsd-3-clause
5,321,260,057,435,817,000
35.53777
84
0.510066
false
3.982552
false
false
false
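The _decode_address helper in the record above converts the kernel's hex-encoded "ip:port" strings from /proc/net/*. Below is a minimal standalone sketch of just the little-endian IPv4 branch, using only the standard library; the function name decode_ipv4 is mine, while the sample value and expected result come straight from the docstring above.

import base64
import socket
import sys

def decode_ipv4(addr):
    # addr looks like "0500000A:0016": hex-encoded IP (host byte order) and hex port
    ip_hex, port_hex = addr.split(':')
    port = int(port_hex, 16)
    raw = base64.b16decode(ip_hex)
    if sys.byteorder == 'little':
        raw = raw[::-1]  # reverse bytes so inet_ntop sees network byte order
    return socket.inet_ntop(socket.AF_INET, raw), port

# Worked example from the docstring: "0500000A:0016" -> ("10.0.0.5", 22)
assert decode_ipv4("0500000A:0016") == ("10.0.0.5", 22)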
Rdbaker/Mealbound
ceraon/utils.py
1
4801
# -*- coding: utf-8 -*- """Helper utilities and decorators.""" from datetime import timedelta as td from datetime import tzinfo from threading import Thread import requests from flask import Blueprint, current_app, flash, request def get_fb_access_token(): """Get an access token from facebook for graph API calls.""" base_url = 'https://graph.facebook.com/oauth/access_token?' \ 'grant_type=client_credentials' res = requests.get( base_url + '&client_id={}'.format(current_app.config['FB_APP_ID']) + '&client_secret={}'.format(current_app.config['FB_APP_SECRET'])) return res.json().get('access_token') def friendly_arg_get(key, default=None, type_cast=None): """Same as request.args.get but returns default on ValueError.""" try: return request.args.get(key, default=default, type=type_cast) except: return default class FlaskThread(Thread): """A utility class for threading in a flask app.""" def __init__(self, *args, **kwargs): """Create a new thread with a flask context.""" super().__init__(*args, **kwargs) self.app = current_app._get_current_object() def run(self): """Run the thread.""" # Make this an effective no-op if we're testing. if not self.app.config['TESTING']: with self.app.app_context(): super().run() def flash_errors(form, category='warning'): """Flash all errors for a form.""" for field, errors in form.errors.items(): for error in errors: flash('{0} - {1}'.format(getattr(form, field).label.text, error), category) class RESTBlueprint(Blueprint): """A base class for a RESTful API's view blueprint. This comes with helper methods that set up routes based on method/actions. It infers the route_prefix based on the version and blueprint name in the format: `/api/<version string>/<blueprint name string>` then creates routes from that. Example usage: mod = RESTBlueprint('users', __name__, 'v2') # route is: GET /api/v2/users/<uid> @mod.find() def find_user(uid): return User.get(uid) # route is: PATCH /api/v2/users/<uid> @mod.update() def update_user(uid): return User.update(name='new name') # route is: POST /api/v2/users @mod.create() def create_user(): return User.create(name='my new user') The `find`, `update`, `replace`, and `destroy` methods will add a parameter called `uid` to your route. Make sure to correctly resolve that to your entity's ID. 
""" def __init__(self, blueprint_name, name, version): return super(RESTBlueprint, self).__init__( 'api.{}.{}'.format(version, blueprint_name), name, url_prefix='/api/{}/{}'.format(version, blueprint_name)) def flexible_route(self, *args, **kwargs): kwargs.update({'strict_slashes': False}) return self.route(*args, **kwargs) def create(self, *args, **kwargs): kwargs.update({'methods': ['POST']}) return self.flexible_route('/', *args, **kwargs) def list(self, *args, **kwargs): kwargs.update({'methods': ['GET']}) return self.flexible_route('/', *args, **kwargs) def find(self, converter='string', *args, **kwargs): kwargs.update({'methods': ['GET']}) return self.flexible_route('/<{}:uid>'.format(converter), *args, **kwargs) def update(self, converter='string', *args, **kwargs): kwargs.update({'methods': ['PATCH']}) return self.flexible_route('/<{}:uid>'.format(converter), *args, **kwargs) def replace(self, converter='string', *args, **kwargs): kwargs.update({'methods': ['PUT']}) return self.flexible_route('/<{}:uid>'.format(converter), *args, **kwargs) def destroy(self, converter='string', *args, **kwargs): kwargs.update({'methods': ['DELETE']}) return self.flexible_route('/<{}:uid>'.format(converter), *args, **kwargs) class UTC(tzinfo): """tzinfo for a UTC timezone.""" def dst(self, dt_obj): """Return the DST offset in minutes from UTC.""" return 0 def fromutc(self, dt_obj): """Return a datetime object in local time from a UTC datetime.""" return dt_obj def tzname(self, dt_obj): """Return the name of the timezone from a datetime obj.""" return 'UTC/GMT' def utcoffset(self, dt_obj): """Return a timedelta showing offset from UTC. Negative values indicating West of UTC """ return td()
bsd-3-clause
-1,679,549,717,610,896,600
32.573427
78
0.586961
false
4.034454
false
false
false
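The UTC class in the record above implements the datetime.tzinfo interface by hand. A quick usage sketch, assuming the module is importable as ceraon.utils per the path field of the record:

from datetime import datetime

from ceraon.utils import UTC  # import path assumed from the record's path field

now = datetime.now(tz=UTC())  # timezone-aware "now"
print(now.tzname())           # -> 'UTC/GMT'
print(now.utcoffset())        # -> 0:00:00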
ActiveState/code
recipes/Python/577336_Fast_reentrant_optimistic_lock_implemented/recipe-577336.py
1
4351
from cpython cimport pythread from cpython.exc cimport PyErr_NoMemory cdef class FastRLock: """Fast, re-entrant locking. Under uncongested conditions, the lock is never acquired but only counted. Only when a second thread comes in and notices that the lock is needed, it acquires the lock and notifies the first thread to release it when it's done. This is all made possible by the wonderful GIL. """ cdef pythread.PyThread_type_lock _real_lock cdef long _owner # ID of thread owning the lock cdef int _count # re-entry count cdef int _pending_requests # number of pending requests for real lock cdef bint _is_locked # whether the real lock is acquired def __cinit__(self): self._owner = -1 self._count = 0 self._is_locked = False self._pending_requests = 0 self._real_lock = pythread.PyThread_allocate_lock() if self._real_lock is NULL: PyErr_NoMemory() def __dealloc__(self): if self._real_lock is not NULL: pythread.PyThread_free_lock(self._real_lock) self._real_lock = NULL def acquire(self, bint blocking=True): return lock_lock(self, pythread.PyThread_get_thread_ident(), blocking) def release(self): if self._owner != pythread.PyThread_get_thread_ident(): raise RuntimeError("cannot release un-acquired lock") unlock_lock(self) # compatibility with threading.RLock def __enter__(self): # self.acquire() return lock_lock(self, pythread.PyThread_get_thread_ident(), True) def __exit__(self, t, v, tb): # self.release() if self._owner != pythread.PyThread_get_thread_ident(): raise RuntimeError("cannot release un-acquired lock") unlock_lock(self) def _is_owned(self): return self._owner == pythread.PyThread_get_thread_ident() cdef inline bint lock_lock(FastRLock lock, long current_thread, bint blocking) nogil: # Note that this function *must* hold the GIL when being called. # We just use 'nogil' in the signature to make sure that no Python # code execution slips in that might free the GIL if lock._count: # locked! - by myself? if current_thread == lock._owner: lock._count += 1 return 1 elif not lock._pending_requests: # not locked, not requested - go! lock._owner = current_thread lock._count = 1 return 1 # need to get the real lock return _acquire_lock( lock, current_thread, pythread.WAIT_LOCK if blocking else pythread.NOWAIT_LOCK) cdef bint _acquire_lock(FastRLock lock, long current_thread, int wait) nogil: # Note that this function *must* hold the GIL when being called. # We just use 'nogil' in the signature to make sure that no Python # code execution slips in that might free the GIL if not lock._is_locked and not lock._pending_requests: # someone owns it but didn't acquire the real lock - do that # now and tell the owner to release it when done. Note that we # do not release the GIL here as we must absolutely be the one # who acquires the lock now. if not pythread.PyThread_acquire_lock(lock._real_lock, wait): return 0 #assert not lock._is_locked lock._is_locked = True lock._pending_requests += 1 with nogil: # wait for the lock owning thread to release it locked = pythread.PyThread_acquire_lock(lock._real_lock, wait) lock._pending_requests -= 1 #assert not lock._is_locked #assert lock._count == 0 if not locked: return 0 lock._is_locked = True lock._owner = current_thread lock._count = 1 return 1 cdef inline void unlock_lock(FastRLock lock) nogil: # Note that this function *must* hold the GIL when being called. 
# We just use 'nogil' in the signature to make sure that no Python # code execution slips in that might free the GIL #assert lock._owner == pythread.PyThread_get_thread_ident() #assert lock._count > 0 lock._count -= 1 if lock._count == 0: lock._owner = -1 if lock._is_locked: pythread.PyThread_release_lock(lock._real_lock) lock._is_locked = False
mit
3,017,226,937,884,119,000
36.188034
85
0.638934
false
3.864121
false
false
false
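Once the Cython source in the record above is compiled (e.g. via cythonize), FastRLock is meant as a drop-in replacement for threading.RLock. A hedged usage sketch; the module name fastrlock is an assumption that depends on how the .pyx file is built:

import threading

from fastrlock import FastRLock  # module name assumed

lock = FastRLock()
counts = [0]

def worker(n):
    for _ in range(n):
        with lock:      # re-entrant: the same thread may nest acquires
            with lock:
                counts[0] += 1

threads = [threading.Thread(target=worker, args=(1000,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert counts[0] == 4000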
rodrigofaccioli/drugdesign
virtualscreening/vina/spark/hydrogen_bond_crud.py
1
4480
from pyspark.sql import SQLContext, Row
from vina_utils import get_ligand_from_receptor_ligand_model

"""
Creates a data frame of the residue list
    sqlCtx - Spark SQL context
    residue_listRDD - RDD for creating the data frame. It is created by the load_file_select_hydrogen_bond function
"""
def create_df_residue_list(sqlCtx, residue_listRDD):
    df_residue_list = sqlCtx.createDataFrame(residue_listRDD)
    df_residue_list.registerTempTable("residue_list")
    return df_residue_list

"""
Creates a data frame of all residues for hydrogen bonds
    sqlCtx - Spark SQL context
    all_residue_split - RDD for creating the data frame. It is created by the load_file_all_residue_hbonds function
"""
def create_df_all_residue(sqlCtx, all_residue_split):
    df_all_residue = sqlCtx.createDataFrame(all_residue_split)
    df_all_residue.registerTempTable("all_residue")
    return df_all_residue

"""
Creates a data frame of all residues filtered by the residue list
    sqlCtx - Spark SQL context
Important: the functions create_df_all_residue and create_df_residue_list must be executed before running this function
"""
def create_df_all_residue_filtered_by_res_list(sqlCtx):
    #Getting all information based on list of residues
    sql = """
       SELECT all_residue.*
       FROM all_residue
       JOIN residue_list ON residue_list.residue = all_residue.receptor_residue
          """
    df_result = sqlCtx.sql(sql)
    df_result.registerTempTable("residues_filtered_by_list")
    return df_result

"""
Groups, by pose, all residues filtered by the residue list
    sqlCtx - Spark SQL context
Important: the function create_df_all_residue_filtered_by_res_list must be executed before running this function
"""
def get_group_by_poses_all_residue_filtered_by_res_list(sqlCtx):
    sql = """
       SELECT pose, count(*) as num_res
       FROM residues_filtered_by_list
       GROUP BY pose
       ORDER BY num_res DESC
          """
    df_result = sqlCtx.sql(sql)
    return df_result

"""
Creates a dataframe of hydrogen bonds normalized by donors and acceptors
    sqlCtx - Spark SQL context
    df_only_poses - data frame created by the get_group_by_poses_all_residue_filtered_by_res_list function
Important: the database is created by the load_database function from the database_io file.
That function creates an RDD only, so the lines below must be executed before calling this function:
    #Loading database
    rdd_database = load_database(sc, ligand_database)
    #Creating Dataframe
    database_table = sqlCtx.createDataFrame(rdd_database)
    database_table.registerTempTable("database")
"""
def create_df_normalized_by_donors_acceptors(sqlCtx, df_only_poses):
    normalizedRDD = df_only_poses.map(lambda p: Row(num_res=int(p.num_res), ligand=get_ligand_from_receptor_ligand_model(p.pose), pose=str(p.pose))).collect()
    #Creating Dataframe
    normalized_residues_filtered_by_list_table = sqlCtx.createDataFrame(normalizedRDD)
    normalized_residues_filtered_by_list_table.registerTempTable("normalized_residues_filtered_by_list")

    # Normalized Hydrogen Bond by donors and acceptors
    sql = """
       SELECT pose, (b.num_res / a.hb_donors_acceptors) as normalized_hb
       FROM database a
       JOIN normalized_residues_filtered_by_list b ON b.ligand = a.ligand
       ORDER BY normalized_hb DESC
          """
    df_result = sqlCtx.sql(sql)
    return df_result

"""
Creates a dataframe of hydrogen bonds normalized by heavy atoms
    sqlCtx - Spark SQL context
Important: the database is created by the load_database function from the database_io file.
That function creates an RDD only, so the lines below must be executed before calling this function:
    #Loading database
    rdd_database = load_database(sc, ligand_database)
    #Creating Dataframe
    database_table = sqlCtx.createDataFrame(rdd_database)
    database_table.registerTempTable("database")
"""
def create_df_normalized_by_heavy_atoms(sqlCtx):
    # Normalized Hydrogen Bond by heavy atoms
    sql = """
       SELECT pose, (b.num_res / a.heavyAtom) as normalized_hb
       FROM database a
       JOIN normalized_residues_filtered_by_list b ON b.ligand = a.ligand
       ORDER BY normalized_hb DESC
          """
    df_result = sqlCtx.sql(sql)
    return df_result

"""
Creates a dataframe of hydrogen bonds
    sqlCtx - Spark SQL context
    rdd_hydrogen_bond - RDD for creating the dataframe. It is created by the load_file_summary_hbonds function
"""
def create_df_hydrogen_bond(sqlCtx, rdd_hydrogen_bond):
    hydrogen_bond_table = sqlCtx.createDataFrame(rdd_hydrogen_bond)
    hydrogen_bond_table.registerTempTable("hydrogenbond")
    return hydrogen_bond_table
apache-2.0
4,840,884,905,067,512,000
36.647059
157
0.75692
false
3.098202
false
false
false
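The docstrings in the record above encode an implicit call order: the temp tables must be registered before the SQL that joins them runs. Here is a sketch of that order with stand-in data; everything outside the crud module (the RDD contents, the load_database step, the Spark setup) is an assumption:

from pyspark import SparkContext
from pyspark.sql import SQLContext, Row

import hydrogen_bond_crud as crud  # module name from the record's path field

sc = SparkContext()
sqlCtx = SQLContext(sc)

# Stand-in rows; in the real pipeline these RDDs come from the
# load_file_* helpers that the docstrings above refer to.
residue_listRDD = sc.parallelize([Row(residue='HIS57')])
all_residue_split = sc.parallelize(
    [Row(pose='receptor_-_ligand_model1.pdb', receptor_residue='HIS57')])

# 1. Both temp tables must exist before the filtering join runs.
crud.create_df_residue_list(sqlCtx, residue_listRDD)
crud.create_df_all_residue(sqlCtx, all_residue_split)

# 2. Filter, then group by pose.
crud.create_df_all_residue_filtered_by_res_list(sqlCtx)
df_poses = crud.get_group_by_poses_all_residue_filtered_by_res_list(sqlCtx)

# 3. The normalization functions additionally require a "database"
#    temp table (see the Important notes in the docstrings):
#    database_table = sqlCtx.createDataFrame(load_database(sc, ligand_database))
#    database_table.registerTempTable("database")
#    crud.create_df_normalized_by_donors_acceptors(sqlCtx, df_poses)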
benediktkr/lokun-record
record/sec.py
1
2077
from random import randint

def compare1toN(str1, strl):
    return any([compare(str1, a) for a in strl])

def compare(str1, str2):
    return compare_const2(str1, str2)

def compare_const2(str1, str2):
    if len(str1) != len(str2):
        return False
    result = 0
    for x, y in zip(str1, str2):
        result |= ord(x) ^ ord(y)
    return result == 0

def compare_const(str1, str2):
    """Constant-time string comparison, to avoid timing
    attacks. Leaks the length, but that's ok since we are always
    comparing hashes, and the only information the adversary stands
    to gain from the length of a hash is a better guess at which
    hashing algorithm is being used. At which point, I'd like to
    point out Shannon's Maxim."""
    length = min(len(str1), len(str2))
    ret = True
    for i in xrange(length):
        if str1[i] != str2[i]:
            ret = False
    if len(str1) != len(str2):
        ret = False
    return ret

def compare_noleak(str1, str2):
    """A non-random version that doesn't leak the length, made for Baldur :)

    str1 should be the user-supplied string, and str2 the string you
    compare against.

    NOTE: Pads with 0x00, only intended to compare strings, not byte-lists."""
    l1 = len(str1)
    l2 = len(str2)
    if l1 > l2:
        # If the user string is longer than the source string, pad.
        delta = l1 - l2
        str2 += "\x00"*delta
    # Start from the length comparison so that a shorter user string
    # that happens to be a prefix of str2 does not compare equal.
    ret = l1 == l2
    for i in xrange(l1):
        if str1[i] != str2[i]:
            ret = False
    return ret

def compare_rnd(str1, str2):
    """Constant-time string comparison, to avoid timing
    attacks. Start in a random char of the string. Doesn't leak the
    length, since the starting point (and thus the breaking point)
    was randomly chosen."""
    length = min(len(str1), len(str2))
    start = randint(0, length-1)
    for i in xrange(length):
        j = (start+i) % length
        if str1[j] != str2[j]:
            return False
    if len(str1) != len(str2):
        return False
    return True
agpl-3.0
-8,581,726,397,058,848,000
24.329268
77
0.601348
false
3.484899
false
false
false
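The point of compare_const2 in the record above is that its runtime does not depend on where the first mismatching character sits. A short sketch of using the module the way it seems intended, on equal-length hash digests; the import path is assumed from the record's path field:

import hashlib

from sec import compare, compare1toN  # import path assumed

stored = hashlib.sha256(b"secret").hexdigest()
supplied = hashlib.sha256(b"guess").hexdigest()

print(compare(supplied, stored))                # False, with no early exit on first mismatch
print(compare1toN(stored, [supplied, stored]))  # True: matches some entry in the list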
dcrosta/mongo-disco
app/job.py
1
2372
#!/usr/bin/env python # encoding: utf-8 ''' File: DiscoJob.py Author: NYU ITP team Description: Disco Job Wrapper ''' from disco.core import Job, result_iterator from disco.worker.classic.worker import Params from disco.worker.classic.modutil import locate_modules,find_modules from mongodb_io import mongodb_output_stream,mongodb_input_stream from splitter import calculate_splits as do_split class DiscoJob(): def __init__(self,config,map,reduce): import config_util self.config = config_util.config #if the user doesn't specify output, print to stdout if not config.get('output_uri') and not config.get('print_to_stdout'): config['print_to_stdout'] = True for item in config: self.config[item] = config[item] self.map = map self.reduce = reduce self.job = Job() self.params = Params() for key in self.config: self.params.__dict__[key] = self.config[key] def run(self): if self.config['print_to_stdout']: self.job.run(input = do_split(self.config), map = self.map, reduce = self.reduce, params = self.params, map_input_stream = mongodb_input_stream, required_modules= ['mongodb_io', 'mongodb_input', 'config_util', 'mongo_util', 'mongodb_output']) for key, value in result_iterator(self.job.wait(show=True)): print key, value else: self.job.run(input = do_split(self.config), map = self.map, reduce = self.reduce, params = self.params, map_input_stream = mongodb_input_stream, reduce_output_stream = mongodb_output_stream, required_modules= ['mongodb_io', 'mongodb_input', 'config_util', 'mongo_util', 'mongodb_output']) if self.config.get("job_wait",False): self.job.wait(show=True)
apache-2.0
1,969,891,187,076,187,000
32.885714
78
0.49747
false
4.458647
true
false
false
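A hedged sketch of driving the DiscoJob wrapper above as a word count. The map/reduce callables follow Disco's classic worker conventions; only 'output_uri', 'print_to_stdout' and 'job_wait' are read by DiscoJob itself, so the input key consumed by the splitter/config_util modules is an assumption, shown as a placeholder:

from job import DiscoJob  # module name from the record's path field ("app/job.py")

def word_map(record, params):
    yield record.get('word', ''), 1

def word_reduce(iter, params):
    from disco.util import kvgroup
    for word, counts in kvgroup(sorted(iter)):
        yield word, sum(counts)

config = {
    # Keys actually read by DiscoJob itself:
    'print_to_stdout': True,  # no 'output_uri', so results are printed
    'job_wait': True,
    # Placeholder: the connection keys consumed by the splitter are assumptions.
    'input_uri': 'mongodb://localhost/test.collection',
}

DiscoJob(config, word_map, word_reduce).run()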
ropable/resource_tracking
tracking/migrations/0004_auto_20200102_0914.py
1
1126
# Generated by Django 2.1.11 on 2020-01-02 01:14 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('tracking', '0003_auto_20190308_1114'), ] operations = [ migrations.AlterField( model_name='device', name='symbol', field=models.CharField(choices=[('2 wheel drive', '2-Wheel Drive'), ('4 wheel drive passenger', '4-Wheel Drive Passenger'), ('4 wheel drive ute', '4-Wheel Drive (Ute)'), ('light unit', 'Light Unit'), ('heavy duty', 'Heavy Duty'), ('gang truck', 'Gang Truck'), ('snorkel', 'Snorkel'), ('dozer', 'Dozer'), ('grader', 'Grader'), ('loader', 'Loader'), ('tender', 'Tender'), ('float', 'Float'), ('fixed wing aircraft', 'Waterbomber'), ('rotary aircraft', 'Rotary'), ('spotter aircraft', 'Spotter'), ('helitac', 'Helitac'), ('rescue helicopter', 'Rescue Helicopter'), ('aviation fuel truck', 'Aviation Fuel Truck'), (None, ''), ('comms bus', 'Communications Bus'), ('boat', 'Boat'), ('person', 'Person'), ('other', 'Other'), ('unknown', 'Unknown')], default='other', max_length=32), ), ]
bsd-3-clause
-3,249,550,416,547,287,600
61.555556
788
0.60746
false
3.263768
false
false
false
NathanW2/QGIS
tests/src/python/test_qgsfieldformatters.py
1
13493
# -*- coding: utf-8 -*- """QGIS Unit tests for field formatters. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = 'Matthias Kuhn' __date__ = '05/12/2016' __copyright__ = 'Copyright 2016, The QGIS Project' # This will get replaced with a git SHA1 when you do a git archive __revision__ = '$Format:%H$' import qgis # NOQA from qgis.core import (QgsFeature, QgsProject, QgsRelation, QgsVectorLayer, QgsValueMapFieldFormatter, QgsValueRelationFieldFormatter, QgsRelationReferenceFieldFormatter, QgsRangeFieldFormatter, QgsSettings) from qgis.testing import start_app, unittest start_app() class TestQgsValueMapFieldFormatter(unittest.TestCase): VALUEMAP_NULL_TEXT = "{2839923C-8B7D-419E-B84B-CA2FE9B80EC7}" def test_representValue(self): QgsSettings().setValue("qgis/nullValue", "NULL") layer = QgsVectorLayer("none?field=number1:integer&field=number2:double&field=text1:string&field=number3:integer&field=number4:double&field=text2:string", "layer", "memory") self.assertTrue(layer.isValid()) QgsProject.instance().addMapLayer(layer) f = QgsFeature() f.setAttributes([2, 2.5, 'NULL', None, None, None]) layer.dataProvider().addFeatures([f]) fieldFormatter = QgsValueMapFieldFormatter() # Tests with different value types occurring in the value map config = {'map': {'two': '2', 'twoandhalf': '2.5', 'NULL text': 'NULL', 'nothing': self.VALUEMAP_NULL_TEXT}} self.assertEqual(fieldFormatter.representValue(layer, 0, config, None, 2), 'two') self.assertEqual(fieldFormatter.representValue(layer, 1, config, None, 2.5), 'twoandhalf') self.assertEqual(fieldFormatter.representValue(layer, 2, config, None, 'NULL'), 'NULL text') # Tests with null values of different types, if value map contains null self.assertEqual(fieldFormatter.representValue(layer, 3, config, None, None), 'nothing') self.assertEqual(fieldFormatter.representValue(layer, 4, config, None, None), 'nothing') self.assertEqual(fieldFormatter.representValue(layer, 5, config, None, None), 'nothing') # Tests with fallback display for different value types config = {} self.assertEqual(fieldFormatter.representValue(layer, 0, config, None, 2), '(2)') self.assertEqual(fieldFormatter.representValue(layer, 1, config, None, 2.5), '(2.50000)') self.assertEqual(fieldFormatter.representValue(layer, 2, config, None, 'NULL'), '(NULL)') # Tests with fallback display for null in different types of fields self.assertEqual(fieldFormatter.representValue(layer, 3, config, None, None), '(NULL)') self.assertEqual(fieldFormatter.representValue(layer, 4, config, None, None), '(NULL)') self.assertEqual(fieldFormatter.representValue(layer, 5, config, None, None), '(NULL)') QgsProject.instance().removeAllMapLayers() class TestQgsValueRelationFieldFormatter(unittest.TestCase): def test_representValue(self): first_layer = QgsVectorLayer("none?field=foreign_key:integer", "first_layer", "memory") self.assertTrue(first_layer.isValid()) second_layer = QgsVectorLayer("none?field=pkid:integer&field=decoded:string", "second_layer", "memory") self.assertTrue(second_layer.isValid()) QgsProject.instance().addMapLayer(second_layer) f = QgsFeature() f.setAttributes([123]) first_layer.dataProvider().addFeatures([f]) f = QgsFeature() f.setAttributes([123, 'decoded_val']) second_layer.dataProvider().addFeatures([f]) fieldFormatter = QgsValueRelationFieldFormatter() # Everything valid config = 
{'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), 'decoded_val')

        # Could not find a match in the foreign layer
        config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')

        # Missing Layer
        config = {'Key': 'pkid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')

        # Invalid Layer
        config = {'Layer': 'invalid', 'Key': 'pkid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')

        # Invalid Key
        config = {'Layer': second_layer.id(), 'Key': 'invalid', 'Value': 'decoded'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')

        # Invalid Value
        config = {'Layer': second_layer.id(), 'Key': 'pkid', 'Value': 'invalid'}
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '(456)')

        QgsProject.instance().removeMapLayer(second_layer.id())

    def test_valueToStringList(self):
        def _test(a, b):
            self.assertEqual(QgsValueRelationFieldFormatter.valueToStringList(a), b)

        _test([1, 2, 3], ["1", "2", "3"])
        _test("{1,2,3}", ["1", "2", "3"])
        _test(['1', '2', '3'], ["1", "2", "3"])
        _test('not an array', ['not an array'])


class TestQgsRelationReferenceFieldFormatter(unittest.TestCase):

    def test_representValue(self):
        first_layer = QgsVectorLayer("none?field=foreign_key:integer", "first_layer", "memory")
        self.assertTrue(first_layer.isValid())
        second_layer = QgsVectorLayer("none?field=pkid:integer&field=decoded:string", "second_layer", "memory")
        self.assertTrue(second_layer.isValid())
        QgsProject.instance().addMapLayers([first_layer, second_layer])
        f = QgsFeature()
        f.setAttributes([123])
        first_layer.dataProvider().addFeatures([f])
        f = QgsFeature()
        f.setAttributes([123, 'decoded_val'])
        second_layer.dataProvider().addFeatures([f])
        relMgr = QgsProject.instance().relationManager()

        fieldFormatter = QgsRelationReferenceFieldFormatter()

        rel = QgsRelation()
        rel.setId('rel1')
        rel.setName('Relation Number One')
        rel.setReferencingLayer(first_layer.id())
        rel.setReferencedLayer(second_layer.id())
        rel.addFieldPair('foreign_key', 'pkid')
        self.assertTrue(rel.isValid())

        relMgr.addRelation(rel)

        # Everything valid
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), 'decoded_val')

        # Could not find a match in the foreign layer
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '456'), '456')

        # Invalid relation id
        config = {'Relation': 'invalid'}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')

        # No display expression
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression(None)
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')

        # Invalid display expression
        config = {'Relation': rel.id()}
        second_layer.setDisplayExpression('invalid +')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')

        # Missing relation
        config = {}
        second_layer.setDisplayExpression('decoded')
        self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123')

        # Inconsistent layer provided to
representValue() config = {'Relation': rel.id()} second_layer.setDisplayExpression('decoded') self.assertEqual(fieldFormatter.representValue(second_layer, 0, config, None, '123'), '123') # Inconsistent idx provided to representValue() config = {'Relation': rel.id()} second_layer.setDisplayExpression('decoded') self.assertEqual(fieldFormatter.representValue(first_layer, 1, config, None, '123'), '123') # Invalid relation rel = QgsRelation() rel.setId('rel2') rel.setName('Relation Number Two') rel.setReferencingLayer(first_layer.id()) rel.addFieldPair('foreign_key', 'pkid') self.assertFalse(rel.isValid()) relMgr.addRelation(rel) config = {'Relation': rel.id()} second_layer.setDisplayExpression('decoded') self.assertEqual(fieldFormatter.representValue(first_layer, 0, config, None, '123'), '123') QgsProject.instance().removeAllMapLayers() class TestQgsRangeFieldFormatter(unittest.TestCase): def test_representValue(self): layer = QgsVectorLayer("point?field=int:integer&field=double:double", "layer", "memory") self.assertTrue(layer.isValid()) QgsProject.instance().addMapLayers([layer]) fieldFormatter = QgsRangeFieldFormatter() # Precision is ignored for integers self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '123'), '123') self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, '123000'), '123000') self.assertEqual(fieldFormatter.representValue(layer, 0, {'Precision': 1}, None, None), 'NULL') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 1}, None, None), 'NULL') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 1}, None, '123'), '123.0') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, None), 'NULL') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123000'), '123000.00') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0'), '0.00') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123'), '123.00') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.123'), '0.12') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.127'), '0.13') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0'), '0.000') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0.127'), '0.127') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '1.27e-1'), '0.127') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-123'), '-123.00') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.123'), '-0.12') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.127'), '-0.13') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-0.127'), '-0.127') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-1.27e-1'), '-0.127') QgsSettings().setValue("locale/overrideFlag", True) QgsSettings().setValue("locale/userLocale", 'it') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, None), 'NULL') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123000'), '123000,00') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0'), '0,00') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '123'), 
'123,00') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.123'), '0,12') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '0.127'), '0,13') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0'), '0,000') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '0.127'), '0,127') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '1.27e-1'), '0,127') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-123'), '-123,00') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.123'), '-0,12') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 2}, None, '-0.127'), '-0,13') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-0.127'), '-0,127') self.assertEqual(fieldFormatter.representValue(layer, 1, {'Precision': 3}, None, '-1.27e-1'), '-0,127') QgsProject.instance().removeAllMapLayers() if __name__ == '__main__': unittest.main()
gpl-2.0
9,146,336,332,509,080,000
49.347015
162
0.646261
false
3.896333
true
false
false
flacjacket/sympy
sympy/core/tests/test_expr.py
1
48018
from __future__ import division from sympy import (Add, Basic, S, Symbol, Wild, Float, Integer, Rational, I, sin, cos, tan, exp, log, nan, oo, sqrt, symbols, Integral, sympify, WildFunction, Poly, Function, Derivative, Number, pi, NumberSymbol, zoo, Piecewise, Mul, Pow, nsimplify, ratsimp, trigsimp, radsimp, powsimp, simplify, together, collect, factorial, apart, combsimp, factor, refine, cancel, Tuple, default_sort_key, DiracDelta, gamma, Dummy, Sum, E, exp_polar, Lambda) from sympy.core.function import AppliedUndef from sympy.abc import a, b, c, d, e, n, t, u, x, y, z from sympy.physics.secondquant import FockState from sympy.physics.units import meter from sympy.utilities.pytest import raises, XFAIL class DummyNumber(object): """ Minimal implementation of a number that works with SymPy. If one has a Number class (e.g. Sage Integer, or some other custom class) that one wants to work well with SymPy, one has to implement at least the methods of this class DummyNumber, resp. its subclasses I5 and F1_1. Basically, one just needs to implement either __int__() or __float__() and then one needs to make sure that the class works with Python integers and with itself. """ def __radd__(self, a): if isinstance(a, (int, float)): return a + self.number return NotImplemented def __truediv__(a, b): return a.__div__(b) def __rtruediv__(a, b): return a.__rdiv__(b) def __add__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number + a return NotImplemented def __rsub__(self, a): if isinstance(a, (int, float)): return a - self.number return NotImplemented def __sub__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number - a return NotImplemented def __rmul__(self, a): if isinstance(a, (int, float)): return a * self.number return NotImplemented def __mul__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number * a return NotImplemented def __rdiv__(self, a): if isinstance(a, (int, float)): return a / self.number return NotImplemented def __div__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number / a return NotImplemented def __rpow__(self, a): if isinstance(a, (int, float)): return a ** self.number return NotImplemented def __pow__(self, a): if isinstance(a, (int, float, DummyNumber)): return self.number ** a return NotImplemented def __pos__(self): return self.number def __neg__(self): return - self.number class I5(DummyNumber): number = 5 def __int__(self): return self.number class F1_1(DummyNumber): number = 1.1 def __float__(self): return self.number i5 = I5() f1_1 = F1_1() # basic sympy objects basic_objs = [ Rational(2), Float("1.3"), x, y, pow(x,y)*y, ] # all supported objects all_objs = basic_objs + [ 5, 5.5, i5, f1_1 ] def dotest(s): for x in all_objs: for y in all_objs: s(x,y) return True def test_basic(): def j(a,b): x = a x = +a x = -a x = a+b x = a-b x = a*b x = a/b x = a**b assert dotest(j) def test_ibasic(): def s(a,b): x = a x += b x = a x -= b x = a x *= b x = a x /= b assert dotest(s) def test_relational(): assert (pi < 3) == False assert (pi <= 3) == False assert (pi > 3) == True assert (pi >= 3) == True assert (-pi < 3) == True assert (-pi <= 3) == True assert (-pi > 3) == False assert (-pi >= 3) == False assert (x - 2 < x - 3) == False def test_relational_noncommutative(): from sympy import Lt, Gt, Le, Ge A, B = symbols('A,B', commutative=False) assert (A < B) == Lt(A, B) assert (A <= B) == Le(A, B) assert (A > B) == Gt(A, B) assert (A >= B) == Ge(A, B) def test_basic_nostr(): for obj in basic_objs: 
raises(TypeError, lambda: obj + '1') raises(TypeError, lambda: obj - '1') if obj == 2: if hasattr(int, '__index__'): # Python 2.5+ (PEP 357) assert obj * '1' == '11' else: raises(TypeError, lambda: obj * '1') raises(TypeError, lambda: obj / '1') raises(TypeError, lambda: obj ** '1') def test_leadterm(): assert (3+2*x**(log(3)/log(2)-1)).leadterm(x) == (3,0) assert (1/x**2+1+x+x**2).leadterm(x)[1] == -2 assert (1/x+1+x+x**2).leadterm(x)[1] == -1 assert (x**2+1/x).leadterm(x)[1] == -1 assert (1+x**2).leadterm(x)[1] == 0 assert (x+1).leadterm(x)[1] == 0 assert (x+x**2).leadterm(x)[1] == 1 assert (x**2).leadterm(x)[1] == 2 def test_as_leading_term(): assert (3+2*x**(log(3)/log(2)-1)).as_leading_term(x) == 3 assert (1/x**2+1+x+x**2).as_leading_term(x) == 1/x**2 assert (1/x+1+x+x**2).as_leading_term(x) == 1/x assert (x**2+1/x).as_leading_term(x) == 1/x assert (1+x**2).as_leading_term(x) == 1 assert (x+1).as_leading_term(x) == 1 assert (x+x**2).as_leading_term(x) == x assert (x**2).as_leading_term(x) == x**2 assert (x + oo).as_leading_term(x) == oo def test_leadterm2(): assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).leadterm(x) == \ (sin(1 + sin(1)), 0) def test_leadterm3(): assert (y+z+x).leadterm(x) == (y+z, 0) def test_as_leading_term2(): assert (x*cos(1)*cos(1 + sin(1)) + sin(1 + sin(1))).as_leading_term(x) == \ sin(1 + sin(1)) def test_as_leading_term3(): assert (2+pi+x).as_leading_term(x) == 2 + pi assert (2*x+pi*x+x**2).as_leading_term(x) == (2+pi)*x def test_as_leading_term_stub(): class foo(Function): pass assert foo(1/x).as_leading_term(x) == foo(1/x) assert foo(1).as_leading_term(x) == foo(1) raises(NotImplementedError, lambda: foo(x).as_leading_term(x)) def test_atoms(): assert sorted(list(x.atoms())) == [x] assert sorted(list((1+x).atoms())) == sorted([1, x]) assert sorted(list((1+2*cos(x)).atoms(Symbol))) == [x] assert sorted(list((1+2*cos(x)).atoms(Symbol,Number))) == sorted([1, 2, x]) assert sorted(list((2*(x**(y**x))).atoms())) == sorted([2, x, y]) assert sorted(list(Rational(1,2).atoms())) == [S.Half] assert sorted(list(Rational(1,2).atoms(Symbol))) == [] assert sorted(list(sin(oo).atoms(oo))) == [oo] assert sorted(list(Poly(0, x).atoms())) == [S.Zero] assert sorted(list(Poly(1, x).atoms())) == [S.One] assert sorted(list(Poly(x, x).atoms())) == [x] assert sorted(list(Poly(x, x, y).atoms())) == [x] assert sorted(list(Poly(x + y, x, y).atoms())) == sorted([x, y]) assert sorted(list(Poly(x + y, x, y, z).atoms())) == sorted([x, y]) assert sorted(list(Poly(x + y*t, x, y, z).atoms())) == sorted([t, x, y]) assert list((I*pi).atoms(NumberSymbol)) == [pi] assert sorted((I*pi).atoms(NumberSymbol, I)) == \ sorted((I*pi).atoms(I,NumberSymbol)) == [pi, I] assert exp(exp(x)).atoms(exp) == set([exp(exp(x)), exp(x)]) assert (1 + x*(2 + y)+exp(3 + z)).atoms(Add) == set( [1 + x*(2 + y)+exp(3 + z), 2 + y, 3 + z]) # issue 3033 f = Function('f') e = (f(x) + sin(x) + 2) assert e.atoms(AppliedUndef) == \ set([f(x)]) assert e.atoms(AppliedUndef, Function) == \ set([f(x), sin(x)]) assert e.atoms(Function) == \ set([f(x), sin(x)]) assert e.atoms(AppliedUndef, Number) == \ set([f(x), S(2)]) assert e.atoms(Function, Number) == \ set([S(2), sin(x), f(x)]) def test_is_polynomial(): k = Symbol('k', nonnegative=True, integer=True) assert Rational(2).is_polynomial(x, y, z) == True assert (S.Pi).is_polynomial(x, y, z) == True assert x.is_polynomial(x) == True assert x.is_polynomial(y) == True assert (x**2).is_polynomial(x) == True assert (x**2).is_polynomial(y) == True assert (x**(-2)).is_polynomial(x) == 
False assert (x**(-2)).is_polynomial(y) == True assert (2**x).is_polynomial(x) == False assert (2**x).is_polynomial(y) == True assert (x**k).is_polynomial(x) == False assert (x**k).is_polynomial(k) == False assert (x**x).is_polynomial(x) == False assert (k**k).is_polynomial(k) == False assert (k**x).is_polynomial(k) == False assert (x**(-k)).is_polynomial(x) == False assert ((2*x)**k).is_polynomial(x) == False assert (x**2 + 3*x - 8).is_polynomial(x) == True assert (x**2 + 3*x - 8).is_polynomial(y) == True assert (x**2 + 3*x - 8).is_polynomial() == True assert sqrt(x).is_polynomial(x) == False assert (sqrt(x)**3).is_polynomial(x) == False assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(x) == True assert (x**2 + 3*x*sqrt(y) - 8).is_polynomial(y) == False assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial() == True assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial() == False assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(2)).is_polynomial(x, y) == True assert ((x**2)*(y**2) + x*(y**2) + y*x + exp(x)).is_polynomial(x, y) == False def test_is_rational_function(): assert Integer(1).is_rational_function() == True assert Integer(1).is_rational_function(x) == True assert Rational(17,54).is_rational_function() == True assert Rational(17,54).is_rational_function(x) == True assert (12/x).is_rational_function() == True assert (12/x).is_rational_function(x) == True assert (x/y).is_rational_function() == True assert (x/y).is_rational_function(x) == True assert (x/y).is_rational_function(x, y) == True assert (x**2+1/x/y).is_rational_function() == True assert (x**2+1/x/y).is_rational_function(x) == True assert (x**2+1/x/y).is_rational_function(x, y) == True assert (sin(y)/x).is_rational_function() == False assert (sin(y)/x).is_rational_function(y) == False assert (sin(y)/x).is_rational_function(x) == True assert (sin(y)/x).is_rational_function(x, y) == False def test_SAGE1(): #see http://code.google.com/p/sympy/issues/detail?id=247 class MyInt: def _sympy_(self): return Integer(5) m = MyInt() e = Rational(2)*m assert e == 10 raises(TypeError, lambda: Rational(2)*MyInt) def test_SAGE2(): class MyInt(object): def __int__(self): return 5 assert sympify(MyInt()) == 5 e = Rational(2)*MyInt() assert e == 10 raises(TypeError, lambda: Rational(2)*MyInt) def test_SAGE3(): class MySymbol: def __rmul__(self, other): return ('mys', other, self) o = MySymbol() e = x*o assert e == ('mys', x, o) def test_len(): e = x*y assert len(e.args) == 2 e = x+y+z assert len(e.args) == 3 def test_doit(): a = Integral(x**2, x) assert isinstance(a.doit(), Integral) == False assert isinstance(a.doit(integrals=True), Integral) == False assert isinstance(a.doit(integrals=False), Integral) == True assert (2*Integral(x, x)).doit() == x**2 def test_attribute_error(): raises(AttributeError, lambda: x.cos()) raises(AttributeError, lambda: x.sin()) raises(AttributeError, lambda: x.exp()) def test_args(): assert (x*y).args in ((x, y), (y, x)) assert (x+y).args in ((x, y), (y, x)) assert (x*y+1).args in ((x*y, 1), (1, x*y)) assert sin(x*y).args == (x*y,) assert sin(x*y).args[0] == x*y assert (x**y).args == (x,y) assert (x**y).args[0] == x assert (x**y).args[1] == y def test_iter_basic_args(): assert list(sin(x*y).iter_basic_args()) == [x*y] assert list((x**y).iter_basic_args()) == [x, y] def test_noncommutative_expand_issue658(): A, B, C = symbols('A,B,C', commutative=False) assert A*B - B*A != 0 assert (A*(A+B)*B).expand() == A**2*B + A*B**2 assert (A*(A+B+C)*B).expand() == A**2*B + A*B**2 + A*C*B def 
test_as_numer_denom(): a, b, c = symbols('a, b, c') assert nan.as_numer_denom() == (nan, 1) assert oo.as_numer_denom() == (oo, 1) assert (-oo).as_numer_denom() == (-oo, 1) assert zoo.as_numer_denom() == (zoo, 1) assert (-zoo).as_numer_denom() == (zoo, 1) assert x.as_numer_denom() == (x, 1) assert (1/x).as_numer_denom() == (1, x) assert (x/y).as_numer_denom() == (x, y) assert (x/2).as_numer_denom() == (x, 2) assert (x*y/z).as_numer_denom() == (x*y, z) assert (x/(y*z)).as_numer_denom() == (x, y*z) assert Rational(1, 2).as_numer_denom() == (1, 2) assert (1/y**2).as_numer_denom() == (1, y**2) assert (x/y**2).as_numer_denom() == (x, y**2) assert ((x**2+1)/y).as_numer_denom() == (x**2+1, y) assert (x*(y+1)/y**7).as_numer_denom() == (x*(y+1), y**7) assert (x**-2).as_numer_denom() == (1, x**2) assert (a/x + b/2/x + c/3/x).as_numer_denom() == \ (6*a + 3*b + 2*c, 6*x) assert (a/x + b/2/x + c/3/y).as_numer_denom() == \ (2*c*x + y*(6*a + 3*b), 6*x*y) assert (a/x + b/2/x + c/.5/x).as_numer_denom() == \ (2*a + b + 4.0*c, 2*x) # this should take no more than a few seconds assert int(log(Add(*[Dummy()/i/x for i in xrange(1, 705)] ).as_numer_denom()[1]/x).n(4)) == 705 for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]: assert (i + x/3).as_numer_denom() == \ (x + i, 3) assert (S.Infinity + x/3 + y/4).as_numer_denom() == \ (4*x + 3*y + S.Infinity, 12) assert (oo*x + zoo*y).as_numer_denom() == \ (zoo*y + oo*x, 1) A, B, C = symbols('A,B,C', commutative=False) assert (A*B*C**-1).as_numer_denom() == (A*B*C**-1, 1) assert (A*B*C**-1/x).as_numer_denom() == (A*B*C**-1, x) assert (C**-1*A*B).as_numer_denom() == (C**-1*A*B, 1) assert (C**-1*A*B/x).as_numer_denom() == (C**-1*A*B, x) assert ((A*B*C)**-1).as_numer_denom() == ((A*B*C)**-1, 1) assert ((A*B*C)**-1/x).as_numer_denom() == ((A*B*C)**-1, x) def test_as_independent(): assert (2*x*sin(x)+y+x).as_independent(x) == (y, x + 2*x*sin(x)) assert (2*x*sin(x)+y+x).as_independent(y) == (x + 2*x*sin(x), y) assert (2*x*sin(x)+y+x).as_independent(x, y) == (0, y + x + 2*x*sin(x)) assert (x*sin(x)*cos(y)).as_independent(x) == (cos(y), x*sin(x)) assert (x*sin(x)*cos(y)).as_independent(y) == (x*sin(x), cos(y)) assert (x*sin(x)*cos(y)).as_independent(x, y) == (1, x*sin(x)*cos(y)) assert (sin(x)).as_independent(x) == (1, sin(x)) assert (sin(x)).as_independent(y) == (sin(x), 1) assert (2*sin(x)).as_independent(x) == (2, sin(x)) assert (2*sin(x)).as_independent(y) == (2*sin(x), 1) # issue 1804 = 1766b n1, n2, n3 = symbols('n1 n2 n3', commutative=False) assert (n1 + n1*n2).as_independent(n2) == (n1, n1*n2) assert (n2*n1 + n1*n2).as_independent(n2) == (0, n1*n2 + n2*n1) assert (n1*n2*n1).as_independent(n2) == (n1, n2*n1) assert (n1*n2*n1).as_independent(n1) == (1, n1*n2*n1) assert (3*x).as_independent(x, as_Add=True) == (0, 3*x) assert (3*x).as_independent(x, as_Add=False) == (3, x) assert (3+x).as_independent(x, as_Add=True) == (3, x) assert (3+x).as_independent(x, as_Add=False) == (1, 3 + x) # issue 2380 assert (3*x).as_independent(Symbol) == (3, x) # issue 2549 assert (n1*x*y).as_independent(x) == (n1*y, x) assert ((x + n1)*(x - y)).as_independent(x) == (1, (x + n1)*(x - y)) assert ((x + n1)*(x - y)).as_independent(y) == (x + n1, x - y) assert (DiracDelta(x - n1)*DiracDelta(x - y)).as_independent(x) == (1, DiracDelta(x - n1)*DiracDelta(x - y)) assert (x*y*n1*n2*n3).as_independent(n2) == (x*y*n1, n2*n3) assert (x*y*n1*n2*n3).as_independent(n1) == (x*y, n1*n2*n3) assert (x*y*n1*n2*n3).as_independent(n3) == (x*y*n1*n2, n3) assert (DiracDelta(x - n1)*DiracDelta(y - 
n1)*DiracDelta(x - n2)).as_independent(y) == \
           (DiracDelta(x - n1), DiracDelta(y - n1)*DiracDelta(x - n2))

    # issue 2685
    assert (x + Integral(x, (x, 1, 2))).as_independent(x, strict=True) == \
           (Integral(x, (x, 1, 2)), x)

def test_call():
    # See the long history of this in issues 1927 and 2006.

    # No effect as there are no callables
    assert sin(x)(1) == sin(x)
    assert (1+sin(x))(1) == 1+sin(x)

    # Effect in the presence of callables
    l = Lambda(x, 2*x)
    assert (l+x)(y) == 2*y+x
    assert (x**l)(2) == x**4

    # TODO UndefinedFunction does not subclass Expr
    #f = Function('f')
    #assert (2*f)(x) == 2*f(x)

def test_replace():
    f = log(sin(x)) + tan(sin(x**2))

    assert f.replace(sin, cos) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(sin, lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))

    a = Wild('a')

    assert f.replace(sin(a), cos(a)) == log(cos(x)) + tan(cos(x**2))
    assert f.replace(sin(a), lambda a: sin(2*a)) == log(sin(2*x)) + tan(sin(2*x**2))

    g = 2*sin(x**3)

    assert g.replace(lambda expr: expr.is_Number, lambda expr: expr**2) == 4*sin(x**9)

    assert cos(x).replace(cos, sin, map=True) == (sin(x), {cos(x): sin(x)})
    assert sin(x).replace(cos, sin) == sin(x)

    assert (y*sin(x)).replace(sin, lambda expr: sin(expr)/y) == sin(x)

def test_find():
    expr = (x + y + 2 + sin(3*x))

    assert expr.find(lambda u: u.is_Integer) == set([S(2), S(3)])
    assert expr.find(lambda u: u.is_Symbol) == set([x, y])

    assert expr.find(lambda u: u.is_Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(lambda u: u.is_Symbol, group=True) == {x: 2, y: 1}

    assert expr.find(Integer) == set([S(2), S(3)])
    assert expr.find(Symbol) == set([x, y])

    assert expr.find(Integer, group=True) == {S(2): 1, S(3): 1}
    assert expr.find(Symbol, group=True) == {x: 2, y: 1}

    a = Wild('a')

    expr = sin(sin(x)) + sin(x) + cos(x) + x

    assert expr.find(lambda u: type(u) is sin) == set([sin(x), sin(sin(x))])
    assert expr.find(lambda u: type(u) is sin, group=True) == {sin(x): 2, sin(sin(x)): 1}

    assert expr.find(sin(a)) == set([sin(x), sin(sin(x))])
    assert expr.find(sin(a), group=True) == {sin(x): 2, sin(sin(x)): 1}

    assert expr.find(sin) == set([sin(x), sin(sin(x))])
    assert expr.find(sin, group=True) == {sin(x): 2, sin(sin(x)): 1}

def test_count():
    expr = (x + y + 2 + sin(3*x))

    assert expr.count(lambda u: u.is_Integer) == 2
    assert expr.count(lambda u: u.is_Symbol) == 3

    assert expr.count(Integer) == 2
    assert expr.count(Symbol) == 3
    assert expr.count(2) == 1

    a = Wild('a')

    assert expr.count(sin) == 1
    assert expr.count(sin(a)) == 1
    assert expr.count(lambda u: type(u) is sin) == 1

def test_has_basics():
    f = Function('f')
    g = Function('g')
    p = Wild('p')

    assert sin(x).has(x)
    assert sin(x).has(sin)
    assert not sin(x).has(y)
    assert not sin(x).has(cos)
    assert f(x).has(x)
    assert f(x).has(f)
    assert not f(x).has(y)
    assert not f(x).has(g)

    assert f(x).diff(x).has(x)
    assert f(x).diff(x).has(f)
    assert f(x).diff(x).has(Derivative)
    assert not f(x).diff(x).has(y)
    assert not f(x).diff(x).has(g)
    assert not f(x).diff(x).has(sin)

    assert (x**2).has(Symbol)
    assert not (x**2).has(Wild)
    assert (2*p).has(Wild)

    assert not x.has()

def test_has_multiple():
    f = x**2*y + sin(2**t + log(z))

    assert f.has(x)
    assert f.has(y)
    assert f.has(z)
    assert f.has(t)

    assert not f.has(u)

    assert f.has(x, y, z, t)
    assert f.has(x, y, z, t, u)

    i = Integer(4400)

    assert not i.has(x)

    assert (i*x**i).has(x)
    assert not (i*y**i).has(x)
    assert (i*y**i).has(x, y)
    assert not (i*y**i).has(x, z)

def test_has_piecewise():
    f = (x*y + 3/y)**(3 + 2)
    g = Function('g')
    h = Function('h')
    p = Piecewise((g(x), x < -1), (1, x <= 1), (f, True))
assert p.has(x) assert p.has(y) assert not p.has(z) assert p.has(1) assert p.has(3) assert not p.has(4) assert p.has(f) assert p.has(g) assert not p.has(h) def test_has_iterative(): A, B, C = symbols('A,B,C', commutative=False) f = x*gamma(x)*sin(x)*exp(x*y)*A*B*C*cos(x*A*B) assert f.has(x) assert f.has(x*y) assert f.has(x*sin(x)) assert not f.has(x*sin(y)) assert f.has(x*A) assert f.has(x*A*B) assert not f.has(x*A*C) assert f.has(x*A*B*C) assert not f.has(x*A*C*B) assert f.has(x*sin(x)*A*B*C) assert not f.has(x*sin(x)*A*C*B) assert not f.has(x*sin(y)*A*B*C) assert f.has(x*gamma(x)) assert not f.has(x + sin(x)) assert (x & y & z).has(x & z) def test_has_integrals(): f = Integral(x**2 + sin(x*y*z), (x, 0, x + y + z)) assert f.has(x + y) assert f.has(x + z) assert f.has(y + z) assert f.has(x*y) assert f.has(x*z) assert f.has(y*z) assert not f.has(2*x + y) assert not f.has(2*x*y) def test_has_tuple(): f = Function('f') g = Function('g') h = Function('h') assert Tuple(x, y).has(x) assert not Tuple(x, y).has(z) assert Tuple(f(x), g(x)).has(x) assert not Tuple(f(x), g(x)).has(y) assert Tuple(f(x), g(x)).has(f) assert Tuple(f(x), g(x)).has(f(x)) assert not Tuple(f, g).has(x) assert Tuple(f, g).has(f) assert not Tuple(f, g).has(h) assert Tuple(True).has(True) is True # .has(1) will also be True def test_has_units(): from sympy.physics.units import m, s assert (x*m/s).has(x) assert (x*m/s).has(y, z) is False def test_has_polys(): poly = Poly(x**2 + x*y*sin(z), x, y, t) assert poly.has(x) assert poly.has(x, y, z) assert poly.has(x, y, z, t) def test_has_physics(): assert FockState((x, y)).has(x) def test_as_poly_as_expr(): f = x**2 + 2*x*y assert f.as_poly().as_expr() == f assert f.as_poly(x, y).as_expr() == f assert (f + sin(x)).as_poly(x, y) is None p = Poly(f, x, y) assert p.as_poly() == p def test_nonzero(): assert bool(S.Zero) == False assert bool(S.One) == True assert bool(x) == True assert bool(x+y) == True assert bool(x-x) == False assert bool(x*y) == True assert bool(x*1) == True assert bool(x*0) == False def test_is_number(): assert Float(3.14).is_number == True assert Integer(737).is_number == True assert Rational(3, 2).is_number == True assert Rational(8).is_number == True assert x.is_number == False assert (2*x).is_number == False assert (x + y).is_number == False assert log(2).is_number == True assert log(x).is_number == False assert (2 + log(2)).is_number == True assert (8+log(2)).is_number == True assert (2 + log(x)).is_number == False assert (8+log(2)+x).is_number == False assert (1+x**2/x-x).is_number == True assert Tuple(Integer(1)).is_number == False assert Add(2, x).is_number == False assert Mul(3, 4).is_number == True assert Pow(log(2), 2).is_number == True assert oo.is_number == True g = WildFunction('g') assert g.is_number == False assert (2*g).is_number == False assert (x**2).subs(x, 3).is_number == True # test extensibility of .is_number # on subinstances of Basic class A(Basic): pass a = A() assert a.is_number == False def test_as_coeff_add(): assert S(2).as_coeff_add() == (2, ()) assert S(3.0).as_coeff_add() == (0, (S(3.0),)) assert S(-3.0).as_coeff_add() == (0, (S(-3.0),)) assert x .as_coeff_add() == ( 0, (x,)) assert (-1+x).as_coeff_add() == (-1, (x,)) assert ( 2+x).as_coeff_add() == ( 2, (x,)) assert ( 1+x).as_coeff_add() == ( 1, (x,)) assert (x + y).as_coeff_add(y) == (x, (y,)) assert (3*x).as_coeff_add(y) == (3*x, ()) # don't do expansion e = (x + y)**2 assert e.as_coeff_add(y) == (0, (e,)) def test_as_coeff_mul(): assert S(2).as_coeff_mul() == (2, ()) assert 
S(3.0).as_coeff_mul() == (1, (S(3.0),)) assert S(-3.0).as_coeff_mul() == (-1, (S(3.0),)) assert x .as_coeff_mul() == ( 1, (x,)) assert (-x).as_coeff_mul() == (-1, (x,)) assert (2*x).as_coeff_mul() == (2, (x,)) assert (x*y).as_coeff_mul(y) == (x, (y,)) assert (3 + x).as_coeff_mul(y) == (3 + x, ()) # don't do expansion e = exp(x + y) assert e.as_coeff_mul(y) == (1, (e,)) e = 2**(x + y) assert e.as_coeff_mul(y) == (1, (e,)) def test_as_coeff_exponent(): assert (3*x**4).as_coeff_exponent(x) == (3, 4) assert (2*x**3).as_coeff_exponent(x) == (2, 3) assert (4*x**2).as_coeff_exponent(x) == (4, 2) assert (6*x**1).as_coeff_exponent(x) == (6, 1) assert (3*x**0).as_coeff_exponent(x) == (3, 0) assert (2*x**0).as_coeff_exponent(x) == (2, 0) assert (1*x**0).as_coeff_exponent(x) == (1, 0) assert (0*x**0).as_coeff_exponent(x) == (0, 0) assert (-1*x**0).as_coeff_exponent(x) == (-1, 0) assert (-2*x**0).as_coeff_exponent(x) == (-2, 0) assert (2*x**3+pi*x**3).as_coeff_exponent(x) == (2+pi, 3) assert (x*log(2)/(2*x + pi*x)).as_coeff_exponent(x) == \ (log(2)/(2+pi), 0) # 1685 D = Derivative f = Function('f') fx = D(f(x), x) assert fx.as_coeff_exponent(f(x)) == (fx ,0) def test_extractions(): assert ((x*y)**3).extract_multiplicatively(x**2 * y) == x*y**2 assert ((x*y)**3).extract_multiplicatively(x**4 * y) == None assert (2*x).extract_multiplicatively(2) == x assert (2*x).extract_multiplicatively(3) == None assert (2*x).extract_multiplicatively(-1) == None assert (Rational(1, 2)*x).extract_multiplicatively(3) == x/6 assert (sqrt(x)).extract_multiplicatively(x) == None assert (sqrt(x)).extract_multiplicatively(1/x) == None assert ((x*y)**3).extract_additively(1) == None assert (x + 1).extract_additively(x) == 1 assert (x + 1).extract_additively(2*x) == None assert (x + 1).extract_additively(-x) == None assert (-x + 1).extract_additively(2*x) == None assert (2*x + 3).extract_additively(x) == x + 3 assert (2*x + 3).extract_additively(2) == 2*x + 1 assert (2*x + 3).extract_additively(3) == 2*x assert (2*x + 3).extract_additively(-2) == None assert (2*x + 3).extract_additively(3*x) == None assert (2*x + 3).extract_additively(2*x) == 3 assert x.extract_additively(0) == x assert S(2).extract_additively(x) is None assert S(2.).extract_additively(2) == S.Zero assert S(2*x + 3).extract_additively(x + 1) == x + 2 assert S(2*x + 3).extract_additively(y + 1) is None assert S(2*x - 3).extract_additively(x + 1) is None assert S(2*x - 3).extract_additively(y + z) is None assert ((a + 1)*x*4 + y).extract_additively(x).expand() == \ 4*a*x + 3*x + y assert ((a + 1)*x*4 + 3*y).extract_additively(x + 2*y).expand() == \ 4*a*x + 3*x + y assert (y*(x + 1)).extract_additively(x + 1) is None assert ((y + 1)*(x + 1) + 3).extract_additively(x + 1) == \ y*(x + 1) + 3 assert ((x + y)*(x + 1) + x + y + 3).extract_additively(x + y) == \ x*(x + y) + 3 assert (x + y + 2*((x + y)*(x + 1)) + 3).extract_additively((x + y)*(x + 1)) == \ x + y + (x + 1)*(x + y) + 3 assert ((y + 1)*(x + 2*y + 1) + 3).extract_additively(y + 1) == \ (x + 2*y)*(y + 1) + 3 n = Symbol("n", integer=True) assert (Integer(-3)).could_extract_minus_sign() == True assert (-n*x+x).could_extract_minus_sign() != (n*x-x).could_extract_minus_sign() assert (x-y).could_extract_minus_sign() != (-x+y).could_extract_minus_sign() assert (1-x-y).could_extract_minus_sign() == True assert (1-x+y).could_extract_minus_sign() == False assert ((-x-x*y)/y).could_extract_minus_sign() == True assert (-(x+x*y)/y).could_extract_minus_sign() == True assert ((x+x*y)/(-y)).could_extract_minus_sign() == 
True assert ((x+x*y)/y).could_extract_minus_sign() == False assert (x*(-x-x**3)).could_extract_minus_sign() == True # used to give inf recurs assert ((-x-y)/(x+y)).could_extract_minus_sign() == True # is_Mul odd case # The results of each of these will vary on different machines, e.g. # the first one might be False and the other (then) is true or vice versa, # so both are included. assert ((-x-y)/(x-y)).could_extract_minus_sign() == False or\ ((-x-y)/(y-x)).could_extract_minus_sign() == False # is_Mul even case assert ( x - y).could_extract_minus_sign() == False assert (-x + y).could_extract_minus_sign() == True def test_coeff(): assert (x+1).coeff(x+1) == 1 assert (3*x).coeff(0) == 0 assert (z*(1+x)*x**2).coeff(1+x) == z*x**2 assert (1+2*x*x**(1+x)).coeff(x*x**(1+x)) == 2 assert (1+2*x**(y+z)).coeff(x**(y+z)) == 2 assert (3+2*x+4*x**2).coeff(1) == 0 assert (3+2*x+4*x**2).coeff(-1) == 0 assert (3+2*x+4*x**2).coeff(x) == 2 assert (3+2*x+4*x**2).coeff(x**2) == 4 assert (3+2*x+4*x**2).coeff(x**3) == 0 assert (-x/8 + x*y).coeff(x) == -S(1)/8 + y assert (-x/8 + x*y).coeff(-x) == S(1)/8 assert (4*x).coeff(2*x) == 0 assert (2*x).coeff(2*x) == 1 assert (-oo*x).coeff(x*oo) == -1 n1, n2 = symbols('n1 n2', commutative=False) assert (n1*n2).coeff(n1) == 1 assert (n1*n2).coeff(n2) == n1 assert (n1*n2 + x*n1).coeff(n1) == 1 # 1*n1*(n2+x) assert (n2*n1 + x*n1).coeff(n1) == n2 + x assert (n2*n1 + x*n1**2).coeff(n1) == n2 assert (n1**x).coeff(n1) == 0 assert (n1*n2 + n2*n1).coeff(n1) == 0 assert (2*(n1+n2)*n2).coeff(n1+n2, right=1) == n2 assert (2*(n1+n2)*n2).coeff(n1+n2, right=0) == 2 f = Function('f') assert (2*f(x) + 3*f(x).diff(x)).coeff(f(x)) == 2 expr = z*(x+y)**2 expr2 = z*(x+y)**2 + z*(2*x + 2*y)**2 assert expr.coeff(z) == (x+y)**2 assert expr.coeff(x+y) == 0 assert expr2.coeff(z) == (x+y)**2 + (2*x + 2*y)**2 assert (x + y + 3*z).coeff(1) == x + y assert (-x + 2*y).coeff(-1) == x assert (x - 2*y).coeff(-1) == 2*y assert (3 + 2*x + 4*x**2).coeff(1) == 0 assert (-x - 2*y).coeff(2) == -y assert (x + sqrt(2)*x).coeff(sqrt(2)) == x assert (3 + 2*x + 4*x**2).coeff(x) == 2 assert (3 + 2*x + 4*x**2).coeff(x**2) == 4 assert (3 + 2*x + 4*x**2).coeff(x**3) == 0 assert (z*(x + y)**2).coeff((x + y)**2) == z assert (z*(x + y)**2).coeff(x + y) == 0 assert (2 + 2*x + (x + 1)*y).coeff(x + 1) == y assert (x + 2*y + 3).coeff(1) == x assert (x + 2*y + 3).coeff(x, 0) == 2*y + 3 assert (x**2 + 2*y + 3*x).coeff(x**2, 0) == 2*y + 3*x assert x.coeff(0, 0) == 0 assert x.coeff(x, 0) == 0 n, m, o, l = symbols('n m o l', commutative=False) assert n.coeff(n) == 1 assert y.coeff(n) == 0 assert (3*n).coeff(n) == 3 assert (2 + n).coeff(x*m) == 0 assert (2*x*n*m).coeff(x) == 2*n*m assert (2 + n).coeff(x*m*n + y) == 0 assert (2*x*n*m).coeff(3*n) == 0 assert (n*m + m*n*m).coeff(n) == 1 + m assert (n*m + m*n*m).coeff(n, right=True) == m # = (1 + m)*n*m assert (n*m + m*n).coeff(n) == 0 assert (n*m + o*m*n).coeff(m*n) == o assert (n*m + o*m*n).coeff(m*n, right=1) == 1 assert (n*m + n*m*n).coeff(n*m, right=1) == 1 + n # = n*m*(n + 1) def test_coeff2(): r, kappa = symbols('r, kappa') psi = Function("psi") g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2)) g = g.expand() assert g.coeff((psi(r).diff(r))) == 2/r def test_coeff2_0(): r, kappa = symbols('r, kappa') psi = Function("psi") g = 1/r**2 * (2*r*psi(r).diff(r, 1) + r**2 * psi(r).diff(r, 2)) g = g.expand() assert g.coeff(psi(r).diff(r, 2)) == 1 def test_coeff_expand(): expr = z*(x+y)**2 expr2 = z*(x+y)**2 + z*(2*x + 2*y)**2 assert expr.coeff(z) == (x+y)**2 assert 
expr2.coeff(z) == (x+y)**2 + (2*x + 2*y)**2 def test_integrate(): assert x.integrate(x) == x**2/2 assert x.integrate((x, 0, 1)) == S(1)/2 def test_as_base_exp(): assert x.as_base_exp() == (x, S.One) assert (x*y*z).as_base_exp() == (x*y*z, S.One) assert (x+y+z).as_base_exp() == (x+y+z, S.One) assert ((x+y)**z).as_base_exp() == (x+y, z) def test_issue1864(): assert hasattr(Mul(x, y), "is_commutative") assert hasattr(Mul(x, y, evaluate=False), "is_commutative") assert hasattr(Pow(x, y), "is_commutative") assert hasattr(Pow(x, y, evaluate=False), "is_commutative") expr = Mul(Pow(2, 2, evaluate=False), 3, evaluate=False) + 1 assert hasattr(expr, "is_commutative") def test_action_verbs(): assert nsimplify((1/(exp(3*pi*x/5)+1))) == (1/(exp(3*pi*x/5)+1)).nsimplify() assert ratsimp(1/x + 1/y) == (1/x + 1/y).ratsimp() assert trigsimp(log(x), deep=True) == (log(x)).trigsimp(deep = True) assert radsimp(1/(2+sqrt(2))) == (1/(2+sqrt(2))).radsimp() assert powsimp(x**y*x**z*y**z, combine='all') == (x**y*x**z*y**z).powsimp(combine='all') assert simplify(x**y*x**z*y**z) == (x**y*x**z*y**z).simplify() assert together(1/x + 1/y) == (1/x + 1/y).together() # Not tested because it's deprecated #assert separate((x*(y*z)**3)**2) == ((x*(y*z)**3)**2).separate() assert collect(a*x**2 + b*x**2 + a*x - b*x + c, x) == (a*x**2 + b*x**2 + a*x - b*x + c).collect(x) assert apart(y/(y+2)/(y+1), y) == (y/(y+2)/(y+1)).apart(y) assert combsimp(y/(x+2)/(x+1)) == (y/(x+2)/(x+1)).combsimp() assert factor(x**2+5*x+6) == (x**2+5*x+6).factor() assert refine(sqrt(x**2)) == sqrt(x**2).refine() assert cancel((x**2+5*x+6)/(x+2)) == ((x**2+5*x+6)/(x+2)).cancel() def test_as_powers_dict(): assert x.as_powers_dict() == {x: 1} assert (x**y*z).as_powers_dict() == {x: y, z: 1} assert Mul(2, 2, **dict(evaluate=False)).as_powers_dict() == {S(2): S(2)} def test_as_coefficients_dict(): check = [S(1), x, y, x*y, 1] assert [Add(3*x, 2*x, y, 3).as_coefficients_dict()[i] for i in check] == \ [3, 5, 1, 0, 0] assert [(3*x*y).as_coefficients_dict()[i] for i in check] == \ [0, 0, 0, 3, 0] assert (3.0*x*y).as_coefficients_dict()[3.0*x*y] == 1 def test_args_cnc(): A = symbols('A', commutative=False) assert (x+A).args_cnc() == \ [[], [x + A]] assert (x+a).args_cnc() == \ [[a + x], []] assert (x*a).args_cnc() == \ [[a, x], []] assert (x*y*A*(A+1)).args_cnc(cset=True) == \ [set([x, y]), [A, 1 + A]] assert Mul(x, x, evaluate=False).args_cnc(cset=True, warn=False) == \ [set([x]), []] assert Mul(x, x**2, evaluate=False).args_cnc(cset=True, warn=False) == \ [set([x, x**2]), []] raises(ValueError, lambda: Mul(x, x, evaluate=False).args_cnc(cset=True)) assert Mul(x, y, x, evaluate=False).args_cnc() == \ [[x, y, x], []] def test_new_rawargs(): n = Symbol('n', commutative=False) a = x + n assert a.is_commutative is False assert a._new_rawargs(x).is_commutative assert a._new_rawargs(x, y).is_commutative assert a._new_rawargs(x, n).is_commutative is False assert a._new_rawargs(x, y, n).is_commutative is False m = x*n assert m.is_commutative is False assert m._new_rawargs(x).is_commutative assert m._new_rawargs(n).is_commutative is False assert m._new_rawargs(x, y).is_commutative assert m._new_rawargs(x, n).is_commutative is False assert m._new_rawargs(x, y, n).is_commutative is False assert m._new_rawargs(x, n, reeval=False).is_commutative is False assert m._new_rawargs(S.One) is S.One def test_2127(): assert Add(evaluate=False) == 0 assert Mul(evaluate=False) == 1 assert Mul(x+y, evaluate=False).is_Add def test_free_symbols(): # free_symbols should return the free 
symbols of an object assert S(1).free_symbols == set() assert (x).free_symbols == set([x]) assert Integral(x, (x, 1, y)).free_symbols == set([y]) assert (-Integral(x, (x, 1, y))).free_symbols == set([y]) assert meter.free_symbols == set() assert (meter**x).free_symbols == set([x]) def test_issue2201(): x = Symbol('x', commutative=False) assert x*sqrt(2)/sqrt(6) == x*sqrt(3)/3 def test_issue_2061(): assert sqrt(-1.0*x) == 1.0*sqrt(-x) assert sqrt(1.0*x) == 1.0*sqrt(x) def test_as_coeff_Mul(): assert Integer(3).as_coeff_Mul() == (Integer(3), Integer(1)) assert Rational(3, 4).as_coeff_Mul() == (Rational(3, 4), Integer(1)) assert Float(5.0).as_coeff_Mul() == (Float(5.0), Integer(1)) assert (Integer(3)*x).as_coeff_Mul() == (Integer(3), x) assert (Rational(3, 4)*x).as_coeff_Mul() == (Rational(3, 4), x) assert (Float(5.0)*x).as_coeff_Mul() == (Float(5.0), x) assert (Integer(3)*x*y).as_coeff_Mul() == (Integer(3), x*y) assert (Rational(3, 4)*x*y).as_coeff_Mul() == (Rational(3, 4), x*y) assert (Float(5.0)*x*y).as_coeff_Mul() == (Float(5.0), x*y) assert (x).as_coeff_Mul() == (S.One, x) assert (x*y).as_coeff_Mul() == (S.One, x*y) def test_as_coeff_Add(): assert Integer(3).as_coeff_Add() == (Integer(3), Integer(0)) assert Rational(3, 4).as_coeff_Add() == (Rational(3, 4), Integer(0)) assert Float(5.0).as_coeff_Add() == (Float(5.0), Integer(0)) assert (Integer(3) + x).as_coeff_Add() == (Integer(3), x) assert (Rational(3, 4) + x).as_coeff_Add() == (Rational(3, 4), x) assert (Float(5.0) + x).as_coeff_Add() == (Float(5.0), x) assert (Integer(3) + x + y).as_coeff_Add() == (Integer(3), x + y) assert (Rational(3, 4) + x + y).as_coeff_Add() == (Rational(3, 4), x + y) assert (Float(5.0) + x + y).as_coeff_Add() == (Float(5.0), x + y) assert (x).as_coeff_Add() == (S.Zero, x) assert (x*y).as_coeff_Add() == (S.Zero, x*y) def test_expr_sorting(): f, g = symbols('f,g', cls=Function) exprs = [1/x**2, 1/x, sqrt(sqrt(x)), sqrt(x), x, sqrt(x)**3, x**2] assert sorted(exprs, key=default_sort_key) == exprs exprs = [x, 2*x, 2*x**2, 2*x**3, x**n, 2*x**n, sin(x), sin(x)**n, sin(x**2), cos(x), cos(x**2), tan(x)] assert sorted(exprs, key=default_sort_key) == exprs exprs = [x + 1, x**2 + x + 1, x**3 + x**2 + x + 1] assert sorted(exprs, key=default_sort_key) == exprs exprs = [S(4), x - 3*I/2, x + 3*I/2, x - 4*I + 1, x + 4*I + 1] assert sorted(exprs, key=default_sort_key) == exprs exprs = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)] assert sorted(exprs, key=default_sort_key) == exprs exprs = [f(x), g(x), exp(x), sin(x), cos(x), factorial(x)] assert sorted(exprs, key=default_sort_key) == exprs exprs = [Tuple(x, y), Tuple(x, z), Tuple(x, y, z)] assert sorted(exprs, key=default_sort_key) == exprs exprs = [[3], [1, 2]] assert sorted(exprs, key=default_sort_key) == exprs exprs = [[1, 2], [2, 3]] assert sorted(exprs, key=default_sort_key) == exprs exprs = [[1, 2], [1, 2, 3]] assert sorted(exprs, key=default_sort_key) == exprs exprs = [{x: -y}, {x: y}] assert sorted(exprs, key=default_sort_key) == exprs exprs = [set([1]), set([1, 2])] assert sorted(exprs, key=default_sort_key) == exprs def test_as_ordered_factors(): f, g = symbols('f,g', cls=Function) assert x.as_ordered_factors() == [x] assert (2*x*x**n*sin(x)*cos(x)).as_ordered_factors() == [Integer(2), x, x**n, sin(x), cos(x)] args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)] expr = Mul(*args) assert expr.as_ordered_factors() == args A, B = symbols('A,B', commutative=False) assert (A*B).as_ordered_factors() == [A, B] assert (B*A).as_ordered_factors() == 
[B, A]

def test_as_ordered_terms():
    f, g = symbols('f,g', cls=Function)

    assert x.as_ordered_terms() == [x]
    assert (sin(x)**2*cos(x) + sin(x)*cos(x)**2 + 1).as_ordered_terms() == [sin(x)**2*cos(x), sin(x)*cos(x)**2, 1]

    args = [f(1), f(2), f(3), f(1, 2, 3), g(1), g(2), g(3), g(1, 2, 3)]
    expr = Add(*args)

    assert expr.as_ordered_terms() == args

    assert (1 + 4*sqrt(3)*pi*x).as_ordered_terms() == [4*pi*x*sqrt(3), 1]

    assert ( 2 + 3*I).as_ordered_terms() == [ 2, 3*I]
    assert (-2 + 3*I).as_ordered_terms() == [-2, 3*I]
    assert ( 2 - 3*I).as_ordered_terms() == [ 2, -3*I]
    assert (-2 - 3*I).as_ordered_terms() == [-2, -3*I]

    assert ( 4 + 3*I).as_ordered_terms() == [ 4, 3*I]
    assert (-4 + 3*I).as_ordered_terms() == [-4, 3*I]
    assert ( 4 - 3*I).as_ordered_terms() == [ 4, -3*I]
    assert (-4 - 3*I).as_ordered_terms() == [-4, -3*I]

    f = x**2*y**2 + x*y**4 + y + 2

    assert f.as_ordered_terms(order="lex") == [x**2*y**2, x*y**4, y, 2]
    assert f.as_ordered_terms(order="grlex") == [x*y**4, x**2*y**2, y, 2]
    assert f.as_ordered_terms(order="rev-lex") == [2, y, x*y**4, x**2*y**2]
    assert f.as_ordered_terms(order="rev-grlex") == [2, y, x**2*y**2, x*y**4]

def test_sort_key_atomic_expr():
    from sympy.physics.units import m, s
    assert sorted([-m, s], key=lambda arg: arg.sort_key()) == [-m, s]

def test_issue_1100():
    # first subs and limit gives NaN
    a = x/y
    assert a._eval_interval(x, 0, oo)._eval_interval(y, oo, 0) is S.NaN
    # second subs and limit gives NaN
    assert a._eval_interval(x, 0, oo)._eval_interval(y, 0, oo) is S.NaN
    # difference gives S.NaN
    a = x - y
    assert a._eval_interval(x, 1, oo)._eval_interval(y, oo, 1) is S.NaN
    raises(ValueError, lambda: x._eval_interval(x, None, None))

def test_primitive():
    assert (3*(x + 1)**2).primitive() == (3, (x + 1)**2)
    assert (6*x + 2).primitive() == (2, 3*x + 1)
    assert (x/2 + 3).primitive() == (S(1)/2, x + 6)
    eq = (6*x + 2)*(x/2 + 3)
    assert eq.primitive()[0] == 1
    eq = (2 + 2*x)**2
    assert eq.primitive()[0] == 1
    assert (4.0*x).primitive() == (1, 4.0*x)
    assert (4.0*x + y/2).primitive() == (S.Half, 8.0*x + y)
    assert (-2*x).primitive() == (2, -x)
    assert Add(5*z/7, 0.5*x, 3*y/2, evaluate=False).primitive() == \
        (S(1)/14, 7.0*x + 21*y + 10*z)
    for i in [S.Infinity, S.NegativeInfinity, S.ComplexInfinity]:
        assert (i + x/3).primitive() == \
            (S(1)/3, i + x)
    assert (S.Infinity + 2*x/3 + 4*y/7).primitive() == \
        (S(1)/21, 14*x + 12*y + oo)
    assert S.Zero.primitive() == (S.One, S.Zero)

def test_issue_2744():
    a = 1 + x
    assert (2*a).extract_multiplicatively(a) == 2
    assert (4*a).extract_multiplicatively(2*a) == 2
    assert ((3*a)*(2*a)).extract_multiplicatively(a) == 6*a

def test_is_constant():
    from sympy.solvers.solvers import checksol
    assert Sum(x, (x, 1, 10)).is_constant() == True
    assert Sum(x, (x, 1, n)).is_constant() == False
    assert Sum(x, (x, 1, n)).is_constant(y) == True
    assert Sum(x, (x, 1, n)).is_constant(n) == False
    assert Sum(x, (x, 1, n)).is_constant(x) == True
    eq = a*cos(x)**2 + a*sin(x)**2 - a
    assert eq.is_constant() == True
    assert eq.subs({x:pi, a:2}) == eq.subs({x:pi, a:3}) == 0

    assert x.is_constant() is False
    assert x.is_constant(y) is True

    assert checksol(x, x, Sum(x, (x, 1, n))) == False
    assert checksol(x, x, Sum(x, (x, 1, n))) == False
    f = Function('f')
    assert checksol(x, x, f(x)) == False

    p = symbols('p', positive=True)
    assert Pow(x, S(0), evaluate=False).is_constant() == True # == 1
    assert Pow(S(0), x, evaluate=False).is_constant() == False # == 0 or 1
    assert Pow(S(0), p, evaluate=False).is_constant() == True # == 1
    assert (2**x).is_constant() == False
    assert Pow(S(2), S(3), evaluate=False).is_constant() == True

    z1, z2 = symbols('z1 z2',
zero=True) assert (z1 + 2*z2).is_constant() is True assert meter.is_constant() is True assert (3*meter).is_constant() is True assert (x*meter).is_constant() is False def test_equals(): assert (-3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2).equals(0) assert (x**2 - 1).equals((x + 1)*(x - 1)) assert (cos(x)**2 + sin(x)**2).equals(1) assert (a*cos(x)**2 + a*sin(x)**2).equals(a) r = sqrt(2) assert (-1/(r + r*x) + 1/r/(1 + x)).equals(0) assert factorial(x + 1).equals((x + 1)*factorial(x)) assert sqrt(3).equals(2*sqrt(3)) is False assert (sqrt(5)*sqrt(3)).equals(sqrt(3)) is False assert (sqrt(5) + sqrt(3)).equals(0) is False assert (sqrt(5) + pi).equals(0) is False assert meter.equals(0) is False assert (3*meter**2).equals(0) is False # from integrate(x*sqrt(1+2*x), x); # diff is zero only when assumptions allow i = 2*sqrt(2)*x**(S(5)/2)*(1 + 1/(2*x))**(S(5)/2)/5 + \ 2*sqrt(2)*x**(S(3)/2)*(1 + 1/(2*x))**(S(5)/2)/(-6 - 3/x) ans = sqrt(2*x + 1)*(6*x**2 + x - 1)/15 diff = i - ans assert diff.equals(0) is False assert diff.subs(x, -S.Half/2) == 7*sqrt(2)/120 # there are regions for x for which the expression is True, for # example, when x < -1/2 or x > 0 the expression is zero p = Symbol('p', positive=True) assert diff.subs(x, p).equals(0) is True assert diff.subs(x, -1).equals(0) is True def test_random(): from sympy import posify assert posify(x)[0]._random() is not None def test_round(): from sympy.abc import x assert Float('0.1249999').round(2) == 0.12 d20 = 12345678901234567890 ans = S(d20).round(2) assert ans.is_Float and ans == d20 ans = S(d20).round(-2) assert ans.is_Float and ans == 12345678901234567900 assert S('1/7').round(4) == 0.1429 assert S('.[12345]').round(4) == 0.1235 assert S('.1349').round(2) == 0.13 n = S(12345) ans = n.round() assert ans.is_Float assert ans == n ans = n.round(1) assert ans.is_Float assert ans == n ans = n.round(4) assert ans.is_Float assert ans == n assert n.round(-1) == 12350 r = n.round(-4) assert r == 10000 # in fact, it should equal many values since __eq__ # compares at equal precision assert all(r == i for i in range(9984, 10049)) assert n.round(-5) == 0 assert (pi + sqrt(2)).round(2) == 4.56 assert (10*(pi + sqrt(2))).round(-1) == 50 raises(TypeError, lambda: round(x + 2, 2)) assert S(2.3).round(1) == 2.3 e = S(12.345).round(2) assert e == round(12.345, 2) assert type(e) is Float assert (Float(.3, 3) + 2*pi).round() == 7 assert (Float(.3, 3) + 2*pi*100).round() == 629 assert (Float(.03, 3) + 2*pi/100).round(5) == 0.09283 assert (Float(.03, 3) + 2*pi/100).round(4) == 0.0928 assert (pi + 2*E*I).round() == 3 + 5*I assert S.Zero.round() == 0 a = (Add(1, Float('1.'+'9'*27, ''), evaluate=0)) assert a.round(10) == Float('3.0000000000','') assert a.round(25) == Float('3.0000000000000000000000000','') assert a.round(26) == Float('3.00000000000000000000000000','') assert a.round(27) == Float('2.999999999999999999999999999','') assert a.round(30) == Float('2.999999999999999999999999999','') raises(TypeError, lambda: x.round()) # exact magnitude of 10 assert str(S(1).round()) == '1.' assert str(S(100).round()) == '100.' 
# applied to real and imaginary portions assert (2*pi + E*I).round() == 6 + 3*I assert (2*pi + I/10).round() == 6 assert (pi/10 + 2*I).round() == 2*I # the lhs re and im parts are Float with dps of 2 # and those on the right have dps of 15 so they won't compare # equal unless we use string or compare components (which will # then coerce the floats to the same precision) or re-create # the floats assert str((pi/10 + E*I).round(2)) == '0.31 + 2.72*I' assert (pi/10 + E*I).round(2).as_real_imag() == (0.31, 2.72) assert (pi/10 + E*I).round(2) == Float(0.31, 2) + I*Float(2.72, 3) # issue 3815 assert (I**(I+3)).round(3) == Float('-0.208','')*I def test_extract_branch_factor(): assert exp_polar(2.0*I*pi).extract_branch_factor() == (1, 1)
bsd-3-clause
625,356,421,304,624,600
33.645022
114
0.54313
false
2.594586
true
false
false
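For quick reference, a standalone sketch of a few of the Expr helpers the test module above exercises; the expected values in the comments are taken directly from the asserts, and a SymPy of the same vintage is assumed.

from sympy import symbols, sqrt

x, y = symbols('x y')

print((2*x + 3).extract_additively(x))         # x + 3
print((3*x*y).as_coeff_Mul())                  # (3, x*y)
print((x + sqrt(2)*x).coeff(sqrt(2)))          # x
print((1 - x - y).could_extract_minus_sign())  # True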
praekelt/vumi-go
go/apps/tests/view_helpers.py
1
2691
from django.core.urlresolvers import reverse from zope.interface import implements from vumi.tests.helpers import generate_proxies, IHelper from go.base import utils as base_utils from go.base.tests.helpers import DjangoVumiApiHelper from go.vumitools.tests.helpers import GoMessageHelper from .helpers import ApplicationHelper class AppViewsHelper(object): implements(IHelper) def __init__(self, conversation_type): self.conversation_type = conversation_type self.vumi_helper = DjangoVumiApiHelper() self._app_helper = ApplicationHelper( conversation_type, self.vumi_helper) # Proxy methods from our helpers. generate_proxies(self, self._app_helper) generate_proxies(self, self.vumi_helper) def setup(self): # Create the things we need to create self.vumi_helper.setup() self.vumi_helper.make_django_user() def cleanup(self): return self.vumi_helper.cleanup() def get_new_view_url(self): return reverse('conversations:new_conversation') def get_conversation_helper(self, conversation): return ConversationViewHelper(self, conversation.key) def create_conversation_helper(self, *args, **kw): conversation = self.create_conversation(*args, **kw) return self.get_conversation_helper(conversation) def get_api_commands_sent(self): return base_utils.connection.get_commands() class ConversationViewHelper(object): def __init__(self, app_views_helper, conversation_key): self.conversation_key = conversation_key self.conversation_type = app_views_helper.conversation_type self.app_helper = app_views_helper def get_view_url(self, view): view_def = base_utils.get_conversation_view_definition( self.conversation_type) return view_def.get_view_url( view, conversation_key=self.conversation_key) def get_action_view_url(self, action_name): return reverse('conversations:conversation_action', kwargs={ 'conversation_key': self.conversation_key, 'action_name': action_name, }) def get_conversation(self): return self.app_helper.get_conversation(self.conversation_key) def add_stored_inbound(self, count, **kw): msg_helper = GoMessageHelper(vumi_helper=self.app_helper) conv = self.get_conversation() return msg_helper.add_inbound_to_conv(conv, count, **kw) def add_stored_replies(self, msgs): msg_helper = GoMessageHelper(vumi_helper=self.app_helper) conv = self.get_conversation() return msg_helper.add_replies_to_conv(conv, msgs)
bsd-3-clause
-5,930,899,879,608,765,000
33.5
70
0.687105
false
3.666213
false
false
false
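A hypothetical end-to-end use of the helpers above; the u'bulk_message' conversation type and the 'show' view name are assumptions that do not appear in this file.

from go.apps.tests.view_helpers import AppViewsHelper

helper = AppViewsHelper(u'bulk_message')  # conversation type is an assumption
helper.setup()
try:
    # create_conversation_helper() proxies ApplicationHelper.create_conversation
    conv_helper = helper.create_conversation_helper()
    show_url = conv_helper.get_view_url('show')  # 'show' is an assumed view name
    conv_helper.add_stored_inbound(3)
    print(show_url, helper.get_api_commands_sent())
finally:
    helper.cleanup()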
palominodb/tableizer
tableizer/ttt_gui/rrd.py
1
5968
# rrd.py # Copyright (C) 2009-2013 PalominoDB, Inc. # # You may contact the maintainers at [email protected]. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import os from django.conf import settings import rrdtool from utilities.utils import flatten, titleize, str_to_datetime, datetime_to_int class Rrdtool(object): def server_graph(self, servers, since, type_='full'): msgs = [] ok = True for srv in flatten([servers]): path = settings.FORMATTER_OPTIONS.get('rrd', {}).get('path', '') rrd_path = os.path.join(path, srv.name, 'server_%s.rrd' % (srv.name)) opts = self.__common_opts('server_%s' % (srv.name), since, type_, 'Server Aggregate - %s' % (srv.name)) opts.append(map(lambda ds: self.__common_ds_opts(ds, rrd_path), [ ['data_length', ['AREA%s:STACK', '#00ff40']], ['index_length', ['AREA%s', '#0040ff']], #['data_free', ['LINE2%s', '#0f00f0']], ])) opts = flatten(opts) opts = map(lambda x: str(x), opts) try: rrdtool.graph(opts) except Exception, e: msgs.append(e) ok = False return [ok, msgs] def database_graph(self, databases, since, type_='full'): msgs = [] ok = True for db in flatten([databases]): path = settings.FORMATTER_OPTIONS.get('rrd', {}).get('path', '') rrd_path = os.path.join(path, db.server.name, 'database_%s.rrd' % (db.name)) opts = self.__common_opts('database_%s_%s' % (db.server.name, db.name), since, type_, 'Database Aggregate - %s.%s' % (db.server.name, db.name)) opts.append(map(lambda ds: self.__common_ds_opts(ds, rrd_path), [ ['data_length', ['AREA%s:STACK', '#00ff40']], ['index_length', ['AREA%s', '#0040ff']], #['data_free', ['LINE2%s', '#0f00f0']], ])) opts = flatten(opts) opts = map(lambda x: str(x), opts) try: rrdtool.graph(opts) except Exception, e: msgs.append(e) ok = False return [ok, msgs] def table_graph(self, tables, since, type_='full'): msgs = [] ok = True for tbl in flatten([tables]): path = settings.FORMATTER_OPTIONS.get('rrd', {}).get('path', '') rrd_path = os.path.join(path, tbl.schema.server.name, tbl.schema.name, '%s.rrd' % (tbl.name)) opts = self.__common_opts('table_%s_%s_%s' % (tbl.schema.server.name, tbl.schema.name, tbl.name), since, type_, 'Table - %s.%s.%s' % (tbl.schema.server.name, tbl.schema.name, tbl.name)) opts.append(map(lambda ds: self.__common_ds_opts(ds, rrd_path), [ ['data_length', ['AREA%s:STACK', '#00ff40']], ['index_length', ['AREA%s', '#0040ff']], #['data_free', ['LINE2%s', '#0f00f0']], ])) opts = flatten(opts) opts = map(lambda x: str(x), opts) try: rrdtool.graph(opts) except Exception, e: msgs.append(e) ok = False return [ok, msgs] def __common_opts(self, path_frag, since, type_, title): filename = '%s.%s.%s.png' % (path_frag, since, type_) since = str_to_datetime(since) since = datetime_to_int(since) if not os.path.isdir(os.path.join(settings.MEDIA_ROOT, 'graphs')): os.makedirs(os.path.join(settings.MEDIA_ROOT, 'graphs')) path = os.path.join(settings.MEDIA_ROOT, 'graphs', filename) o = 
[path, '-s', str(since), '--width', '640' if type_ == 'full' else '128', '-e', 'now', '--title', '%s' % (str(title))] if type_ == 'thumb': o.append('-j') o.append('--height') o.append('16') return o def __common_ds_opts(self, ds, rrd_path): dsname = ds[0] gitems = ds[1:] ret = [] ret.append('DEF:avg_{0}={1}:{0}:AVERAGE'.format(dsname, rrd_path)) ret.append('DEF:min_{0}={1}:{0}:MIN'.format(dsname, rrd_path)) ret.append('DEF:max_{0}={1}:{0}:MAX'.format(dsname, rrd_path)) ret.append('VDEF:v_last_{0}=avg_{0},LAST'.format(dsname)) ret.append('VDEF:v_avg_{0}=avg_{0},AVERAGE'.format(dsname)) ret.append('VDEF:v_min_{0}=avg_{0},MINIMUM'.format(dsname)) ret.append('VDEF:v_max_{0}=avg_{0},MAXIMUM'.format(dsname)) for gi in gitems: ret.append(gi[0] % ':avg_{0}{1}:"{2}"'.format(dsname, gi[1], titleize(dsname))) ret.append('GPRINT:v_last_{0}:"Current\\: %0.2lf%s"'.format(dsname)) ret.append('GPRINT:v_avg_{0}:"Avg\\: %0.2lf%s"'.format(dsname)) ret.append('GPRINT:v_min_{0}:"Min\\: %0.2lf%s"'.format(dsname)) ret.append('GPRINT:v_max_{0}:"Max\\: %0.2lf%s"'.format(dsname)) ret.append('COMMENT:"\\s"') ret.append('COMMENT:"\\s"') return ret
gpl-2.0
-9,085,685,221,464,438,000
42.562044
127
0.525637
false
3.435809
false
false
false
florian-wagner/gimli
python/pygimli/gui/vtk/wxVTKRenderWindowInteractor.py
1
24830
# -*- coding: utf-8 -*-

"""
A VTK RenderWindowInteractor widget for wxPython.

Find wxPython info at http://wxPython.org

Created by Prabhu Ramachandran, April 2002
Based on wxVTKRenderWindow.py

Fixes and updates by Charl P. Botha 2003-2008

Updated to new wx namespace and some cleaning up by Andrea Gavana,
December 2006
"""

"""
Please see the example at the end of this file.

----------------------------------------
Creation:

 wxVTKRenderWindowInteractor(parent, ID, stereo=0, [wx keywords]):

 You should create a wx.PySimpleApp() or some other wx**App
 before creating the window.

Behaviour:

 Uses __getattr__ to make the wxVTKRenderWindowInteractor behave just
 like a vtkGenericRenderWindowInteractor.

----------------------------------------
"""

# import usual libraries
import math
import sys
import os

baseClass = object
_useCapture = None

try:
    import wx

    # a few configuration items, see what works best on your system

    # Use GLCanvas as base class instead of wx.Window.
    # This is sometimes necessary under wxGTK or the image is blank.
    # (in wxWindows 2.3.1 and earlier, the GLCanvas had scroll bars)
    if wx.Platform == "__WXGTK__":
        import wx.glcanvas
        baseClass = wx.glcanvas.GLCanvas

    # Keep capturing mouse after mouse is dragged out of window
    # (in wxGTK 2.3.2 there is a bug that keeps this from working,
    # but it is only relevant in wxGTK if there are multiple windows)
    _useCapture = (wx.Platform == "__WXMSW__")

except ImportError as e:
    import traceback
    #traceback.print_exc(file=sys.stdout)
    sys.stderr.write("No proper wx installed.\n")

try:
    import vtk
except Exception as e:
    sys.stderr.write("No proper vtk installed.\n")

# end of configuration items

class EventTimer(wx.Timer):
    """Simple wx.Timer class."""

    def __init__(self, iren):
        """
        Default class constructor.
        @param iren: current render window
        """
        wx.Timer.__init__(self)
        self.iren = iren

    def Notify(self):
        """The timer has expired."""
        self.iren.TimerEvent()

class wxVTKRenderWindowInteractor(baseClass):
    """
    A wxRenderWindow for wxPython. Use GetRenderWindow() to get the
    vtkRenderWindow. Create with the keyword stereo=1 in order to generate a
    stereo-capable window.
    """

    # class variable that can also be used to request instances that use
    # stereo; this is overridden by the stereo=1/0 parameter.  If you set
    # it to True, the NEXT instantiated object will attempt to allocate a
    # stereo visual.  E.g.:
    # wxVTKRenderWindowInteractor.USE_STEREO = True
    # myRWI = wxVTKRenderWindowInteractor(parent, -1)
    USE_STEREO = False

    def __init__(self, parent, ID, *args, **kw):
        """
        Default class constructor.
        @param parent: parent window
        @param ID: window id
        @param **kw: wxPython keywords (position, size, style) plus the
        'stereo' keyword
        """
        # private attributes
        self.__RenderWhenDisabled = 0

        # First do special handling of some keywords:
        # stereo, position, size, style

        stereo = 0

        if 'stereo' in kw:
            if kw['stereo']:
                stereo = 1
            del kw['stereo']

        elif self.USE_STEREO:
            stereo = 1

        position, size = wx.DefaultPosition, wx.DefaultSize

        if 'position' in kw:
            position = kw['position']
            del kw['position']

        if 'size' in kw:
            size = kw['size']
            del kw['size']

        # wx.WANTS_CHARS says to give us e.g.
TAB # wx.NO_FULL_REPAINT_ON_RESIZE cuts down resize flicker under GTK style = wx.WANTS_CHARS | wx.NO_FULL_REPAINT_ON_RESIZE if 'style' in kw: style = style | kw['style'] del kw['style'] # the enclosing frame must be shown under GTK or the windows # don't connect together properly if wx.Platform != '__WXMSW__': l = [] p = parent while p: # make a list of all parents l.append(p) p = p.GetParent() l.reverse() # sort list into descending order for p in l: p.Show(1) if baseClass.__name__ == 'GLCanvas': # code added by cpbotha to enable stereo and double # buffering correctly where the user requests this; remember # that the glXContext in this case is NOT allocated by VTK, # but by WX, hence all of this. # Initialize GLCanvas with correct attriblist attribList = [wx.glcanvas.WX_GL_RGBA, wx.glcanvas.WX_GL_MIN_RED, 1, wx.glcanvas.WX_GL_MIN_GREEN, 1, wx.glcanvas.WX_GL_MIN_BLUE, 1, wx.glcanvas.WX_GL_DEPTH_SIZE, 16, wx.glcanvas.WX_GL_DOUBLEBUFFER] if stereo: attribList.append(wx.glcanvas.WX_GL_STEREO) try: baseClass.__init__(self, parent, id = ID, pos = position, size = size, style = style, attribList=attribList) except wx.PyAssertionError: # visual couldn't be allocated, so we go back to default baseClass.__init__(self, parent, ID, position, size, style) if stereo: # and make sure everyone knows that the stereo # visual wasn't set. stereo = 0 else: baseClass.__init__(self, parent, ID, position, size, style) # create the RenderWindow and initialize it self._Iren = vtk.vtkGenericRenderWindowInteractor() self._Iren.SetRenderWindow( vtk.vtkRenderWindow() ) self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer) self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer) self._Iren.GetRenderWindow().AddObserver('CursorChangedEvent', self.CursorChangedEvent) try: self._Iren.GetRenderWindow().SetSize(size.width, size.height) except AttributeError: self._Iren.GetRenderWindow().SetSize(size[0], size[1]) if stereo: self._Iren.GetRenderWindow().StereoCapableWindowOn() self._Iren.GetRenderWindow().SetStereoTypeToCrystalEyes() self.__handle = None self.BindEvents() # with this, we can make sure that the reparenting logic in # Render() isn't called before the first OnPaint() has # successfully been run (and set up the VTK/WX display links) self.__has_painted = False # set when we have captured the mouse. self._own_mouse = False # used to store WHICH mouse button led to mouse capture self._mouse_capture_button = 0 # A mapping for cursor changes. 
self._cursor_map = {0: wx.CURSOR_ARROW, # VTK_CURSOR_DEFAULT 1: wx.CURSOR_ARROW, # VTK_CURSOR_ARROW 2: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZENE 3: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZENWSE 4: wx.CURSOR_SIZENESW, # VTK_CURSOR_SIZESW 5: wx.CURSOR_SIZENWSE, # VTK_CURSOR_SIZESE 6: wx.CURSOR_SIZENS, # VTK_CURSOR_SIZENS 7: wx.CURSOR_SIZEWE, # VTK_CURSOR_SIZEWE 8: wx.CURSOR_SIZING, # VTK_CURSOR_SIZEALL 9: wx.CURSOR_HAND, # VTK_CURSOR_HAND 10: wx.CURSOR_CROSS, # VTK_CURSOR_CROSSHAIR } def BindEvents(self): """Binds all the necessary events for navigation, sizing, drawing.""" # refresh window by doing a Render self.Bind(wx.EVT_PAINT, self.OnPaint) # turn off background erase to reduce flicker self.Bind(wx.EVT_ERASE_BACKGROUND, lambda e: None) # Bind the events to the event converters self.Bind(wx.EVT_RIGHT_DOWN, self.OnButtonDown) self.Bind(wx.EVT_LEFT_DOWN, self.OnButtonDown) self.Bind(wx.EVT_MIDDLE_DOWN, self.OnButtonDown) self.Bind(wx.EVT_RIGHT_UP, self.OnButtonUp) self.Bind(wx.EVT_LEFT_UP, self.OnButtonUp) self.Bind(wx.EVT_MIDDLE_UP, self.OnButtonUp) self.Bind(wx.EVT_MOUSEWHEEL, self.OnMouseWheel) self.Bind(wx.EVT_MOTION, self.OnMotion) self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter) self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave) # If we use EVT_KEY_DOWN instead of EVT_CHAR, capital versions # of all characters are always returned. EVT_CHAR also performs # other necessary keyboard-dependent translations. self.Bind(wx.EVT_CHAR, self.OnKeyDown) self.Bind(wx.EVT_KEY_UP, self.OnKeyUp) self.Bind(wx.EVT_SIZE, self.OnSize) # the wx 2.8.7.1 documentation states that you HAVE to handle # this event if you make use of CaptureMouse, which we do. if _useCapture and hasattr(wx, 'EVT_MOUSE_CAPTURE_LOST'): self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self.OnMouseCaptureLost) def __getattr__(self, attr): """Makes the object behave like a vtkGenericRenderWindowInteractor.""" if attr == '__vtk__': return lambda t=self._Iren: t elif hasattr(self._Iren, attr): return getattr(self._Iren, attr) else: raise AttributeError(self.__class__.__name__ + \ " has no attribute named " + attr) def CreateTimer(self, obj, evt): """Creates a timer.""" self._timer = EventTimer(self) self._timer.Start(10, True) def DestroyTimer(self, obj, evt): """The timer is a one shot timer so will expire automatically.""" return 1 def _CursorChangedEvent(self, obj, evt): """Change the wx cursor if the renderwindow's cursor was changed.""" cur = self._cursor_map[obj.GetCurrentCursor()] c = wx.StockCursor(cur) self.SetCursor(c) def CursorChangedEvent(self, obj, evt): """Called when the CursorChangedEvent fires on the render window.""" # This indirection is needed since when the event fires, the # current cursor is not yet set so we defer this by which time # the current cursor should have been set. wx.CallAfter(self._CursorChangedEvent, obj, evt) def HideCursor(self): """Hides the cursor.""" c = wx.StockCursor(wx.CURSOR_BLANK) self.SetCursor(c) def ShowCursor(self): """Shows the cursor.""" rw = self._Iren.GetRenderWindow() cur = self._cursor_map[rw.GetCurrentCursor()] c = wx.StockCursor(cur) self.SetCursor(c) def GetDisplayId(self): """ Function to get X11 Display ID from WX and return it in a format that can be used by VTK Python. We query the X11 Display with a new call that was added in wxPython 2.6.0.1. The call returns a SWIG object which we can query for the address and subsequently turn into an old-style SWIG-mangled string representation to pass to VTK. 
""" d = None try: d = wx.GetXDisplay() except NameError: # wx.GetXDisplay was added by Robin Dunn in wxPython 2.6.0.1 # if it's not available, we can't pass it. In general, # things will still work; on some setups, it'll break. pass else: # wx returns None on platforms where wx.GetXDisplay is not relevant if d: d = hex(d) # On wxPython-2.6.3.2 and above there is no leading '0x'. if not d.startswith('0x'): d = '0x' + d # we now have 0xdeadbeef # VTK wants it as: _deadbeef_void_p (pre-SWIG-1.3 style) d = '_%s_%s' % (d[2:], 'void_p') return d def OnMouseCaptureLost(self, event): """ This is signalled when we lose mouse capture due to an external event, such as when a dialog box is shown. See the wx documentation. """ # the documentation seems to imply that by this time we've # already lost capture. I have to assume that we don't need # to call ReleaseMouse ourselves. if _useCapture and self._own_mouse: self._own_mouse = False def OnPaint(self,event): """Handles the wx.EVT_PAINT event for wxVTKRenderWindowInteractor.""" # wx should continue event processing after this handler. # We call this BEFORE Render(), so that if Render() raises # an exception, wx doesn't re-call OnPaint repeatedly. event.Skip() dc = wx.PaintDC(self) # make sure the RenderWindow is sized correctly self._Iren.GetRenderWindow().SetSize(self.GetSizeTuple()) # Tell the RenderWindow to render inside the wx.Window. if not self.__handle: # on relevant platforms, set the X11 Display ID d = self.GetDisplayId() if d: self._Iren.GetRenderWindow().SetDisplayId(d) # store the handle self.__handle = self.GetHandle() # and give it to VTK self._Iren.GetRenderWindow().SetWindowInfo(str(self.__handle)) # now that we've painted once, the Render() reparenting logic # is safe self.__has_painted = True self.Render() def OnSize(self,event): """Handles the wx.EVT_SIZE event for wxVTKRenderWindowInteractor.""" # event processing should continue (we call this before the # Render(), in case it raises an exception) event.Skip() try: width, height = event.GetSize() except: width = event.GetSize().width height = event.GetSize().height self._Iren.SetSize(width, height) self._Iren.ConfigureEvent() # this will check for __handle self.Render() def OnMotion(self,event): """Handles the wx.EVT_MOTION event for wxVTKRenderWindowInteractor.""" # event processing should continue # we call this early in case any of the VTK code raises an # exception. 
event.Skip() self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(), event.ControlDown(), event.ShiftDown(), chr(0), 0, None) self._Iren.MouseMoveEvent() def OnEnter(self,event): """Handles the wx.EVT_ENTER_WINDOW event for wxVTKRenderWindowInteractor.""" # event processing should continue event.Skip() self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(), event.ControlDown(), event.ShiftDown(), chr(0), 0, None) self._Iren.EnterEvent() def OnLeave(self,event): """Handles the wx.EVT_LEAVE_WINDOW event for wxVTKRenderWindowInteractor.""" # event processing should continue event.Skip() self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(), event.ControlDown(), event.ShiftDown(), chr(0), 0, None) self._Iren.LeaveEvent() def OnButtonDown(self,event): """Handles the wx.EVT_LEFT/RIGHT/MIDDLE_DOWN events for wxVTKRenderWindowInteractor.""" # allow wx event processing to continue # on wxPython 2.6.0.1, omitting this will cause problems with # the initial focus, resulting in the wxVTKRWI ignoring keypresses # until we focus elsewhere and then refocus the wxVTKRWI frame # we do it this early in case any of the following VTK code # raises an exception. event.Skip() ctrl, shift = event.ControlDown(), event.ShiftDown() self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(), ctrl, shift, chr(0), 0, None) button = 0 if event.RightDown(): self._Iren.RightButtonPressEvent() button = 'Right' elif event.LeftDown(): self._Iren.LeftButtonPressEvent() button = 'Left' elif event.MiddleDown(): self._Iren.MiddleButtonPressEvent() button = 'Middle' # save the button and capture mouse until the button is released # we only capture the mouse if it hasn't already been captured if _useCapture and not self._own_mouse: self._own_mouse = True self._mouse_capture_button = button self.CaptureMouse() def OnButtonUp(self,event): """Handles the wx.EVT_LEFT/RIGHT/MIDDLE_UP events for wxVTKRenderWindowInteractor.""" # event processing should continue event.Skip() button = 0 if event.RightUp(): button = 'Right' elif event.LeftUp(): button = 'Left' elif event.MiddleUp(): button = 'Middle' # if the same button is released that captured the mouse, and # we have the mouse, release it. # (we need to get rid of this as soon as possible; if we don't # and one of the event handlers raises an exception, mouse # is never released.) 
if _useCapture and self._own_mouse and \
                button==self._mouse_capture_button:
            self.ReleaseMouse()
            self._own_mouse = False

        ctrl, shift = event.ControlDown(), event.ShiftDown()
        self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
                                            ctrl, shift, chr(0), 0, None)

        if button == 'Right':
            self._Iren.RightButtonReleaseEvent()
        elif button == 'Left':
            self._Iren.LeftButtonReleaseEvent()
        elif button == 'Middle':
            self._Iren.MiddleButtonReleaseEvent()

    def OnMouseWheel(self,event):
        """Handles the wx.EVT_MOUSEWHEEL event for
        wxVTKRenderWindowInteractor."""

        # event processing should continue
        event.Skip()

        ctrl, shift = event.ControlDown(), event.ShiftDown()
        self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
                                            ctrl, shift, chr(0), 0, None)
        if event.GetWheelRotation() > 0:
            self._Iren.MouseWheelForwardEvent()
        else:
            self._Iren.MouseWheelBackwardEvent()

    def OnKeyDown(self,event):
        """Handles the wx.EVT_KEY_DOWN event for
        wxVTKRenderWindowInteractor."""

        # event processing should continue
        event.Skip()

        ctrl, shift = event.ControlDown(), event.ShiftDown()
        keycode, keysym = event.GetKeyCode(), None
        key = chr(0)
        if keycode < 256:
            key = chr(keycode)

        # wxPython 2.6.0.1 does not return a valid event.Get{X,Y}()
        # for this event, so we use the cached position.
        (x,y)= self._Iren.GetEventPosition()
        self._Iren.SetEventInformation(x, y,
                                       ctrl, shift, key, 0,
                                       keysym)

        self._Iren.KeyPressEvent()
        self._Iren.CharEvent()

    def OnKeyUp(self,event):
        """Handles the wx.EVT_KEY_UP event for wxVTKRenderWindowInteractor."""

        # event processing should continue
        event.Skip()

        ctrl, shift = event.ControlDown(), event.ShiftDown()
        keycode, keysym = event.GetKeyCode(), None
        key = chr(0)
        if keycode < 256:
            key = chr(keycode)

        self._Iren.SetEventInformationFlipY(event.GetX(), event.GetY(),
                                            ctrl, shift, key, 0,
                                            keysym)
        self._Iren.KeyReleaseEvent()

    def GetRenderWindow(self):
        """Returns the render window (vtkRenderWindow)."""
        return self._Iren.GetRenderWindow()

    def Render(self):
        """Actually renders the VTK scene on screen."""
        RenderAllowed = 1

        if not self.__RenderWhenDisabled:
            # the user doesn't want us to render when the toplevel frame
            # is disabled - first find the top level parent
            topParent = wx.GetTopLevelParent(self)
            if topParent:
                # if it exists, check whether it's enabled
                # if it's not enabled, RenderAllowed will be false
                RenderAllowed = topParent.IsEnabled()

        if RenderAllowed:
            if self.__handle and self.__handle == self.GetHandle():
                self._Iren.GetRenderWindow().Render()

            elif self.GetHandle() and self.__has_painted:
                # this means the user has reparented us; let's adapt to the
                # new situation by doing the WindowRemap dance
                self._Iren.GetRenderWindow().SetNextWindowInfo(
                    str(self.GetHandle()))

                # make sure the DisplayId is also set correctly
                d = self.GetDisplayId()
                if d:
                    self._Iren.GetRenderWindow().SetDisplayId(d)

                # do the actual remap with the new parent information
                self._Iren.GetRenderWindow().WindowRemap()

                # store the new situation
                self.__handle = self.GetHandle()
                self._Iren.GetRenderWindow().Render()

    def SetRenderWhenDisabled(self, newValue):
        """
        Change value of __RenderWhenDisabled ivar.

        If __RenderWhenDisabled is false (the default), this widget will not
        call Render() on the RenderWindow if the top level frame (i.e. the
        containing frame) has been disabled.

        This prevents recursive rendering during wx.SafeYield() calls.
wx.SafeYield() can be called during the ProgressMethod() callback of a VTK object to have progress bars and other GUI elements updated - it does this by disabling all windows (disallowing user-input to prevent re-entrancy of code) and then handling all outstanding GUI events. However, this often triggers an OnPaint() method for wxVTKRWIs, resulting in a Render(), resulting in Update() being called whilst still in progress. """ self.__RenderWhenDisabled = bool(newValue) #-------------------------------------------------------------------- def wxVTKRenderWindowInteractorConeExample(): """Like it says, just a simple example.""" # every wx app needs an app app = wx.PySimpleApp() # create the top-level frame, sizer and wxVTKRWI frame = wx.Frame(None, -1, "wxVTKRenderWindowInteractor", size=(400,400)) widget = wxVTKRenderWindowInteractor(frame, -1) sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(widget, 1, wx.EXPAND) frame.SetSizer(sizer) frame.Layout() # It would be more correct (API-wise) to call widget.Initialize() and # widget.Start() here, but Initialize() calls RenderWindow.Render(). # That Render() call will get through before we can setup the # RenderWindow() to render via the wxWidgets-created context; this # causes flashing on some platforms and downright breaks things on # other platforms. Instead, we call widget.Enable(). This means # that the RWI::Initialized ivar is not set, but in THIS SPECIFIC CASE, # that doesn't matter. widget.Enable(1) widget.AddObserver("ExitEvent", lambda o,e,f=frame: f.Close()) ren = vtk.vtkRenderer() widget.GetRenderWindow().AddRenderer(ren) cone = vtk.vtkConeSource() cone.SetResolution(8) coneMapper = vtk.vtkPolyDataMapper() coneMapper.SetInput(cone.GetOutput()) coneActor = vtk.vtkActor() coneActor.SetMapper(coneMapper) ren.AddActor(coneActor) # show the window frame.Show() app.MainLoop() if __name__ == "__main__": wxVTKRenderWindowInteractorConeExample()
gpl-3.0
-454,796,966,052,879,700
34.573066
102
0.57499
false
4.141094
false
false
false
spencerlyon2/pygments
pygments/lexers/_clbuiltins.py
2
14050
# -*- coding: utf-8 -*- """ pygments.lexers._clbuiltins ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ANSI Common Lisp builtins. :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ BUILTIN_FUNCTIONS = set(( # 638 functions '<', '<=', '=', '>', '>=', '-', '/', '/=', '*', '+', '1-', '1+', 'abort', 'abs', 'acons', 'acos', 'acosh', 'add-method', 'adjoin', 'adjustable-array-p', 'adjust-array', 'allocate-instance', 'alpha-char-p', 'alphanumericp', 'append', 'apply', 'apropos', 'apropos-list', 'aref', 'arithmetic-error-operands', 'arithmetic-error-operation', 'array-dimension', 'array-dimensions', 'array-displacement', 'array-element-type', 'array-has-fill-pointer-p', 'array-in-bounds-p', 'arrayp', 'array-rank', 'array-row-major-index', 'array-total-size', 'ash', 'asin', 'asinh', 'assoc', 'assoc-if', 'assoc-if-not', 'atan', 'atanh', 'atom', 'bit', 'bit-and', 'bit-andc1', 'bit-andc2', 'bit-eqv', 'bit-ior', 'bit-nand', 'bit-nor', 'bit-not', 'bit-orc1', 'bit-orc2', 'bit-vector-p', 'bit-xor', 'boole', 'both-case-p', 'boundp', 'break', 'broadcast-stream-streams', 'butlast', 'byte', 'byte-position', 'byte-size', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-next-method', 'car', 'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr', 'ceiling', 'cell-error-name', 'cerror', 'change-class', 'char', 'char<', 'char<=', 'char=', 'char>', 'char>=', 'char/=', 'character', 'characterp', 'char-code', 'char-downcase', 'char-equal', 'char-greaterp', 'char-int', 'char-lessp', 'char-name', 'char-not-equal', 'char-not-greaterp', 'char-not-lessp', 'char-upcase', 'cis', 'class-name', 'class-of', 'clear-input', 'clear-output', 'close', 'clrhash', 'code-char', 'coerce', 'compile', 'compiled-function-p', 'compile-file', 'compile-file-pathname', 'compiler-macro-function', 'complement', 'complex', 'complexp', 'compute-applicable-methods', 'compute-restarts', 'concatenate', 'concatenated-stream-streams', 'conjugate', 'cons', 'consp', 'constantly', 'constantp', 'continue', 'copy-alist', 'copy-list', 'copy-pprint-dispatch', 'copy-readtable', 'copy-seq', 'copy-structure', 'copy-symbol', 'copy-tree', 'cos', 'cosh', 'count', 'count-if', 'count-if-not', 'decode-float', 'decode-universal-time', 'delete', 'delete-duplicates', 'delete-file', 'delete-if', 'delete-if-not', 'delete-package', 'denominator', 'deposit-field', 'describe', 'describe-object', 'digit-char', 'digit-char-p', 'directory', 'directory-namestring', 'disassemble', 'documentation', 'dpb', 'dribble', 'echo-stream-input-stream', 'echo-stream-output-stream', 'ed', 'eighth', 'elt', 'encode-universal-time', 'endp', 'enough-namestring', 'ensure-directories-exist', 'ensure-generic-function', 'eq', 'eql', 'equal', 'equalp', 'error', 'eval', 'evenp', 'every', 'exp', 'export', 'expt', 'fboundp', 'fceiling', 'fdefinition', 'ffloor', 'fifth', 'file-author', 'file-error-pathname', 'file-length', 'file-namestring', 'file-position', 'file-string-length', 'file-write-date', 'fill', 'fill-pointer', 'find', 'find-all-symbols', 'find-class', 'find-if', 'find-if-not', 'find-method', 'find-package', 'find-restart', 'find-symbol', 'finish-output', 'first', 'float', 'float-digits', 'floatp', 'float-precision', 'float-radix', 'float-sign', 'floor', 'fmakunbound', 'force-output', 'format', 'fourth', 'fresh-line', 'fround', 'ftruncate', 'funcall', 'function-keywords', 
'function-lambda-expression', 'functionp', 'gcd', 'gensym', 'gentemp', 'get', 'get-decoded-time', 'get-dispatch-macro-character', 'getf', 'gethash', 'get-internal-real-time', 'get-internal-run-time', 'get-macro-character', 'get-output-stream-string', 'get-properties', 'get-setf-expansion', 'get-universal-time', 'graphic-char-p', 'hash-table-count', 'hash-table-p', 'hash-table-rehash-size', 'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test', 'host-namestring', 'identity', 'imagpart', 'import', 'initialize-instance', 'input-stream-p', 'inspect', 'integer-decode-float', 'integer-length', 'integerp', 'interactive-stream-p', 'intern', 'intersection', 'invalid-method-error', 'invoke-debugger', 'invoke-restart', 'invoke-restart-interactively', 'isqrt', 'keywordp', 'last', 'lcm', 'ldb', 'ldb-test', 'ldiff', 'length', 'lisp-implementation-type', 'lisp-implementation-version', 'list', 'list*', 'list-all-packages', 'listen', 'list-length', 'listp', 'load', 'load-logical-pathname-translations', 'log', 'logand', 'logandc1', 'logandc2', 'logbitp', 'logcount', 'logeqv', 'logical-pathname', 'logical-pathname-translations', 'logior', 'lognand', 'lognor', 'lognot', 'logorc1', 'logorc2', 'logtest', 'logxor', 'long-site-name', 'lower-case-p', 'machine-instance', 'machine-type', 'machine-version', 'macroexpand', 'macroexpand-1', 'macro-function', 'make-array', 'make-broadcast-stream', 'make-concatenated-stream', 'make-condition', 'make-dispatch-macro-character', 'make-echo-stream', 'make-hash-table', 'make-instance', 'make-instances-obsolete', 'make-list', 'make-load-form', 'make-load-form-saving-slots', 'make-package', 'make-pathname', 'make-random-state', 'make-sequence', 'make-string', 'make-string-input-stream', 'make-string-output-stream', 'make-symbol', 'make-synonym-stream', 'make-two-way-stream', 'makunbound', 'map', 'mapc', 'mapcan', 'mapcar', 'mapcon', 'maphash', 'map-into', 'mapl', 'maplist', 'mask-field', 'max', 'member', 'member-if', 'member-if-not', 'merge', 'merge-pathnames', 'method-combination-error', 'method-qualifiers', 'min', 'minusp', 'mismatch', 'mod', 'muffle-warning', 'name-char', 'namestring', 'nbutlast', 'nconc', 'next-method-p', 'nintersection', 'ninth', 'no-applicable-method', 'no-next-method', 'not', 'notany', 'notevery', 'nreconc', 'nreverse', 'nset-difference', 'nset-exclusive-or', 'nstring-capitalize', 'nstring-downcase', 'nstring-upcase', 'nsublis', 'nsubst', 'nsubst-if', 'nsubst-if-not', 'nsubstitute', 'nsubstitute-if', 'nsubstitute-if-not', 'nth', 'nthcdr', 'null', 'numberp', 'numerator', 'nunion', 'oddp', 'open', 'open-stream-p', 'output-stream-p', 'package-error-package', 'package-name', 'package-nicknames', 'packagep', 'package-shadowing-symbols', 'package-used-by-list', 'package-use-list', 'pairlis', 'parse-integer', 'parse-namestring', 'pathname', 'pathname-device', 'pathname-directory', 'pathname-host', 'pathname-match-p', 'pathname-name', 'pathnamep', 'pathname-type', 'pathname-version', 'peek-char', 'phase', 'plusp', 'position', 'position-if', 'position-if-not', 'pprint', 'pprint-dispatch', 'pprint-fill', 'pprint-indent', 'pprint-linear', 'pprint-newline', 'pprint-tab', 'pprint-tabular', 'prin1', 'prin1-to-string', 'princ', 'princ-to-string', 'print', 'print-object', 'probe-file', 'proclaim', 'provide', 'random', 'random-state-p', 'rassoc', 'rassoc-if', 'rassoc-if-not', 'rational', 'rationalize', 'rationalp', 'read', 'read-byte', 'read-char', 'read-char-no-hang', 'read-delimited-list', 'read-from-string', 'read-line', 'read-preserving-whitespace', 
'read-sequence', 'readtable-case', 'readtablep', 'realp', 'realpart', 'reduce', 'reinitialize-instance', 'rem', 'remhash', 'remove', 'remove-duplicates', 'remove-if', 'remove-if-not', 'remove-method', 'remprop', 'rename-file', 'rename-package', 'replace', 'require', 'rest', 'restart-name', 'revappend', 'reverse', 'room', 'round', 'row-major-aref', 'rplaca', 'rplacd', 'sbit', 'scale-float', 'schar', 'search', 'second', 'set', 'set-difference', 'set-dispatch-macro-character', 'set-exclusive-or', 'set-macro-character', 'set-pprint-dispatch', 'set-syntax-from-char', 'seventh', 'shadow', 'shadowing-import', 'shared-initialize', 'short-site-name', 'signal', 'signum', 'simple-bit-vector-p', 'simple-condition-format-arguments', 'simple-condition-format-control', 'simple-string-p', 'simple-vector-p', 'sin', 'sinh', 'sixth', 'sleep', 'slot-boundp', 'slot-exists-p', 'slot-makunbound', 'slot-missing', 'slot-unbound', 'slot-value', 'software-type', 'software-version', 'some', 'sort', 'special-operator-p', 'sqrt', 'stable-sort', 'standard-char-p', 'store-value', 'stream-element-type', 'stream-error-stream', 'stream-external-format', 'streamp', 'string', 'string<', 'string<=', 'string=', 'string>', 'string>=', 'string/=', 'string-capitalize', 'string-downcase', 'string-equal', 'string-greaterp', 'string-left-trim', 'string-lessp', 'string-not-equal', 'string-not-greaterp', 'string-not-lessp', 'stringp', 'string-right-trim', 'string-trim', 'string-upcase', 'sublis', 'subseq', 'subsetp', 'subst', 'subst-if', 'subst-if-not', 'substitute', 'substitute-if', 'substitute-if-not', 'subtypep','svref', 'sxhash', 'symbol-function', 'symbol-name', 'symbolp', 'symbol-package', 'symbol-plist', 'symbol-value', 'synonym-stream-symbol', 'syntax:', 'tailp', 'tan', 'tanh', 'tenth', 'terpri', 'third', 'translate-logical-pathname', 'translate-pathname', 'tree-equal', 'truename', 'truncate', 'two-way-stream-input-stream', 'two-way-stream-output-stream', 'type-error-datum', 'type-error-expected-type', 'type-of', 'typep', 'unbound-slot-instance', 'unexport', 'unintern', 'union', 'unread-char', 'unuse-package', 'update-instance-for-different-class', 'update-instance-for-redefined-class', 'upgraded-array-element-type', 'upgraded-complex-part-type', 'upper-case-p', 'use-package', 'user-homedir-pathname', 'use-value', 'values', 'values-list', 'vector', 'vectorp', 'vector-pop', 'vector-push', 'vector-push-extend', 'warn', 'wild-pathname-p', 'write', 'write-byte', 'write-char', 'write-line', 'write-sequence', 'write-string', 'write-to-string', 'yes-or-no-p', 'y-or-n-p', 'zerop', )) SPECIAL_FORMS = set(( 'block', 'catch', 'declare', 'eval-when', 'flet', 'function', 'go', 'if', 'labels', 'lambda', 'let', 'let*', 'load-time-value', 'locally', 'macrolet', 'multiple-value-call', 'multiple-value-prog1', 'progn', 'progv', 'quote', 'return-from', 'setq', 'symbol-macrolet', 'tagbody', 'the', 'throw', 'unwind-protect', )) MACROS = set(( 'and', 'assert', 'call-method', 'case', 'ccase', 'check-type', 'cond', 'ctypecase', 'decf', 'declaim', 'defclass', 'defconstant', 'defgeneric', 'define-compiler-macro', 'define-condition', 'define-method-combination', 'define-modify-macro', 'define-setf-expander', 'define-symbol-macro', 'defmacro', 'defmethod', 'defpackage', 'defparameter', 'defsetf', 'defstruct', 'deftype', 'defun', 'defvar', 'destructuring-bind', 'do', 'do*', 'do-all-symbols', 'do-external-symbols', 'dolist', 'do-symbols', 'dotimes', 'ecase', 'etypecase', 'formatter', 'handler-bind', 'handler-case', 'ignore-errors', 'incf', 'in-package', 
'lambda', 'loop', 'loop-finish', 'make-method', 'multiple-value-bind', 'multiple-value-list', 'multiple-value-setq', 'nth-value', 'or', 'pop', 'pprint-exit-if-list-exhausted', 'pprint-logical-block', 'pprint-pop', 'print-unreadable-object', 'prog', 'prog*', 'prog1', 'prog2', 'psetf', 'psetq', 'push', 'pushnew', 'remf', 'restart-bind', 'restart-case', 'return', 'rotatef', 'setf', 'shiftf', 'step', 'time', 'trace', 'typecase', 'unless', 'untrace', 'when', 'with-accessors', 'with-compilation-unit', 'with-condition-restarts', 'with-hash-table-iterator', 'with-input-from-string', 'with-open-file', 'with-open-stream', 'with-output-to-string', 'with-package-iterator', 'with-simple-restart', 'with-slots', 'with-standard-io-syntax', )) LAMBDA_LIST_KEYWORDS = set(( '&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional', '&rest', '&whole', )) DECLARATIONS = set(( 'dynamic-extent', 'ignore', 'optimize', 'ftype', 'inline', 'special', 'ignorable', 'notinline', 'type', )) BUILTIN_TYPES = set(( 'atom', 'boolean', 'base-char', 'base-string', 'bignum', 'bit', 'compiled-function', 'extended-char', 'fixnum', 'keyword', 'nil', 'signed-byte', 'short-float', 'single-float', 'double-float', 'long-float', 'simple-array', 'simple-base-string', 'simple-bit-vector', 'simple-string', 'simple-vector', 'standard-char', 'unsigned-byte', # Condition Types 'arithmetic-error', 'cell-error', 'condition', 'control-error', 'division-by-zero', 'end-of-file', 'error', 'file-error', 'floating-point-inexact', 'floating-point-overflow', 'floating-point-underflow', 'floating-point-invalid-operation', 'parse-error', 'package-error', 'print-not-readable', 'program-error', 'reader-error', 'serious-condition', 'simple-condition', 'simple-error', 'simple-type-error', 'simple-warning', 'stream-error', 'storage-condition', 'style-warning', 'type-error', 'unbound-variable', 'unbound-slot', 'undefined-function', 'warning', )) BUILTIN_CLASSES = set(( 'array', 'broadcast-stream', 'bit-vector', 'built-in-class', 'character', 'class', 'complex', 'concatenated-stream', 'cons', 'echo-stream', 'file-stream', 'float', 'function', 'generic-function', 'hash-table', 'integer', 'list', 'logical-pathname', 'method-combination', 'method', 'null', 'number', 'package', 'pathname', 'ratio', 'rational', 'readtable', 'real', 'random-state', 'restart', 'sequence', 'standard-class', 'standard-generic-function', 'standard-method', 'standard-object', 'string-stream', 'stream', 'string', 'structure-class', 'structure-object', 'symbol', 'synonym-stream', 't', 'two-way-stream', 'vector', ))
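
# Illustrative sketch (not part of the original data module): keyword
# tables like the ones above are typically consulted at highlight time to
# pick a token category for each symbol.  `classify` is a hypothetical
# helper written for this example, not an API of the source; the large
# function list defined earlier in this module would slot in the same way.
def classify(symbol):
    """Map a Common Lisp symbol to a coarse token category."""
    name = symbol.lower()
    if name in SPECIAL_FORMS:
        return 'Keyword'
    if name in MACROS:
        return 'Name.Builtin.Macro'
    if name in LAMBDA_LIST_KEYWORDS:
        return 'Keyword.Pseudo'
    if name in DECLARATIONS:
        return 'Keyword.Declaration'
    if name in BUILTIN_TYPES or name in BUILTIN_CLASSES:
        return 'Name.Builtin.Type'
    return 'Name.Variable'

# e.g. classify('if') -> 'Keyword', classify('when') -> 'Name.Builtin.Macro'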
bsd-2-clause
8,959,820,288,131,650,000
59.560345
80
0.629751
false
2.994459
false
false
false
mmerce/python
bigml/tests/create_forecast_steps.py
1
1792
# -*- coding: utf-8 -*- # # Copyright 2017-2020 BigML # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time from nose.tools import assert_almost_equals, eq_ from datetime import datetime from .world import world from bigml.api import HTTP_CREATED from bigml.api import FINISHED, FAULTY from bigml.api import get_status from .read_forecast_steps import i_get_the_forecast def i_create_a_forecast(step, data=None): if data is None: data = "{}" time_series = world.time_series['resource'] data = json.loads(data) resource = world.api.create_forecast(time_series, data) world.status = resource['code'] eq_(world.status, HTTP_CREATED) world.location = resource['location'] world.forecast = resource['object'] world.forecasts.append(resource['resource']) def the_forecast_is(step, predictions): predictions = json.loads(predictions) attrs = ["point_forecast", "model"] for field_id in predictions: forecast = world.forecast['forecast']['result'][field_id] prediction = predictions[field_id] eq_(len(forecast), len(prediction), "forecast: %s" % forecast) for index in range(len(forecast)): for attr in attrs: eq_(forecast[index][attr], prediction[index][attr])
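
# Illustrative usage sketch (not from the source): both steps above are
# driven by JSON strings.  The shapes below are assumptions inferred from
# how `the_forecast_is` indexes its argument -- per field id, a list of
# entries carrying "point_forecast" and "model" -- and the field id and
# numbers are invented for the example.
#
#     i_create_a_forecast(step, data='{"000005": {"horizon": 3}}')
#     the_forecast_is(step, predictions='{"000005": [{"point_forecast":'
#                     ' [68.5, 68.5, 68.5], "model": "A,N,N"}]}')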
apache-2.0
7,671,300,440,271,742,000
34.137255
75
0.704241
false
3.820896
false
false
false
terrycojones/dark-matter
dark/mutations.py
1
16454
import os from collections import defaultdict import numpy as np try: import matplotlib if not os.environ.get('DISPLAY'): # Use non-interactive Agg backend matplotlib.use('Agg') import matplotlib.pyplot as plt except ImportError: import platform if platform.python_implementation() == 'PyPy': # PyPy doesn't have a version of matplotlib. Make a fake # class that raises if it is used. This allows us to use other # 'dark' code that happens to import dark.mutations but not use the # functions that rely on matplotlib. class plt(object): def __getattr__(self, _): raise NotImplementedError( 'matplotlib is not supported under pypy') else: raise from random import choice, uniform from dark import ncbidb def basePlotter(blastHits, title): """ Plot the reads and the subject, so that bases in the reads which are different from the subject are shown. Else a '.' is shown. like so: subject_gi ATGCGTACGTACGACACC read_1 A......TTC..T @param blastHits: A L{dark.blast.BlastHits} instance. @param title: A C{str} sequence title that was matched by BLAST. We plot the reads that matched this title. """ result = [] params = blastHits.plotParams assert params is not None, ('Oops, it looks like you forgot to run ' 'computePlotInfo.') sequence = ncbidb.getSequence(title, blastHits.records.blastDb) subject = sequence.seq gi = title.split('|')[1] sub = '%s\t \t \t%s' % (gi, subject) result.append(sub) plotInfo = blastHits.titles[title]['plotInfo'] assert plotInfo is not None, ('Oops, it looks like you forgot to run ' 'computePlotInfo.') items = plotInfo['items'] count = 0 for item in items: count += 1 hsp = item['hsp'] queryTitle = blastHits.fasta[item['readNum']].id # If the product of the subject and query frame values is +ve, # then they're either both +ve or both -ve, so we just use the # query as is. Otherwise, we need to reverse complement it. if item['frame']['subject'] * item['frame']['query'] > 0: query = blastHits.fasta[item['readNum']].seq reverse = False else: # One of the subject or query has negative sense. query = blastHits.fasta[ item['readNum']].reverse_complement().seq reverse = True query = query.upper() queryStart = hsp['queryStart'] subjectStart = hsp['subjectStart'] queryEnd = hsp['queryEnd'] subjectEnd = hsp['subjectEnd'] # Before comparing the read to the subject, make a string of the # same length as the subject, which contains the read and # has ' ' where the read does not match. # 3 parts need to be taken into account: # 1) the left offset (if the query doesn't stick out to the left) # 2) the query. if the frame is -1, it has to be reversed. # The query consists of 3 parts: left, middle (control for gaps) # 3) the right offset # Do part 1) and 2). if queryStart < 0: # The query is sticking out to the left. leftQuery = '' if subjectStart == 0: # The match starts at the first base of the subject. middleLeftQuery = '' else: # The match starts into the subject. # Determine the length of the not matching query # part to the left. leftOffset = -1 * queryStart rightOffset = subjectStart + leftOffset middleLeftQuery = query[leftOffset:rightOffset] else: # The query is not sticking out to the left # make the left offset. leftQuery = queryStart * ' ' leftQueryOffset = subjectStart - queryStart middleLeftQuery = query[:leftQueryOffset] # Do part 3). # Disregard gaps in subject while adding. 
matchQuery = item['origHsp'].query matchSubject = item['origHsp'].sbjct index = 0 mid = '' for item in range(len(matchQuery)): if matchSubject[index] != ' ': mid += matchQuery[index] index += 1 # if the query has been reversed, turn the matched part around if reverse: rev = '' toReverse = mid reverseDict = {' ': ' ', '-': '-', 'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', '.': '.', 'N': 'N'} for item in toReverse: newItem = reverseDict[item] rev += newItem mid = rev[::-1] middleQuery = middleLeftQuery + mid # add right not-matching part of the query rightQueryOffset = queryEnd - subjectEnd rightQuery = query[-rightQueryOffset:] middleQuery += rightQuery read = leftQuery + middleQuery # do part 3) offset = len(subject) - len(read) # if the read is sticking out to the right # chop it off if offset < 0: read = read[:offset] # if it's not sticking out, fill the space with ' ' elif offset > 0: read += offset * ' ' # compare the subject and the read, make a string # called 'comparison', which contains a '.' if the bases # are equal and the letter of the read if they are not. comparison = '' for readBase, subjectBase in zip(read, subject): if readBase == ' ': comparison += ' ' elif readBase == subjectBase: comparison += '.' elif readBase != subjectBase: comparison += readBase index += 1 que = '%s \t %s' % (queryTitle, comparison) result.append(que) # sanity checks assert (len(comparison) == len(subject)), ( '%d != %d' % (len(comparison), len(subject))) index = 0 if comparison[index] == ' ': index += 1 else: start = index - 1 assert (start == queryStart or start == -1), ( '%s != %s or %s != -1' % (start, queryStart, start)) return result def getAPOBECFrequencies(dotAlignment, orig, new, pattern): """ Gets mutation frequencies if they are in a certain pattern. @param dotAlignment: result from calling basePlotter @param orig: A C{str}, naming the original base @param new: A C{str}, what orig was mutated to @param pattern: A C{str}m which pattern we're looking for (must be one of 'cPattern', 'tPattern') """ cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT', 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT'] tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT', 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT'] # choose the right pattern if pattern == 'cPattern': patterns = cPattern middleBase = 'C' else: patterns = tPattern middleBase = 'T' # generate the freqs dict with the right pattern freqs = defaultdict(int) for pattern in patterns: freqs[pattern] = 0 # get the subject sequence from dotAlignment subject = dotAlignment[0].split('\t')[3] # exclude the subject from the dotAlignment, so just the queries # are left over queries = dotAlignment[1:] for item in queries: query = item.split('\t')[1] index = 0 for queryBase in query: qBase = query[index] sBase = subject[index] if qBase == new and sBase == orig: try: plusSb = subject[index + 1] minusSb = subject[index - 1] except IndexError: plusSb = 'end' motif = '%s%s%s' % (minusSb, middleBase, plusSb) if motif in freqs: freqs[motif] += 1 index += 1 return freqs def getCompleteFreqs(blastHits): """ Make a dictionary which collects all mutation frequencies from all reads. Calls basePlotter to get dotAlignment, which is passed to getAPOBECFrequencies with the respective parameter, to collect the frequencies. @param blastHits: A L{dark.blast.BlastHits} instance. 
""" allFreqs = {} for title in blastHits.titles: allFreqs[title] = { 'C>A': {}, 'C>G': {}, 'C>T': {}, 'T>A': {}, 'T>C': {}, 'T>G': {}, } basesPlotted = basePlotter(blastHits, title) for mutation in allFreqs[title]: orig = mutation[0] new = mutation[2] if orig == 'C': pattern = 'cPattern' else: pattern = 'tPattern' freqs = getAPOBECFrequencies(basesPlotted, orig, new, pattern) allFreqs[title][mutation] = freqs numberOfReads = len(blastHits.titles[title]['plotInfo']['items']) allFreqs[title]['numberOfReads'] = numberOfReads allFreqs[title]['bitScoreMax'] = blastHits.titles[ title]['plotInfo']['bitScoreMax'] return allFreqs def makeFrequencyGraph(allFreqs, title, substitution, pattern, color='blue', createFigure=True, showFigure=True, readsAx=False): """ For a title, make a graph showing the frequencies. @param allFreqs: result from getCompleteFreqs @param title: A C{str}, title of virus of which frequencies should be plotted. @param substitution: A C{str}, which substitution should be plotted; must be one of 'C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G'. @param pattern: A C{str}, which pattern we're looking for ( must be one of 'cPattern', 'tPattern') @param color: A C{str}, color of bars. @param createFigure: If C{True}, create a figure. @param showFigure: If C{True}, show the created figure. @param readsAx: If not None, use this as the subplot for displaying reads. """ cPattern = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT', 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT'] tPattern = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT', 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT'] # choose the right pattern if pattern == 'cPattern': patterns = cPattern else: patterns = tPattern fig = plt.figure(figsize=(10, 10)) ax = readsAx or fig.add_subplot(111) # how many bars N = 16 ind = np.arange(N) width = 0.4 # make a list in the right order, so that it can be plotted easily divisor = allFreqs[title]['numberOfReads'] toPlot = allFreqs[title][substitution] index = 0 data = [] for item in patterns: newData = toPlot[patterns[index]] / divisor data.append(newData) index += 1 # create the bars ax.bar(ind, data, width, color=color) maxY = np.max(data) + 5 # axes and labels if createFigure: title = title.split('|')[4][:50] ax.set_title('%s \n %s' % (title, substitution), fontsize=20) ax.set_ylim(0, maxY) ax.set_ylabel('Absolute Number of Mutations', fontsize=16) ax.set_xticks(ind + width) ax.set_xticklabels(patterns, rotation=45, fontsize=8) if createFigure is False: ax.set_xticks(ind + width) ax.set_xticklabels(patterns, rotation=45, fontsize=0) else: if showFigure: plt.show() return maxY def makeFrequencyPanel(allFreqs, patientName): """ For a title, make a graph showing the frequencies. 
@param allFreqs: result from getCompleteFreqs @param patientName: A C{str}, title for the panel """ titles = sorted( iter(allFreqs.keys()), key=lambda title: (allFreqs[title]['bitScoreMax'], title)) origMaxY = 0 cols = 6 rows = len(allFreqs) figure, ax = plt.subplots(rows, cols, squeeze=False) substitutions = ['C>A', 'C>G', 'C>T', 'T>A', 'T>C', 'T>G'] colors = ['blue', 'black', 'red', 'yellow', 'green', 'orange'] for i, title in enumerate(titles): for index in range(6): for subst in allFreqs[str(title)]: substitution = substitutions[index] print(i, index, title, 'substitution', substitutions[index]) if substitution[0] == 'C': pattern = 'cPattern' else: pattern = 'tPattern' maxY = makeFrequencyGraph(allFreqs, title, substitution, pattern, color=colors[index], createFigure=False, showFigure=False, readsAx=ax[i][index]) if maxY > origMaxY: origMaxY = maxY # add title for individual plot. # if used for other viruses, this will have to be adapted. if index == 0: gi = title.split('|')[1] titles = title.split(' ') try: typeIndex = titles.index('type') except ValueError: typeNumber = 'gi: %s' % gi else: typeNumber = titles[typeIndex + 1] ax[i][index].set_ylabel(('Type %s \n maxBitScore: %s' % ( typeNumber, allFreqs[title]['bitScoreMax'])), fontsize=10) # add xAxis tick labels if i == 0: ax[i][index].set_title(substitution, fontsize=13) if i == len(allFreqs) - 1 or i == (len(allFreqs) - 1) / 2: if index < 3: pat = ['ACA', 'ACC', 'ACG', 'ACT', 'CCA', 'CCC', 'CCG', 'CCT', 'GCA', 'GCC', 'GCG', 'GCT', 'TCA', 'TCC', 'TCG', 'TCT'] else: pat = ['ATA', 'ATC', 'ATG', 'ATT', 'CTA', 'CTC', 'CTG', 'CTT', 'GTA', 'GTC', 'GTG', 'GTT', 'TTA', 'TTC', 'TTG', 'TTT'] ax[i][index].set_xticklabels(pat, rotation=45, fontsize=8) # make Y-axis equal for i, title in enumerate(allFreqs): for index in range(6): a = ax[i][index] a.set_ylim([0, origMaxY]) # add title of whole panel figure.suptitle('Mutation Signatures in %s' % patientName, fontsize=20) figure.set_size_inches(5 * cols, 3 * rows, forward=True) figure.show() return allFreqs def mutateString(original, n, replacements='acgt'): """ Mutate C{original} in C{n} places with chars chosen from C{replacements}. @param original: The original C{str} to mutate. @param n: The C{int} number of locations to mutate. @param replacements: The C{str} of replacement letters. @return: A new C{str} with C{n} places of C{original} mutated. @raises ValueError: if C{n} is too high, or C{replacement} contains duplicates, or if no replacement can be made at a certain locus because C{replacements} is of length one, or if C{original} is of zero length. """ if not original: raise ValueError('Empty original string passed.') if n > len(original): raise ValueError('Cannot make %d mutations in a string of length %d' % (n, len(original))) if len(replacements) != len(set(replacements)): raise ValueError('Replacement string contains duplicates') if len(replacements) == 1 and original.find(replacements) != -1: raise ValueError('Impossible replacement') result = list(original) length = len(original) for offset in range(length): if uniform(0.0, 1.0) < float(n) / (length - offset): # Mutate. while True: new = choice(replacements) if new != result[offset]: result[offset] = new break n -= 1 if n == 0: break return ''.join(result)
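
# Quick illustrative check of mutateString (an add-on sketch, assuming it
# is run as a script next to the definitions above).  The sampling loop in
# mutateString performs exactly `n` substitutions: the per-position
# probability float(n) / (length - offset) reaches 1.0 once only `n`
# candidate positions remain.
if __name__ == '__main__':
    original = 'acgtacgtacgt'
    mutated = mutateString(original, 3)
    changed = sum(a != b for a, b in zip(original, mutated))
    assert changed == 3, (original, mutated)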
mit
-974,948,910,823,843,200
35.64588
79
0.544427
false
3.942967
false
false
false
nati/fun
cube.py
1
4119
import copy import math import re import subprocess import sys import time ret = subprocess.check_output(["resize"]) m = re.match("COLUMNS=(\d+);\nLINES=(\d+);", ret) WIDTH = int(m.group(1)) HEIGHT = int(m.group(2)) SCALE = 7 X = 0 Y = 1 Z = 2 POINTS = [ [-1, -1, 1], [-1, 1, 1], [1, 1, 1], [1, -1, 1], [-1, -1, -1], [-1, 1, -1], [1, 1, -1], [1, -1, -1] ] LINES = [ [0, 1], [1, 2], [2, 3], [0, 3], [4, 5], [5, 6], [6, 7], [7, 4], [0, 4], [1, 5], [2, 6], [3, 7], ] POINTS2 = [ [-1, -1, 0], [-1, 1, 0], [1, 1, 0], [1, -1, 0], [0, 0, 3], ] LINES2 = [ [0, 1], [1, 2], [2, 3], [3, 0], [0, 4], [1, 4], [2, 4], [3, 4] ] class Campas(object): def draw_line(self, p1, p2): steep = abs(p2[Y] - p1[Y]) > abs(p2[X] - p1[X]) if steep: p1[X], p1[Y] = p1[Y], p1[X] p2[X], p2[Y] = p2[Y], p2[X] if p1[X] > p2[X]: p1[X], p2[X] = p2[X], p1[X] p1[Y], p2[Y] = p2[Y], p1[Y] dx = p2[X] - p1[X] dy = abs(p2[Y] - p1[Y]) error = dx / 2.0 y = p1[Y] if p1[Y] < p2[Y]: ystep = 1 else: ystep = -1 for x in range(p1[X], p2[X]): if steep: self.draw_point([y, x]) else: self.draw_point([x, y]) error = error - dy if error < 0: y = y + ystep error = error + dx def draw_point(self, p, char="#"): if p[X] >= WIDTH or 0 > p[X]: return if p[Y] >= HEIGHT or 0 > p[Y]: return sys.stdout.write("\033[%i;%iH%s" % (p[Y], p[X], char)) def clear_screen(self): sys.stdout.write("\033[2J") def flush(self): sys.stdout.flush() class Poly(object): points = [] lines = [] def __init__(self, points, lines, campas): self.points = copy.deepcopy(points) self.lines = copy.deepcopy(lines) self.campas = campas self.base_point = [0, 0, 1] def mult(self, transform): self.points = [self.mult_m_p(transform, p) for p in self.points] def move(self, axis, distance): self.base_point[axis] = distance def mult_m_p(self, m, p): x, y, z = p r1 = sum([m[0][0] * x, m[0][1] * y, m[0][2] * z]) r2 = sum([m[1][0] * x, m[1][1] * y, m[1][2] * z]) r3 = sum([m[2][0] * x, m[2][1] * y, m[2][2] * z]) return [r1, r2, r3] def projection(self, p): cx, cy = WIDTH / 2, HEIGHT / 2 x = (p[X] + self.base_point[X]) * SCALE / self.base_point[Z] + cx y = (p[Y] + self.base_point[Y]) * SCALE / self.base_point[Z] + cy return [int(x), int(y)] def draw(self): if self.base_point[Z] <= 0: return for point in self.points: self.campas.draw_point(self.projection(point)) for line in self.lines: self.campas.draw_line(self.projection(self.points[line[0]]), self.projection(self.points[line[1]])) def matrix_rotate_x(a): return [[1, 0, 0], [0, math.cos(a), -math.sin(a)], [0, math.sin(a), math.cos(a)]] def matrix_rotate_y(a): return [[math.cos(a), 0, math.sin(a)], [0, 1, 0], [-math.sin(a), 0, math.cos(a)]] campas = Campas() campas.clear_screen() cube = Poly(POINTS, LINES, campas) cube2 = Poly(POINTS2, LINES2, campas) cube3 = Poly(POINTS, LINES, campas) i = math.pi / 100.0 j = 0 mx = matrix_rotate_x(i * 1) my = matrix_rotate_y(i * 5) while True: campas.clear_screen() cube.mult(mx) cube.mult(my) cube3.mult(mx) cube3.mult(my) cube.move(Z, math.sin(j) + 1.5) cube.move(X, 10 * math.cos(j)) cube3.move(Z, math.sin(j + math.pi / 2) + 1.5) cube3.move(Y, 3 * math.cos(j + math.pi / 2)) j += math.pi / 50.0 cube2.mult(mx) cube2.mult(my) cube2.move(Z, 1.5) cube.draw() cube2.draw() cube3.draw() campas.flush() time.sleep(0.1)
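
# Illustrative numbers for Poly.projection above (a sketch assuming an
# 80x24 terminal, so cx == 40, cy == 12, with SCALE == 7): a vertex at
# x == 1 on a cube whose base_point is [0, 0, 2] lands at column
#     (1 + 0) * 7 / 2.0 + 40 == 43.5  ->  int() truncates to 43
# Moving base_point further along +Z shrinks the figure on screen, which
# gives the fly-by its depth cue.  Note the divide uses the shared
# base_point Z rather than each vertex's own z, so this is a scaled
# parallel projection, not a true perspective one.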
apache-2.0
-4,906,687,155,164,076,000
20.793651
73
0.453265
false
2.525445
false
false
false
chemiron/aiopool
aiopool/fork.py
1
6082
import asyncio import logging import os import signal from struct import Struct import time from .base import (WorkerProcess, ChildProcess, IDLE_CHECK, IDLE_TIME) MSG_HEAD = 0x0 MSG_PING = 0x1 MSG_PONG = 0x2 MSG_CLOSE = 0x3 PACK_MSG = Struct('!BB').pack UNPACK_MSG = Struct('!BB').unpack logger = logging.getLogger(__name__) class ConnectionClosedError(Exception): pass @asyncio.coroutine def connect_write_pipe(file): loop = asyncio.get_event_loop() transport, _ = yield from loop.connect_write_pipe(asyncio.Protocol, file) return PipeWriter(transport) @asyncio.coroutine def connect_read_pipe(file): loop = asyncio.get_event_loop() pipe_reader = PipeReader(loop=loop) transport, _ = yield from loop.connect_read_pipe( lambda: PipeReadProtocol(pipe_reader), file) pipe_reader.transport = transport return pipe_reader class PipeWriter: def __init__(self, transport): self.transport = transport def _send(self, msg): self.transport.write(PACK_MSG(MSG_HEAD, msg)) def ping(self): self._send(MSG_PING) def pong(self): self._send(MSG_PONG) def stop(self): self._send(MSG_CLOSE) def close(self): if self.transport is not None: self.transport.close() class PipeReadProtocol(asyncio.Protocol): def __init__(self, reader): self.reader = reader def data_received(self, data): self.reader.feed(data) def connection_lost(self, exc): self.reader.close() class PipeReader: closed = False transport = None def __init__(self, loop): self.loop = loop self._waiters = asyncio.Queue() def close(self): self.closed = True while not self._waiters.empty(): waiter = self._waiters.get_nowait() if not waiter.done(): waiter.set_exception(ConnectionClosedError()) if self.transport is not None: self.transport.close() def feed(self, data): asyncio.async(self._feed_waiter(data)) @asyncio.coroutine def _feed_waiter(self, data): waiter = yield from self._waiters.get() waiter.set_result(data) @asyncio.coroutine def read(self): if self.closed: raise ConnectionClosedError() waiter = asyncio.Future(loop=self.loop) yield from self._waiters.put(waiter) data = yield from waiter hdr, msg = UNPACK_MSG(data) if hdr == MSG_HEAD: return msg class ForkChild(ChildProcess): _heartbeat_task = None def __init__(self, parent_read, parent_write, loader, **options): ChildProcess.__init__(self, loader, **options) self.parent_read = parent_read self.parent_write = parent_write @asyncio.coroutine def on_start(self): self._heartbeat_task = asyncio.Task(self.heartbeat()) def stop(self): if self._heartbeat_task is not None: self._heartbeat_task.cancel() ChildProcess.stop(self) @asyncio.coroutine def heartbeat(self): # setup pipes reader = yield from connect_read_pipe( os.fdopen(self.parent_read, 'rb')) writer = yield from connect_write_pipe( os.fdopen(self.parent_write, 'wb')) while True: try: msg = yield from reader.read() except ConnectionClosedError: logger.info('Parent is dead, {} stopping...' 
                            ''.format(os.getpid()))
                break

            if msg == MSG_PING:
                writer.pong()
            elif msg == MSG_CLOSE:
                # msg is the raw message byte (an int), so compare it
                # directly -- it has no attributes
                break

        reader.close()
        writer.close()

        self.stop()


class ForkWorker(WorkerProcess):

    pid = ping = None
    reader = writer = None
    chat_task = heartbeat_task = None

    def start_child(self):
        parent_read, child_write = os.pipe()
        child_read, parent_write = os.pipe()

        pid = os.fork()
        if pid:  # parent
            os.close(parent_read)
            os.close(parent_write)

            asyncio.async(self.connect(pid, child_write, child_read))
        else:  # child
            os.close(child_write)
            os.close(child_read)

            # cleanup after fork
            asyncio.set_event_loop(None)

            # setup process
            process = ForkChild(parent_read, parent_write, self.loader)
            process.start()

    def kill_child(self):
        self.chat_task.cancel()
        self.heartbeat_task.cancel()

        self.reader.close()
        self.writer.close()

        try:
            os.kill(self.pid, signal.SIGTERM)
            os.waitpid(self.pid, 0)
        except ProcessLookupError:
            pass

    @asyncio.coroutine
    def heartbeat(self, writer):
        idle_time = self.options.get('idle_time', IDLE_TIME)
        idle_check = self.options.get('idle_check', IDLE_CHECK)

        while True:
            yield from asyncio.sleep(idle_check)
            if (time.monotonic() - self.ping) < idle_time:
                writer.ping()
            else:
                self.restart()
                return

    @asyncio.coroutine
    def chat(self, reader):
        while True:
            try:
                msg = yield from reader.read()
            except ConnectionClosedError:
                self.restart()
                return

            if msg == MSG_PONG:
                self.ping = time.monotonic()

    @asyncio.coroutine
    def connect(self, pid, up_write, down_read):
        # setup pipes
        reader = yield from connect_read_pipe(
            os.fdopen(down_read, 'rb'))
        writer = yield from connect_write_pipe(
            os.fdopen(up_write, 'wb'))

        # store info
        self.pid = pid
        self.ping = time.monotonic()
        self.reader = reader
        self.writer = writer

        self.chat_task = asyncio.Task(self.chat(reader))
        self.heartbeat_task = asyncio.Task(self.heartbeat(writer))
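
# Illustrative round trip of the two-byte frame format used above (a
# sketch; run it in this module's namespace):
#
#     frame = PACK_MSG(MSG_HEAD, MSG_PING)   # -> b'\x00\x01'
#     hdr, msg = UNPACK_MSG(frame)
#     assert (hdr, msg) == (MSG_HEAD, MSG_PING)
#
# PipeReader.read() strips the header and returns only the message byte
# (and only when the header equals MSG_HEAD), which is why the loops above
# compare the result directly against MSG_PING / MSG_PONG / MSG_CLOSE.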
mit
982,928,408,585,555,200
24.447699
77
0.57366
false
4.017173
false
false
false
hypebeast/etapi
etapi/utils.py
1
1765
# -*- coding: utf-8 -*-
'''Helper utilities and decorators.'''
import time
from datetime import datetime

from flask import flash


def flash_errors(form, category="warning"):
    '''Flash all errors for a form.'''
    for field, errors in form.errors.items():
        for error in errors:
            flash("{0} - {1}"
                  .format(getattr(form, field).label.text, error), category)


def pretty_date(dt, default=None):
    """
    Returns string representing "time since" e.g.
    3 days ago, 5 hours ago etc.
    Ref: https://bitbucket.org/danjac/newsmeme/src/a281babb9ca3/newsmeme/
    """

    if default is None:
        default = 'just now'

    now = datetime.utcnow()
    diff = now - dt

    periods = (
        (diff.days / 365, 'year', 'years'),
        (diff.days / 30, 'month', 'months'),
        (diff.days / 7, 'week', 'weeks'),
        (diff.days, 'day', 'days'),
        (diff.seconds / 3600, 'hour', 'hours'),
        (diff.seconds / 60, 'minute', 'minutes'),
        (diff.seconds, 'second', 'seconds'),
    )

    for period, singular, plural in periods:

        if not period:
            continue

        if period == 1:
            return u'%d %s ago' % (period, singular)
        else:
            return u'%d %s ago' % (period, plural)

    return default


def pretty_seconds_to_hhmmss(seconds):
    if not seconds:
        return None

    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)

    return "%d h %d m %s s" % (h, m, s)


def pretty_seconds_to_hhmm(seconds):
    if not seconds:
        return None

    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)

    return "%d h %d m" % (h, m)


def pretty_seconds_to_hh(seconds):
    if not seconds:
        return None

    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)

    return "%d h" % (h)
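
# Illustrative expected values for the helpers above (a sketch; note the
# falsy-input guard means 0 or None returns None rather than "0 h 0 m 0 s"):
#
#     pretty_seconds_to_hhmmss(3725)  # -> '1 h 2 m 5 s'
#     pretty_seconds_to_hhmm(3725)    # -> '1 h 2 m'
#     pretty_seconds_to_hh(7200)      # -> '2 h'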
bsd-3-clause
-7,001,446,943,259,879,000
24.955882
78
0.549575
false
3.374761
false
false
false
DBeath/flask-feedrsub
tests/period_test.py
1
1488
from datetime import datetime from dateutil.relativedelta import relativedelta from feedrsub.database import db from feedrsub.models.period import PERIOD, Period from feedrsub.models.populate_db import populate_periods def test_populate_periods(session): populate_periods() daily = Period.query.filter_by(name=PERIOD.DAILY).first() assert daily.name == PERIOD.DAILY immediate = Period.query.filter_by(name=PERIOD.IMMEDIATE).first() assert immediate.name == PERIOD.IMMEDIATE weekly = Period.query.filter_by(name=PERIOD.WEEKLY).first() assert weekly.name == PERIOD.WEEKLY monthly = Period.query.filter_by(name=PERIOD.MONTHLY).first() assert monthly.name == PERIOD.MONTHLY def test_period_creation(session): period_desc = "A Yearly period" period_name = "YEARLY" period = Period(period_name, period_desc) db.session.add(period) db.session.commit() yearly = Period.query.filter_by(name=period_name).first() assert yearly.name == period_name assert yearly.description == period_desc def test_get_from_date_with_name(session): now = datetime.utcnow() past = now - relativedelta(days=1) from_date = Period.get_from_date(PERIOD.DAILY, now) assert from_date == past def test_get_from_date_with_period(session): now = datetime.utcnow() past = now - relativedelta(days=1) period = Period(name=PERIOD.DAILY) from_date = Period.get_from_date(period, now) assert from_date == past
mit
7,589,990,972,609,637,000
27.615385
69
0.715054
false
3.43649
false
false
false
nigelb/Static-UPnP
examples/Chromecast/StaticUPnP_StaticServices.py
1
3345
# static_upnp responds to upnp search requests with statically configures responses. # Copyright (C) 2016 NigelB # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import socket from dnslib import DNSQuestion, QTYPE from static_upnp.chromecast_helpers import get_chromecast_uuid, get_date, get_chromecast_mdns_response from static_upnp.chromecast_helpers import get_service_descriptor, get_chromecast_friendly_name from static_upnp.mDNS import StaticMDNDService from static_upnp.static import StaticService OK = """HTTP/1.1 200 OK CACHE-CONTROL: max-age={max_age} DATE: {date} EXT: LOCATION: http://{ip}:{port}/ssdp/device-desc.xml OPT: "http://schemas.upnp.org/upnp/1/0/"; ns=01 01-NLS: 161d2e68-1dd2-11b2-9fd5-f9d9dc2ad10b SERVER: Linux/3.8.13+, UPnP/1.0, Portable SDK for UPnP devices/1.6.18 X-User-Agent: redsonic ST: {st} USN: {usn} BOOTID.UPNP.ORG: 4 CONFIGID.UPNP.ORG: 2 """ NOTIFY = """NOTIFY * HTTP/1.1 HOST: 239.255.255.250:1900 CACHE-CONTROL: max-age=1800 LOCATION: http://{ip}:{port}/ssdp/device-desc.xml NT: {st} NTS: {nts} OPT: "http://schemas.upnp.org/upnp/1/0/"; ns=01 01-NLS: 161d2e68-1dd2-11b2-9fd5-f9d9dc2ad10b SERVER: Linux/3.8.13+, UPnP/1.0, Portable SDK for UPnP devices/1.6.18 X-User-Agent: redsonic USN: {uuid} """ chromecast_ip = socket.gethostbyname_ex("Chromecast")[2][0] chromecast_port = 8008 chromecast_service_descriptor = get_service_descriptor(chromecast_ip, chromecast_port) chromecast_uuid = get_chromecast_uuid(chromecast_service_descriptor) chromecast_friendly_name = get_chromecast_friendly_name(chromecast_service_descriptor) chromecast_bs = "XXXXXXXXXXXX" chromecast_cd = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" services = [ StaticService({ "ip": chromecast_ip, "port": chromecast_port, "uuid": chromecast_uuid, "max_age": "1800", "date": get_date }, 1024, OK=OK, NOTIFY=NOTIFY, services=[ { "st": "upnp:rootdevice", "usn": "uuid:{uuid}::{st}" }, { "st": "uuid:{uuid}", "usn": "uuid:{uuid}" }, { "st": "urn:dial-multiscreen-org:device:dial:1", "usn": "uuid:{uuid}::{st}" }, { "st": "urn:dial-multiscreen-org:service:dial:1", "usn": "uuid:{uuid}::{st}" }, ]) ] mdns_services=[StaticMDNDService( response_generator=lambda query: get_chromecast_mdns_response(query, chromecast_ip, chromecast_uuid, chromecast_friendly_name, chromecast_bs, chromecast_cd), dns_question=DNSQuestion(qname="_googlecast._tcp.local", qtype=QTYPE.PTR, qclass=32769) )]
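
# Illustrative expansion of the OK template above (a sketch: the real
# substitution happens inside static_upnp's StaticService, and the date
# here is a placeholder, not live device data):
example_ok = OK.format(
    max_age="1800",
    date="Sat, 01 Jan 2022 00:00:00 GMT",
    ip=chromecast_ip,
    port=chromecast_port,
    st="upnp:rootdevice",
    usn="uuid:{}::upnp:rootdevice".format(chromecast_uuid))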
gpl-2.0
871,185,580,901,961,200
31.794118
161
0.676233
false
3.117428
false
false
false
ponty/MyElectronicProjects
pavement.py
1
1718
from easyprocess import Proc from paver.easy import * import paver.doctools import paver.virtual import paver.misctasks from paved import * from paved.dist import * from paved.util import * from paved.docs import * from paved.pycheck import * from paved.pkg import * options( sphinx=Bunch( docroot='docs', builddir="_build", ), # pdf=Bunch( # builddir='_build', # builder='latex', # ), ) options.paved.clean.rmdirs += ['.tox', 'dist', 'build', ] options.paved.clean.patterns += ['*.pickle', '*.doctree', '*.gz', 'nosetests.xml', 'sloccount.sc', '*.pdf', '*.tex', '*_sch_*.png', '*_brd_*.png', '*.b#*', '*.s#*', # eagle #'*.pro', '*.hex', '*.zip', 'distribute_setup.py', '*.bak', # kicad '$savepcb.brd', '*.erc', '*.000', ] options.paved.dist.manifest.include.remove('distribute_setup.py') options.paved.dist.manifest.include.remove('paver-minilib.zip') @task @needs( # 'clean', 'cog', 'html', 'pdf', ) def alltest(): 'all tasks to check' pass
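
# Illustrative invocation (a sketch; assumes paver plus the paved task
# libraries imported above are installed):
#
#     $ paver alltest   # runs the needed tasks in order: cog, html, pdf
#     $ paver clean     # applies the rmdirs/patterns configured above
#                       # (clean itself is provided by the paved library)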
bsd-2-clause
2,015,404,359,761,255,000
25.430769
65
0.360885
false
4.569149
false
false
false
GoogleCloudPlatform/python-docs-samples
appengine/standard/endpoints-frameworks-v2/quickstart/main_test.py
1
1894
# Copyright 2016 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from endpoints import message_types import mock import main def test_list_greetings(testbed): api = main.GreetingApi() response = api.list_greetings(message_types.VoidMessage()) assert len(response.items) == 2 def test_get_greeting(testbed): api = main.GreetingApi() request = main.GreetingApi.get_greeting.remote.request_type(id=1) response = api.get_greeting(request) assert response.message == 'goodbye world!' def test_multiply_greeting(testbed): api = main.GreetingApi() request = main.GreetingApi.multiply_greeting.remote.request_type( times=4, message='help I\'m trapped in a test case.') response = api.multiply_greeting(request) assert response.message == 'help I\'m trapped in a test case.' * 4 def test_authed_greet(testbed): api = main.AuthedGreetingApi() with mock.patch('main.endpoints.get_current_user') as user_mock: user_mock.return_value = None response = api.greet(message_types.VoidMessage()) assert response.message == 'Hello, Anonymous' user_mock.return_value = mock.Mock() user_mock.return_value.email.return_value = '[email protected]' response = api.greet(message_types.VoidMessage()) assert response.message == 'Hello, [email protected]'
apache-2.0
-4,486,417,427,585,140,700
34.074074
74
0.712777
false
3.663443
true
false
false
OSU-CS-325/Project_Two_Coin_Change
run-files/analysisQ7.py
1
2957
import sys import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import random import datetime # Import the three change making algorithms sys.path.insert(0, "../divide-conquer/") sys.path.insert(0, "../dynamic-programming") sys.path.insert(0, "../greedy") from changeslow import changeslow from changegreedy import changegreedy from changedp import changedp ### QUESTION 7 ### def Q7(slow, minChange, maxChange): lenV = [] runtimeGreedy = [] runtimeDP = [] runtimeSlow = [] numExp = 10 maxRange = 1000 if (slow): maxRange = 10 # some much smaller number for i in range(1, maxRange): # V can be of length 1 to (maxRange - 1) print "\n------ running V length=" + str(i) + "------" lenV.append(i) #print "lenV:", lenV runtimeGreedy.append(0) runtimeDP.append(0) runtimeSlow.append(0) for j in range(numExp): # run numExp experiments for this length of V print "\n ---- running experiment=" + str(j + 1) + " ----" coinArray = [] for k in range(i): # generate V of size i [1, rand, ..., rand, max=1 + 5*(maxRange-2)] if (k == 0): coinArray.append(1) else: randFrom = coinArray[len(coinArray) - 1] + 1 randTo = coinArray[len(coinArray) - 1] + 5 coinArray.append(random.randint(randFrom, randTo)) change = random.randint(minChange, maxChange) #print " coinArray:", coinArray #print " change:", change print " running greedy..." start = datetime.datetime.now() _, _ = changegreedy(coinArray, change) end = datetime.datetime.now() delta = end - start delta = int(delta.total_seconds() * 1000000) print " " + str(delta) runtimeGreedy[i - 1] += delta print " running DP..." start = datetime.datetime.now() _, _ = changedp(coinArray, change) end = datetime.datetime.now() delta = end - start delta = int(delta.total_seconds() * 1000000) print " " + str(delta) runtimeDP[i - 1] += delta if (slow): print " running slow..." start = datetime.datetime.now() _, _ = changeslow(coinArray, change) end = datetime.datetime.now() delta = end - start delta = int(delta.total_seconds() * 1000000) print " " + str(delta) runtimeSlow[i - 1] += delta runtimeGreedy[i - 1] /= numExp runtimeDP[i - 1] /= numExp if (slow): runtimeSlow[i - 1] /= numExp plt.figure(21) plt.plot(lenV, runtimeGreedy, 'b-', linewidth=2.0, label='Greedy') plt.plot(lenV, runtimeDP, 'r--', linewidth=2.0, label='DP') if (slow): plt.plot(lenV, runtimeSlow, 'g-.', linewidth=2.0, label='Slow') plt.legend(loc='upper left') plt.title('Runtime vs len(V[]) for randomized V[] and A') plt.ylabel('Avg. Runtime (10^-6 sec)') plt.xlabel('len(V[])') plt.grid(True) if (slow): plt.savefig('img/Q7slow_runtime.png', bbox_inches='tight') else: plt.savefig('img/Q7_runtime.png', bbox_inches='tight') def main(): Q7(False, 100, 100) #Q7(True) if __name__ == "__main__": main()
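
# Illustrative trace of the V[] construction above (a sketch; actual values
# vary per run because randint is unseeded).  For i == 4 the inner loop
# always starts at 1 and each later denomination exceeds the previous one
# by 1..5, e.g.:
#
#     coinArray == [1, 4, 7, 11]
#
# so V is strictly increasing and always contains 1, which guarantees every
# change amount drawn from [minChange, maxChange] is representable by all
# three algorithms.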
mit
992,061,664,401,384,600
26.37963
89
0.631721
false
2.882066
false
false
false
mikedh/trimesh
trimesh/creation.py
1
40606
""" creation.py -------------- Create meshes from primitives, or with operations. """ from .base import Trimesh from .constants import log, tol from .geometry import faces_to_edges, align_vectors, plane_transform from . import util from . import grouping from . import triangles from . import transformations as tf import numpy as np import collections try: # shapely is a soft dependency from shapely.geometry import Polygon from shapely.wkb import loads as load_wkb except BaseException as E: # shapely will sometimes raise OSErrors # on import rather than just ImportError from . import exceptions # re-raise the exception when someone tries # to use the module that they don't have Polygon = exceptions.closure(E) load_wkb = exceptions.closure(E) def revolve(linestring, angle=None, sections=None, transform=None, **kwargs): """ Revolve a 2D line string around the 2D Y axis, with a result with the 2D Y axis pointing along the 3D Z axis. This function is intended to handle the complexity of indexing and is intended to be used to create all radially symmetric primitives, eventually including cylinders, annular cylinders, capsules, cones, and UV spheres. Note that if your linestring is closed, it needs to be counterclockwise if you would like face winding and normals facing outwards. Parameters ------------- linestring : (n, 2) float Lines in 2D which will be revolved angle : None or float Angle in radians to revolve curve by sections : None or int Number of sections result should have If not specified default is 32 per revolution transform : None or (4, 4) float Transform to apply to mesh after construction **kwargs : dict Passed to Trimesh constructor Returns -------------- revolved : Trimesh Mesh representing revolved result """ linestring = np.asanyarray(linestring, dtype=np.float64) # linestring must be ordered 2D points if len(linestring.shape) != 2 or linestring.shape[1] != 2: raise ValueError('linestring must be 2D!') if angle is None: # default to closing the revolution angle = np.pi * 2 closed = True else: # check passed angle value closed = angle >= ((np.pi * 2) - 1e-8) if sections is None: # default to 32 sections for a full revolution sections = int(angle / (np.pi * 2) * 32) # change to face count sections += 1 # create equally spaced angles theta = np.linspace(0, angle, sections) # 2D points around the revolution points = np.column_stack((np.cos(theta), np.sin(theta))) # how many points per slice per = len(linestring) # use the 2D X component as radius radius = linestring[:, 0] # use the 2D Y component as the height along revolution height = linestring[:, 1] # a lot of tiling to get our 3D vertices vertices = np.column_stack(( np.tile(points, (1, per)).reshape((-1, 2)) * np.tile(radius, len(points)).reshape((-1, 1)), np.tile(height, len(points)))) if closed: # should be a duplicate set of vertices assert np.allclose(vertices[:per], vertices[-per:]) # chop off duplicate vertices vertices = vertices[:-per] if transform is not None: # apply transform to vertices vertices = tf.transform_points(vertices, transform) # how many slices of the pie slices = len(theta) - 1 # start with a quad for every segment # this is a superset which will then be reduced quad = np.array([0, per, 1, 1, per, per + 1]) # stack the faces for a single slice of the revolution single = np.tile(quad, per).reshape((-1, 3)) # `per` is basically the stride of the vertices single += np.tile(np.arange(per), (2, 1)).T.reshape((-1, 1)) # remove any zero-area triangle # this covers many cases without having to think too much 
single = single[triangles.area(vertices[single]) > tol.merge] # how much to offset each slice # note arange multiplied by vertex stride # but tiled by the number of faces we actually have offset = np.tile(np.arange(slices) * per, (len(single), 1)).T.reshape((-1, 1)) # stack a single slice into N slices stacked = np.tile(single.ravel(), slices).reshape((-1, 3)) if tol.strict: # make sure we didn't screw up stacking operation assert np.allclose(stacked.reshape((-1, single.shape[0], 3)) - single, 0) # offset stacked and wrap vertices faces = (stacked + offset) % len(vertices) # create the mesh from our vertices and faces mesh = Trimesh(vertices=vertices, faces=faces, **kwargs) # strict checks run only in unit tests if (tol.strict and np.allclose(radius[[0, -1]], 0.0) or np.allclose(linestring[0], linestring[-1])): # if revolved curve starts and ends with zero radius # it should really be a valid volume, unless the sign # reversed on the input linestring assert mesh.is_volume return mesh def extrude_polygon(polygon, height, transform=None, triangle_args=None, **kwargs): """ Extrude a 2D shapely polygon into a 3D mesh Parameters ---------- polygon : shapely.geometry.Polygon 2D geometry to extrude height : float Distance to extrude polygon along Z triangle_args : str or None Passed to triangle **kwargs: passed to Trimesh Returns ---------- mesh : trimesh.Trimesh Resulting extrusion as watertight body """ # create a triangulation from the polygon vertices, faces = triangulate_polygon( polygon, triangle_args=triangle_args, **kwargs) # extrude that triangulation along Z mesh = extrude_triangulation(vertices=vertices, faces=faces, height=height, transform=transform, **kwargs) return mesh def sweep_polygon(polygon, path, angles=None, **kwargs): """ Extrude a 2D shapely polygon into a 3D mesh along an arbitrary 3D path. Doesn't handle sharp curvature well. 
Parameters ---------- polygon : shapely.geometry.Polygon Profile to sweep along path path : (n, 3) float A path in 3D angles : (n,) float Optional rotation angle relative to prior vertex at each vertex Returns ------- mesh : trimesh.Trimesh Geometry of result """ path = np.asanyarray(path, dtype=np.float64) if not util.is_shape(path, (-1, 3)): raise ValueError('Path must be (n, 3)!') # Extract 2D vertices and triangulation verts_2d = np.array(polygon.exterior)[:-1] base_verts_2d, faces_2d = triangulate_polygon(polygon, **kwargs) n = len(verts_2d) # Create basis for first planar polygon cap x, y, z = util.generate_basis(path[0] - path[1]) tf_mat = np.ones((4, 4)) tf_mat[:3, :3] = np.c_[x, y, z] tf_mat[:3, 3] = path[0] # Compute 3D locations of those vertices verts_3d = np.c_[verts_2d, np.zeros(n)] verts_3d = tf.transform_points(verts_3d, tf_mat) base_verts_3d = np.c_[base_verts_2d, np.zeros(len(base_verts_2d))] base_verts_3d = tf.transform_points(base_verts_3d, tf_mat) # keep matching sequence of vertices and 0- indexed faces vertices = [base_verts_3d] faces = [faces_2d] # Compute plane normals for each turn -- # each turn induces a plane halfway between the two vectors v1s = util.unitize(path[1:-1] - path[:-2]) v2s = util.unitize(path[1:-1] - path[2:]) norms = np.cross(np.cross(v1s, v2s), v1s + v2s) norms[(norms == 0.0).all(1)] = v1s[(norms == 0.0).all(1)] norms = util.unitize(norms) final_v1 = util.unitize(path[-1] - path[-2]) norms = np.vstack((norms, final_v1)) v1s = np.vstack((v1s, final_v1)) # Create all side walls by projecting the 3d vertices into each plane # in succession for i in range(len(norms)): verts_3d_prev = verts_3d # Rotate if needed if angles is not None: tf_mat = tf.rotation_matrix(angles[i], norms[i], path[i]) verts_3d_prev = tf.transform_points(verts_3d_prev, tf_mat) # Project vertices onto plane in 3D ds = np.einsum('ij,j->i', (path[i + 1] - verts_3d_prev), norms[i]) ds = ds / np.dot(v1s[i], norms[i]) verts_3d_new = np.einsum('i,j->ij', ds, v1s[i]) + verts_3d_prev # Add to face and vertex lists new_faces = [[i + n, (i + 1) % n, i] for i in range(n)] new_faces.extend([[(i - 1) % n + n, i + n, i] for i in range(n)]) # save faces and vertices into a sequence faces.append(np.array(new_faces)) vertices.append(np.vstack((verts_3d, verts_3d_new))) verts_3d = verts_3d_new # do the main stack operation from a sequence to (n,3) arrays # doing one vstack provides a substantial speedup by # avoiding a bunch of temporary allocations vertices, faces = util.append_faces(vertices, faces) # Create final cap x, y, z = util.generate_basis(path[-1] - path[-2]) vecs = verts_3d - path[-1] coords = np.c_[np.einsum('ij,j->i', vecs, x), np.einsum('ij,j->i', vecs, y)] base_verts_2d, faces_2d = triangulate_polygon(Polygon(coords)) base_verts_3d = (np.einsum('i,j->ij', base_verts_2d[:, 0], x) + np.einsum('i,j->ij', base_verts_2d[:, 1], y)) + path[-1] faces = np.vstack((faces, faces_2d + len(vertices))) vertices = np.vstack((vertices, base_verts_3d)) return Trimesh(vertices, faces) def extrude_triangulation(vertices, faces, height, transform=None, **kwargs): """ Extrude a 2D triangulation into a watertight mesh. 
Parameters ---------- vertices : (n, 2) float 2D vertices faces : (m, 3) int Triangle indexes of vertices height : float Distance to extrude triangulation **kwargs : dict Passed to Trimesh constructor Returns --------- mesh : trimesh.Trimesh Mesh created from extrusion """ vertices = np.asanyarray(vertices, dtype=np.float64) height = float(height) faces = np.asanyarray(faces, dtype=np.int64) if not util.is_shape(vertices, (-1, 2)): raise ValueError('Vertices must be (n,2)') if not util.is_shape(faces, (-1, 3)): raise ValueError('Faces must be (n,3)') if np.abs(height) < tol.merge: raise ValueError('Height must be nonzero!') # make sure triangulation winding is pointing up normal_test = triangles.normals( [util.stack_3D(vertices[faces[0]])])[0] normal_dot = np.dot(normal_test, [0.0, 0.0, np.sign(height)])[0] # make sure the triangulation is aligned with the sign of # the height we've been passed if normal_dot < 0.0: faces = np.fliplr(faces) # stack the (n,3) faces into (3*n, 2) edges edges = faces_to_edges(faces) edges_sorted = np.sort(edges, axis=1) # edges which only occur once are on the boundary of the polygon # since the triangulation may have subdivided the boundary of the # shapely polygon, we need to find it again edges_unique = grouping.group_rows( edges_sorted, require_count=1) # (n, 2, 2) set of line segments (positions, not references) boundary = vertices[edges[edges_unique]] # we are creating two vertical triangles for every 2D line segment # on the boundary of the 2D triangulation vertical = np.tile(boundary.reshape((-1, 2)), 2).reshape((-1, 2)) vertical = np.column_stack((vertical, np.tile([0, height, 0, height], len(boundary)))) vertical_faces = np.tile([3, 1, 2, 2, 1, 0], (len(boundary), 1)) vertical_faces += np.arange(len(boundary)).reshape((-1, 1)) * 4 vertical_faces = vertical_faces.reshape((-1, 3)) # stack the (n,2) vertices with zeros to make them (n, 3) vertices_3D = util.stack_3D(vertices) # a sequence of zero- indexed faces, which will then be appended # with offsets to create the final mesh faces_seq = [faces[:, ::-1], faces.copy(), vertical_faces] vertices_seq = [vertices_3D, vertices_3D.copy() + [0.0, 0, height], vertical] # append sequences into flat nicely indexed arrays vertices, faces = util.append_faces(vertices_seq, faces_seq) if transform is not None: # apply transform here to avoid later bookkeeping vertices = tf.transform_points( vertices, transform) # if the transform flips the winding flip faces back # so that the normals will be facing outwards if tf.flips_winding(transform): # fliplr makes arrays non-contiguous faces = np.ascontiguousarray(np.fliplr(faces)) # create mesh object with passed keywords mesh = Trimesh(vertices=vertices, faces=faces, **kwargs) # only check in strict mode (unit tests) if tol.strict: assert mesh.volume > 0.0 return mesh def triangulate_polygon(polygon, triangle_args=None, engine=None, **kwargs): """ Given a shapely polygon create a triangulation using a python interface to `triangle.c` or mapbox-earcut. 
> pip install triangle > pip install mapbox_earcut Parameters --------- polygon : Shapely.geometry.Polygon Polygon object to be triangulated triangle_args : str or None Passed to triangle.triangulate i.e: 'p', 'pq30' engine : None or str Any value other than 'earcut' will use `triangle` Returns -------------- vertices : (n, 2) float Points in space faces : (n, 3) int Index of vertices that make up triangles """ if engine == 'earcut': from mapbox_earcut import triangulate_float64 # get vertices as sequence where exterior is the first value vertices = [np.array(polygon.exterior)] vertices.extend(np.array(i) for i in polygon.interiors) # record the index from the length of each vertex array rings = np.cumsum([len(v) for v in vertices]) # stack vertices into (n, 2) float array vertices = np.vstack(vertices) # run triangulation faces = triangulate_float64(vertices, rings).reshape( (-1, 3)).astype(np.int64).reshape((-1, 3)) return vertices, faces # do the import here for soft requirement from triangle import triangulate # set default triangulation arguments if not specified if triangle_args is None: triangle_args = 'p' # turn the polygon in to vertices, segments, and hole points arg = _polygon_to_kwargs(polygon) # run the triangulation result = triangulate(arg, triangle_args) return result['vertices'], result['triangles'] def _polygon_to_kwargs(polygon): """ Given a shapely polygon generate the data to pass to the triangle mesh generator Parameters --------- polygon : Shapely.geometry.Polygon Input geometry Returns -------- result : dict Has keys: vertices, segments, holes """ if not polygon.is_valid: raise ValueError('invalid shapely polygon passed!') def round_trip(start, length): """ Given a start index and length, create a series of (n, 2) edges which create a closed traversal. Examples --------- start, length = 0, 3 returns: [(0,1), (1,2), (2,0)] """ tiled = np.tile(np.arange(start, start + length).reshape((-1, 1)), 2) tiled = tiled.reshape(-1)[1:-1].reshape((-1, 2)) tiled = np.vstack((tiled, [tiled[-1][-1], tiled[0][0]])) return tiled def add_boundary(boundary, start): # coords is an (n, 2) ordered list of points on the polygon boundary # the first and last points are the same, and there are no # guarantees on points not being duplicated (which will # later cause meshpy/triangle to shit a brick) coords = np.array(boundary.coords) # find indices points which occur only once, and sort them # to maintain order unique = np.sort(grouping.unique_rows(coords)[0]) cleaned = coords[unique] vertices.append(cleaned) facets.append(round_trip(start, len(cleaned))) # holes require points inside the region of the hole, which we find # by creating a polygon from the cleaned boundary region, and then # using a representative point. You could do things like take the mean of # the points, but this is more robust (to things like concavity), if # slower. 
        test = Polygon(cleaned)
        holes.append(np.array(test.representative_point().coords)[0])

        return len(cleaned)

    # sequence of (n,2) points in space
    vertices = collections.deque()
    # sequence of (n,2) indices of vertices
    facets = collections.deque()
    # list of (2) vertices in interior of hole regions
    holes = collections.deque()

    start = add_boundary(polygon.exterior, 0)
    for interior in polygon.interiors:
        try:
            start += add_boundary(interior, start)
        except BaseException:
            log.warning('invalid interior, continuing')
            continue

    # create clean (n,2) float array of vertices
    # and (m, 2) int array of facets
    # by stacking the sequence of (p,2) arrays
    vertices = np.vstack(vertices)
    facets = np.vstack(facets).tolist()

    # shapely polygons can include a Z component
    # strip it out for the triangulation
    if vertices.shape[1] == 3:
        vertices = vertices[:, :2]

    result = {'vertices': vertices,
              'segments': facets}

    # holes in meshpy lingo are a (h, 2) list of (x,y) points
    # which are inside the region of the hole
    # we added a hole for the exterior, which we slice away here
    holes = np.array(holes)[1:]
    if len(holes) > 0:
        result['holes'] = holes

    return result


def box(extents=None, transform=None, **kwargs):
    """
    Return a cuboid.

    Parameters
    ------------
    extents : float, or (3,) float
      Edge lengths
    transform: (4, 4) float
      Transformation matrix
    **kwargs: passed to Trimesh to create box

    Returns
    ------------
    geometry : trimesh.Trimesh
      Mesh of a cuboid
    """
    # vertices of the cube
    vertices = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1,
                         1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1],
                        order='C',
                        dtype=np.float64).reshape((-1, 3))
    vertices -= 0.5

    # resize cube based on passed extents
    if extents is not None:
        extents = np.asanyarray(extents, dtype=np.float64)
        if extents.shape != (3,):
            raise ValueError('Extents must be (3,)!')
        vertices *= extents
    else:
        extents = np.asarray((1.0, 1.0, 1.0), dtype=np.float64)

    # hardcoded face indices
    faces = [1, 3, 0, 4, 1, 0, 0, 3, 2, 2, 4, 0, 1, 7, 3, 5, 1, 4,
             5, 7, 1, 3, 7, 2, 6, 4, 2, 2, 7, 6, 6, 5, 4, 7, 5, 6]
    faces = np.array(faces, order='C', dtype=np.int64).reshape((-1, 3))

    face_normals = [-1, 0, 0, 0, -1, 0, -1, 0, 0, 0, 0, -1, 0, 0, 1,
                    0, -1, 0, 0, 0, 1, 0, 1, 0, 0, 0, -1, 0, 1, 0,
                    1, 0, 0, 1, 0, 0]
    face_normals = np.asanyarray(face_normals,
                                 order='C',
                                 dtype=np.float64).reshape(-1, 3)

    if 'metadata' not in kwargs:
        kwargs['metadata'] = dict()
    kwargs['metadata'].update(
        {'shape': 'box',
         'extents': extents})

    box = Trimesh(vertices=vertices,
                  faces=faces,
                  face_normals=face_normals,
                  process=False,
                  **kwargs)

    # do the transform here to preserve face normals
    if transform is not None:
        box.apply_transform(transform)

    return box


def icosahedron():
    """
    Create an icosahedron, a 20 faced polyhedron.

    Returns
    -------------
    ico : trimesh.Trimesh
      Icosahedron centered at the origin.
    """
    t = (1.0 + 5.0**.5) / 2.0
    vertices = [-1, t, 0, 1, t, 0, -1, -t, 0, 1, -t, 0,
                0, -1, t, 0, 1, t, 0, -1, -t, 0, 1, -t,
                t, 0, -1, t, 0, 1, -t, 0, -1, -t, 0, 1]
    faces = [0, 11, 5, 0, 5, 1, 0, 1, 7, 0, 7, 10, 0, 10, 11,
             1, 5, 9, 5, 11, 4, 11, 10, 2, 10, 7, 6, 7, 1, 8,
             3, 9, 4, 3, 4, 2, 3, 2, 6, 3, 6, 8, 3, 8, 9,
             4, 9, 5, 2, 4, 11, 6, 2, 10, 8, 6, 7, 9, 8, 1]
    # scale vertices so each vertex radius is 1.0
    vertices = np.reshape(vertices, (-1, 3)) / np.sqrt(2.0 + t)
    faces = np.reshape(faces, (-1, 3))
    mesh = Trimesh(vertices=vertices,
                   faces=faces,
                   process=False)
    return mesh


def icosphere(subdivisions=3, radius=1.0, color=None):
    """
    Create an icosphere centered at the origin.

    Parameters
    ----------
    subdivisions : int
      How many times to subdivide the mesh.
Note that the number of faces will grow as function of 4 ** subdivisions, so you probably want to keep this under ~5 radius : float Desired radius of sphere color: (3,) float or uint8 Desired color of sphere Returns --------- ico : trimesh.Trimesh Meshed sphere """ def refine_spherical(): vectors = ico.vertices scalar = (vectors ** 2).sum(axis=1)**.5 unit = vectors / scalar.reshape((-1, 1)) offset = radius - scalar ico.vertices += unit * offset.reshape((-1, 1)) ico = icosahedron() ico._validate = False for j in range(subdivisions): ico = ico.subdivide() refine_spherical() ico._validate = True if color is not None: ico.visual.face_colors = color ico.metadata.update({'shape': 'sphere', 'radius': radius}) return ico def uv_sphere(radius=1.0, count=[32, 32], theta=None, phi=None): """ Create a UV sphere (latitude + longitude) centered at the origin. Roughly one order of magnitude faster than an icosphere but slightly uglier. Parameters ---------- radius : float Radius of sphere count : (2,) int Number of latitude and longitude lines theta : (n,) float Optional theta angles in radians phi : (n,) float Optional phi angles in radians Returns ---------- mesh : trimesh.Trimesh Mesh of UV sphere with specified parameters """ count = np.array(count, dtype=np.int64) count += np.mod(count, 2) count[1] *= 2 # generate vertices on a sphere using spherical coordinates if theta is None: theta = np.linspace(0, np.pi, count[0]) if phi is None: phi = np.linspace(0, np.pi * 2, count[1])[:-1] spherical = np.dstack((np.tile(phi, (len(theta), 1)).T, np.tile(theta, (len(phi), 1)))).reshape((-1, 2)) vertices = util.spherical_to_vector(spherical) * radius # generate faces by creating a bunch of pie wedges c = len(theta) # a quad face as two triangles pairs = np.array([[c, 0, 1], [c + 1, c, 1]]) # increment both triangles in each quad face by the same offset incrementor = np.tile(np.arange(c - 1), (2, 1)).T.reshape((-1, 1)) # create the faces for a single pie wedge of the sphere strip = np.tile(pairs, (c - 1, 1)) strip += incrementor # the first and last faces will be degenerate since the first # and last vertex are identical in the two rows strip = strip[1:-1] # tile pie wedges into a sphere faces = np.vstack([strip + (i * c) for i in range(len(phi))]) # poles are repeated in every strip, so a mask to merge them mask = np.arange(len(vertices)) # the top pole are all the same vertex mask[0::c] = 0 # the bottom pole are all the same vertex mask[c - 1::c] = c - 1 # faces masked to remove the duplicated pole vertices # and mod to wrap to fill in the last pie wedge faces = mask[np.mod(faces, len(vertices))] # we save a lot of time by not processing again # since we did some bookkeeping mesh is watertight mesh = Trimesh(vertices=vertices, faces=faces, process=False, metadata={'shape': 'sphere', 'radius': radius}) return mesh def capsule(height=1.0, radius=1.0, count=[32, 32]): """ Create a mesh of a capsule, or a cylinder with hemispheric ends. 
Parameters
    ----------
    height : float
      Center to center distance of two spheres
    radius : float
      Radius of the cylinder and hemispheres
    count : (2,) int
      Number of sections on latitude and longitude

    Returns
    ----------
    capsule : trimesh.Trimesh
      Capsule geometry with:
        - cylinder axis is along Z
        - one hemisphere is centered at the origin
        - other hemisphere is centered along the Z axis at height
    """
    height = float(height)
    radius = float(radius)
    count = np.array(count, dtype=np.int64)
    count += np.mod(count, 2)

    # create a theta where there is a double band around the equator
    # so that we can offset the top and bottom of a sphere to
    # get a nicely meshed capsule
    theta = np.linspace(0, np.pi, count[0])
    center = np.clip(np.arctan(tol.merge / radius),
                     tol.merge, np.inf)
    offset = np.array([-center, center]) + (np.pi / 2)
    theta = np.insert(theta,
                      int(len(theta) / 2),
                      offset)

    capsule = uv_sphere(radius=radius,
                        count=count,
                        theta=theta)

    top = capsule.vertices[:, 2] > tol.zero
    capsule.vertices[top] += [0, 0, height]
    capsule.metadata.update({'shape': 'capsule',
                             'height': height,
                             'radius': radius})
    return capsule


def cone(radius, height, sections=None, transform=None, **kwargs):
    """
    Create a mesh of a cone along Z centered at the origin.

    Parameters
    ----------
    radius : float
      The radius of the cone
    height : float
      The height of the cone
    sections : int or None
      How many pie wedges per revolution
    transform : (4, 4) float or None
      Transform to apply after creation
    **kwargs : dict
      Passed to Trimesh constructor

    Returns
    ----------
    cone : trimesh.Trimesh
      Resulting mesh of a cone
    """
    # create the 2D outline of a cone
    linestring = [[0, 0],
                  [radius, 0],
                  [0, height]]

    if 'metadata' not in kwargs:
        kwargs['metadata'] = dict()
    kwargs['metadata'].update(
        {'shape': 'cone',
         'radius': radius,
         'height': height})
    # revolve the profile to create a cone
    cone = revolve(linestring=linestring,
                   sections=sections,
                   transform=transform,
                   **kwargs)

    return cone


def cylinder(radius, height=None, sections=None,
             segment=None, transform=None, **kwargs):
    """
    Create a mesh of a cylinder along Z centered at the origin.

    Parameters
    ----------
    radius : float
      The radius of the cylinder
    height : float or None
      The height of the cylinder
    sections : int or None
      How many pie wedges should the cylinder have
    segment : (2, 3) float
      Endpoints of axis, overrides transform and height
    transform : (4, 4) float
      Transform to apply
    **kwargs : passed to Trimesh to create cylinder

    Returns
    ----------
    cylinder : trimesh.Trimesh
      Resulting mesh of a cylinder
    """
    if segment is not None:
        # override transform and height with the segment
        transform, height = _segment_to_cylinder(segment=segment)

    if height is None:
        raise ValueError('either `height` or `segment` must be passed!')

    half = abs(float(height)) / 2.0
    # create a profile to revolve
    linestring = [[0, -half],
                  [radius, -half],
                  [radius, half],
                  [0, half]]
    if 'metadata' not in kwargs:
        kwargs['metadata'] = dict()
    kwargs['metadata'].update(
        {'shape': 'cylinder',
         'height': height,
         'radius': radius})
    # generate cylinder through simple revolution
    return revolve(linestring=linestring,
                   sections=sections,
                   transform=transform,
                   **kwargs)


def annulus(r_min, r_max, height=None, sections=None,
            transform=None, segment=None, **kwargs):
    """
    Create a mesh of an annular cylinder along Z centered at the origin.
Parameters ---------- r_min : float The inner radius of the annular cylinder r_max : float The outer radius of the annular cylinder height : float The height of the annular cylinder sections : int or None How many pie wedges should the annular cylinder have transform : (4, 4) float or None Transform to apply to move result from the origin segment : None or (2, 3) float Override transform and height with a line segment **kwargs: passed to Trimesh to create annulus Returns ---------- annulus : trimesh.Trimesh Mesh of annular cylinder """ if segment is not None: # override transform and height with the segment if passed transform, height = _segment_to_cylinder(segment=segment) if height is None: raise ValueError('either `height` or `segment` must be passed!') r_min = abs(float(r_min)) # if center radius is zero this is a cylinder if r_min < tol.merge: return cylinder(radius=r_max, height=height, sections=sections, transform=transform) r_max = abs(float(r_max)) # we're going to center at XY plane so take half the height half = abs(float(height)) / 2.0 # create counter-clockwise rectangle linestring = [[r_min, -half], [r_max, -half], [r_max, half], [r_min, half], [r_min, -half]] if 'metadata' not in kwargs: kwargs['metadata'] = dict() kwargs['metadata'].update( {'shape': 'annulus', 'r_min': r_min, 'r_max': r_max, 'height': height}) # revolve the curve annulus = revolve(linestring=linestring, sections=sections, transform=transform, **kwargs) return annulus def _segment_to_cylinder(segment): """ Convert a line segment to a transform and height for a cylinder or cylinder-like primitive. Parameters ----------- segment : (2, 3) float 3D line segment in space Returns ----------- transform : (4, 4) float Matrix to move a Z-extruded origin cylinder to segment height : float The height of the cylinder needed """ segment = np.asanyarray(segment, dtype=np.float64) if segment.shape != (2, 3): raise ValueError('segment must be 2 3D points!') vector = segment[1] - segment[0] # override height with segment length height = np.linalg.norm(vector) # point in middle of line midpoint = segment[0] + (vector * 0.5) # align Z with our desired direction rotation = align_vectors([0, 0, 1], vector) # translate to midpoint of segment translation = tf.translation_matrix(midpoint) # compound the rotation and translation transform = np.dot(translation, rotation) return transform, height def random_soup(face_count=100): """ Return random triangles as a Trimesh Parameters ----------- face_count : int Number of faces desired in mesh Returns ----------- soup : trimesh.Trimesh Geometry with face_count random faces """ vertices = np.random.random((face_count * 3, 3)) - 0.5 faces = np.arange(face_count * 3).reshape((-1, 3)) soup = Trimesh(vertices=vertices, faces=faces) return soup def axis(origin_size=0.04, transform=None, origin_color=None, axis_radius=None, axis_length=None): """ Return an XYZ axis marker as a Trimesh, which represents position and orientation. If you set the origin size the other parameters will be set relative to it. 
Parameters
    ----------
    transform : (4, 4) float
      Transformation matrix
    origin_size : float
      Radius of sphere that represents the origin
    origin_color : (3,) or (4,) float or uint8
      Color of the origin
    axis_radius : float
      Radius of cylinder that represents x, y, z axis
    axis_length : float
      Length of cylinder that represents x, y, z axis

    Returns
    -------
    marker : trimesh.Trimesh
      Mesh geometry of axis indicators
    """
    # the size of the ball representing the origin
    origin_size = float(origin_size)

    # set the transform, and use origin-relative
    # sizes for the other parameters if not specified
    if transform is None:
        transform = np.eye(4)
    if origin_color is None:
        origin_color = [255, 255, 255, 255]
    if axis_radius is None:
        axis_radius = origin_size / 5.0
    if axis_length is None:
        axis_length = origin_size * 10.0

    # generate a ball for the origin
    axis_origin = uv_sphere(radius=origin_size,
                            count=[10, 10])
    axis_origin.apply_transform(transform)

    # apply color to the origin ball
    axis_origin.visual.face_colors = origin_color

    # create the cylinder for the z-axis
    translation = tf.translation_matrix(
        [0, 0, axis_length / 2])
    z_axis = cylinder(
        radius=axis_radius,
        height=axis_length,
        transform=transform.dot(translation))
    # XYZ->RGB, Z is blue
    z_axis.visual.face_colors = [0, 0, 255]

    # create the cylinder for the y-axis
    translation = tf.translation_matrix(
        [0, 0, axis_length / 2])
    rotation = tf.rotation_matrix(np.radians(-90),
                                  [1, 0, 0])
    y_axis = cylinder(
        radius=axis_radius,
        height=axis_length,
        transform=transform.dot(rotation).dot(translation))
    # XYZ->RGB, Y is green
    y_axis.visual.face_colors = [0, 255, 0]

    # create the cylinder for the x-axis
    translation = tf.translation_matrix(
        [0, 0, axis_length / 2])
    rotation = tf.rotation_matrix(np.radians(90),
                                  [0, 1, 0])
    x_axis = cylinder(
        radius=axis_radius,
        height=axis_length,
        transform=transform.dot(rotation).dot(translation))
    # XYZ->RGB, X is red
    x_axis.visual.face_colors = [255, 0, 0]

    # append the sphere and three cylinders
    marker = util.concatenate([axis_origin,
                               x_axis,
                               y_axis,
                               z_axis])
    return marker


def camera_marker(camera, marker_height=0.4, origin_size=None):
    """
    Create a visual marker for a camera object, including an axis and FOV.
Parameters
    ---------------
    camera : trimesh.scene.Camera
      Camera object with FOV and transform defined
    marker_height : float
      How far along the camera Z should FOV indicators be
    origin_size : float
      Sphere radius of the origin (default: marker_height / 10.0)

    Returns
    ------------
    meshes : list
      Contains Trimesh and Path3D objects which can be visualized
    """
    # create sane origin size from marker height
    if origin_size is None:
        origin_size = marker_height / 10.0

    # append the visualizations to an array
    meshes = [axis(origin_size=origin_size)]

    try:
        # path is a soft dependency
        from .path.exchange.load import load_path
    except ImportError:
        # they probably don't have shapely installed
        log.warning('unable to create FOV visualization!',
                    exc_info=True)
        return meshes

    # calculate vertices from camera FOV angles
    x = marker_height * np.tan(np.deg2rad(camera.fov[0]) / 2.0)
    y = marker_height * np.tan(np.deg2rad(camera.fov[1]) / 2.0)
    z = marker_height

    # combine the points into the vertices of an FOV visualization
    points = np.array(
        [(0, 0, 0),
         (-x, -y, z),
         (x, -y, z),
         (x, y, z),
         (-x, y, z)],
        dtype=float)

    # create line segments for the FOV visualization
    # a segment from the origin to each bound of the FOV
    segments = np.column_stack(
        (np.zeros_like(points), points)).reshape(
        (-1, 3))

    # add a loop for the outside of the FOV then reshape
    # the whole thing into multiple line segments
    segments = np.vstack((segments,
                          points[[1, 2,
                                  2, 3,
                                  3, 4,
                                  4, 1]])).reshape((-1, 2, 3))

    # add a single Path3D object for all line segments
    meshes.append(load_path(segments))

    return meshes


def truncated_prisms(tris, origin=None, normal=None):
    """
    Return a mesh consisting of multiple watertight prisms below
    a list of triangles, truncated by a specified plane.

    Parameters
    -------------
    tris : (n, 3, 3) float
      Triangles in space
    origin : None or (3,) float
      Origin of truncation plane
    normal : None or (3,) float
      Unit normal vector of truncation plane

    Returns
    -----------
    mesh : trimesh.Trimesh
      Triangular mesh
    """
    if origin is None:
        transform = np.eye(4)
    else:
        transform = plane_transform(origin=origin, normal=normal)

    # transform the triangles to the specified plane
    transformed = tf.transform_points(
        tris.reshape((-1, 3)), transform).reshape((-1, 9))

    # stack triangles such that every other one is repeated
    vs = np.column_stack((transformed, transformed)).reshape((-1, 3, 3))
    # set the Z value of the second triangle to zero
    vs[1::2, :, 2] = 0
    # reshape triangles to a flat array of points and
    # transform back to the original frame
    vertices = tf.transform_points(
        vs.reshape((-1, 3)), matrix=np.linalg.inv(transform))

    # face indexes for a *single* truncated triangular prism
    f = np.array([[2, 1, 0],
                  [3, 4, 5],
                  [0, 1, 4],
                  [1, 2, 5],
                  [2, 0, 3],
                  [4, 3, 0],
                  [5, 4, 1],
                  [3, 5, 2]])

    # check the winding of each transformed triangle using
    # the Z component of its cross product
    cross = np.dot([0, 0, 1],
                   triangles.cross(transformed.reshape((-1, 3, 3))).T)

    # stack faces into one prism per triangle
    f_seq = np.tile(f, (len(transformed), 1)).reshape((-1, len(f), 3))
    # if the normal of the triangle was positive flip the winding
    f_seq[cross > 0] = np.fliplr(f)

    # offset stacked faces to create correct indices
    faces = (f_seq + (np.arange(len(f_seq)) * 6).reshape((-1, 1, 1))).reshape((-1, 3))

    # create a mesh from the data
    mesh = Trimesh(vertices=vertices, faces=faces, process=False)

    return mesh
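
# --- Editorial usage sketch, not part of the original module. A minimal,
# hedged demonstration of the primitives defined above; it assumes this
# file is trimesh's creation module with its usual module-level imports
# (np, Trimesh, util, tf, tol, etc.) available. Shapes and sizes are
# illustrative only.
if __name__ == '__main__':
    # build a few primitives and check they are closed manifolds
    demo_box = box(extents=[1.0, 2.0, 3.0])
    demo_sphere = icosphere(subdivisions=2, radius=0.5)
    demo_capsule = capsule(height=1.0, radius=0.25)
    for demo in (demo_box, demo_sphere, demo_capsule):
        assert demo.is_watertight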
mit
7,771,530,752,950,177,000
30.973228
86
0.580727
false
3.824261
false
false
false
stormvirux/vturra-cli
vturra/asys.py
1
1936
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# from scipy import stats
# import statsmodels.api as sm
# from numpy.random import randn
import matplotlib as mpl
# import seaborn as sns
# sns.set_color_palette("deep", desat=.6)
mpl.rc("figure", figsize=(8, 4))


def Compavg():
    data = Total()
    markMax = []
    markAvg = []
    N = 5
    ind = np.arange(N)
    width = 0.35
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # maximum and mean total marks for each of the five subjects
    markMax.extend((data["Total"].max(), data["Total.1"].max(),
                    data["Total.2"].max(), data["Total.3"].max(),
                    data["Total.4"].max()))
    markAvg.extend((data["Total"].mean(), data["Total.1"].mean(),
                    data["Total.2"].mean(), data["Total.3"].mean(),
                    data["Total.4"].mean()))
    rects1 = ax.bar(ind, markMax, width, color='black')
    rects2 = ax.bar(ind + width, markAvg, width, color='green')

    ax.set_xlim(-width, len(ind) + width)
    ax.set_ylim(0, 120)
    ax.set_ylabel('Marks')
    ax.set_title('Max, Mean and Your Marks')
    xTickMarks = ['Subject' + str(i) for i in range(1, 6)]
    ax.set_xticks(ind + width)
    xtickNames = ax.set_xticklabels(xTickMarks)
    plt.setp(xtickNames, rotation=10, fontsize=10)
    ax.legend((rects1[0], rects2[0]), ('Max', 'Mean'))
    plt.show()


def compSub():
    # max_data = np.r_[data["Total"]].max()
    # bins = np.linspace(0, max_data, max_data + 1)
    data = Total()
    plt.hist(data['Total'], linewidth=0, alpha=.7)
    plt.hist(data['Total.1'], linewidth=0, alpha=.7)
    plt.hist(data['Total.2'], linewidth=0, alpha=.7)
    plt.hist(data['Total.3'], linewidth=0, alpha=.7)
    plt.hist(data['Total.4'], linewidth=0, alpha=.7)
    plt.title("Total marks Histogram")
    plt.xlabel("Value")
    plt.ylabel("Frequency")
    plt.show()


def Total():
    data = pd.read_csv("output10cs.csv")
    df3 = data[['Total', 'Total.1', 'Total.2', 'Total.3',
                'Total.4', 'Total.5', 'Total.6', 'Total.7']]
    data["Main Total"] = df3.sum(axis=1)
    data = data.dropna()
    # reset_index returns a new frame; assign the result back,
    # otherwise the renumbered index is silently discarded
    data = data.reset_index(drop=True)
    return data

# compSub()
# Compavg()
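
# --- Editorial usage sketch, not in the original script. Running the
# module directly draws both visualizations; this assumes a CSV named
# "output10cs.csv" with columns Total ... Total.7 exists in the
# working directory.
if __name__ == '__main__':
    Compavg()   # bar chart of max vs mean marks per subject
    compSub()   # overlaid histograms of per-subject totals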
mit
5,560,022,484,515,166,000
29.730159
131
0.66064
false
2.564238
false
false
false