Dataset schema (one record per source file):

    repo_name        string   lengths 5 to 92
    path             string   lengths 4 to 221
    copies           string   19 distinct values
    size             string   lengths 4 to 6
    content          string   lengths 766 to 896k
    license          string   15 distinct values
    hash             int64    -9,223,277,421,539,062,000 to 9,223,102,107B
    line_mean        float64  6.51 to 99.9
    line_max         int64    32 to 997
    alpha_frac       float64  0.25 to 0.96
    autogenerated    bool     1 distinct value
    ratio            float64  1.5 to 13.6
    config_test      bool     2 distinct values
    has_no_keywords  bool     2 distinct values
    few_assignments  bool     1 distinct value
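Each record below follows this schema. As an illustration only, the sketch that follows shows how a dump with these fields could be loaded and inspected; it assumes the data is available through the Hugging Face `datasets` library, and the dataset path "example/python-source-dump" is a placeholder rather than the real name of this export.

# Illustrative sketch: iterate a few records of a dump with the schema above.
# Assumes the data is loadable via the `datasets` library; the dataset path
# "example/python-source-dump" is a placeholder, not a real dataset name.
from datasets import load_dataset

ds = load_dataset("example/python-source-dump", split="train")

for record in ds.select(range(3)):
    # Repository metadata, the raw file text, and per-file statistics.
    print(record["repo_name"], record["path"], record["license"])
    print("  size:", record["size"], "| line_mean:", record["line_mean"],
          "| alpha_frac:", record["alpha_frac"], "| ratio:", record["ratio"])
    print(record["content"][:200])  # first 200 characters of the source file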
repo_name: mementum/backtrader | path: samples/vctest/vctest.py | copies: 1 | size: 15011 | content:
#!/usr/bin/env python # -*- coding: utf-8; py-indent-offset:4 -*- ############################################################################### # # Copyright (C) 2015-2020 Daniel Rodriguez # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### from __future__ import (absolute_import, division, print_function, unicode_literals) import argparse import datetime # The above could be sent to an independent module import backtrader as bt from backtrader.utils import flushfile # win32 quick stdout flushing from backtrader.utils.py3 import string_types class TestStrategy(bt.Strategy): params = dict( smaperiod=5, trade=False, stake=10, exectype=bt.Order.Market, stopafter=0, valid=None, cancel=0, donotsell=False, price=None, pstoplimit=None, ) def __init__(self): # To control operation entries self.orderid = list() self.order = None self.counttostop = 0 self.datastatus = 0 # Create SMA on 2nd data self.sma = bt.indicators.MovAv.SMA(self.data, period=self.p.smaperiod) print('--------------------------------------------------') print('Strategy Created') print('--------------------------------------------------') def notify_data(self, data, status, *args, **kwargs): print('*' * 5, 'DATA NOTIF:', data._getstatusname(status), *args) if status == data.LIVE: self.counttostop = self.p.stopafter self.datastatus = 1 def notify_store(self, msg, *args, **kwargs): print('*' * 5, 'STORE NOTIF:', msg) def notify_order(self, order): if order.status in [order.Completed, order.Cancelled, order.Rejected]: self.order = None print('-' * 50, 'ORDER BEGIN', datetime.datetime.now()) print(order) print('-' * 50, 'ORDER END') def notify_trade(self, trade): print('-' * 50, 'TRADE BEGIN', datetime.datetime.now()) print(trade) print('-' * 50, 'TRADE END') def prenext(self): self.next(frompre=True) def next(self, frompre=False): txt = list() txt.append('%04d' % len(self)) dtfmt = '%Y-%m-%dT%H:%M:%S.%f' txt.append('%s' % self.data.datetime.datetime(0).strftime(dtfmt)) txt.append('{}'.format(self.data.open[0])) txt.append('{}'.format(self.data.high[0])) txt.append('{}'.format(self.data.low[0])) txt.append('{}'.format(self.data.close[0])) txt.append('{}'.format(self.data.volume[0])) txt.append('{}'.format(self.data.openinterest[0])) txt.append('{}'.format(self.sma[0])) print(', '.join(txt)) if len(self.datas) > 1: txt = list() txt.append('%04d' % len(self)) dtfmt = '%Y-%m-%dT%H:%M:%S.%f' txt.append('%s' % self.data1.datetime.datetime(0).strftime(dtfmt)) txt.append('{}'.format(self.data1.open[0])) txt.append('{}'.format(self.data1.high[0])) txt.append('{}'.format(self.data1.low[0])) txt.append('{}'.format(self.data1.close[0])) txt.append('{}'.format(self.data1.volume[0])) txt.append('{}'.format(self.data1.openinterest[0])) txt.append('{}'.format(float('NaN'))) print(', '.join(txt)) if self.counttostop: # stop after x live lines self.counttostop -= 1 if not self.counttostop: 
self.env.runstop() return if not self.p.trade: return # if True and len(self.orderid) < 1: if self.datastatus and not self.position and len(self.orderid) < 1: self.order = self.buy(size=self.p.stake, exectype=self.p.exectype, price=self.p.price, plimit=self.p.pstoplimit, valid=self.p.valid) self.orderid.append(self.order) elif self.position.size > 0 and not self.p.donotsell: if self.order is None: size = self.p.stake // 2 if not size: size = self.position.size # use the remaining self.order = self.sell(size=size, exectype=bt.Order.Market) elif self.order is not None and self.p.cancel: if self.datastatus > self.p.cancel: self.cancel(self.order) if self.datastatus: self.datastatus += 1 def start(self): header = ['Datetime', 'Open', 'High', 'Low', 'Close', 'Volume', 'OpenInterest', 'SMA'] print(', '.join(header)) self.done = False def runstrategy(): args = parse_args() # Create a cerebro cerebro = bt.Cerebro() storekwargs = dict() if not args.nostore: vcstore = bt.stores.VCStore(**storekwargs) if args.broker: brokerargs = dict(account=args.account, **storekwargs) if not args.nostore: broker = vcstore.getbroker(**brokerargs) else: broker = bt.brokers.VCBroker(**brokerargs) cerebro.setbroker(broker) timeframe = bt.TimeFrame.TFrame(args.timeframe) if args.resample or args.replay: datatf = bt.TimeFrame.Ticks datacomp = 1 else: datatf = timeframe datacomp = args.compression fromdate = None if args.fromdate: dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.fromdate)) fromdate = datetime.datetime.strptime(args.fromdate, dtformat) todate = None if args.todate: dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.todate)) todate = datetime.datetime.strptime(args.todate, dtformat) VCDataFactory = vcstore.getdata if not args.nostore else bt.feeds.VCData datakwargs = dict( timeframe=datatf, compression=datacomp, fromdate=fromdate, todate=todate, historical=args.historical, qcheck=args.qcheck, tz=args.timezone ) if args.nostore and not args.broker: # neither store nor broker datakwargs.update(storekwargs) # pass the store args over the data data0 = VCDataFactory(dataname=args.data0, tradename=args.tradename, **datakwargs) data1 = None if args.data1 is not None: data1 = VCDataFactory(dataname=args.data1, **datakwargs) rekwargs = dict( timeframe=timeframe, compression=args.compression, bar2edge=not args.no_bar2edge, adjbartime=not args.no_adjbartime, rightedge=not args.no_rightedge, ) if args.replay: cerebro.replaydata(data0, **rekwargs) if data1 is not None: cerebro.replaydata(data1, **rekwargs) elif args.resample: cerebro.resampledata(data0, **rekwargs) if data1 is not None: cerebro.resampledata(data1, **rekwargs) else: cerebro.adddata(data0) if data1 is not None: cerebro.adddata(data1) if args.valid is None: valid = None else: try: valid = float(args.valid) except: dtformat = '%Y-%m-%d' + ('T%H:%M:%S' * ('T' in args.valid)) valid = datetime.datetime.strptime(args.valid, dtformat) else: valid = datetime.timedelta(seconds=args.valid) # Add the strategy cerebro.addstrategy(TestStrategy, smaperiod=args.smaperiod, trade=args.trade, exectype=bt.Order.ExecType(args.exectype), stake=args.stake, stopafter=args.stopafter, valid=valid, cancel=args.cancel, donotsell=args.donotsell, price=args.price, pstoplimit=args.pstoplimit) # Live data ... 
avoid long data accumulation by switching to "exactbars" cerebro.run(exactbars=args.exactbars) if args.plot and args.exactbars < 1: # plot if possible cerebro.plot() def parse_args(): parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='Test Visual Chart 6 integration') parser.add_argument('--exactbars', default=1, type=int, required=False, action='store', help='exactbars level, use 0/-1/-2 to enable plotting') parser.add_argument('--plot', required=False, action='store_true', help='Plot if possible') parser.add_argument('--stopafter', default=0, type=int, required=False, action='store', help='Stop after x lines of LIVE data') parser.add_argument('--nostore', required=False, action='store_true', help='Do not Use the store pattern') parser.add_argument('--qcheck', default=0.5, type=float, required=False, action='store', help=('Timeout for periodic ' 'notification/resampling/replaying check')) parser.add_argument('--no-timeoffset', required=False, action='store_true', help=('Do not Use TWS/System time offset for non ' 'timestamped prices and to align resampling')) parser.add_argument('--data0', default=None, required=True, action='store', help='data 0 into the system') parser.add_argument('--tradename', default=None, required=False, action='store', help='Actual Trading Name of the asset') parser.add_argument('--data1', default=None, required=False, action='store', help='data 1 into the system') parser.add_argument('--timezone', default=None, required=False, action='store', help='timezone to get time output into (pytz names)') parser.add_argument('--historical', required=False, action='store_true', help='do only historical download') parser.add_argument('--fromdate', required=False, action='store', help=('Starting date for historical download ' 'with format: YYYY-MM-DD[THH:MM:SS]')) parser.add_argument('--todate', required=False, action='store', help=('End date for historical download ' 'with format: YYYY-MM-DD[THH:MM:SS]')) parser.add_argument('--smaperiod', default=5, type=int, required=False, action='store', help='Period to apply to the Simple Moving Average') pgroup = parser.add_mutually_exclusive_group(required=False) pgroup.add_argument('--replay', required=False, action='store_true', help='replay to chosen timeframe') pgroup.add_argument('--resample', required=False, action='store_true', help='resample to chosen timeframe') parser.add_argument('--timeframe', default=bt.TimeFrame.Names[0], choices=bt.TimeFrame.Names, required=False, action='store', help='TimeFrame for Resample/Replay') parser.add_argument('--compression', default=1, type=int, required=False, action='store', help='Compression for Resample/Replay') parser.add_argument('--no-bar2edge', required=False, action='store_true', help='no bar2edge for resample/replay') parser.add_argument('--no-adjbartime', required=False, action='store_true', help='no adjbartime for resample/replay') parser.add_argument('--no-rightedge', required=False, action='store_true', help='no rightedge for resample/replay') parser.add_argument('--broker', required=False, action='store_true', help='Use VisualChart as broker') parser.add_argument('--account', default=None, required=False, action='store', help='Choose broker account (else first)') parser.add_argument('--trade', required=False, action='store_true', help='Do Sample Buy/Sell operations') parser.add_argument('--donotsell', required=False, action='store_true', help='Do not sell after a buy') parser.add_argument('--exectype', 
default=bt.Order.ExecTypes[0], choices=bt.Order.ExecTypes, required=False, action='store', help='Execution to Use when opening position') parser.add_argument('--price', default=None, type=float, required=False, action='store', help='Price in Limit orders or Stop Trigger Price') parser.add_argument('--pstoplimit', default=None, type=float, required=False, action='store', help='Price for the limit in StopLimit') parser.add_argument('--stake', default=10, type=int, required=False, action='store', help='Stake to use in buy operations') parser.add_argument('--valid', default=None, required=False, action='store', help='Seconds or YYYY-MM-DD') parser.add_argument('--cancel', default=0, type=int, required=False, action='store', help=('Cancel a buy order after n bars in operation,' ' to be combined with orders like Limit')) return parser.parse_args() if __name__ == '__main__': runstrategy()
license: gpl-3.0 | hash: 3,710,281,282,891,024,000 | line_mean: 35.612195 | line_max: 79 | alpha_frac: 0.541136 | autogenerated: false | ratio: 4.155869 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: vivisect/synapse | path: synapse/lib/trigger.py | copies: 1 | size: 1918 | content:
import logging

import synapse.lib.cache as s_cache

logger = logging.getLogger(__name__)

class Triggers:

    def __init__(self):
        self._trig_list = []
        self._trig_match = s_cache.MatchCache()
        self._trig_byname = s_cache.Cache(onmiss=self._onTrigNameMiss)

    def clear(self):
        '''
        Clear all previously registered triggers
        '''
        self._trig_list = []
        self._trig_byname.clear()

    def add(self, func, perm):
        '''
        Add a new callback to the triggers.

        Args:
            func (function): The function to call
            perm (str,dict): The permission tufo

        Returns:
            (None)
        '''
        self._trig_list.append((perm, func))
        self._trig_byname.clear()

    def _onTrigNameMiss(self, name):
        retn = []
        for perm, func in self._trig_list:
            if self._trig_match.match(name, perm[0]):
                retn.append((perm, func))
        return retn

    def _cmpperm(self, perm, must):
        for prop, match in must[1].items():
            valu = perm[1].get(prop)
            if valu is None:
                return False
            if not self._trig_match.match(valu, match):
                return False
        return True

    def trigger(self, perm, *args, **kwargs):
        '''
        Fire any matching trigger functions for the given perm.

        Args:
            perm ((str,dict)): The perm tufo to trigger
            *args (list): args list to use calling the trigger function
            **kwargs (dict): kwargs dict to use calling the trigger function

        Returns:
            (None)
        '''
        for must, func in self._trig_byname.get(perm[0]):
            if self._cmpperm(perm, must):
                try:
                    func(*args, **kwargs)
                except Exception as e:
                    logger.exception(e)
license: apache-2.0 | hash: -5,746,460,234,072,582,000 | line_mean: 25.638889 | line_max: 79 | alpha_frac: 0.519291 | autogenerated: false | ratio: 4.063559 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: orionzhou/robin | path: utils/counter.py | copies: 1 | size: 6732 | content:
""" Counter class for py2.6 back compat. <http://code.activestate.com/recipes/576611/> """ from operator import itemgetter from heapq import nlargest from itertools import repeat, ifilter class Counter(dict): '''Dict subclass for counting hashable objects. Sometimes called a bag or multiset. Elements are stored as dictionary keys and their counts are stored as dictionary values. >>> Counter('zyzygy') Counter({'y': 3, 'z': 2, 'g': 1}) ''' def __init__(self, iterable=None, **kwds): '''Create a new, empty Counter object. And if given, count elements from an input iterable. Or, initialize the count from another mapping of elements to their counts. >>> c = Counter() # a new, empty counter >>> c = Counter('gallahad') # a new counter from an iterable >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping >>> c = Counter(a=4, b=2) # a new counter from keyword args ''' self.update(iterable, **kwds) def __missing__(self, key): return 0 def most_common(self, n=None): '''List the n most common elements and their counts from the most common to the least. If n is None, then list all element counts. >>> Counter('abracadabra').most_common(3) [('a', 5), ('r', 2), ('b', 2)] ''' if n is None: return sorted(self.iteritems(), key=itemgetter(1), reverse=True) return nlargest(n, self.iteritems(), key=itemgetter(1)) def elements(self): '''Iterator over elements repeating each as many times as its count. >>> c = Counter('ABCABC') >>> sorted(c.elements()) ['A', 'A', 'B', 'B', 'C', 'C'] If an element's count has been set to zero or is a negative number, elements() will ignore it. ''' for elem, count in self.iteritems(): for _ in repeat(None, count): yield elem # Override dict methods where the meaning changes for Counter objects. @classmethod def fromkeys(cls, iterable, v=None): raise NotImplementedError( 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') def update(self, iterable=None, **kwds): '''Like dict.update() but add counts instead of replacing them. Source can be an iterable, a dictionary, or another Counter instance. >>> c = Counter('which') >>> c.update('witch') # add elements from another iterable >>> d = Counter('watch') >>> c.update(d) # add elements from another counter >>> c['h'] # four 'h' in which, witch, and watch 4 ''' if iterable is not None: if hasattr(iterable, 'iteritems'): if self: self_get = self.get for elem, count in iterable.iteritems(): self[elem] = self_get(elem, 0) + count else: dict.update(self, iterable) # fast path when counter is empty else: self_get = self.get for elem in iterable: self[elem] = self_get(elem, 0) + 1 if kwds: self.update(kwds) def copy(self): 'Like dict.copy() but returns a Counter instance instead of a dict.' return Counter(self) def __delitem__(self, elem): 'Like dict.__delitem__() but does not raise KeyError for missing values.' if elem in self: dict.__delitem__(self, elem) def __repr__(self): if not self: return '%s()' % self.__class__.__name__ items = ', '.join(map('%r: %r'.__mod__, self.most_common())) return '%s({%s})' % (self.__class__.__name__, items) # Multiset-style mathematical operations discussed in: # Knuth TAOCP Volume II section 4.6.3 exercise 19 # and at http://en.wikipedia.org/wiki/Multiset # # Outputs guaranteed to only include positive counts. # # To strip negative and zero counts, add-in an empty counter: # c += Counter() def __add__(self, other): '''Add counts from two counters. 
>>> Counter('abbb') + Counter('bcc') Counter({'b': 4, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem in set(self) | set(other): newcount = self[elem] + other[elem] if newcount > 0: result[elem] = newcount return result def __sub__(self, other): ''' Subtract count, but keep only results with positive counts. >>> Counter('abbbc') - Counter('bccd') Counter({'b': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented result = Counter() for elem in set(self) | set(other): newcount = self[elem] - other[elem] if newcount > 0: result[elem] = newcount return result def __or__(self, other): '''Union is the maximum of value in either of the input counters. >>> Counter('abbb') | Counter('bcc') Counter({'b': 3, 'c': 2, 'a': 1}) ''' if not isinstance(other, Counter): return NotImplemented _max = max result = Counter() for elem in set(self) | set(other): newcount = _max(self[elem], other[elem]) if newcount > 0: result[elem] = newcount return result def __and__(self, other): ''' Intersection is the minimum of corresponding counts. >>> Counter('abbb') & Counter('bcc') Counter({'b': 1}) ''' if not isinstance(other, Counter): return NotImplemented _min = min result = Counter() if len(self) < len(other): self, other = other, self for elem in ifilter(self.__contains__, other): newcount = _min(self[elem], other[elem]) if newcount > 0: result[elem] = newcount return result def report(self, sep=", ", percentage=False): total = sum(self.values()) items = [] for k, v in sorted(self.items(), key=lambda x: -x[-1]): item = "{0}:{1}".format(k, v) if percentage: item += " ({0:.1f}%)".format(v * 100. / total) items.append(item) return sep.join(items) if __name__ == '__main__': import doctest print(doctest.testmod())
license: gpl-2.0 | hash: -6,222,579,013,777,309,000 | line_mean: 32 | line_max: 85 | alpha_frac: 0.531491 | autogenerated: false | ratio: 4.210131 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: wjakob/layerlab | path: recipes/utils/materials.py | copies: 1 | size: 6279 | content:
# Complex-valued IOR curves for a few metals from scipy import interpolate lambda_gold = [298.75705, 302.400421, 306.133759, 309.960449, 313.884003, 317.908142, 322.036835, 326.274139, 330.624481, 335.092377, 339.682678, 344.400482, 349.251221, 354.240509, 359.37442, 364.659332, 370.10202, 375.709625, 381.489777, 387.450562, 393.600555, 399.948975, 406.505493, 413.280579, 420.285339, 427.531647, 435.032196, 442.800629, 450.851562, 459.200653, 467.864838, 476.862213, 486.212463, 495.936707, 506.057861, 516.600769, 527.592224, 539.061646, 551.040771, 563.564453, 576.670593, 590.400818, 604.800842, 619.920898, 635.816284, 652.548279, 670.184753, 688.800964, 708.481018, 729.318665, 751.41925, 774.901123, 799.897949, 826.561157, 855.063293, 885.601257] eta_gold = [1.795+1.920375j, 1.812+1.92j, 1.822625+1.918875j, 1.83+1.916j, 1.837125+1.911375j, 1.84+1.904j, 1.83425+1.891375j, 1.824+1.878j, 1.812+1.86825j, 1.798+1.86j, 1.782+1.85175j, 1.766+1.846j, 1.7525+1.84525j, 1.74+1.848j, 1.727625+1.852375j, 1.716+1.862j, 1.705875+1.883j, 1.696+1.906j, 1.68475+1.9225j, 1.674+1.936j, 1.666+1.94775j, 1.658+1.956j, 1.64725+1.959375j, 1.636+1.958j, 1.628+1.951375j, 1.616+1.94j, 1.59625+1.9245j, 1.562+1.904j, 1.502125+1.875875j, 1.426+1.846j, 1.345875+1.814625j, 1.242+1.796j, 1.08675+1.797375j, 0.916+1.84j, 0.7545+1.9565j, 0.608+2.12j, 0.49175+2.32625j, 0.402+2.54j, 0.3455+2.730625j, 0.306+2.88j, 0.267625+2.940625j, 0.236+2.97j, 0.212375+3.015j, 0.194+3.06j, 0.17775+3.07j, 0.166+3.15j, 0.161+3.445812j, 0.16+3.8j, 0.160875+4.087687j, 0.164+4.357j, 0.1695+4.610188j, 0.176+4.86j, 0.181375+5.125813j, 0.188+5.39j, 0.198125+5.63125j, 0.21+5.88j] lambda_aluminium = [298.75705, 302.400421, 306.133759, 309.960449, 313.884003, 317.908142, 322.036835, 326.274139, 330.624481, 335.092377, 339.682678, 344.400482, 349.251221, 354.240509, 359.37442, 364.659332, 370.10202, 375.709625, 381.489777, 387.450562, 393.600555, 399.948975, 406.505493, 413.280579, 420.285339, 427.531647, 435.032196, 442.800629, 450.851562, 459.200653, 467.864838, 476.862213, 486.212463, 495.936707, 506.057861, 516.600769, 527.592224, 539.061646, 551.040771, 563.564453, 576.670593, 590.400818, 604.800842, 619.920898, 635.816284, 652.548279, 670.184753, 688.800964, 708.481018, 729.318665, 751.41925, 774.901123, 799.897949, 826.561157, 855.063293, 885.601257] eta_aluminium = [(0.273375+3.59375j), (0.28+3.64j), (0.286813+3.689375j), (0.294+3.74j), (0.301875+3.789375j), (0.31+3.84j), (0.317875+3.894375j), (0.326+3.95j), (0.33475+4.005j), (0.344+4.06j), (0.353813+4.11375j), (0.364+4.17j), (0.374375+4.23375j), (0.385+4.3j), (0.39575+4.365j), (0.407+4.43j), (0.419125+4.49375j), (0.432+4.56j), (0.445688+4.63375j), (0.46+4.71j), (0.474688+4.784375j), (0.49+4.86j), (0.506188+4.938125j), (0.523+5.02j), (0.540063+5.10875j), (0.558+5.2j), (0.577313+5.29j), (0.598+5.38j), (0.620313+5.48j), (0.644+5.58j), (0.668625+5.69j), (0.695+5.8j), (0.72375+5.915j), (0.755+6.03j), (0.789+6.15j), (0.826+6.28j), (0.867+6.42j), (0.912+6.55j), (0.963+6.7j), (1.02+6.85j), (1.08+7j), (1.15+7.15j), (1.22+7.31j), (1.3+7.48j), (1.39+7.65j), (1.49+7.82j), (1.6+8.01j), (1.74+8.21j), (1.91+8.39j), (2.14+8.57j), (2.41+8.62j), (2.63+8.6j), (2.8+8.45j), (2.74+8.31j), (2.58+8.21j), (2.24+8.21j)] lambda_copper = [302.400421, 306.133759, 309.960449, 313.884003, 317.908142, 322.036835, 326.274139, 330.624481, 335.092377, 339.682678, 344.400482, 349.251221, 354.240509, 359.37442, 364.659332, 370.10202, 375.709625, 381.489777, 387.450562, 393.600555, 399.948975, 406.505493, 413.280579, 420.285339, 
427.531647, 435.032196, 442.800629, 450.851562, 459.200653, 467.864838, 476.862213, 486.212463, 495.936707, 506.057861, 516.600769, 527.592224, 539.061646, 551.040771, 563.564453, 576.670593, 590.400818, 604.800842, 619.920898, 635.816284, 652.548279, 670.184753, 688.800964, 708.481018, 729.318665, 751.41925, 774.901123, 799.897949, 826.561157, 855.063293, 885.601257] eta_copper = [(1.38+1.687j), (1.358438+1.703313j), (1.34+1.72j), (1.329063+1.744563j), (1.325+1.77j), (1.3325+1.791625j), (1.34+1.81j), (1.334375+1.822125j), (1.325+1.834j), (1.317812+1.85175j), (1.31+1.872j), (1.300313+1.89425j), (1.29+1.916j), (1.281563+1.931688j), (1.27+1.95j), (1.249062+1.972438j), (1.225+2.015j), (1.2+2.121562j), (1.18+2.21j), (1.174375+2.177188j), (1.175+2.13j), (1.1775+2.160063j), (1.18+2.21j), (1.178125+2.249938j), (1.175+2.289j), (1.172812+2.326j), (1.17+2.362j), (1.165312+2.397625j), (1.16+2.433j), (1.155312+2.469187j), (1.15+2.504j), (1.142812+2.535875j), (1.135+2.564j), (1.131562+2.589625j), (1.12+2.605j), (1.092437+2.595562j), (1.04+2.583j), (0.950375+2.5765j), (0.826+2.599j), (0.645875+2.678062j), (0.468+2.809j), (0.35125+3.01075j), (0.272+3.24j), (0.230813+3.458187j), (0.214+3.67j), (0.20925+3.863125j), (0.213+4.05j), (0.21625+4.239563j), (0.223+4.43j), (0.2365+4.619563j), (0.25+4.817j), (0.254188+5.034125j), (0.26+5.26j), (0.28+5.485625j), (0.3+5.717j)] lambda_chrome = [300.194, 307.643005, 316.276001, 323.708008, 333.279999, 341.542999, 351.217987, 362.514984, 372.312012, 385.031006, 396.10202, 409.175018, 424.58902, 438.09201, 455.80899, 471.406982, 490.040009, 512.314026, 532.102966, 558.468018, 582.06604, 610.739014, 700.452026, 815.65802, 826.53302, 849.17804, 860.971985, 885.570984] eta_chrome = [(0.98+2.67j), (1.02+2.76j), (1.06+2.85j), (1.12+2.95j), (1.18+3.04j), (1.26+3.12j), (1.33+3.18j), (1.39+3.24j), (1.43+3.31j), (1.44+3.4j), (1.48+3.54j), (1.54+3.71j), (1.65+3.89j), (1.8+4.06j), (1.99+4.22j), (2.22+4.36j), (2.49+4.44j), (2.75+4.46j), (2.98+4.45j), (3.18+4.41j), (3.34+4.38j), (3.48+4.36j), (3.84+4.37j), (4.23+4.34j), (4.27+4.33j), (4.31+4.32j), (4.33+4.32j), (4.38+4.31j)] gold = interpolate.interp1d(lambda_gold, eta_gold, kind='cubic') copper = interpolate.interp1d(lambda_copper, eta_copper, kind='cubic') aluminium = interpolate.interp1d(lambda_aluminium, eta_aluminium, kind='cubic') chrome = interpolate.interp1d(lambda_chrome, eta_chrome, kind='cubic')
license: bsd-2-clause | hash: 1,225,152,362,439,384,800 | line_mean: 60.558824 | line_max: 85 | alpha_frac: 0.637363 | autogenerated: false | ratio: 1.763764 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: angus-ai/angus-jumpingsumo | path: wrapper.py | copies: 1 | size: 3347 | content:
# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import os
import subprocess
import threading
import time

import angus

WIDTH = 640

def img_generator(file_path):
    with open(file_path, "rb") as f:
        buff = ""
        for chunk in f:
            buff += chunk
            s = buff.find('\xff\xd8')
            e = buff.find('\xff\xd9')
            if s != -1 and e != -1:
                jpg = buff[s:e + 2]
                buff = buff[e + 2:]
                yield jpg

def command(img, service):
    file_path = '/tmp/imgtmp.jpg'
    with open(file_path, 'wb') as f:
        f.write(img)
    job = service.process({'image': open(file_path, 'rb')})
    result = job.result['faces']
    if len(result) > 0 and result[0]['roi_confidence'] > 0.5:
        roi = result[0]['roi']
        x = roi[0]
        w = roi[2]
        cmd_angle = (x + w * 0.5) - WIDTH / 2
        print w
        if abs(cmd_angle) > WIDTH / 8:
            if cmd_angle > 0:
                return "Right"
            else:
                return "Left"
        elif w > 100:
            return "Back"
        elif w < 80:
            return "Forw"
    return None

def command_loop(singleton, sub, service):
    img = singleton[0]
    if img is None:
        return
    cmd = command(img, service)
    if cmd == "Right":
        sub.stdin.write("u")
        sub.stdin.flush()
    elif cmd == "Left":
        sub.stdin.write("y")
        sub.stdin.flush()
    elif cmd == "Back":
        sub.stdin.write("i")
        sub.stdin.flush()
    elif cmd == "Forw":
        sub.stdin.write("o")
        sub.stdin.flush()

def loop(singleton, sub, service):
    while True:
        command_loop(singleton, sub, service)
        # print "Loop"
        time.sleep(1)

def launch(input_path, sub, service):
    singleton = [None]
    count = 0
    thread = threading.Thread(target=loop, args=(singleton, sub, service))
    thread.daemon = True
    thread.start()
    for img in img_generator(input_path):
        singleton[0] = img
        count += 1
        if count > 600:
            break
    sub.stdin.write("q")
    sub.stdin.flush()

def main():
    os.environ[
        'LD_LIBRARY_PATH'] = "../ARSDKBuildUtils/Targets/Unix/Install/lib"
    sub = subprocess.Popen(
        ["./JumpingSumoInterface"],
        stdin=subprocess.PIPE,
        stdout=None,
        stderr=subprocess.STDOUT)
    time.sleep(2)
    conn = angus.connect()
    service = conn.services.get_service('face_detection', 1)
    launch("./video_fifo", sub, service)

if __name__ == "__main__":
    main()
license: apache-2.0 | hash: -9,205,598,472,601,172,000 | line_mean: 24.356061 | line_max: 74 | alpha_frac: 0.58052 | autogenerated: false | ratio: 3.649945 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: nhatbui/pysuite | path: pookeeper/pookeeper/pookeeper.py | copies: 1 | size: 7385 | content:
import os from collections import defaultdict, OrderedDict from twisted.internet.protocol import Factory from twisted.protocols.basic import LineReceiver from twisted.internet import reactor class ZooKeeper(LineReceiver): def __init__(self, connection_addr, znodes, ephem_nodes): self.address = connection_addr self.znodes = znodes self.ephem_nodes = ephem_nodes def connectionMade(self): self.sendLine("true:Connected") def connectionLost(self): # Delete all ephemeral nodes associated # with this connection/address. for node in self.ephem_nodes[self.address]: self.delete_node(node) del self.ephem_nodes[self.address] def delete_node(self, node): # Delete node from parent's children listing parent, child_name = os.path.split(node) del self.znodes[parent]['children'][child_name] # Delete node and all its children :( stack = [node] while len(stack): curr_node = stack.pop() stack.extend(self.znodes[curr_node]['children'].keys()) # Notify watchers self.notify_watchers(curr_node) del self.znodes[curr_node] def notify_watchers(self, node): # Notify watchers while len(self.znodes[node]['watchers']): watcher = self.znodes[node]['watchers'].pop() watcher.sendLine('true:WATCHER_NOTICE:DELETED:{}'.format(node)) def lineReceived(self, msg): # Check command idx = msg.find(':') if idx == -1: self.sendLine('false:bad message') cmd = msg[:idx] if cmd == 'CREATE': self.handle_CREATENODE(msg[(idx+1):]) elif cmd == 'ECREATE': self.handle_CREATEEPHEMERALNODE(msg[(idx+1):]) elif cmd == 'DELETE': self.handle_DELETENODE(msg[(idx+1):]) elif cmd == 'EXISTS': self.handle_EXISTSNODE(msg[(idx+1):]) elif cmd == 'GET': self.handle_GET(msg[(idx+1):]) elif cmd == 'SET': self.handle_SET(msg[(idx+1):]) elif cmd == 'CHILDREN': self.handle_GETCHILDREN(msg[(idx+1):]) elif cmd == 'WATCH': self.handle_WATCH(msg[(idx+1):]) else: self.sendLine('false:unknown command') def handle_CREATENODE(self, node): # Check if znode path starts with a slash if node[0] != '/': self.sendLine('false') # Check path up to node exists p, _ = os.path.split(node) if p not in self.znodes: self.sendLine('false') return # Check if node already exists if node in self.znodes: self.sendLine('false:node already exists') return parent, child = os.path.split(node) self.znodes[node] = { 'parent': parent, 'children': {}, 'watchers': []} self.znodes[parent]['children'][child] = True self.sendLine('true:CREATED:{}'.format(node)) def handle_CREATEEPHEMERALNODE(self, node): # Check if znode path starts with a slash if node[0] != '/': self.sendLine('false:bad node name') # Check path up to node exists p, _ = os.path.split(node) if p not in self.znodes: self.sendLine('false:path up to node does not exist') else: parent, child = os.path.split(node) self.znodes[node] = { 'parent': parent, 'children': {}, 'watchers': []} self.znodes[parent]['children'][child] = True # Add as ephemeral node self.ephem_nodes[self.address].append(node) self.sendLine('true:CREATED_ENODE:{}'.format(node)) def handle_DELETENODE(self, node): # Check if znode path starts with a slash if node[0] != '/': self.sendLine('false') # Check that node exists if node in self.znodes: # Delete node from parent's children listing parent, child_name = os.path.split(node) del self.znodes[parent]['children'][child_name] # Delete node and all its children :( stack = [node] while len(stack): curr_node = stack.pop() stack.extend(self.znodes[curr_node]['children'].keys()) # Notify watchers while len(self.znodes[curr_node]['watchers']): watcher = self.znodes[curr_node]['watchers'].pop() 
watcher.sendLine('true:WATCHER_NOTICE:DELETED:{}'.format(curr_node)) del self.znodes[curr_node] self.sendLine('true:DELETED:{}'.format(node)) else: self.sendLine('false:NOT DELETED:{}'.format(node)) def handle_EXISTSNODE(self, node): # Check if znode path starts with a slash if node[0] != '/': self.sendLine('false') # Check that node exists if node in self.znodes: self.sendLine('true') else: self.sendLine('false') def handle_GET(self, node): # Check if znode path starts with a slash if node[0] != '/': self.sendLine('false') # Check that node exists if node in self.znodes: self.sendLine(self.znodes[node]['data']) else: self.sendLine('false') def handle_SET(self, msg): idx = msg.find(':') if idx == -1: self.sendLine('false') node = msg[:idx] data = msg[(idx+1):] # Check if znode path starts with a slash if node[0] != '/': self.sendLine('false') # Check that node exists if node in self.znodes: self.znodes[node]['data'] = data # Notify watchers while len(self.znodes[node]['watchers']): watcher = self.znodes[node]['watchers'].pop() watcher.sendLine('true:WATCHER_NOFITY:CHANGED:{}'.format(node)) self.sendLine('true:SET:{}'.format(node)) else: self.sendLine('false') def handle_GETCHILDREN(self, node): # Check if znode path starts with a slash if node[0] != '/': self.sendLine('false') # Check that node exists if node in self.znodes: self.sendLine(','.join(self.znodes[node]['children'].keys())) else: self.sendLine('false') def handle_WATCH(self, node): # Check if znode path starts with a slash if node[0] != '/': self.sendLine('false:WATCHING:improper naming:{}'.format(node)) # Check that node exists if node in self.znodes: self.znodes[node]['watchers'].append(self) self.sendLine('true:WATCHING:{}'.format(node)) else: self.sendLine('false:WATCHING:node does not exist:{}'.format(node)) class ZooKeeperFactory(Factory): def __init__(self): self.znodes = {'/': { 'parent': None, 'children': OrderedDict(), 'watchers': [] } } self.ephem_nodes = defaultdict(list) def buildProtocol(self, addr): return ZooKeeper(addr, self.znodes, self.ephem_nodes) if __name__ == '__main__': reactor.listenTCP(8123, ZooKeeperFactory()) print('Starting on port 8123') reactor.run()
license: mit | hash: -7,960,860,430,897,966,000 | line_mean: 29.899582 | line_max: 91 | alpha_frac: 0.55545 | autogenerated: false | ratio: 3.870545 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: FluidityStokes/fluidity | path: tests/mms_tracer_P1dg_cdg_diff_steady_3d_cjc_inhNmnnbc/cdg3d.py | copies: 1 | size: 1504 | content:
import os
from fluidity_tools import stat_parser
from sympy import *
from numpy import array, max, abs

meshtemplate = '''
Point(1) = {0.0,0.0,0,0.1};
Extrude {1,0,0} {
  Point{1}; Layers{<layers>};
}
Extrude {0,1,0} {
  Line{1}; Layers{<layers>};
}
Extrude {0,0,1} {
  Surface{5}; Layers{<layers>};
}
//Z-normal surface, z=0
Physical Surface(28) = {5};
//Z-normal surface, z=1
Physical Surface(29) = {27};
//Y-normal surface, y=0
Physical Surface(30) = {14};
//Y-normal surface, y=1
Physical Surface(31) = {22};
//X-normal surface, x=0
Physical Surface(32) = {26};
//X-normal surface, x=1
Physical Surface(33) = {18};
Physical Volume(34) = {1};
'''

def generate_meshfile(name, layers):
    geo = meshtemplate.replace('<layers>', str(layers))
    open(name + ".geo", 'w').write(geo)
    os.system("gmsh -3 " + name + ".geo")
    os.system("../../bin/gmsh2triangle " + name + ".msh")

def run_test(layers, binary):
    '''run_test(layers, binary)

    Run a single test of the channel problem. Layers is the number of mesh
    points in the cross-channel direction. The mesh is unstructured and
    isotropic. binary is a string containing the fluidity command to run.

    The return value is the error in u and p at the end of the simulation.'''
    generate_meshfile("channel", layers)
    os.system(binary + " channel_viscous.flml")
    s = stat_parser("channel-flow-dg.stat")
    return (s["Water"]['AnalyticUVelocitySolutionError']['l2norm'][-1],
            s["Water"]['AnalyticPressureSolutionError']['l2norm'][-1])
license: lgpl-2.1 | hash: -6,174,876,977,776,289,000 | line_mean: 25.857143 | line_max: 77 | alpha_frac: 0.664229 | autogenerated: false | ratio: 2.94902 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: hzlf/openbroadcast | path: website/apps/__rework_in_progress/importer/api.py | copies: 1 | size: 7486 | content:
from django.conf import settings from django.conf.urls.defaults import * from django.contrib.auth.models import User from django.db.models import Count import json from tastypie import fields from tastypie.authentication import * from tastypie.authorization import * from tastypie.resources import ModelResource, Resource, ALL, ALL_WITH_RELATIONS from tastypie.cache import SimpleCache from tastypie.utils import trailing_slash from tastypie.exceptions import ImmediateHttpResponse from django.http import HttpResponse from importer.models import Import, ImportFile from alibrary.api import MediaResource # file = request.FILES[u'files[]'] class ImportFileResource(ModelResource): import_session = fields.ForeignKey('importer.api.ImportResource', 'import_session', null=True, full=False) media = fields.ForeignKey('alibrary.api.MediaResource', 'media', null=True, full=True) class Meta: queryset = ImportFile.objects.all() list_allowed_methods = ['get', 'post'] detail_allowed_methods = ['get', 'post', 'put', 'delete'] resource_name = 'importfile' # excludes = ['type','results_musicbrainz'] excludes = ['type',] authentication = Authentication() authorization = Authorization() always_return_data = True filtering = { 'import_session': ALL_WITH_RELATIONS, 'created': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'], } def dehydrate(self, bundle): bundle.data['status'] = bundle.obj.get_status_display().lower(); # offload json parsing to the backend # TODO: remove in js, enable here """ bundle.data['import_tag'] = json.loads(bundle.data['import_tag']) bundle.data['results_acoustid'] = json.loads(bundle.data['results_acoustid']) bundle.data['results_musicbrainz'] = json.loads(bundle.data['results_musicbrainz']) bundle.data['results_discogs'] = json.loads(bundle.data['results_discogs']) bundle.data['results_tag'] = json.loads(bundle.data['results_tag']) """ return bundle def obj_update(self, bundle, request, **kwargs): #import time #time.sleep(3) return super(ImportFileResource, self).obj_update(bundle, request, **kwargs) def obj_create(self, bundle, request, **kwargs): """ Little switch to play with jquery fileupload """ try: #import_id = request.GET['import_session'] import_id = request.GET.get('import_session', None) uuid_key = request.GET.get('uuid_key', None) print "####################################" print request.FILES[u'files[]'] if import_id: imp = Import.objects.get(pk=import_id) bundle.data['import_session'] = imp elif uuid_key: imp, created = Import.objects.get_or_create(uuid_key=uuid_key, user=request.user) bundle.data['import_session'] = imp else: bundle.data['import_session'] = None bundle.data['file'] = request.FILES[u'files[]'] except Exception, e: print e return super(ImportFileResource, self).obj_create(bundle, request, **kwargs) class ImportResource(ModelResource): files = fields.ToManyField('importer.api.ImportFileResource', 'files', full=True, null=True) class Meta: queryset = Import.objects.all() list_allowed_methods = ['get', 'post'] detail_allowed_methods = ['get', 'post', 'put', 'delete'] #list_allowed_methods = ['get',] #detail_allowed_methods = ['get',] resource_name = 'import' excludes = ['updated',] include_absolute_url = True authentication = Authentication() authorization = Authorization() always_return_data = True filtering = { #'channel': ALL_WITH_RELATIONS, 'created': ['exact', 'range', 'gt', 'gte', 'lt', 'lte'], } def save_related(self, obj): return True # additional methods def prepend_urls(self): return [ url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/import-all%s$" % 
(self._meta.resource_name, trailing_slash()), self.wrap_view('import_all'), name="importer_api_import_all"), url(r"^(?P<resource_name>%s)/(?P<pk>\w[\w/-]*)/apply-to-all%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('apply_to_all'), name="importer_api_apply_to_all"), ] def import_all(self, request, **kwargs): self.method_check(request, allowed=['get']) self.is_authenticated(request) self.throttle_check(request) import_session = Import.objects.get(**self.remove_api_resource_names(kwargs)) import_files = import_session.files.filter(status=2) # first to a batch update import_files.update(status=6) # save again to trigger pos-save actions for import_file in import_files: import_file.status = 6 import_file.save() bundle = self.build_bundle(obj=import_session, request=request) bundle = self.full_dehydrate(bundle) self.log_throttled_access(request) return self.create_response(request, bundle) """ mass aply import tag """ def apply_to_all(self, request, **kwargs): self.method_check(request, allowed=['post']) self.is_authenticated(request) self.throttle_check(request) import_session = Import.objects.get(**self.remove_api_resource_names(kwargs)) item_id = request.POST.get('item_id', None) ct = request.POST.get('ct', None) print 'item_id: %s' % item_id print 'ct: %s' % ct if not (ct and item_id): raise ImmediateHttpResponse(response=HttpResponse(status=410)) import_files = import_session.files.filter(status__in=(2,4)) source = import_files.filter(pk=item_id) # exclude current one import_files = import_files.exclude(pk=item_id) try: source = source[0] print source # print source.import_tag except: source = None if source: sit = source.import_tag for import_file in import_files: dit = import_file.import_tag if ct == 'artist': map = ('artist', 'alibrary_artist_id', 'mb_artist_id', 'force_artist') if ct == 'release': map = ('release', 'alibrary_release_id', 'mb_release_id', 'force_release') for key in map: src = sit.get(key, None) if src: dit[key] = src else: dit.pop(key, None) import_file.import_tag = dit import_file.save() bundle = self.build_bundle(obj=import_session, request=request) bundle = self.full_dehydrate(bundle) self.log_throttled_access(request) return self.create_response(request, bundle)
license: gpl-3.0 | hash: 2,424,500,771,755,327,000 | line_mean: 31.837719 | line_max: 190 | alpha_frac: 0.56786 | autogenerated: false | ratio: 4.050866 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: crempp/mdweb | path: mdweb/SiteMapView.py | copies: 1 | size: 2696 | content:
"""MDWeb SiteMap View Object.""" import datetime import logging import numbers import os import pytz import time from flask import ( current_app as app, make_response, render_template_string, url_for, ) from flask.views import View #: Template string to use for the sitemap generation # (is there a better place to put this?, not in the theme) # pylint: disable=C0301 SITEMAP_TEMPLATE = """<?xml version="1.0" encoding="utf-8"?> <urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9 http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd"> {% for page in pages -%} <url> <loc>{{page.loc|safe}}</loc> <lastmod>{{page.lastmod|safe}}</lastmod> {%- if page.changefreq %} <changefreq>{{page.changefreq|safe}}</changefreq> {%- endif %} {%- if page.priority %} <priority>{{page.priority|safe}}</priority> {%- endif %} </url> {%- endfor %} </urlset> """ class SiteMapView(View): """Sitemap View Object.""" sitemap_cache = None def dispatch_request(self): """Flask dispatch method.""" if self.sitemap_cache is None: self.sitemap_cache = self.generate_sitemap() response = make_response(self.sitemap_cache) response.headers["Content-Type"] = "application/xml" return response @classmethod def generate_sitemap(cls): """Generate sitemap.xml. Makes a list of urls and date modified.""" logging.info("Generating sitemap...") start = time.time() pages = [] index_url = url_for('index', _external=True) for url, page in app.navigation.get_page_dict().items(): if page.meta_inf.published: mtime = os.path.getmtime(page.page_path) if isinstance(mtime, numbers.Real): mtime = datetime.datetime.fromtimestamp(mtime) mtime.replace(tzinfo=pytz.UTC) # lastmod = mtime.strftime('%Y-%m-%dT%H:%M:%S%z') lastmod = mtime.strftime('%Y-%m-%d') pages.append({ 'loc': "%s%s" % (index_url, url), 'lastmod': lastmod, 'changefreq': page.meta_inf.sitemap_changefreq, 'priority': page.meta_inf.sitemap_priority, }) sitemap_xml = render_template_string(SITEMAP_TEMPLATE, pages=pages) end = time.time() logging.info("completed sitemap generation in %s seconds", (end - start)) return sitemap_xml
license: mit | hash: -4,282,121,319,264,830,000 | line_mean: 30.717647 | line_max: 124 | alpha_frac: 0.585682 | autogenerated: false | ratio: 3.845934 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: armyofevilrobots/reticulatus | path: reticulatus/gui/reticulate_main.py | copies: 1 | size: 13375 | content:
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'reticulate_main.ui' # # Created: Thu Oct 25 21:48:45 2012 # by: pyside-uic 0.2.13 running on PySide 1.1.0 # # WARNING! All changes made in this file will be lost! from PySide import QtCore, QtGui class Ui_main_window(object): def setupUi(self, main_window): main_window.setObjectName("main_window") main_window.resize(925, 633) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(main_window.sizePolicy().hasHeightForWidth()) main_window.setSizePolicy(sizePolicy) main_window.setMinimumSize(QtCore.QSize(512, 384)) main_window.setAutoFillBackground(False) self.centralwidget = QtGui.QWidget(main_window) self.centralwidget.setObjectName("centralwidget") self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget) self.horizontalLayout.setObjectName("horizontalLayout") self.object_tabs = QtGui.QTabWidget(self.centralwidget) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.object_tabs.sizePolicy().hasHeightForWidth()) self.object_tabs.setSizePolicy(sizePolicy) self.object_tabs.setObjectName("object_tabs") self.object_3d = QtGui.QWidget() self.object_3d.setCursor(QtCore.Qt.CrossCursor) self.object_3d.setLayoutDirection(QtCore.Qt.RightToLeft) self.object_3d.setObjectName("object_3d") self.object_3d_layout = QtGui.QHBoxLayout(self.object_3d) self.object_3d_layout.setObjectName("object_3d_layout") self.frame = QtGui.QFrame(self.object_3d) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth()) self.frame.setSizePolicy(sizePolicy) self.frame.setMaximumSize(QtCore.QSize(50, 16777215)) self.frame.setLayoutDirection(QtCore.Qt.RightToLeft) self.frame.setFrameShape(QtGui.QFrame.NoFrame) self.frame.setFrameShadow(QtGui.QFrame.Raised) self.frame.setLineWidth(0) self.frame.setObjectName("frame") self.slider_container_layout = QtGui.QVBoxLayout(self.frame) self.slider_container_layout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint) self.slider_container_layout.setObjectName("slider_container_layout") self.layer_slider = QtGui.QSlider(self.frame) sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.layer_slider.sizePolicy().hasHeightForWidth()) self.layer_slider.setSizePolicy(sizePolicy) self.layer_slider.setMaximumSize(QtCore.QSize(50, 16777215)) self.layer_slider.setMinimum(0) self.layer_slider.setMaximum(9999) self.layer_slider.setProperty("value", 0) self.layer_slider.setOrientation(QtCore.Qt.Vertical) self.layer_slider.setInvertedAppearance(False) self.layer_slider.setObjectName("layer_slider") self.slider_container_layout.addWidget(self.layer_slider) self.layer_lcd = QtGui.QLCDNumber(self.frame) self.layer_lcd.setMaximumSize(QtCore.QSize(100, 16777215)) font = QtGui.QFont() font.setWeight(75) font.setBold(True) self.layer_lcd.setFont(font) self.layer_lcd.setNumDigits(4) self.layer_lcd.setObjectName("layer_lcd") self.slider_container_layout.addWidget(self.layer_lcd) self.object_3d_layout.addWidget(self.frame) self.object_tabs.addTab(self.object_3d, "") 
self.gcode = QtGui.QWidget() sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.gcode.sizePolicy().hasHeightForWidth()) self.gcode.setSizePolicy(sizePolicy) self.gcode.setObjectName("gcode") self.gcode_hlayout = QtGui.QHBoxLayout(self.gcode) self.gcode_hlayout.setObjectName("gcode_hlayout") self.gcode_editor = QtGui.QTextEdit(self.gcode) self.gcode_editor.setObjectName("gcode_editor") self.gcode_hlayout.addWidget(self.gcode_editor) self.object_tabs.addTab(self.gcode, "") self.horizontalLayout.addWidget(self.object_tabs) main_window.setCentralWidget(self.centralwidget) self.menubar = QtGui.QMenuBar(main_window) self.menubar.setGeometry(QtCore.QRect(0, 0, 925, 23)) self.menubar.setObjectName("menubar") self.menuFile = QtGui.QMenu(self.menubar) self.menuFile.setObjectName("menuFile") self.menu_edit = QtGui.QMenu(self.menubar) self.menu_edit.setObjectName("menu_edit") self.menu_Settings = QtGui.QMenu(self.menubar) self.menu_Settings.setObjectName("menu_Settings") self.menu_Help = QtGui.QMenu(self.menubar) self.menu_Help.setObjectName("menu_Help") self.menuActions = QtGui.QMenu(self.menubar) self.menuActions.setObjectName("menuActions") self.menu_Windows = QtGui.QMenu(self.menubar) self.menu_Windows.setObjectName("menu_Windows") main_window.setMenuBar(self.menubar) self.statusbar = QtGui.QStatusBar(main_window) self.statusbar.setEnabled(True) self.statusbar.setSizeGripEnabled(True) self.statusbar.setObjectName("statusbar") main_window.setStatusBar(self.statusbar) self.layers_dock = QtGui.QDockWidget(main_window) self.layers_dock.setMinimumSize(QtCore.QSize(120, 160)) self.layers_dock.setMaximumSize(QtCore.QSize(1024, 1024)) self.layers_dock.setObjectName("layers_dock") self.dock_contents = QtGui.QWidget() self.dock_contents.setObjectName("dock_contents") self.verticalLayout = QtGui.QVBoxLayout(self.dock_contents) self.verticalLayout.setObjectName("verticalLayout") self.label = QtGui.QLabel(self.dock_contents) self.label.setObjectName("label") self.verticalLayout.addWidget(self.label) self.layer_list_widget = QtGui.QListWidget(self.dock_contents) self.layer_list_widget.setObjectName("layer_list_widget") self.verticalLayout.addWidget(self.layer_list_widget) self.layers_dock.setWidget(self.dock_contents) main_window.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.layers_dock) self.tools_dock = QtGui.QDockWidget(main_window) self.tools_dock.setMinimumSize(QtCore.QSize(120, 160)) self.tools_dock.setObjectName("tools_dock") self.dockWidgetContents = QtGui.QWidget() self.dockWidgetContents.setObjectName("dockWidgetContents") self.tools_dock.setWidget(self.dockWidgetContents) main_window.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.tools_dock) self.action_file = QtGui.QAction(main_window) self.action_file.setObjectName("action_file") self.action_new = QtGui.QAction(main_window) self.action_new.setObjectName("action_new") self.action_open = QtGui.QAction(main_window) self.action_open.setObjectName("action_open") self.action_save = QtGui.QAction(main_window) self.action_save.setObjectName("action_save") self.action_quit = QtGui.QAction(main_window) self.action_quit.setObjectName("action_quit") self.action_print_settings = QtGui.QAction(main_window) self.action_print_settings.setObjectName("action_print_settings") self.action_slice_settings = QtGui.QAction(main_window) self.action_slice_settings.setObjectName("action_slice_settings") self.action_help = 
QtGui.QAction(main_window) self.action_help.setObjectName("action_help") self.action_about = QtGui.QAction(main_window) self.action_about.setObjectName("action_about") self.action_display_settings = QtGui.QAction(main_window) self.action_display_settings.setObjectName("action_display_settings") self.action_slice = QtGui.QAction(main_window) self.action_slice.setObjectName("action_slice") self.action_Layers = QtGui.QAction(main_window) self.action_Layers.setObjectName("action_Layers") self.action_Toolbox = QtGui.QAction(main_window) self.action_Toolbox.setObjectName("action_Toolbox") self.menuFile.addAction(self.action_new) self.menuFile.addAction(self.action_open) self.menuFile.addAction(self.action_save) self.menuFile.addSeparator() self.menuFile.addAction(self.action_quit) self.menu_Settings.addAction(self.action_print_settings) self.menu_Settings.addAction(self.action_slice_settings) self.menu_Settings.addAction(self.action_display_settings) self.menu_Help.addAction(self.action_help) self.menu_Help.addSeparator() self.menu_Help.addAction(self.action_about) self.menuActions.addAction(self.action_slice) self.menu_Windows.addAction(self.action_Layers) self.menu_Windows.addAction(self.action_Toolbox) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menu_edit.menuAction()) self.menubar.addAction(self.menuActions.menuAction()) self.menubar.addAction(self.menu_Settings.menuAction()) self.menubar.addAction(self.menu_Windows.menuAction()) self.menubar.addAction(self.menu_Help.menuAction()) self.retranslateUi(main_window) self.object_tabs.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(main_window) def retranslateUi(self, main_window): main_window.setWindowTitle(QtGui.QApplication.translate("main_window", "Reticulatus", None, QtGui.QApplication.UnicodeUTF8)) self.layer_slider.setToolTip(QtGui.QApplication.translate("main_window", "Layer clip plane", None, QtGui.QApplication.UnicodeUTF8)) self.object_tabs.setTabText(self.object_tabs.indexOf(self.object_3d), QtGui.QApplication.translate("main_window", "3D Object", None, QtGui.QApplication.UnicodeUTF8)) self.object_tabs.setTabText(self.object_tabs.indexOf(self.gcode), QtGui.QApplication.translate("main_window", "GCode", None, QtGui.QApplication.UnicodeUTF8)) self.menuFile.setTitle(QtGui.QApplication.translate("main_window", "&File", None, QtGui.QApplication.UnicodeUTF8)) self.menu_edit.setTitle(QtGui.QApplication.translate("main_window", "&Edit", None, QtGui.QApplication.UnicodeUTF8)) self.menu_Settings.setTitle(QtGui.QApplication.translate("main_window", "&Settings", None, QtGui.QApplication.UnicodeUTF8)) self.menu_Help.setTitle(QtGui.QApplication.translate("main_window", "&Help", None, QtGui.QApplication.UnicodeUTF8)) self.menuActions.setTitle(QtGui.QApplication.translate("main_window", "&Actions", None, QtGui.QApplication.UnicodeUTF8)) self.menu_Windows.setTitle(QtGui.QApplication.translate("main_window", "&Windows", None, QtGui.QApplication.UnicodeUTF8)) self.label.setText(QtGui.QApplication.translate("main_window", "Layers", None, QtGui.QApplication.UnicodeUTF8)) self.action_file.setText(QtGui.QApplication.translate("main_window", "&file", None, QtGui.QApplication.UnicodeUTF8)) self.action_new.setText(QtGui.QApplication.translate("main_window", "&New", None, QtGui.QApplication.UnicodeUTF8)) self.action_open.setText(QtGui.QApplication.translate("main_window", "&Open", None, QtGui.QApplication.UnicodeUTF8)) self.action_save.setText(QtGui.QApplication.translate("main_window", "&Save", None, 
QtGui.QApplication.UnicodeUTF8)) self.action_quit.setText(QtGui.QApplication.translate("main_window", "&Quit", None, QtGui.QApplication.UnicodeUTF8)) self.action_print_settings.setText(QtGui.QApplication.translate("main_window", "&Printer", None, QtGui.QApplication.UnicodeUTF8)) self.action_slice_settings.setText(QtGui.QApplication.translate("main_window", "S&licing", None, QtGui.QApplication.UnicodeUTF8)) self.action_help.setText(QtGui.QApplication.translate("main_window", "&Help", None, QtGui.QApplication.UnicodeUTF8)) self.action_about.setText(QtGui.QApplication.translate("main_window", "&About", None, QtGui.QApplication.UnicodeUTF8)) self.action_display_settings.setText(QtGui.QApplication.translate("main_window", "&Display", None, QtGui.QApplication.UnicodeUTF8)) self.action_slice.setText(QtGui.QApplication.translate("main_window", "&Slice", None, QtGui.QApplication.UnicodeUTF8)) self.action_Layers.setText(QtGui.QApplication.translate("main_window", "&Layers", None, QtGui.QApplication.UnicodeUTF8)) self.action_Toolbox.setText(QtGui.QApplication.translate("main_window", "&Toolbox", None, QtGui.QApplication.UnicodeUTF8))
license: gpl-3.0 | hash: -5,630,036,228,393,934,000 | line_mean: 61.209302 | line_max: 173 | alpha_frac: 0.717458 | autogenerated: false | ratio: 3.835675 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: mitodl/micromasters | path: cms/migrations/0025_infolinks.py | copies: 1 | size: 1226 | content:
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-05 22:18
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields


class Migration(migrations.Migration):

    dependencies = [
        ('cms', '0024_programtabpage'),
    ]

    operations = [
        migrations.CreateModel(
            name='InfoLinks',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('url', models.URLField(blank=True, help_text='A url for an external page. There will be a link to this url from the program page.', null=True)),
                ('title_url', models.TextField(blank=True, help_text='The text for the link to an external homepage.')),
                ('program_page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='info_links', to='cms.ProgramPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
    ]
license: bsd-3-clause | hash: -5,478,774,938,762,284,000 | line_mean: 38.548387 | line_max: 161 | alpha_frac: 0.604405 | autogenerated: false | ratio: 4.127946 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: vpelletier/neoppod | path: neo/master/backup_app.py | copies: 1 | size: 16200 | content:
# # Copyright (C) 2012-2016 Nexedi SA # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import random, weakref from bisect import bisect from collections import defaultdict from neo.lib import logging from neo.lib.bootstrap import BootstrapManager from neo.lib.exception import PrimaryFailure from neo.lib.handler import EventHandler from neo.lib.node import NodeManager from neo.lib.protocol import CellStates, ClusterStates, \ NodeStates, NodeTypes, Packets, uuid_str, INVALID_TID, ZERO_TID from neo.lib.util import add64, dump from .app import StateChangedException from .pt import PartitionTable from .handlers.backup import BackupHandler """ Backup algorithm This implementation relies on normal storage replication. Storage nodes that are specialised for backup are not in the same NEO cluster, but are managed by another master in a different cluster. When the cluster is in BACKINGUP state, its master acts like a client to the master of the main cluster. It gets notified of new data thanks to invalidation, and notifies in turn its storage nodes what/when to replicate. Storages stay in UP_TO_DATE state, even if partitions are synchronized up to different tids. Storage nodes remember they are in such state and when switching into RUNNING state, the cluster cuts the DB at the "backup TID", which is the last TID for which we have all data. This TID can't be guessed from 'trans' and 'obj' tables, like it is done in normal mode, so: - The master must even notify storages of transactions that don't modify their partitions: see Replicate packets without any source. - 'backup_tid' properties exist in many places, on the master and the storages, so that the DB can be made consistent again at any moment, without losing any (or little) data. Out of backup storage nodes assigned to a partition, one is chosen as primary for that partition. It means only this node will fetch data from the upstream cluster, to minimize bandwidth between clusters. Other replicas will synchronize from the primary node. There is no UUID conflict between the 2 clusters: - Storage nodes connect anonymously to upstream. - Master node receives a new from upstream master and uses it only when communicating with it. 
""" class BackupApplication(object): pt = None def __init__(self, app, name, master_addresses): self.app = weakref.proxy(app) self.name = name self.nm = NodeManager() for master_address in master_addresses: self.nm.createMaster(address=master_address) em = property(lambda self: self.app.em) ssl = property(lambda self: self.app.ssl) def close(self): self.nm.close() del self.__dict__ def log(self): self.nm.log() if self.pt is not None: self.pt.log() def provideService(self): logging.info('provide backup') poll = self.em.poll app = self.app pt = app.pt while True: app.changeClusterState(ClusterStates.STARTING_BACKUP) bootstrap = BootstrapManager(self, self.name, NodeTypes.CLIENT) # {offset -> node} self.primary_partition_dict = {} # [[tid]] self.tid_list = tuple([] for _ in xrange(pt.getPartitions())) try: while True: for node in pt.getNodeSet(readable=True): if not app.isStorageReady(node.getUUID()): break else: break poll(1) node, conn, uuid, num_partitions, num_replicas = \ bootstrap.getPrimaryConnection() try: app.changeClusterState(ClusterStates.BACKINGUP) del bootstrap, node if num_partitions != pt.getPartitions(): raise RuntimeError("inconsistent number of partitions") self.pt = PartitionTable(num_partitions, num_replicas) conn.setHandler(BackupHandler(self)) conn.ask(Packets.AskNodeInformation()) conn.ask(Packets.AskPartitionTable()) conn.ask(Packets.AskLastTransaction()) # debug variable to log how big 'tid_list' can be. self.debug_tid_count = 0 while True: poll(1) except PrimaryFailure, msg: logging.error('upstream master is down: %s', msg) finally: app.backup_tid = pt.getBackupTid() try: conn.close() except PrimaryFailure: pass try: del self.pt except AttributeError: pass except StateChangedException, e: if e.args[0] != ClusterStates.STOPPING_BACKUP: raise app.changeClusterState(*e.args) tid = app.backup_tid # Wait for non-primary partitions to catch up, # so that all UP_TO_DATE cells are really UP_TO_DATE. # XXX: Another possibility could be to outdate such cells, and # they would be quickly updated at the beginning of the # RUNNING phase. This may simplify code. # Any unfinished replication from upstream will be truncated. while pt.getBackupTid(min) < tid: poll(1) last_tid = app.getLastTransaction() handler = EventHandler(app) if tid < last_tid: assert tid != ZERO_TID logging.warning("Truncating at %s (last_tid was %s)", dump(app.backup_tid), dump(last_tid)) else: # We will do a dummy truncation, just to leave backup mode, # so it's fine to start automatically if there's any # missing storage. # XXX: Consider using another method to leave backup mode, # at least when there's nothing to truncate. Because # in case of StoppedOperation during VERIFYING state, # this flag will be wrongly set to False. app._startup_allowed = True # If any error happened before reaching this line, we'd go back # to backup mode, which is the right mode to recover. del app.backup_tid # Now back to RECOVERY... 
return tid finally: del self.primary_partition_dict, self.tid_list pt.clearReplicating() def nodeLost(self, node): getCellList = self.app.pt.getCellList trigger_set = set() for offset, primary_node in self.primary_partition_dict.items(): if primary_node is not node: continue cell_list = getCellList(offset, readable=True) cell = max(cell_list, key=lambda cell: cell.backup_tid) tid = cell.backup_tid self.primary_partition_dict[offset] = primary_node = cell.getNode() p = Packets.Replicate(tid, '', {offset: primary_node.getAddress()}) for cell in cell_list: cell.replicating = tid if cell.backup_tid < tid: logging.debug( "ask %s to replicate partition %u up to %s from %s", uuid_str(cell.getUUID()), offset, dump(tid), uuid_str(primary_node.getUUID())) cell.getNode().getConnection().notify(p) trigger_set.add(primary_node) for node in trigger_set: self.triggerBackup(node) def invalidatePartitions(self, tid, partition_set): app = self.app prev_tid = app.getLastTransaction() app.setLastTransaction(tid) pt = app.pt trigger_set = set() untouched_dict = defaultdict(dict) for offset in xrange(pt.getPartitions()): try: last_max_tid = self.tid_list[offset][-1] except IndexError: last_max_tid = prev_tid if offset in partition_set: self.tid_list[offset].append(tid) node_list = [] for cell in pt.getCellList(offset, readable=True): node = cell.getNode() assert node.isConnected(), node if cell.backup_tid == prev_tid: # Let's given 4 TID t0,t1,t2,t3: if a cell is only # modified by t0 & t3 and has all data for t0, 4 values # are possible for its 'backup_tid' until it replicates # up to t3: t0, t1, t2 or t3 - 1 # Choosing the smallest one (t0) is easier to implement # but when leaving backup mode, we would always lose # data if the last full transaction does not modify # all partitions. t1 is wrong for the same reason. # So we have chosen the highest one (t3 - 1). # t2 should also work but maybe harder to implement. cell.backup_tid = add64(tid, -1) logging.debug( "partition %u: updating backup_tid of %r to %s", offset, cell, dump(cell.backup_tid)) else: assert cell.backup_tid < last_max_tid, ( cell.backup_tid, last_max_tid, prev_tid, tid) if app.isStorageReady(node.getUUID()): node_list.append(node) assert node_list trigger_set.update(node_list) # Make sure we have a primary storage for this partition. if offset not in self.primary_partition_dict: self.primary_partition_dict[offset] = \ random.choice(node_list) else: # Partition not touched, so increase 'backup_tid' of all # "up-to-date" replicas, without having to replicate. for cell in pt.getCellList(offset, readable=True): if last_max_tid <= cell.backup_tid: cell.backup_tid = tid untouched_dict[cell.getNode()][offset] = None elif last_max_tid <= cell.replicating: # Same for 'replicating' to avoid useless orders. 
logging.debug("silently update replicating order" " of %s for partition %u, up to %s", uuid_str(cell.getUUID()), offset, dump(tid)) cell.replicating = tid for node, untouched_dict in untouched_dict.iteritems(): if app.isStorageReady(node.getUUID()): node.notify(Packets.Replicate(tid, '', untouched_dict)) for node in trigger_set: self.triggerBackup(node) count = sum(map(len, self.tid_list)) if self.debug_tid_count < count: logging.debug("Maximum number of tracked tids: %u", count) self.debug_tid_count = count def triggerBackup(self, node): tid_list = self.tid_list tid = self.app.getLastTransaction() replicate_list = [] for offset, cell in self.app.pt.iterNodeCell(node): max_tid = tid_list[offset] if max_tid and self.primary_partition_dict[offset] is node and \ max(cell.backup_tid, cell.replicating) < max_tid[-1]: cell.replicating = tid replicate_list.append(offset) if not replicate_list: return getCellList = self.pt.getCellList source_dict = {} address_set = set() for offset in replicate_list: cell_list = getCellList(offset, readable=True) random.shuffle(cell_list) assert cell_list, offset for cell in cell_list: addr = cell.getAddress() if addr in address_set: break else: address_set.add(addr) source_dict[offset] = addr logging.debug("ask %s to replicate partition %u up to %s from %r", uuid_str(node.getUUID()), offset, dump(tid), addr) node.getConnection().notify(Packets.Replicate( tid, self.name, source_dict)) def notifyReplicationDone(self, node, offset, tid): app = self.app cell = app.pt.getCell(offset, node.getUUID()) tid_list = self.tid_list[offset] if tid_list: # may be empty if the cell is out-of-date # or if we're not fully initialized if tid < tid_list[0]: cell.replicating = tid else: try: tid = add64(tid_list[bisect(tid_list, tid)], -1) except IndexError: last_tid = app.getLastTransaction() if tid < last_tid: tid = last_tid node.notify(Packets.Replicate(tid, '', {offset: None})) logging.debug("partition %u: updating backup_tid of %r to %s", offset, cell, dump(tid)) cell.backup_tid = tid # Forget tids we won't need anymore. cell_list = app.pt.getCellList(offset, readable=True) del tid_list[:bisect(tid_list, min(x.backup_tid for x in cell_list))] primary_node = self.primary_partition_dict.get(offset) primary = primary_node is node result = None if primary else app.pt.setUpToDate(node, offset) assert cell.isReadable() if result: # was out-of-date if primary_node is not None: max_tid, = [x.backup_tid for x in cell_list if x.getNode() is primary_node] if tid < max_tid: cell.replicating = max_tid logging.debug( "ask %s to replicate partition %u up to %s from %s", uuid_str(node.getUUID()), offset, dump(max_tid), uuid_str(primary_node.getUUID())) node.notify(Packets.Replicate(max_tid, '', {offset: primary_node.getAddress()})) else: if app.getClusterState() == ClusterStates.BACKINGUP: self.triggerBackup(node) if primary: # Notify secondary storages that they can replicate from # primary ones, even if they are already replicating. p = Packets.Replicate(tid, '', {offset: node.getAddress()}) for cell in cell_list: if max(cell.backup_tid, cell.replicating) < tid: cell.replicating = tid logging.debug( "ask %s to replicate partition %u up to %s from %s", uuid_str(cell.getUUID()), offset, dump(tid), uuid_str(node.getUUID())) cell.getNode().notify(p) return result
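The module docstring above explains that the backup cluster cuts the DB at the "backup TID", the last TID for which every partition has all its data, and that one primary replica per partition fetches from upstream. The following standalone sketch illustrates only that bookkeeping idea; it is not NEO code, the names and sample values are invented, and it assumes (for simplicity) that data up to a TID is available in a partition as soon as a single readable replica has it.

def cluster_backup_tid(partition_backup_tids):
    """partition_backup_tids: {partition offset: [backup_tid of each readable cell]}"""
    # Within one partition, take the most advanced readable replica
    # (simplifying assumption for this sketch).
    per_partition = (max(tids) for tids in partition_backup_tids.values())
    # The backup as a whole is only consistent up to the slowest partition.
    return min(per_partition)

print(cluster_backup_tid({0: [15, 17], 1: [16, 16], 2: [12, 20]}))  # -> 16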
gpl-2.0
-8,403,937,226,814,069,000
45.685879
80
0.555
false
4.473902
false
false
false
redhat-openstack/glance
glance/cmd/registry.py
1
2664
#!/usr/bin/env python # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Reference implementation server for Glance Registry """ import eventlet import os import sys # Monkey patch socket and time eventlet.patcher.monkey_patch(all=False, socket=True, time=True, thread=True) # If ../glance/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')): sys.path.insert(0, possible_topdir) from oslo.config import cfg import osprofiler.notifier import osprofiler.web from glance.common import config from glance.common import wsgi from glance import notifier from glance.openstack.common import log from glance.openstack.common import systemd CONF = cfg.CONF CONF.import_group("profiler", "glance.common.wsgi") def main(): try: config.parse_args() wsgi.set_eventlet_hub() log.setup('glance') if cfg.CONF.profiler.enabled: _notifier = osprofiler.notifier.create("Messaging", notifier.messaging, {}, notifier.get_transport(), "glance", "registry", cfg.CONF.bind_host) osprofiler.notifier.set(_notifier) else: osprofiler.web.disable() server = wsgi.Server() server.start(config.load_paste_app('glance-registry'), default_port=9191) systemd.notify_once() server.wait() except RuntimeError as e: sys.exit("ERROR: %s" % e) if __name__ == '__main__': main()
apache-2.0
-6,918,632,415,636,315,000
32.3
78
0.626502
false
4.188679
false
false
false
adusca/treeherder
treeherder/perf/models.py
1
2417
from django.core.validators import MinLengthValidator from django.db import models from django.utils.encoding import python_2_unicode_compatible from jsonfield import JSONField from treeherder.model.models import (MachinePlatform, OptionCollection, Repository) SIGNATURE_HASH_LENGTH = 40L @python_2_unicode_compatible class PerformanceFramework(models.Model): name = models.SlugField(max_length=255L, unique=True) class Meta: db_table = 'performance_framework' def __str__(self): return self.name @python_2_unicode_compatible class PerformanceSignature(models.Model): signature_hash = models.CharField(max_length=SIGNATURE_HASH_LENGTH, validators=[ MinLengthValidator(SIGNATURE_HASH_LENGTH) ], unique=True, db_index=True) framework = models.ForeignKey(PerformanceFramework) platform = models.ForeignKey(MachinePlatform) option_collection = models.ForeignKey(OptionCollection) suite = models.CharField(max_length=80L) test = models.CharField(max_length=80L, blank=True) # extra properties to distinguish the test (that don't fit into # option collection for whatever reason) extra_properties = JSONField(max_length=1024) class Meta: db_table = 'performance_signature' def __str__(self): return self.signature_hash @python_2_unicode_compatible class PerformanceDatum(models.Model): repository = models.ForeignKey(Repository) job_id = models.PositiveIntegerField(db_index=True) result_set_id = models.PositiveIntegerField(db_index=True) signature = models.ForeignKey(PerformanceSignature) value = models.FloatField() push_timestamp = models.DateTimeField(db_index=True) class Meta: db_table = 'performance_datum' index_together = [('repository', 'signature', 'push_timestamp'), ('repository', 'job_id'), ('repository', 'result_set_id')] unique_together = ('repository', 'job_id', 'result_set_id', 'signature', 'push_timestamp') def __str__(self): return "{} {}".format(self.value, self.push_timestamp)
mpl-2.0
-5,983,352,198,108,781,000
33.042254
83
0.620604
false
4.621415
false
false
false
lovetox/gajim
src/common/crypto.py
1
4823
# common crypto functions (mostly specific to XEP-0116, but useful elsewhere) # -*- coding:utf-8 -*- ## src/common/crypto.py ## ## Copyright (C) 2007 Brendan Taylor <whateley AT gmail.com> ## ## This file is part of Gajim. ## ## Gajim is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published ## by the Free Software Foundation; version 3 only. ## ## Gajim is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Gajim. If not, see <http://www.gnu.org/licenses/>. ## import sys import os import math from hashlib import sha256 as SHA256 # convert a large integer to a big-endian bitstring def encode_mpi(n): if n >= 256: return encode_mpi(n // 256) + bytes([n % 256]) else: return bytes([n]) # convert a large integer to a big-endian bitstring, padded with \x00s to # a multiple of 16 bytes def encode_mpi_with_padding(n): return pad_to_multiple(encode_mpi(n), 16, '\x00', True) # pad 'string' to a multiple of 'multiple_of' with 'char'. # pad on the left if 'left', otherwise pad on the right. def pad_to_multiple(string, multiple_of, char, left): mod = len(string) % multiple_of if mod == 0: return string else: padding = (multiple_of - mod) * char if left: return padding + string else: return string + padding # convert a big-endian bitstring to an integer def decode_mpi(s): if len(s) == 0: return 0 else: return 256 * decode_mpi(s[:-1]) + s[-1] def sha256(string): sh = SHA256() sh.update(string) return sh.digest() base28_chr = "acdefghikmopqruvwxy123456789" def sas_28x5(m_a, form_b): sha = sha256(m_a + form_b + b'Short Authentication String') lsb24 = decode_mpi(sha[-3:]) return base28(lsb24) def base28(n): if n >= 28: return base28(n // 28) + base28_chr[n % 28] else: return base28_chr[n] def add_entropy_sources_OpenSSL(): # Other possibly variable data. This are very low quality sources of # entropy, but some of them are installation dependent and can be hard # to guess for the attacker. # Data available on all platforms Unix, Windows sources = [sys.argv, sys.builtin_module_names, sys.copyright, sys.getfilesystemencoding(), sys.hexversion, sys.modules, sys.path, sys.version, sys.api_version, os.environ, os.getcwd(), os.getpid()] for s in sources: OpenSSL.rand.add(str(s).encode('utf-8'), 1) # On Windows add the current contents of the screen to the PRNG state. 
# if os.name == 'nt': # OpenSSL.rand.screen() # The /proc filesystem on POSIX systems contains many random variables: # memory statistics, interrupt counts, network packet counts if os.name == 'posix': dirs = ['/proc', '/proc/net', '/proc/self'] for d in dirs: if os.access(d, os.R_OK): for filename in os.listdir(d): OpenSSL.rand.add(filename.encode('utf-8'), 0) try: with open(d + os.sep + filename, "r") as fp: # Limit the ammount of read bytes, in case a memory # file was opened OpenSSL.rand.add(str(fp.read(5000)).encode('utf-8'), 1) except: # Ignore all read and access errors pass PYOPENSSL_PRNG_PRESENT = False try: import OpenSSL.rand PYOPENSSL_PRNG_PRESENT = True except ImportError: # PyOpenSSL PRNG not available pass def random_bytes(bytes_): if PYOPENSSL_PRNG_PRESENT: OpenSSL.rand.add(os.urandom(bytes_), bytes_) return OpenSSL.rand.bytes(bytes_) else: return os.urandom(bytes_) def generate_nonce(): return random_bytes(8) # generate a random number between 'bottom' and 'top' def srand(bottom, top): # minimum number of bytes needed to represent that range bytes = int(math.ceil(math.log(top - bottom, 256))) # in retrospect, this is horribly inadequate. return (decode_mpi(random_bytes(bytes)) % (top - bottom)) + bottom # a faster version of (base ** exp) % mod # taken from <http://lists.danga.com/pipermail/yadis/2005-September/001445.html> def powmod(base, exp, mod): square = base % mod result = 1 while exp > 0: if exp & 1: # exponent is odd result = (result * square) % mod square = (square * square) % mod exp //= 2 return result
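The helpers above implement big-endian integer encoding/decoding and modular exponentiation by squaring. A small self-contained check of what they compute, written with standard-library equivalents and arbitrary numbers (this is not part of Gajim, only an illustration):

n = 123456789                             # 0x075BCD15
be = n.to_bytes(4, "big")                 # what encode_mpi(n) produces: b'\x07[\xcd\x15'
assert int.from_bytes(be, "big") == n     # decode_mpi(be) reverses it
assert pow(5, 117, 19) == 5 ** 117 % 19   # the identity powmod(5, 117, 19) computes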
gpl-3.0
1,936,273,987,310,581,000
30.940397
94
0.6243
false
3.601942
false
false
false
python-dirbtuves/it-brandos-egzaminai
exams/E2018/pagrindinis/u2/u2.py
1
1377
from itertools import islice
from pathlib import Path
from typing import Dict


def seconds(v: int, m: int, s: int) -> int:
    # This function converts hours, minutes and seconds into seconds.
    return v * 3600 + m * 60 + s


def save_results(path: Path, pabaiga: Dict[str, int]) -> None:
    with path.open('w') as f:
        # Sort the skiers by time and name.
        for laikas, slidininkas in sorted((v, k) for k, v in pabaiga.items()):
            # Convert the seconds into minutes and seconds.
            m, s = divmod(laikas, 60)
            print(f'{slidininkas:<20}{m} {s}', file=f)


def main(path: Path) -> None:
    startas: Dict[str, int] = {}
    pabaiga: Dict[str, int] = {}
    with open(path / 'U2.txt') as f:
        # Read the start data.
        n = int(next(f))
        for eilute in islice(f, n):
            slidininkas = eilute[:20]
            laikas = map(int, eilute[20:].split())
            startas[slidininkas] = seconds(*laikas)
        # Read the finish data.
        m = int(next(f))
        for eilute in islice(f, m):
            slidininkas = eilute[:20]
            laikas = map(int, eilute[20:].split())
            # Remember how many seconds it took the skier to reach the finish.
            pabaiga[slidininkas] = seconds(*laikas) - startas[slidininkas]
    save_results(path / 'U2rez.txt', pabaiga)
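To make the time conversions above concrete, here is a tiny worked example with invented values: a skier starting at 10:02:03 and finishing at 10:17:45 takes 942 seconds, reported as 15 minutes 42 seconds.

start = 10 * 3600 + 2 * 60 + 3       # seconds(10, 2, 3)   -> 36123
finish = 10 * 3600 + 17 * 60 + 45    # seconds(10, 17, 45) -> 37065
minutes, secs = divmod(finish - start, 60)
print(minutes, secs)                 # 15 42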
agpl-3.0
-4,870,201,727,215,256,000
33.075
78
0.590609
false
2.720559
false
false
false
Woraufhin/logic
formula.py
1
1112
import itertools import string from abc import ABCMeta, abstractproperty import attr def is_valid_formula(inst, attr, value): if not isinstance(value, (Formula, str)): raise ValueError('{} is not a valid formula type.'.format(value)) class Formula(object): __metaclass__ = ABCMeta group = {'open': '(', 'close': ')'} @abstractproperty def token(self): pass @attr.s class Atomic(Formula): token = list(itertools.chain.from_iterable( [string.uppercase, string.lowercase])) exp = attr.ib(validator=is_valid_formula) @attr.s class And(Formula): token = ['^', '&'] left = attr.ib(validator=is_valid_formula) right = attr.ib(validator=is_valid_formula) @attr.s class Or(Formula): token = ['|'] left = attr.ib(validator=is_valid_formula) right = attr.ib(validator=is_valid_formula) @attr.s class Imply(Formula): token = ['>'] left = attr.ib(validator=is_valid_formula) right = attr.ib(validator=is_valid_formula) @attr.s class Not(Formula): token = ['~'] exp = attr.ib(validator=is_valid_formula)
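A short usage sketch for the classes above, assuming the module is importable as `formula` and runs under Python 2 with the attrs package installed (the propositional variables are invented):

from formula import And, Atomic, Imply, Not

f = Imply(And(Atomic('p'), Atomic('q')), Not(Atomic('q')))  # (p ^ q) > ~q
print f

try:
    And(42, Atomic('p'))  # 42 is neither a Formula nor a str
except ValueError as e:
    print e               # 42 is not a valid formula type.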
mit
1,520,875,086,732,139,300
19.592593
73
0.642986
false
3.379939
false
false
false
googleapis/python-dataflow-client
google/cloud/dataflow_v1beta3/types/snapshots.py
1
5677
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore from google.protobuf import duration_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore __protobuf__ = proto.module( package="google.dataflow.v1beta3", manifest={ "SnapshotState", "PubsubSnapshotMetadata", "Snapshot", "GetSnapshotRequest", "DeleteSnapshotRequest", "DeleteSnapshotResponse", "ListSnapshotsRequest", "ListSnapshotsResponse", }, ) class SnapshotState(proto.Enum): r"""Snapshot state.""" UNKNOWN_SNAPSHOT_STATE = 0 PENDING = 1 RUNNING = 2 READY = 3 FAILED = 4 DELETED = 5 class PubsubSnapshotMetadata(proto.Message): r"""Represents a Pubsub snapshot. Attributes: topic_name (str): The name of the Pubsub topic. snapshot_name (str): The name of the Pubsub snapshot. expire_time (google.protobuf.timestamp_pb2.Timestamp): The expire time of the Pubsub snapshot. """ topic_name = proto.Field(proto.STRING, number=1,) snapshot_name = proto.Field(proto.STRING, number=2,) expire_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,) class Snapshot(proto.Message): r"""Represents a snapshot of a job. Attributes: id (str): The unique ID of this snapshot. project_id (str): The project this snapshot belongs to. source_job_id (str): The job this snapshot was created from. creation_time (google.protobuf.timestamp_pb2.Timestamp): The time this snapshot was created. ttl (google.protobuf.duration_pb2.Duration): The time after which this snapshot will be automatically deleted. state (google.cloud.dataflow_v1beta3.types.SnapshotState): State of the snapshot. pubsub_metadata (Sequence[google.cloud.dataflow_v1beta3.types.PubsubSnapshotMetadata]): PubSub snapshot metadata. description (str): User specified description of the snapshot. Maybe empty. disk_size_bytes (int): The disk byte size of the snapshot. Only available for snapshots in READY state. region (str): Cloud region where this snapshot lives in, e.g., "us-central1". """ id = proto.Field(proto.STRING, number=1,) project_id = proto.Field(proto.STRING, number=2,) source_job_id = proto.Field(proto.STRING, number=3,) creation_time = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, ) ttl = proto.Field(proto.MESSAGE, number=5, message=duration_pb2.Duration,) state = proto.Field(proto.ENUM, number=6, enum="SnapshotState",) pubsub_metadata = proto.RepeatedField( proto.MESSAGE, number=7, message="PubsubSnapshotMetadata", ) description = proto.Field(proto.STRING, number=8,) disk_size_bytes = proto.Field(proto.INT64, number=9,) region = proto.Field(proto.STRING, number=10,) class GetSnapshotRequest(proto.Message): r"""Request to get information about a snapshot Attributes: project_id (str): The ID of the Cloud Platform project that the snapshot belongs to. snapshot_id (str): The ID of the snapshot. location (str): The location that contains this snapshot. 
""" project_id = proto.Field(proto.STRING, number=1,) snapshot_id = proto.Field(proto.STRING, number=2,) location = proto.Field(proto.STRING, number=3,) class DeleteSnapshotRequest(proto.Message): r"""Request to delete a snapshot. Attributes: project_id (str): The ID of the Cloud Platform project that the snapshot belongs to. snapshot_id (str): The ID of the snapshot. location (str): The location that contains this snapshot. """ project_id = proto.Field(proto.STRING, number=1,) snapshot_id = proto.Field(proto.STRING, number=2,) location = proto.Field(proto.STRING, number=3,) class DeleteSnapshotResponse(proto.Message): r"""Response from deleting a snapshot. """ class ListSnapshotsRequest(proto.Message): r"""Request to list snapshots. Attributes: project_id (str): The project ID to list snapshots for. job_id (str): If specified, list snapshots created from this job. location (str): The location to list snapshots in. """ project_id = proto.Field(proto.STRING, number=1,) job_id = proto.Field(proto.STRING, number=3,) location = proto.Field(proto.STRING, number=2,) class ListSnapshotsResponse(proto.Message): r"""List of snapshots. Attributes: snapshots (Sequence[google.cloud.dataflow_v1beta3.types.Snapshot]): Returned snapshots. """ snapshots = proto.RepeatedField(proto.MESSAGE, number=1, message="Snapshot",) __all__ = tuple(sorted(__protobuf__.manifest))
apache-2.0
4,473,963,810,072,339,500
31.815029
95
0.648758
false
4.084173
false
false
false
invariantor/ImageSplit-Classification
image split and classification/image_split.py
1
6276
import numpy as np import pylab import mahotas as mh import types # constants upper_distance = 100 #the start searching approxWidth = 40 threshold = 300 border = 1 def pre_process(image): """ pre_process will return black_white image, given a colorful image as input. """ T = mh.thresholding.otsu(image) image1 =image > T image2 = [[0]* image1.shape[1] for i in range(image1.shape[0])] for i in range(image1.shape[0]): for j in range(image1.shape[1]): if (image1[i][j] != [0,0,0]).any(): image2[i][j] = 1 image2 = np.array(image2, dtype = np.uint8) return image2 def locate(image): """ Given an screenshot as input, return the position of the matching game as well as the size of the game(num_x,num_y) and the size of each grids(size_x,size_y). """ image = pre_process(image) height,width = image.shape # stop going down when a grid is found up = upper_distance while True: num_white =0 for j in range(width): num_white+=image[up][j] if num_white>(approxWidth/2): break up +=1 # stop going up when a grid is found down = height-1 pre_num_white =0 #the number of white pixels in the last step for j in range(width): pre_num_white+=image[down][j] while True: num_white =0 for j in range(width): num_white+=image[down][j] if num_white-pre_num_white>(approxWidth/2): break pre_num_white = num_white down -=1 current_image = image[up:] """cut the top part(including the time bar, all sorts of buttons) away which will interfere with our searching process""" current_image = np.array(current_image) c_height,c_width = current_image.shape # stop going right when a grid is found left = 0 pre_num_white =0 for i in range(c_height): pre_num_white+=current_image[i][left] while True: num_white =0 for i in range(c_height): num_white+=current_image[i][left] if num_white-pre_num_white>(approxWidth/2): break pre_num_white = num_white left +=1 # stop going left when a grid is found right = c_width-1 pre_num_white =0 for i in range(c_height): pre_num_white+=current_image[i][right] while True: num_white =0 for i in range(c_height): num_white+=current_image[i][right] if num_white-pre_num_white>(approxWidth/2): break pre_num_white = num_white right -=1 temp = [0]*(down+1-up) for i in range(len(temp)): temp[i] = current_image[i][left:right+1] current_image = np.array(temp) height,width = current_image.shape divd_x = [] for i in range(height): num_white = sum(current_image[i]) if num_white < approxWidth/2: divd_x.append(i) temp_x = [divd_x[i] for i in range(len(divd_x)) if ((i==0) or (i==len(divd_x)-1)) or not (divd_x[i-1]+1==divd_x[i] and divd_x[i+1]-1==divd_x[i])] # only keep the truly dividing lines, namely those marginal lines. divd_x =temp_x divd_y = [] for j in range(width): num_white = 0 for i in range(height): num_white += current_image[i][j] if num_white < approxWidth/2: divd_y.append(j) temp_y = [divd_y[i] for i in range(len(divd_y)) if ((i==0) or (i==len(divd_y)-1)) or not (divd_y[i-1]+1==divd_y[i] and divd_y[i+1]-1==divd_y[i])] # only keep the truly dividing lines, namely those marginal lines. divd_y = temp_y #print divd_x #print divd_y """ This part needs further refinement. """ if len(divd_x): size_x = divd_x[0] num_x = divd_x[-1] / size_x +1 else: size_x = height - 1 num_x = 1 if len(divd_y): size_y = divd_y[0] num_y = divd_y[-1] / size_y +1 else: size_y = height - 1 num_y = 1 position = (up,down,left,right) info = (size_x,size_y,num_x,num_y) return position, info def split(image,position,info): """ Return a 2d matrix label, which labels different kinds of grids using natural numbers. 
(By convention, the empty grid is labeled 0) """ size_x, size_y, num_x, num_y = info up, down, left, right = position T = mh.thresholding.otsu(image) image = image >T temp = [0]* (down+1-up) for i in range(len(temp)): temp[i] = image[up+i][left:right+1] temp = np.array(temp) image = temp game = [[0]* num_y for j in range(num_x)] for i in range(num_x): for j in range(num_y): grid = [0]* size_x for k in range(size_x): grid[k] = image[i*(size_x+1)+k][j*(size_y+1):(j+1)*(size_y+1)-1] game[i][j] = grid # using a quite naive method -- calculating the statistical distance between two grids # improvement is needed here, to speed up the program black = [[[0]*3]*size_y]*size_x records = [black] label = [[0]* num_y for j in range(num_x)] for i in range(num_x): for j in range(num_y): find = False for index in range(len(records)): if distance(records[index],game[i][j])< threshold: label[i][j] = index find =True break if not find: records.append(game[i][j]) label[i][j] = len(records)-1 return label def distance(a1,a2): """ recursively calculate the distance between a1 and a2 """ if (type(a1)== np.uint8) or (type(a1) == types.IntType) or (type(a1)==np.bool_): return abs(int(a1)-int(a2)) if len(a1)!= len(a2): print "Wrong Format","len(a1)=",len(a1),"len(a2)=",len(a2) return dis =0 for i in range(len(a1)): dis += distance(a1[i],a2[i]) return dis
mit
-6,850,167,160,207,502,000
28.608491
149
0.53362
false
3.273865
false
false
false
Maselkov/GW2Bot
guildwars2/evtc.py
1
12990
import datetime import aiohttp import discord from discord.ext import commands from discord.ext.commands.cooldowns import BucketType from .exceptions import APIError from .utils.chat import (embed_list_lines, en_space, magic_space, zero_width_space) UTC_TZ = datetime.timezone.utc BASE_URL = "https://dps.report/" UPLOAD_URL = BASE_URL + "uploadContent" JSON_URL = BASE_URL + "getJson" TOKEN_URL = BASE_URL + "getUserToken" ALLOWED_FORMATS = (".evtc", ".zevtc", ".zip") class EvtcMixin: async def get_dpsreport_usertoken(self, user): doc = await self.bot.database.get(user, self) token = doc.get("dpsreport_token") if not token: try: async with self.session.get(TOKEN_URL) as r: data = await r.json() token = data["userToken"] await self.bot.database.set( user, {"dpsreport_token": token}, self) return token except: return None async def upload_log(self, file, user): params = {"json": 1} token = await self.get_dpsreport_usertoken(user) if token: params["userToken"] = token data = aiohttp.FormData() data.add_field("file", await file.read(), filename=file.filename) async with self.session.post( UPLOAD_URL, data=data, params=params) as r: resp = await r.json() error = resp["error"] if error: raise APIError(error) return resp async def find_duplicate_dps_report(self, doc): margin_of_error = datetime.timedelta(seconds=10) doc = await self.db.encounters.find_one({ "boss_id": doc["boss_id"], "players": { "$eq": doc["players"] }, "date": { "$gte": doc["date"] - margin_of_error, "$lt": doc["date"] + margin_of_error }, "start_date": { "$gte": doc["start_date"] - margin_of_error, "$lt": doc["start_date"] + margin_of_error }, }) return True if doc else False async def upload_embed(self, ctx, result): if not result["encounter"]["jsonAvailable"]: return None async with self.session.get( JSON_URL, params={"id": result["id"]}) as r: data = await r.json() lines = [] targets = data["phases"][0]["targets"] group_dps = 0 for target in targets: group_dps += sum( p["dpsTargets"][target][0]["dps"] for p in data["players"]) def get_graph(percentage): bar_count = round(percentage / 5) bars = "" bars += "▀" * bar_count bars += "━" * (20 - bar_count) return bars def get_dps(player): bars = "" dps = player["dps"] if not group_dps or not dps: percentage = 0 else: percentage = round(100 / group_dps * dps) bars = get_graph(percentage) bars += f"` **{dps}** DPS | **{percentage}%** of group DPS" return bars players = [] for player in data["players"]: dps = 0 for target in targets: dps += player["dpsTargets"][target][0]["dps"] player["dps"] = dps players.append(player) players.sort(key=lambda p: p["dps"], reverse=True) for player in players: down_count = player["defenses"][0]["downCount"] prof = self.get_emoji(ctx, player["profession"]) line = f"{prof} **{player['name']}** *({player['account']})*" if down_count: line += (f" | {self.get_emoji(ctx, 'downed')}Downed " f"count: **{down_count}**") lines.append(line) dpses = [] charater_name_max_length = 19 for player in players: line = self.get_emoji(ctx, player["profession"]) align = (charater_name_max_length - len(player["name"])) * " " line += "`" + player["name"] + align + get_dps(player) dpses.append(line) dpses.append(f"> Group DPS: **{group_dps}**") color = discord.Color.green( ) if data["success"] else discord.Color.red() minutes, seconds = data["duration"].split()[:2] minutes = int(minutes[:-1]) seconds = int(seconds[:-1]) duration_time = (minutes * 60) + seconds duration = f"**{minutes}** minutes, **{seconds}** seconds" embed = discord.Embed( title="DPS Report", 
description="Encounter duration: " + duration, url=result["permalink"], color=color) boss_lines = [] for target in targets: target = data["targets"][target] if data["success"]: health_left = 0 else: percent_burned = target["healthPercentBurned"] health_left = 100 - percent_burned health_left = round(health_left, 2) if len(targets) > 1: boss_lines.append(f"**{target['name']}**") boss_lines.append(f"Health: **{health_left}%**") boss_lines.append(get_graph(health_left)) embed.add_field(name="> **BOSS**", value="\n".join(boss_lines)) buff_lines = [] sought_buffs = ["Might", "Fury", "Quickness", "Alacrity"] buffs = [] for buff in sought_buffs: for key, value in data["buffMap"].items(): if value["name"] == buff: buffs.append({ "name": value["name"], "id": int(key[1:]), "stacking": value["stacking"] }) break separator = 2 * en_space line = zero_width_space + (en_space * (charater_name_max_length + 6)) for buff in sought_buffs: line += self.get_emoji( ctx, buff, fallback=True, fallback_fmt="{:1.1}") + f"{separator}{2 * en_space}" buff_lines.append(line) groups = [] for player in players: if player["group"] not in groups: groups.append(player["group"]) if len(groups) > 1: players.sort(key=lambda p: p["group"]) current_group = None for player in players: if "buffUptimes" not in player: continue if len(groups) > 1: if not current_group or player["group"] != current_group: current_group = player["group"] buff_lines.append(f"> **GROUP {current_group}**") line = "`" line = self.get_emoji(ctx, player["profession"]) align = (3 + charater_name_max_length - len(player["name"])) * " " line += "`" + player["name"] + align for buff in buffs: for buff_uptime in player["buffUptimes"]: if buff["id"] == buff_uptime["id"]: uptime = str(buff_uptime["buffData"][0]["uptime"]) break else: uptime = "0" if not buff["stacking"]: uptime += "%" line += uptime line += separator + ((6 - len(uptime)) * magic_space) line += '`' buff_lines.append(line) embed = embed_list_lines(embed, lines, "> **PLAYERS**") embed = embed_list_lines(embed, dpses, "> **DPS**") embed = embed_list_lines(embed, buff_lines, "> **BUFFS**") boss = self.gamedata["bosses"].get(str(result["encounter"]["bossId"])) date_format = "%Y-%m-%d %H:%M:%S %z" date = datetime.datetime.strptime(data["timeEnd"] + "00", date_format) start_date = datetime.datetime.strptime(data["timeStart"] + "00", date_format) date = date.astimezone(datetime.timezone.utc) start_date = start_date.astimezone(datetime.timezone.utc) doc = { "boss_id": result["encounter"]["bossId"], "start_date": start_date, "date": date, "players": sorted([player["account"] for player in data["players"]]), "permalink": result["permalink"], "success": data["success"], "duration": duration_time } duplicate = await self.find_duplicate_dps_report(doc) if not duplicate: await self.db.encounters.insert_one(doc) embed.timestamp = date embed.set_footer(text="Recorded at", icon_url=self.bot.user.avatar_url) if boss: embed.set_author(name=data["fightName"], icon_url=boss["icon"]) return embed @commands.group(case_insensitive=True) async def evtc(self, ctx): """Process an EVTC combat log or enable automatic processing Simply upload your file and in the "add a comment" field type $evtc, in other words invoke this command while uploading a file. Use this command ($evtc) without uploading a file to see other commands Accepted formats are: .evtc, .zevtc, .zip It's highly recommended to enable compression in your Arc settings. 
With the setting enabled logs sized will rarely, if ever, be higher than the Discord upload limit """ if ctx.invoked_subcommand is None and not ctx.message.attachments: return await ctx.send_help(ctx.command) for attachment in ctx.message.attachments: if attachment.filename.endswith(ALLOWED_FORMATS): break else: return await ctx.send_help(ctx.command) if ctx.guild: doc = await self.bot.database.get(ctx.channel, self) settings = doc.get("evtc", {}) enabled = settings.get("enabled") if not ctx.channel.permissions_for(ctx.me).embed_links: return await ctx.send( "I need embed links permission to process logs.") if enabled: return await self.process_evtc(ctx.message) @commands.cooldown(1, 5, BucketType.guild) @commands.guild_only() @commands.has_permissions(manage_guild=True) @evtc.command(name="channel") async def evtc_channel(self, ctx): """Sets this channel to be automatically used to process logs""" doc = await self.bot.database.get(ctx.channel, self) enabled = not doc.get("evtc.enabled", False) await self.bot.database.set(ctx.channel, {"evtc.enabled": enabled}, self) if enabled: msg = ("Automatic EVTC processing enabled. Simply upload the file " "wish to be processed in this channel. Accepted " "formats: `.evtc`, `.zevtc`, `.zip` ") if not ctx.channel.permissions_for(ctx.me).embed_links: await ctx.send("I won't be able to process logs without Embed " "Links permission.") else: msg = ("Automatic EVTC processing diasbled") await ctx.send(msg) async def process_evtc(self, message): embeds = [] prompt = await message.channel.send("Processing logs... " + self.get_emoji(message, "loading")) for attachment in message.attachments: if attachment.filename.endswith(ALLOWED_FORMATS): try: resp = await self.upload_log(attachment, message.author) embeds.append(await self.upload_embed(message, resp)) except Exception as e: self.log.exception( "Exception processing EVTC log ", exc_info=e) return await prompt.edit( content="Error processing your log! :x:") for embed in embeds: await message.channel.send(embed=embed) try: await prompt.delete() await message.delete() except discord.HTTPException: pass @commands.Cog.listener() async def on_message(self, message): if not message.attachments: return if not message.guild: return for attachment in message.attachments: if attachment.filename.endswith(ALLOWED_FORMATS): break else: return doc = await self.bot.database.get(message.channel, self) settings = doc.get("evtc", {}) enabled = settings.get("enabled") if not enabled: return await self.process_evtc(message)
mit
-8,968,317,482,185,168,000
39.702194
79
0.522027
false
4.15488
false
false
false
mit-ll/LO-PHI
lophi-automation/lophi_automation/dataconsumers/logudp.py
1
1294
""" Class to handle logging over UDP (c) 2015 Massachusetts Institute of Technology """ # Native import socket import logging logger = logging.getLogger(__name__) class LogUDP: def __init__(self,address,port): """ Intialize our UDP logger @param address: Address of remote server @param port: port of listening server """ self.address = address self.port = port self.SOCK = None self.connected = False def _connect(self): """ Create our socket """ if self.connected: return True try: self.SOCK = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.connected = True return True except: logger.error("Could not open UDP socket") return False def append(self, data): """ Write raw data to the UDP socket @param data: Data to be written to the UDP socket """ assert self._connect() try: self.SOCK.sendto(data,(self.address,self.port)) except: logger.error("Could not send UDP packet")
bsd-3-clause
-5,603,330,343,461,050,000
22.125
72
0.506955
false
4.957854
false
false
false
jprawiharjo/Nerddit
Storm/Streaming/Push_to_Cassandra_Bolt.py
1
3976
# -*- coding: utf-8 -*- """ Created on Sat Jan 23 13:37:20 2016 @author: jprawiharjo """ from cassandra.cluster import Cluster import cassandra from collections import namedtuple from pyleus.storm import SimpleBolt from Streaming.Doc_Processor import DataFrame import logging log = logging.getLogger('cassandra_bolt') # create CassandraCluster CassandraCluster = Cluster(["ec2-52-27-157-187.us-west-2.compute.amazonaws.com", "ec2-52-34-178-13.us-west-2.compute.amazonaws.com", "ec2-52-35-186-215.us-west-2.compute.amazonaws.com", 'ec2-52-10-19-240.us-west-2.compute.amazonaws.com']) keyspace = 'wikidata' tablename = "titlelinks" class Push_to_Cassandra(SimpleBolt): def initialize(self): self.session = CassandraCluster.connect(keyspace) self.session.default_consistency_level = cassandra.ConsistencyLevel.ALL #self.session.encoder.mapping[tuple] = self.session.encoder.cql_encode_set_collection queryAddNew1 = "INSERT INTO {} (id, title, linksto) VALUES (?, ?, ?) IF NOT EXISTS".format(tablename) self.preparedAddNew1 = self.session.prepare(queryAddNew1) queryAddNew2 = "INSERT INTO {} (id, title, linksto, referredby) VALUES (?, ?, ?, ?) IF NOT EXISTS".format(tablename) self.preparedAddNew2 = self.session.prepare(queryAddNew2) queryUpdateReferredbyTitle = "UPDATE {} SET id = ?, linksto = ? WHERE title = ? IF EXISTS".format(tablename) self.preparedReferredbyTitle = self.session.prepare(queryUpdateReferredbyTitle) queryUpdateReferredbyOnly = "UPDATE {} SET referredby = referredby + ? WHERE title = ? IF EXISTS".format(tablename) self.preparedReferredbyOnly = self.session.prepare(queryUpdateReferredbyOnly) queryAddNewReferredBy = "INSERT INTO {} (title, referredby) VALUES (?, ?) IF NOT EXISTS".format(tablename) self.preparedAddNewReferredBy = self.session.prepare(queryAddNewReferredBy) self.bulk_data = [] log.debug("Initialized") def process_tick(self): log.debug("Process Tick") log.debug(len(self.bulk_data)) linkage = {} for row in self.bulk_data: if len(row.Links) > 0: log.debug('Processing Links') for link in row.Links: if link in linkage.keys(): linkage[link].add(row.Title) else: linkage[link] = set([row.Title]) for row in self.bulk_data: log.debug(row.Title) if row.Title in linkage.keys(): bound1 = self.preparedAddNew2.bind((str(row.Id), str(row.Title), row.Links, linkage[row.Title])) else: bound1 = self.preparedAddNew1.bind((str(row.Id), str(row.Title), row.Links)) res = self.session.execute(bound1) res = res.current_rows[0].applied #log.debug("Insertion Result = " + str(res)) if not(res): bound2 = self.preparedReferredbyTitle.bind((str(row.Id), row.Links, str(row.Title))) self.session.execute_async(bound2) #Inserting into database for k,v in linkage.iteritems(): log.debug(k) log.debug(v) bound3 = self.preparedReferredbyOnly.bind((v, k)) res = self.session.execute(bound3) res = res.current_rows[0].applied if not(res): bound4 = self.preparedAddNewReferredBy.bind((k, v)) res = self.session.execute_async(bound4) self.bulk_data = [] def process_tuple(self, tup): result = DataFrame(*tup.values) self.bulk_data.append(result) if __name__ == '__main__': logging.basicConfig( level=logging.DEBUG, filename='/tmp/cassandra_bolt.log', filemode='a', ) Push_to_Cassandra().run()
gpl-3.0
9,044,771,838,797,030,000
37.240385
124
0.608903
false
3.740357
false
false
false
googleapis/googleapis-gen
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/enums/types/placeholder_type.py
1
1630
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package='google.ads.googleads.v7.enums', marshal='google.ads.googleads.v7', manifest={ 'PlaceholderTypeEnum', }, ) class PlaceholderTypeEnum(proto.Message): r"""Container for enum describing possible placeholder types for a feed mapping. """ class PlaceholderType(proto.Enum): r"""Possible placeholder types for a feed mapping.""" UNSPECIFIED = 0 UNKNOWN = 1 SITELINK = 2 CALL = 3 APP = 4 LOCATION = 5 AFFILIATE_LOCATION = 6 CALLOUT = 7 STRUCTURED_SNIPPET = 8 MESSAGE = 9 PRICE = 10 PROMOTION = 11 AD_CUSTOMIZER = 12 DYNAMIC_EDUCATION = 13 DYNAMIC_FLIGHT = 14 DYNAMIC_CUSTOM = 15 DYNAMIC_HOTEL = 16 DYNAMIC_REAL_ESTATE = 17 DYNAMIC_TRAVEL = 18 DYNAMIC_LOCAL = 19 DYNAMIC_JOB = 20 IMAGE = 21 __all__ = tuple(sorted(__protobuf__.manifest))
apache-2.0
-9,196,301,452,706,389,000
27.103448
74
0.634969
false
3.773148
false
false
false
julierthanjulie/PedestrianTracking
generate_frames.py
1
3979
""" This code generates frames from CSV values that can be stiched together using FFMPEG to animate pedestrian data. This version produces an animation at 4x speed. """ print "Importing..." # Please ensure the following dependencies are installed before use: import pylab import numpy as np import itertools import sys, getopt import operator import collections drawing_by_frame = [] # def generate_frames(argv): # Some default values if nothing is provided in command line arguments. traces = 'bubble_pop_traces.csv' background = 'trails_480.png' # Get command line arguments. # -f specify a file name. This code expects csv files in the format PedestrianID, X, Y, FrameNum # -b specify a backgroun image. Any format available to pylab is acceptable. try: opts,args = getopt.getopt(argv, "f:b:") except getopt.GetoptError: print "Getopt Error" exit(2) for opt, arg in opts: if opt == "-f": traces = arg elif opt == "-b": background = arg # Name each frame based on the filename figure_name = traces.split("/")[-1].split(".")[-2] # Load up csv file trace = np.loadtxt(traces, comments=';', delimiter=',') traces = itertools.groupby(trace, lambda x:x[0]) # These values should match those in pedestrian_tracking.py w,h=640,360 border=20 # Some values from trail validation valid = 0 avg_length = 0 num_traces = 0 # Load up background image. background = pylab.imread(background) pylab.imshow(background) for id,t in traces: pts = np.array(list(t)) invalid = False # Validate Trails if (pts[0,1]>border and pts[0,1]<w-border) and (pts[0,2]>border and pts[0,2]<h-border): invalid = True if (pts[-1,1]>border and pts[-1,1]<w-border) and (pts[-1,2]>border and pts[-1,2]<h-border): invalid = True if len(pts) < 200: invalid = True if ((pts[0,2] > h-border) and (pts[0,1] > w/2-75 and pts[0,1] < w/2+75) or (pts[-1,2] > h-border) and (pts[-1,1] > w/2-75 and pts[-1,1] < w/2+75)): invalid = True # For all valid trails, prepare them for generating animated trails by frame number if not invalid: num_traces += 1 avg_length += len(pts) # Drawing colour for traces given as RGB colour = (0,0,1) for pt in pts: this_frame = [pt[3], pt[1], pt[2], pt[0]] drawing_by_frame.append(this_frame) valid += 1 x = np.clip(pts[:,1],0,w) y = np.clip(pts[:,2],0,h) print "Valid Trails: " , valid, " Average Length:" , avg_length/num_traces drawing_by_frame.sort() last_frame = drawing_by_frame[-1][0] current_frame = drawing_by_frame[0][0] drawing_dict = collections.defaultdict(list) count = 0 while len(drawing_by_frame) > 0: #print "Next Frame, " , current_frame pylab.imshow(background) while drawing_by_frame[0][0] == current_frame: list_one = drawing_by_frame.pop(0) x = drawing_dict[list_one[3]] x.append([list_one[1], list_one[2]]) drawing_dict[list_one[3]] = x # Adjust mod value here to adjust frame drawing frequency # Draw stuff here if (current_frame % 10 ==0): print "Percentage Complete: " , (current_frame/last_frame)*100 draw_dict(drawing_dict, w, h, border, figure_name, current_frame, count) count += 1 pylab.clf() current_frame = drawing_by_frame[0][0] def draw_dict(dict, w, h, border, figure_name, frame, count): for trace in dict: print trace pts = dict[trace] pylab.plot([p[0] for p in pts], [p[1] for p in pts],'-',color=(0,0,1),alpha=0.5, linewidth=2) pylab.xlim(0,w) pylab.ylim(h,0) pylab.axis('off') pylab.subplots_adjust(0,0,1,1,0,0) pylab.savefig("Frames/" + figure_name + "_" + str(count).zfill(6) + '.png', dpi=150,bbox_inches='tight', pad_inches=0) #pylab.savefig("Frames/" + 'frame' + str(int(frame)) + '.png', dpi=150,bbox_inches='tight', 
pad_inches=0) if __name__ == "__main__": print "Starting Frame Generation" generate_frames(sys.argv[1:])
mit
-6,176,049,097,840,916,000
22.96988
149
0.643629
false
2.821986
false
false
false
by46/simplekit
simplekit/email/__init__.py
1
4151
import httplib import os.path import requests import six from simplekit import settings from simplekit.exceptions import MailException PRIORITY_NORMAL = 0 PRIORITY_LOW = 1 PRIORITY_HIGH = 2 CONTENT_TYPE_HTML = 0 CONTENT_TYPE_TEXT = 1 ENCODING_UTF8 = 0 ENCODING_ASCII = 1 ENCODING_UTF32 = 2 ENCODING_UNICODE = 3 MEDIA_TYPE_GIF = 0 MEDIA_TYPE_JPEG = 1 MEDIA_TYPE_TIFF = 2 MEDIA_TYPE_PDF = 3 MEDIA_TYPE_RTF = 4 MEDIA_TYPE_SOAP = 5 MEDIA_TYPE_ZIP = 6 MEDIA_TYPE_OTHER = 7 MAIL_TYPE_SMTP = 1 MAIL_TYPE_LONDON2 = 0 class SmtpSetting(dict): def __init__(self, subject_encoding, body_encoding, attachments=None): kwargs = dict(SubjectEncoding=subject_encoding, BodyEncoding=body_encoding, Attachments=attachments) super(SmtpSetting, self).__init__(**kwargs) self.__dict__ = self class MailAttachment(dict): def __init__(self, filename, file_content, media_type=MEDIA_TYPE_OTHER): kwargs = dict(FileName=filename, FileContent=file_content, MediaType=media_type) super(MailAttachment, self).__init__(**kwargs) self.__dict__ = self class LondonIISetting(dict): def __init__(self, company_code, country_code, language_code, system_id, template_id, mail_template_variables): kwargs = dict(CompanyCode=company_code, CountryCode=country_code, LanguageCode=language_code, SystemID=system_id, TemplateID=template_id, MailTemplateVariables=mail_template_variables) super(LondonIISetting, self).__init__(**kwargs) self.__dict__ = self class MailTemplateVariable(dict): def __init__(self, key, value): kwargs = dict(Key=key, Value=value) super(MailTemplateVariable, self).__init__(**kwargs) def send_email_inner(sender, to, subject, body, cc=None, bcc=None, priority=PRIORITY_NORMAL, content_type=CONTENT_TYPE_TEXT, mail_type=None, smtp_setting=None, london_2_setting=None): if isinstance(to, (list, tuple)): to = ';'.join(to) body = dict(From=sender, To=to, CC=cc, BCC=bcc, Subject=subject, Body=body, Priority=priority, ContentType=content_type, MailType=mail_type, SmtpSetting=smtp_setting, LondonIISetting=london_2_setting) response = requests.post(settings.URL_EMAIL, json=body, headers={'Content-Type': 'Application/json', 'accept': 'application/json'}) if response.status_code != httplib.OK: del body['SmtpSetting'] raise MailException("Send mail use api {0} status code: {1}\n body : {2}\n response content : {3}".format( settings.URL_EMAIL, response.status_code, body, response.content)) def send_email(sender, to, subject, body, cc=None, bcc=None, priority=PRIORITY_NORMAL, content_type=CONTENT_TYPE_TEXT, files=None): attachments = [] import base64 if files: for item in files: if isinstance(item, six.string_types): filename = os.path.basename(item) file_content = open(item, 'rb').read() file_content = base64.b64encode(file_content) media_type = MEDIA_TYPE_OTHER attachment = MailAttachment(filename, file_content, media_type) attachments.append(attachment) else: attachments.append(item) smtp_setting = SmtpSetting(ENCODING_UTF8, ENCODING_UTF8, attachments) send_email_inner(sender, to, subject, body, cc, bcc, priority, content_type, MAIL_TYPE_SMTP, smtp_setting) if __name__ == '__main__': send_email('[email protected]', '[email protected]', '(info) testing', 'testing body', files=['__init__.py'])
mit
-3,068,504,204,268,324,000
33.177966
115
0.582751
false
3.847081
false
false
false
pentestfail/TA-FireEye_TAP
bin/input_module_fireeye_tap_incidents.py
1
4568
# encoding = utf-8

import os
import sys
import time
import datetime
import json

def validate_input(helper, definition):
    api_env = definition.parameters.get('api_env', None)
    instanceid = definition.parameters.get('instance_id', None)
    apikey = definition.parameters.get('apikey', None)
    api_limit = definition.parameters.get('api_limit', None)
    api_timeout = definition.parameters.get('api_timeout', None)
    pass

def collect_events(helper, ew):
    # Retrieve runtime variables
    opt_environment = helper.get_arg('api_env')
    opt_instanceid = helper.get_arg('instance_id')
    opt_apikey = helper.get_arg('apikey')
    opt_limit = helper.get_arg('api_limit')
    opt_timeout = float(helper.get_arg('api_timeout'))

    # Create checkpoint key
    opt_checkpoint = "incidents_" + opt_environment + "_" + opt_instanceid

    #Create last status entry for storage as checkpoint
    current_status = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())

    #Check for last query execution data in kvstore & generate if not present
    try:
        last_status = helper.get_check_point(opt_checkpoint) or time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(0))
        helper.log_debug("[" + opt_instanceid + "] TAP Incidents - Last successful checkpoint time: " + str(last_status))
    except Exception as e:
        helper.log_error("[" + opt_instanceid + "] TAP Incidents - Unable to retrieve last execution checkpoint!")
        raise e

    # use simple rest call to load the events
    header = {}
    data = {}
    parameter = {}
    parameter['limit'] = opt_limit
    parameter['sort'] = "-createDate"
    parameter['withCount'] = "1"
    parameter['includes'] = "revisions._updatedBy"
    parameter['query'] = str('{"updateDate":{"$gte":"' + last_status + '"}}')
    url = "https://" + opt_environment + ".fireeye.com/tap/id/" + opt_instanceid + "/api/v1/incidents"
    method = 'GET'
    header['x-mansfield-key'] = opt_apikey

    try:
        # Leverage helper function to send http request
        response = helper.send_http_request(url, method, parameters=parameter, payload=None, headers=header, cookies=None, verify=True, cert=None, timeout=opt_timeout, use_proxy=True)
        # Return API response code
        r_status = response.status_code
        # Return API request status_code
        if r_status != 200:
            helper.log_error("[" + opt_instanceid + "] Incidents API unsuccessful status_code=" + str(r_status))
            response.raise_for_status()
        # Return API request as JSON
        obj = response.json()
        if obj is None:
            helper.log_info("[" + opt_instanceid + "] No new incidents retrieved from TAP.")

        # Iterate over incidents in array & index
        i=0
        for incident in obj.get("incidents"):
            singleIncident = (obj.get("incidents")[i])
            singleIncident['tap_instance'] = opt_instanceid
            singleIncident['tap_environment'] = opt_environment

            # Rename underscore fields so Splunk will index values
            singleIncident['alert'] = singleIncident['_alert']
            singleIncident['updatedBy'] = singleIncident['_updatedBy']
            singleIncident['createdBy'] = singleIncident['_createdBy']
            singleIncident['assignedTo'] = singleIncident['_assignedTo']

            # Remove underscore fieldnames and values
            del singleIncident['_alert']
            del singleIncident['_updatedBy']
            del singleIncident['_createdBy']
            del singleIncident['_assignedTo']

            event = helper.new_event(source=helper.get_input_type(), index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=json.dumps(singleIncident))

            try:
                ew.write_event(event)
                helper.log_debug("[" + opt_instanceid + "] Added incident:" + str(singleIncident['id']))
            except Exception as error:
                helper.log_error("[" + opt_instanceid + "] Unable to add incident:" + str(singleIncident['id']))

            i = i + 1

        #Update last completed execution time
helper.save_check_point(opt_checkpoint, current_status) helper.log_info("[" + opt_instanceid + "] Incidents collection complete. Records added: " + str(i)) helper.log_debug("[" + opt_instanceid + "] TAP Incidents - Storing checkpoint time: " + current_status) except Exception as error: helper.log_error("[" + opt_instanceid + "] TAP Incidents - An unknown error occurred!") raise error
mit
-2,108,804,593,542,460,400
43.794118
183
0.632443
false
3.910959
false
false
false
turtlewit/GSHS_RPG
AdventureEngine/CoreEngine/input.py
2
3088
#------------------------------------------------------------------------------# # Copyright 2016-2017 Golden Sierra Game Development Class # # This file is part of Verloren (GSHS_RPG). # # # # Verloren (GSHS_RPG) is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # Verloren (GSHS_RPG) is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with Verloren (GSHS_RPG). If not, see <http://www.gnu.org/licenses/>. # #------------------------------------------------------------------------------# import sys import curses class Input: #renderer = None commandHistory = [] command = None unf_command = "" cheese = "cheese" takeTextInput = False char = None def Update(self, renderer): Input.command = None Input.char = None if renderer: currentCharacter = renderer.m_screen.getch() if currentCharacter != -1: if currentCharacter != curses.KEY_RESIZE: Input.char = currentCharacter if Input.takeTextInput: if currentCharacter == ord('\n'): if len(Input.unf_command.split()) > 0: Input.commandHistory.insert(0,Input.command) Input.command = Input.unf_command else: Input.command = 10 renderer.m_cmd = "" Input.unf_command = "" if sys.platform == 'linux' \ or sys.platform == 'linux2' \ or sys.platform == 'linux-armv7l': if currentCharacter == 127 \ or currentCharacter == curses.KEY_BACKSPACE: renderer.m_cmd = renderer.m_cmd[:-1] Input.unf_command = Input.unf_command[:-1] else: if currentCharacter == 8: renderer.m_cmd = renderer.m_cmd[:-1] Input.unf_command = Input.unf_command[:-1] if currentCharacter >=32 and currentCharacter <= 126: if renderer.m_vorCmd: if len(Input.unf_command) \ < renderer.BUFFER_X \ - len(renderer.m_vorCmd) \ - 1: renderer.m_cmd += chr(currentCharacter) Input.unf_command += chr(currentCharacter) if currentCharacter in [ curses.KEY_UP, curses.KEY_DOWN, curses.KEY_LEFT, curses.KEY_RIGHT, 27 ]: Input.command = currentCharacter
gpl-3.0
1,061,857,247,540,585,600
35.204819
80
0.510687
false
3.979381
false
false
false
TheOriginalBDM/Lazy-Cleaner-9000
code/clean_sweep_vision.py
1
6258
#!/usr/bin/env python from picamera.array import PiRGBArray from picamera import PiCamera import cv2 import time from colormath.color_diff import delta_e_cie2000 from colormath.color_objects import LabColor, sRGBColor from colormath.color_conversions import convert_color def nothing(*arg): pass def is_allowed_color(cur_int, avg_int, m_val): b = abs(cur_int[0] - avg_int[0]) g = abs(cur_int[1] - avg_int[1]) r = abs(cur_int[2] - avg_int[2]) if (b > m_val or g > m_val or r > m_val): return True else: return False def make_gt_val(val, min_val): if val < min_val: val = min_val return val def make_odd(val): if val % 2 == 0: val += 1 return val def get_avg_bgr(in_img, in_cntrs): ttlA = 0 sum_roiA_mean = (0, 0, 0) avg_roiA_mean = (0, 0, 0) ttlA = len(in_cntrs) for cnt2 in in_cntrs: x2, y2, w2, h2 = cv2.boundingRect(cnt2) roiA = in_img[y:y2+w2, x:x2+h2] roiA_mean = cv2.mean(roiA) int_roiA_mean = (int(roiA_mean[0]), int(roiA_mean[1]), int(roiA_mean[2])) sum_roiA_mean = (int_roiA_mean[0] + sum_roiA_mean[0], int_roiA_mean[1] + sum_roiA_mean[1], int_roiA_mean[2] + sum_roiA_mean[2]) if ttlA > 0: avg_roiA_mean = (sum_roiA_mean[0]/ttlA, sum_roiA_mean[1]/ttlA, sum_roiA_mean[2]/ttlA) return avg_roiA_mean window_nm = 'img_cntrls' cam_res_w = 640 cam_res_h = 480 cam_fr_rt = 32 cv2.namedWindow(window_nm) cv2.createTrackbar('blur_size', window_nm, 7 , 21, nothing) cv2.createTrackbar('canny_min', window_nm, 156, 255, nothing) cv2.createTrackbar('thresh_min', window_nm, 7 , 255, nothing) cv2.createTrackbar('min_area', window_nm, 5 , 2000, nothing) cv2.createTrackbar('max_area', window_nm, 40000 , 90000, nothing) cv2.createTrackbar('max_delta', window_nm, 20 , 100, nothing) cv2.createTrackbar('get_avg', window_nm, 0 , 1, nothing) cv2.createTrackbar('get_mode', window_nm, 0, 7, nothing) camera = PiCamera() camera.resolution = (cam_res_w, cam_res_h) camera.framerate = cam_fr_rt rawCapture = PiRGBArray(camera, size=(cam_res_w, cam_res_h)) time.sleep(0.2) avg_roi_mean = (0, 0, 0) #b, g, r delta_color = 000.0000 for frame in camera.capture_continuous(rawCapture, format='bgr', use_video_port=True): ############################################# ### GET THE CURRENT FRAME FROM THE CAMERA ### ############################################# im = frame.array im_raw = im #keep a copy in case we want to look at it later #################### ### GET SETTINGS ### #################### s = cv2.getTrackbarPos('get_avg', window_nm) blur_size = cv2.getTrackbarPos('blur_size',window_nm) canny_min = cv2.getTrackbarPos('canny_min',window_nm) thresh_min = cv2.getTrackbarPos('thresh_min',window_nm) min_area = cv2.getTrackbarPos('min_area',window_nm) max_area = cv2.getTrackbarPos('max_area',window_nm) max_delta = cv2.getTrackbarPos('max_delta',window_nm) mode = cv2.getTrackbarPos('get_mode', window_nm) ############################ ### ENSURE CORRECT VALUE ### ############################ blur_size = make_odd(blur_size) blur_size = make_gt_val(blur_size, 0) thresh_min = make_odd(thresh_min) thresh_min = make_gt_val(thresh_min, 0) ######################################################## ### START IMAGE PROCESSING TO FIND OBJECTS IN RANGE ### ######################################################## imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) blur = cv2.blur(imgray, (blur_size, blur_size)) #edged = cv2.Canny(blur, canny_min, 255) ret3, thresh = cv2.threshold(blur, thresh_min, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU) contours, hierarchy = cv2.findContours(thresh, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) ###S = 1 means get an average of the overall 
RGB picture if s == 1: blur_size == 0 thresh_size = 1 min_area = 0 ovr_avg = get_avg_bgr(im, contours) avg_roi_mean = ovr_avg print avg_roi_mean cv2.setTrackbarPos('get_avg', window_nm, 0) else: ttl_area = 0 ttl_cntrs = len(contours) ttl_color = 0 sum_roi_mean = (0, 0, 0) for cnt in contours: a = cv2.contourArea(cnt) ### DO WE HAVE SOMETHING IN THE RIGHT SIZE (NO NEED TO PICK UP CARS) ### if min_area < a < max_area: ttl_area += 1 x, y, h, w = cv2.boundingRect(cnt) roi = im[y:y+h, x:x+w] roi_mean = cv2.mean(roi) int_roi_mean = (int(roi_mean[0]), int(roi_mean[1]), int(roi_mean[2])) b, g, r = avg_roi_mean bckgrnd_lab = convert_color(sRGBColor(r, g, b), LabColor) contColor_lab = convert_color(sRGBColor(roi_mean[2],roi_mean[1], roi_mean[0]), LabColor) delta_color = round(delta_e_cie2000(bckgrnd_lab, contColor_lab),1) if delta_color >= max_delta: # if is_allowed_color(int_roi_mean, avg_roi_mean, max_dev): cv2.rectangle(im, (x, y), (x+h, y+w), int_roi_mean, 2) ttl_color += 1 strLoc = str(x) + ',' + str(y) + ':' + str(delta_color) cv2.putText(im, strLoc, (x,y), cv2.FONT_HERSHEY_PLAIN, 1.0, (0,0,0), 1) strTTL = str(ttl_cntrs) + ' - ' + str(ttl_area) + ' - ' + str(ttl_color) cv2.putText(im, str(strTTL), (20,20), cv2.FONT_HERSHEY_PLAIN, 2.0, (0, 0, 0), 2) cv2.putText(im, str(avg_roi_mean), (20, cam_res_h - 20) ,cv2.FONT_HERSHEY_PLAIN, 2.0, avg_roi_mean, 2) if mode == 0: cv2.imshow('imgview', im_raw) print 'Raw image view' elif mode == 1: cv2.imshow('imgview', imgray) print 'Grayscale view' elif mode == 2: cv2.imshow('imgview', blur) print 'Blur view' elif mode == 3: cv2.imshow('imgview', blur) print 'Blur view' elif mode == 4: cv2.imshow('imgview', thresh) print 'Threshold view' else: cv2.imshow('imgview', im) print 'Contour overlay on raw view' ch = cv2.waitKey(5) rawCapture.truncate(0) if ch == 27: break cv2.destroyAllWindows()
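Editor's sketch (not part of the script above): the filtering step keeps a contour only when its mean colour differs enough from the average background colour, measured with CIE2000. Below is a standalone sketch of just that comparison, using the same colormath calls the script uses; the BGR triples are made up.

from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000

def bgr_delta(bgr_a, bgr_b):
    # OpenCV means are (B, G, R); colormath expects (R, G, B).
    lab_a = convert_color(sRGBColor(bgr_a[2], bgr_a[1], bgr_a[0]), LabColor)
    lab_b = convert_color(sRGBColor(bgr_b[2], bgr_b[1], bgr_b[0]), LabColor)
    return round(delta_e_cie2000(lab_a, lab_b), 1)

background = (90, 120, 130)   # made-up average background colour
candidate = (30, 40, 200)     # made-up contour mean colour
print(bgr_delta(background, candidate))   # large delta -> treat as an object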
gpl-3.0
5,175,269,980,451,224,000
30.606061
135
0.563279
false
2.843253
false
false
false
raiden-network/raiden
raiden/utils/upgrades.py
1
8374
import os import sqlite3 import sys from contextlib import closing from glob import escape, glob from pathlib import Path import filelock import structlog from raiden.constants import RAIDEN_DB_VERSION from raiden.storage.sqlite import SQLiteStorage from raiden.storage.versions import VERSION_RE, filter_db_names, latest_db_file from raiden.utils.typing import Any, Callable, DatabasePath, List, NamedTuple class UpgradeRecord(NamedTuple): from_version: int function: Callable UPGRADES_LIST: List[UpgradeRecord] = [] log = structlog.get_logger(__name__) def get_file_lock(db_filename: Path) -> filelock.FileLock: lock_file_name = f"{db_filename}.lock" return filelock.FileLock(lock_file_name) def update_version(storage: SQLiteStorage, version: int) -> None: cursor = storage.conn.cursor() cursor.execute( 'INSERT OR REPLACE INTO settings(name, value) VALUES("version", ?)', (str(version),) ) def get_file_version(db_path: Path) -> int: match = VERSION_RE.match(os.path.basename(db_path)) assert match, f'Database name "{db_path}" does not match our format' file_version = int(match.group(1)) return file_version def get_db_version(db_filename: Path) -> int: """Return the version value stored in the db""" msg = f"Path '{db_filename}' expected, but not found" assert os.path.exists(db_filename), msg # Perform a query directly through SQL rather than using # storage.get_version() # as get_version will return the latest version if it doesn't # find a record in the database. conn = sqlite3.connect(str(db_filename), detect_types=sqlite3.PARSE_DECLTYPES) cursor = conn.cursor() try: cursor.execute('SELECT value FROM settings WHERE name="version";') result = cursor.fetchone() except sqlite3.OperationalError: raise RuntimeError("Corrupted database. Database does not the settings table.") if not result: raise RuntimeError( "Corrupted database. Settings table does not contain an entry the db version." ) return int(result[0]) def _copy(old_db_filename: Path, current_db_filename: Path) -> None: old_conn = sqlite3.connect(old_db_filename, detect_types=sqlite3.PARSE_DECLTYPES) current_conn = sqlite3.connect(current_db_filename, detect_types=sqlite3.PARSE_DECLTYPES) with closing(old_conn), closing(current_conn): old_conn.backup(current_conn) def delete_dbs_with_failed_migrations(valid_db_names: List[Path]) -> None: for db_path in valid_db_names: file_version = get_file_version(db_path) with get_file_lock(db_path): db_version = get_db_version(db_path) # The version matches, nothing to do. if db_version == file_version: continue elif db_version > file_version: raise RuntimeError( f"Impossible database version. " f"The database {db_path} has too high a version ({db_version}), " f"this should never happen." ) # The version number in the database is smaller then the current # target, this means that a migration failed to execute and the db # is partially upgraded. else: os.remove(db_path) class UpgradeManager: """Run migrations when a database upgrade is necessary. Skip the upgrade if either: - There is no previous DB - There is a current DB file and the version in settings matches. Upgrade procedure: - Delete corrupted databases. - Copy the old file to the latest version (e.g. copy version v16 as v18). - In a transaction: Run every migration. Each migration must decide whether to proceed or not. 
""" def __init__(self, db_filename: DatabasePath, **kwargs: Any) -> None: base_name = os.path.basename(db_filename) match = VERSION_RE.match(base_name) assert match, f'Database name "{base_name}" does not match our format' self._current_db_filename = Path(db_filename) self._kwargs = kwargs def run(self) -> None: # First clear up any partially upgraded databases. # # A database will be partially upgraded if the process receives a # SIGKILL/SIGINT while executing migrations. NOTE: It's very probable # the content of the database remains consistent, because the upgrades # are executed inside a migration, however making a second copy of the # database does no harm. escaped_path = escape(str(self._current_db_filename.parent)) paths = glob(f"{escaped_path}/v*_log.db") valid_db_names = filter_db_names(paths) delete_dbs_with_failed_migrations(valid_db_names) # At this point we know every file version and db version match # (assuming there are no concurrent runs). paths = glob(f"{escaped_path}/v*_log.db") valid_db_names = filter_db_names(paths) latest_db_path = latest_db_file(valid_db_names) # First run, there is no database file available if latest_db_path is None: return file_version = get_file_version(latest_db_path) # The latest version matches our target version, nothing to do. if file_version == RAIDEN_DB_VERSION: return if file_version > RAIDEN_DB_VERSION: raise RuntimeError( f"Conflicting database versions detected, latest db version is v{file_version}, " f"Raiden client version is v{RAIDEN_DB_VERSION}." f"\n\n" f"Running a downgraded version of Raiden after an upgrade is not supported, " f"because the transfers done with the new client are not understandable by the " f"older." ) if RAIDEN_DB_VERSION >= 27 and file_version <= 26 and file_version > 1: msg = ( f"Your Raiden database is version {file_version} and there is no compatible " f"migration to version {RAIDEN_DB_VERSION} available.\n" "You need to either start a new Raiden node with a different account, or " "close and settle all channels, and start over with a fresh database.\n\n" "More information on this topic at " "https://raiden-network.readthedocs.io/en/latest/other/known-issues.html" "#database-upgrades\n\n" "If you are on **mainnet** and affected by this, please create an issue at " "https://github.com/raiden-network/raiden/issues/new?title=Mainnet%20Migration%20" f"{file_version}%20{RAIDEN_DB_VERSION}" ) log.warning(msg) sys.exit(msg) self._upgrade( target_file=self._current_db_filename, from_file=latest_db_path, from_version=file_version, ) def _upgrade(self, target_file: Path, from_file: Path, from_version: int) -> None: with get_file_lock(from_file), get_file_lock(target_file): _copy(from_file, target_file) # Only instantiate `SQLiteStorage` after the copy. Otherwise # `_copy` will deadlock because only one connection is allowed to # `target_file`. with SQLiteStorage(target_file) as storage: log.debug(f"Upgrading database from v{from_version} to v{RAIDEN_DB_VERSION}") try: version_iteration = from_version with storage.transaction(): for upgrade_record in UPGRADES_LIST: if upgrade_record.from_version < from_version: continue version_iteration = upgrade_record.function( storage=storage, old_version=version_iteration, current_version=RAIDEN_DB_VERSION, **self._kwargs, ) update_version(storage, RAIDEN_DB_VERSION) except BaseException as e: log.error(f"Failed to upgrade database: {e}") raise
mit
-4,492,969,264,185,019,000
36.891403
98
0.614999
false
4.143493
true
false
false
tymmothy/dds3x25
dds3x25/dds.py
1
12274
#!/usr/bin/env python """ This is an interface library for Hantek DDS-3X25 arbitrary waveform generator. Licenced LGPL2+ Copyright (C) 2013 Domas Jokubauskis ([email protected]) Copyright (C) 2014 Tymm Twillman ([email protected]) """ import struct import math import collections # dds3x25 imports... from usb_interface import * from packet import * def samplepoint_encode(value): SIGN_BIT = (1 << 11) encoded = abs(value) if encoded > DDS.MAX_POINT_VALUE: msg = "Value {0} is out of range ({1}-{2})".format(value, -DDS.MAX_POINT_VALUE, DDS.MAX_POINT_VALUE) raise ValueError(msg) # Note: 0 is negative value if value > 0: encoded = (DDS.MAX_POINT_VALUE + 1) - encoded else: encoded = encoded | SIGN_BIT return struct.pack("<H", encoded) def samplepoint_chunks(data): """Cut samplepoint data into 32-point chunks. If necessary, add padding to the last chunk to make it 64 bytes. """ SAMPLEPOINT_CHUNK_SIZE=32 for i in xrange(0, len(data), SAMPLEPOINT_CHUNK_SIZE): chunkdata = data[i:i+SAMPLEPOINT_CHUNK_SIZE] chunk = "".join([ samplepoint_encode(x) for x in chunkdata ]) if len(chunk) < SAMPLEPOINT_CHUNK_SIZE * 2: chunk += "\x91\x1c" * ((SAMPLEPOINT_CHUNK_SIZE - (len(chunk) / 2))) yield chunk class DDS(object): # Hantek 3x25 USB Vendor & Product IDs USB_VID = 0x0483 USB_PID = 0x5721 # Core DAC clock -> 200 MHz DAC_CLOCK = int(200e6) # Maximum DAC clock divider DAC_CLOCK_DIV_MAX = 131070 # Maximum # of sample points MAX_POINTS = 4095 # Maximum value of a point MAX_POINT_VALUE = (1 << 11) - 1 NUM_DIGITAL_OUTPUTS = 12 NUM_DIGITAL_INPUTS = 6 def __init__(self, idVendor=USB_VID, idProduct=USB_PID, **kwargs): """Initialize a DDS instance and connect to the hardware. Args: idVendor (int): 16-bit USB Vendor ID (VID) for the DDS hardware. idProduct (int): 16-bit USB Product ID (PID) for the DDS hardware. Kwargs: See DDS.configure() for the list of kwargs that __init__ understands. """ # Set up defaults for instance variables. self._ext_trigger = None self._oneshot = False self._counter_mode = False self._programmable_output = True self._digital_output = 0 self._clock_divider = 128 # do not initialize USB device if used for unit testing if kwargs.get('testing', False): return self._in_ep, self._out_ep = dds_usb_open(idVendor, idProduct) self.configure(**kwargs) def transfer(self, data): self._out_ep.write(data) return self._in_ep.read(self._in_ep.wMaxPacketSize) def configure(self, **kwargs): """Update the 3x25's configuration settings. Kwargs: reset_trig (bool): If True, reset the DDS external trigger. reset_counter (bool): If True, reset the DDS counter. oneshot (bool): If True, only output one wave (not continuous). counter_mode (bool): Set true to enable counter mode. If True, the 3x25 counts pulses. If False, the 3x25 measures frequency. programmable_output (bool): Set true to enable programmable digital output. If True, digital output pins are controlled by setting digital_output. If False, digital output pins follow the DAC output value. ext_trigger ([None, 0 or 1]): Configure external trigger mode. If None, external triggering is disabled. If 1, external triggering occurs on rising pulse edges. If 0, external triggering occurs on falling pulse edges. digital_output (int): 12-bit unsigned value whose bits are written to the 3x25's digital output pins. Note: Only used when programmable_output is enabled. clock_divider (int): Divisor to use for 200Mhz DAC clock to generate sample output clock. 
Must be an even value from 0-131070 """ reset_trigger = bool(kwargs.get('reset_trig', False)) reset_counter = bool(kwargs.get('reset_counter', False)) oneshot = bool(kwargs.get('oneshot', self._oneshot)) counter_mode = bool(kwargs.get('counter_mode', self._counter_mode)) programmable_output = bool(kwargs.get('programmable_output', self._programmable_output)) ext_trigger = kwargs.get('ext_trigger', self._ext_trigger) if ext_trigger not in [ None, 0, 1 ]: raise ValueError("Invalid value for ext_trigger (must be 1, 0 or None)") digital_output = int(kwargs.get('digital_output', self._digital_output)) clock_divider = int(kwargs.get('clock_divider', self._clock_divider)) if (clock_divider < 1) or (clock_divider > 131070) or (clock_divider > 1 and clock_divider & 1): msg = "Clock divider ({0}) must be 1 or an even value between 2 and {1}.".format(clock_divider, DDS.DAC_CLOCK_DIV_MAX) raise ValueError(msg) self._oneshot = oneshot self._counter_mode = counter_mode self._programmable_output = programmable_output self._ext_trigger = ext_trigger self._digital_output = digital_output self._clock_divider = clock_divider configure_packet = ConfigurePacket(self, reset_trigger=reset_trigger, reset_counter=reset_counter) response = self.transfer(str(configure_packet)) response = self._parse_configure_packet_response(response) return response def _parse_configure_packet_response(self, packet): vals = struct.unpack("<HII", packet) return { 'digital_input' : vals[0], 'frequency' : vals[1] * 2 if self._counter_mode is False else None, 'ticks' : None if vals[2] == 0xffffffff else vals[2], 'counts' : vals[1] if self._counter_mode is True else None, } def set_waveform(self, points, clock_divider=None, shift_points=0): count = len(points) if shift_points: points = collections.deque(points) points.rotate(shift_points) response = self.transfer(str(PointCountPacket(count, is_start=True))) assert response[0] == 0xcc for chunk in samplepoint_chunks(points): response = self.transfer(chunk) assert response[0] == 0xcc response = self.transfer(str(PointCountPacket(count))) assert response[0] == 0xcc if clock_divider is not None: self.configure(clock_divider=clock_divider) def reset_counter(self): """Reset the 3x25 counter state.""" self.configure(reset_counter=True) def reset_trigger(self): """Reset the 3x25 external trigger.""" self.configure(reset_trigger=True) def digital_write(self, pin, pin_state): """Set the output state of a digital output pin. Args: pin (int): Number of pin to control. pin_state (int/bool): If 1/True, pin will be set high. If 0/False, pin will be set low. """ pin_state = 1 if pin_state else 0 digital_output = self._digital_output & ~(1 << pin) digital_output |= (pin_state << pin) self.configure(digital_output=digital_output) def digital_write_port(self, pin_states): """Set the output states of all digital output pins. Args: pin_states (int): Value comprised of bits to write to the digital output pins. """ self.configure(digital_output=val) def digital_read(self, pin): """Read the state of a digital input pin. Args: pin (int): Input pin # to read. Returns: 0 if the pin is low, 1 if the pin is high. """ digital_in = self.configure()['digital_input'] return 1 if (digital_in & (1 << pin)) else 0 def digital_read_port(self): """Read the state of all input pins as one integer value. Returns: Integer w/bits set to the states of the input pins. """ return self.configure()['digital_input'] def count_in_frequency(self): """Get the input frequency at the 3x25's COUNT IN port. 
The frequency is only available when the 3x25 is NOT in counter mode. Returns: Frequency (in Hz) at the COUNT IN port, or None if in counter mode. """ return self.configure()['frequency'] def count_in_counts(self): """Get the # of pulses counted at the 3x25's COUNT IN port since last reset. The count is only available when the 3x25 IS in counter mode. use .reset_counter() to reset the value to 0. Returns: # of pulses counted at the COUNT IN port, or None if not in counter mode. """ return self.configure()['counts'] def count_in_ticks(self): return self.configure()['ticks'] @property def ext_trigger(self): return self._ext_trigger @ext_trigger.setter def ext_trigger(self, trig): if trig is not None and trig != 0 and trig != 1: raise ValueError("Invalid value for external trigger (should be 1, 0 or None)") self.configure(ext_trigger=trig) @property def oneshot_mode(self): return self._oneshot @oneshot_mode.setter def oneshot_mode(self, val): val = True if val else False self.configure(oneshot=val) @property def counter_mode(self): return self._counter_mode @counter_mode.setter def counter_mode(self, val): val = True if val else False self.configure(counter_mode=val) @property def programmable_output(self): return self._programmable_output @programmable_output.setter def programmable_output(self, val): self.configure(programmable_output=val) @staticmethod def points_and_div_for_freq(freq): # Calculate divisor based on using max # of available samples possible. # -- ceil( DAC_CLOCK / (frequency * MAX_POINTS) ) freq = int(freq) div = (DDS.DAC_CLOCK + (freq - 1) * DDS.MAX_POINTS) / (freq * DDS.MAX_POINTS) # Adjust if odd value -- divisor has to be 1 or a multiple of 2 if div > 1 and div & 1: div += 1 # Calculate # of sample points to use w/this divider to get closest # to requested frequency # -- round( DAC_CLOCK / (divider * frequency) ) npoints = (DDS.DAC_CLOCK + (div * freq / 2)) / (div * freq) # Calculate actual frequency actual = (DDS.DAC_CLOCK / div) / npoints return (npoints, div, actual) def generate_sine(self, freq, amplitude=(1<<11)-1, offset=0, phase=0.0, shift=0): phase = float(phase) npoints, div, actual = DDS.points_and_div_for_freq(freq) points = [] for i in range(npoints): i = float(i) point = (amplitude * math.sin((2.0 * math.pi * i / npoints) + phase)) + offset points.append(int(point)) self.set_waveform(points, clock_divider=div, shift_points=shift) return actual def generate_square(self, freq, duty_cycle=0.5, amplitude=(1<<11)-1, offset=0, phase=0.0, shift=0): phase = float(phase) npoints, div, actual = DDS.points_and_div_for_freq(freq) points = [] for i in range(npoints): shifted = int(i + (phase * npoints) / (2.0 * math.pi)) % npoints point = amplitude if shifted < (duty_cycle * npoints) else -amplitude points.append(int(point + offset)) self.set_waveform(points, clock_divider=div, shift_points=shift) return actual if __name__ == "__main__": import time freq = 6000000 d = DDS() # print "Generating square wave @ {0} hz".format(freq) # d.generate_square(25000000, 0.50) # time.sleep(10) print "Generating sine wave @ {0} hz".format(freq) d.generate_sine(freq) d.programmable_output=True d.reset_counter() d.counter_mode = True
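Editor's sketch (not part of the driver above): a standalone re-derivation of the sample-count / clock-divider arithmetic in DDS.points_and_div_for_freq, written with explicit integer division so it also runs on Python 3 (the module itself is Python 2). No hardware or USB access is needed.

DAC_CLOCK = 200000000
MAX_POINTS = 4095

def points_and_div(freq):
    div = (DAC_CLOCK + (freq - 1) * MAX_POINTS) // (freq * MAX_POINTS)
    if div > 1 and div & 1:          # the divider must be 1 or an even number
        div += 1
    npoints = (DAC_CLOCK + div * freq // 2) // (div * freq)
    actual = DAC_CLOCK // div // npoints
    return npoints, div, actual

print(points_and_div(1000))      # low frequency: many sample points, large divider
print(points_and_div(6000000))   # high frequency: few sample points, divider 1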
lgpl-2.1
106,578,187,649,829,000
32.535519
130
0.607952
false
3.744356
true
false
false
atzengin/OCC
oc-utils/python/modtool/code_generator.py
1
2298
# # Copyright 2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # """ A code generator (needed by ModToolAdd) """ from templates import Templates import Cheetah.Template from util_functions import str_to_fancyc_comment from util_functions import str_to_python_comment from util_functions import strip_default_values from util_functions import strip_arg_types from util_functions import strip_arg_types_occ class GRMTemplate(Cheetah.Template.Template): """ An extended template class """ def __init__(self, src, searchList): self.grtypelist = { 'sync': 'sync_block', 'sink': 'sync_block', 'source': 'sync_block', 'decimator': 'sync_decimator', 'interpolator': 'sync_interpolator', 'general': 'block', 'tagged_stream': 'tagged_stream_block', 'hier': 'hier_block2', 'noblock': ''} searchList['str_to_fancyc_comment'] = str_to_fancyc_comment searchList['str_to_python_comment'] = str_to_python_comment searchList['strip_default_values'] = strip_default_values searchList['strip_arg_types'] = strip_arg_types searchList['strip_arg_types_occ'] = strip_arg_types_occ Cheetah.Template.Template.__init__(self, src, searchList=searchList) self.grblocktype = self.grtypelist[searchList['blocktype']] def get_template(tpl_id, **kwargs): """ Return the template given by tpl_id, parsed through Cheetah """ return str(GRMTemplate(Templates[tpl_id], searchList=kwargs))
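Editor's sketch (not part of the module above): a minimal Cheetah example, independent of the GNU Radio template set, showing how searchList values fill placeholders the same way GRMTemplate does. The template string and names are invented.

import Cheetah.Template

src = 'Generating a $blocktype block named $blockname'
values = {'blocktype': 'sync', 'blockname': 'my_block'}
print(str(Cheetah.Template.Template(src, searchList=[values])))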
gpl-3.0
-1,651,171,289,484,127,200
41.555556
76
0.681027
false
3.836394
false
false
false
mostaphaRoudsari/Honeybee
src/Honeybee_AskMe.py
1
1992
# # Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari # # This file is part of Honeybee. # # Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <[email protected]> # Honeybee is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 3 of the License, # or (at your option) any later version. # # Honeybee is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Honeybee; If not, see <http://www.gnu.org/licenses/>. # # @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+> """ Use this component to get basic information on Honeybee Objects, whether they are HBSrfs or HBZones. - Provided by Honeybee 0.0.66 Args: _HBObjects: Any valid Honeybee object. Returns: readMe!: Information about the Honeybee object. Connect to a panel to visualize. """ ghenv.Component.Name = "Honeybee_AskMe" ghenv.Component.NickName = 'askMe' ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020' ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application ghenv.Component.Category = "HB-Legacy" ghenv.Component.SubCategory = "00 | Honeybee" #compatibleHBVersion = VER 0.0.56\nJUL_24_2017 #compatibleLBVersion = VER 0.0.59\nFEB_01_2015 try: ghenv.Component.AdditionalHelpFromDocStrings = "1" except: pass import scriptcontext as sc try: # call the objects from the lib hb_hive = sc.sticky["honeybee_Hive"]() HBObjectsFromHive = hb_hive.visualizeFromHoneybeeHive(_HBObjects) for HBO in HBObjectsFromHive: print HBO except Exception, e: print "Honeybee has no idea what this object is! Vviiiiiiz!" pass
gpl-3.0
-3,768,216,618,678,705,700
35.218182
100
0.744478
false
3.207729
false
false
false
wdv4758h/ZipPy
edu.uci.python.benchmark/src/benchmarks/euler31-timed.py
1
1614
#runas solve() #unittest.skip recursive generator #pythran export solve() # 01/08/14 modified for benchmarking by Wei Zhang import sys, time COINS = [1, 2, 5, 10, 20, 50, 100, 200] # test def _sum(iterable): sum = None for i in iterable: if sum is None: sum = i else: sum += i return sum def balance(pattern): return _sum(COINS[x]*pattern[x] for x in range(0, len(pattern))) def gen(pattern, coinnum, num): coin = COINS[coinnum] for p in range(0, num//coin + 1): newpat = pattern[:coinnum] + (p,) bal = balance(newpat) if bal > num: return elif bal == num: yield newpat elif coinnum < len(COINS)-1: for pat in gen(newpat, coinnum+1, num): yield pat def solve(total): ''' In England the currency is made up of pound, P, and pence, p, and there are eight coins in general circulation: 1p, 2p, 5p, 10p, 20p, 50p, P1 (100p) and P2 (200p). It is possible to make P2 in the following way: 1 P1 + 1 50p + 2 20p + 1 5p + 1 2p + 3 1p How many different ways can P2 be made using any number of coins? ''' return _sum(1 for pat in gen((), 0, total)) def measure(): input = int(sys.argv[1]) # 200 for i in range(3): solve(input) print("Start timing...") start = time.time() result = solve(input) print('total number of different ways: ', result) duration = "%.3f\n" % (time.time() - start) print("euler31: " + duration) # warm up for i in range(2000): # 300 solve(40) measure()
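Editor's sketch (not part of the benchmark above): an independent dynamic-programming cross-check for the recursive generator; counting combinations coin by coin gives the same totals, and for the classic 200p target the count is 73682.

def count_ways(total, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    ways = [1] + [0] * total          # ways[n] = combinations that sum to n
    for coin in coins:
        for n in range(coin, total + 1):
            ways[n] += ways[n - coin]
    return ways[total]

print(count_ways(200))   # 73682, matching solve(200)
print(count_ways(40))    # same count the warm-up loop computes repeatedly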
bsd-3-clause
7,452,297,059,408,330,000
23.830769
115
0.576208
false
3.189723
false
false
false
longde123/MultiversePlatform
lib/IPCE/Lib/ctypes.py
1
5974
# Copyright (c) 2006 Seo Sanghyeon # 2006-06-08 sanxiyn Created # 2006-06-11 sanxiyn Implemented .value on primitive types # 2006-11-02 sanxiyn Support for multiple signatures __all__ = [ 'c_int', 'c_float', 'c_double', 'c_char_p', 'c_void_p', 'LibraryLoader', 'CDLL', 'cdll', 'byref', 'sizeof' ] # -------------------------------------------------------------------- # Dynamic module definition from System import AppDomain from System.Reflection import AssemblyName from System.Reflection.Emit import AssemblyBuilderAccess def pinvoke_module(): domain = AppDomain.CurrentDomain name = AssemblyName('pinvoke') flag = AssemblyBuilderAccess.Run assembly = domain.DefineDynamicAssembly(name, flag) module = assembly.DefineDynamicModule('pinvoke') return module # -------------------------------------------------------------------- # General interface class pinvoke_value: type = None value = None def get_type(obj): if isinstance(obj, pinvoke_value): return obj.type else: return type(obj) def get_value(obj): if isinstance(obj, pinvoke_value): return obj.value else: return obj # -------------------------------------------------------------------- # Primitive types from System import Single, Double, IntPtr class pinvoke_primitive(pinvoke_value): def __init__(self, value=None): if value is None: value = self.type() if not isinstance(value, self.type): expected = self.type.__name__ given = value.__class__.__name__ msg = "%s expected instead of %s" % (expected, given) raise TypeError(msg) self.value = value def __repr__(self): clsname = self.__class__.__name__ return "%s(%r)" % (clsname, self.value) class c_int(pinvoke_primitive): type = int class c_float(pinvoke_primitive): type = Single class c_double(pinvoke_primitive): type = Double class c_char_p(pinvoke_primitive): type = str class c_void_p(pinvoke_primitive): type = IntPtr # -------------------------------------------------------------------- # Reference from System import Type class pinvoke_reference(pinvoke_value): def __init__(self, obj): self.obj = obj self.type = Type.MakeByRefType(obj.type) self.value = obj.value def __repr__(self): return "byref(%r)" % (self.obj,) def byref(obj): if not isinstance(obj, pinvoke_value): raise TypeError("byref() argument must be a ctypes instance") ref = pinvoke_reference(obj) return ref # -------------------------------------------------------------------- # Utility from System.Runtime.InteropServices import Marshal def sizeof(obj): return Marshal.SizeOf(obj.type) # -------------------------------------------------------------------- # Dynamic P/Invoke from System import Array from System.Reflection import CallingConventions, MethodAttributes from System.Runtime.InteropServices import CallingConvention, CharSet from IronPython.Runtime.Calls import BuiltinFunction, FunctionType class pinvoke_method: pinvoke_attributes = ( MethodAttributes.Public | MethodAttributes.Static | MethodAttributes.PinvokeImpl ) calling_convention = None return_type = None def __init__(self, dll, entry): self.dll = dll self.entry = entry self.restype = None self.argtypes = None self.func = None self.signatures = set() def create(self, restype, argtypes): dll = self.dll entry = self.entry attributes = self.pinvoke_attributes cc = self.calling_convention clr_argtypes = Array[Type](argtypes) module = pinvoke_module() module.DefinePInvokeMethod( entry, dll, attributes, CallingConventions.Standard, restype, clr_argtypes, cc, CharSet.Ansi) module.CreateGlobalFunctions() method = module.GetMethod(entry) self.func = BuiltinFunction.MakeOrAdd( 
self.func, entry, method, FunctionType.Function) self.signatures.add((restype, argtypes)) def __call__(self, *args): if self.restype: restype = self.restype.type else: restype = self.return_type.type if self.argtypes: argtypes = [argtype.type for argtype in self.argtypes] else: argtypes = [get_type(arg) for arg in args] argtypes = tuple(argtypes) if (restype, argtypes) not in self.signatures: self.create(restype, argtypes) args = [get_value(arg) for arg in args] result = self.func(*args) return result # -------------------------------------------------------------------- # Function loader def is_special_name(name): return name.startswith('__') and name.endswith('__') class pinvoke_dll: method_class = None def __init__(self, name): self.name = name def __repr__(self): clsname = self.__class__.__name__ return "<%s '%s'>" % (clsname, self.name) def __getattr__(self, name): if is_special_name(name): raise AttributeError(name) method = self.method_class(self.name, name) setattr(self, name, method) return method class CDLL(pinvoke_dll): class method_class(pinvoke_method): calling_convention = CallingConvention.Cdecl return_type = c_int # -------------------------------------------------------------------- # Library loader class LibraryLoader(object): def __init__(self, dlltype): self.dlltype = dlltype def __getattr__(self, name): if is_special_name(name): raise AttributeError(name) dll = self.dlltype(name) setattr(self, name, dll) return dll def LoadLibrary(self, name): return self.dlltype(name) cdll = LibraryLoader(CDLL)
mit
-2,151,948,963,422,639,000
25.789238
70
0.571142
false
4.105842
false
false
false
bergolho1337/URI-Online-Judge
Basicos/Python/1061/main.py
1
1292
# -*- coding: utf-8 -*- def converteString (dia, hora): evento = [] # Parse do dia num = dia[4:6] evento.append(int(num)) # Parse da hora num = hora[0:2] evento.append(int(num)) # Parse dos minutos num = hora[5:7] evento.append(int(num)) # Parse dos segundos num = hora[10:12] evento.append(int(num)) return evento def calculaDuracao (inicio, fim): inicio_seg = (inicio[0]*86400)+(inicio[1]*3600)+(inicio[2]*60)+(inicio[3]) fim_seg = (fim[0]*86400)+(fim[1]*3600)+(fim[2]*60)+(fim[3]) duracao_seg = fim_seg - inicio_seg dias = duracao_seg / 86400 duracao_seg = duracao_seg - (dias*86400) horas = duracao_seg / 3600 duracao_seg = duracao_seg - (horas*3600) minutos = duracao_seg / 60 duracao_seg = duracao_seg - (minutos*60) segundos = duracao_seg return dias, horas, minutos, segundos dia_inicio = raw_input() hora_inicio = raw_input() dia_fim = raw_input() hora_fim = raw_input() evento_inicio = converteString(dia_inicio,hora_inicio) evento_fim = converteString(dia_fim,hora_fim) dias, horas, minutos, segundos = calculaDuracao(evento_inicio,evento_fim) print("%d dia(s)" % dias) print("%d hora(s)" % horas) print("%d minuto(s)" % minutos) print("%d segundo(s)" % segundos)
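Editor's sketch (not part of the solution above): a worked example of the same duration arithmetic with made-up start and end events, kept in plain integer seconds so it can be checked by hand.

inicio = (5, 8, 12, 23)   # made-up: dia 5, 08:12:23
fim = (9, 6, 13, 23)      # made-up: dia 9, 06:13:23
segs = ((fim[0] - inicio[0]) * 86400 + (fim[1] - inicio[1]) * 3600
        + (fim[2] - inicio[2]) * 60 + (fim[3] - inicio[3]))
print("%d dia(s)" % (segs // 86400))           # 3
print("%d hora(s)" % (segs % 86400 // 3600))   # 22
print("%d minuto(s)" % (segs % 3600 // 60))    # 1
print("%d segundo(s)" % (segs % 60))           # 0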
gpl-2.0
-1,258,978,114,268,866,600
25.387755
78
0.629257
false
2.327928
false
false
false
google/material-design-icons
update/venv/lib/python3.9/site-packages/fontTools/varLib/plot.py
5
4153
"""Visualize DesignSpaceDocument and resulting VariationModel.""" from fontTools.varLib.models import VariationModel, supportScalar from fontTools.designspaceLib import DesignSpaceDocument from matplotlib import pyplot from mpl_toolkits.mplot3d import axes3d from itertools import cycle import math import logging import sys log = logging.getLogger(__name__) def stops(support, count=10): a,b,c = support return [a + (b - a) * i / count for i in range(count)] + \ [b + (c - b) * i / count for i in range(count)] + \ [c] def _plotLocationsDots(locations, axes, subplot, **kwargs): for loc, color in zip(locations, cycle(pyplot.cm.Set1.colors)): if len(axes) == 1: subplot.plot( [loc.get(axes[0], 0)], [1.], 'o', color=color, **kwargs ) elif len(axes) == 2: subplot.plot( [loc.get(axes[0], 0)], [loc.get(axes[1], 0)], [1.], 'o', color=color, **kwargs ) else: raise AssertionError(len(axes)) def plotLocations(locations, fig, names=None, **kwargs): n = len(locations) cols = math.ceil(n**.5) rows = math.ceil(n / cols) if names is None: names = [None] * len(locations) model = VariationModel(locations) names = [names[model.reverseMapping[i]] for i in range(len(names))] axes = sorted(locations[0].keys()) if len(axes) == 1: _plotLocations2D( model, axes[0], fig, cols, rows, names=names, **kwargs ) elif len(axes) == 2: _plotLocations3D( model, axes, fig, cols, rows, names=names, **kwargs ) else: raise ValueError("Only 1 or 2 axes are supported") def _plotLocations2D(model, axis, fig, cols, rows, names, **kwargs): subplot = fig.add_subplot(111) for i, (support, color, name) in enumerate( zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names)) ): if name is not None: subplot.set_title(name) subplot.set_xlabel(axis) pyplot.xlim(-1.,+1.) Xs = support.get(axis, (-1.,0.,+1.)) X, Y = [], [] for x in stops(Xs): y = supportScalar({axis:x}, support) X.append(x) Y.append(y) subplot.plot(X, Y, color=color, **kwargs) _plotLocationsDots(model.locations, [axis], subplot) def _plotLocations3D(model, axes, fig, rows, cols, names, **kwargs): ax1, ax2 = axes axis3D = fig.add_subplot(111, projection='3d') for i, (support, color, name) in enumerate( zip(model.supports, cycle(pyplot.cm.Set1.colors), cycle(names)) ): if name is not None: axis3D.set_title(name) axis3D.set_xlabel(ax1) axis3D.set_ylabel(ax2) pyplot.xlim(-1.,+1.) pyplot.ylim(-1.,+1.) 
Xs = support.get(ax1, (-1.,0.,+1.)) Ys = support.get(ax2, (-1.,0.,+1.)) for x in stops(Xs): X, Y, Z = [], [], [] for y in Ys: z = supportScalar({ax1:x, ax2:y}, support) X.append(x) Y.append(y) Z.append(z) axis3D.plot(X, Y, Z, color=color, **kwargs) for y in stops(Ys): X, Y, Z = [], [], [] for x in Xs: z = supportScalar({ax1:x, ax2:y}, support) X.append(x) Y.append(y) Z.append(z) axis3D.plot(X, Y, Z, color=color, **kwargs) _plotLocationsDots(model.locations, [ax1, ax2], axis3D) def plotDocument(doc, fig, **kwargs): doc.normalize() locations = [s.location for s in doc.sources] names = [s.name for s in doc.sources] plotLocations(locations, fig, names, **kwargs) def main(args=None): from fontTools import configLogger if args is None: args = sys.argv[1:] # configure the library logger (for >= WARNING) configLogger() # comment this out to enable debug messages from logger # log.setLevel(logging.DEBUG) if len(args) < 1: print("usage: fonttools varLib.plot source.designspace", file=sys.stderr) print(" or") print("usage: fonttools varLib.plot location1 location2 ...", file=sys.stderr) sys.exit(1) fig = pyplot.figure() fig.set_tight_layout(True) if len(args) == 1 and args[0].endswith('.designspace'): doc = DesignSpaceDocument() doc.read(args[0]) plotDocument(doc, fig) else: axes = [chr(c) for c in range(ord('A'), ord('Z')+1)] locs = [dict(zip(axes, (float(v) for v in s.split(',')))) for s in args] plotLocations(locs, fig) pyplot.show() if __name__ == '__main__': import sys sys.exit(main())
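Editor's sketch (not part of the module above): a hypothetical direct call of plotLocations, much like running `fonttools varLib.plot 0 1 -1 0.5` from the command line but with a named axis. It assumes fontTools and matplotlib with an interactive backend are installed; the master names are invented.

from matplotlib import pyplot
from fontTools.varLib.plot import plotLocations

locs = [{'wght': 0.0}, {'wght': 1.0}, {'wght': -1.0}, {'wght': 0.5}]
fig = pyplot.figure()
fig.set_tight_layout(True)
plotLocations(locs, fig, names=['Regular', 'Bold', 'Light', 'Medium'])
pyplot.show()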
apache-2.0
-5,397,906,847,771,724,000
23.868263
80
0.642427
false
2.69151
false
false
false
inonit/django-chemtrails
tests/testapp/migrations/0005_guild.py
1
1061
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-05-10 13:14 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('testapp', '0004_book_view_book_permission'), ] operations = [ migrations.CreateModel( name='Guild', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=100)), ('contact', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='guild_contacts', to='testapp.Author')), ('members', models.ManyToManyField(related_name='guild_set', to='testapp.Author', verbose_name='members')), ], ), migrations.AddField( model_name='author', name='guilds', field=models.ManyToManyField(blank=True, to='testapp.Guild'), ), ]
mit
6,104,085,431,111,180,000
34.366667
144
0.600377
false
4.144531
false
false
false
alfred82santa/tarrabme2
src/orgs/models.py
1
1856
from django.db import models from common.models import CommonModel, AbstractContact, AbstractAddress from django.contrib.auth.models import Group from imagekit.models import ProcessedImageField, ImageSpecField from imagekit.processors import ResizeToFill class Organization(CommonModel): name = models.CharField(max_length=100, unique=True) commercial_name = models.CharField(max_length=150, unique=True) prefix = models.CharField(max_length=6, unique=True) active = models.BooleanField('active', default=True) logo = ProcessedImageField( upload_to="logos", processors=[ResizeToFill(400, 400)], ) logo_thumbnail = ImageSpecField(source='logo', processors=[ResizeToFill(50, 50)],) def logo_thumbnail_img(self): return '<img src="%s"/>' % self.logo_thumbnail.url logo_thumbnail_img.allow_tags = True logo_thumbnail_img.short_description = '' class Meta: pass def __unicode__(self): return self.name class Contact(AbstractContact): organization = models.ForeignKey(Organization, blank=False, null=False, related_name="contacts_list" ) class BillingAccount(AbstractAddress): fiscal_number = models.CharField(max_length=126, unique=True) payment_method = models.CharField(max_length=126, unique=True) payment_data = models.CharField(max_length=126, unique=True) organization = models.ForeignKey(Organization, blank=False, null=False, related_name="contacts" ) class OrganizationRole(Group): organization = models.ForeignKey(Organization, blank=False, null=False, related_name="roles" )
gpl-3.0
-9,086,944,784,570,360,000
34.692308
77
0.634698
false
4.450839
false
false
false
anneline/Bika-LIMS
bika/lims/utils/__init__.py
1
13899
from time import time from AccessControl import ModuleSecurityInfo, allow_module from bika.lims import logger from bika.lims.browser import BrowserView from DateTime import DateTime from email import Encoders from email.MIMEBase import MIMEBase from plone.memoize import ram from plone.registry.interfaces import IRegistry from Products.Archetypes.public import DisplayList from Products.CMFCore.utils import getToolByName from Products.CMFPlone.utils import safe_unicode from zope.component import queryUtility from zope.i18n import translate from zope.i18n.locales import locales import App import Globals import os import re import urllib2 ModuleSecurityInfo('email.Utils').declarePublic('formataddr') allow_module('csv') def to_utf8(text): if text is None: text = '' return safe_unicode(text).encode('utf-8') def to_unicode(text): if text is None: text = '' return safe_unicode(text) def t(i18n_msg): """Safely translate and convert to UTF8, any zope i18n msgid returned from a bikaMessageFactory _ """ return to_utf8(translate(i18n_msg)) # Wrapper for PortalTransport's sendmail - don't know why there sendmail # method is marked private ModuleSecurityInfo('Products.bika.utils').declarePublic('sendmail') # Protected( Publish, 'sendmail') def sendmail(portal, from_addr, to_addrs, msg): mailspool = portal.portal_mailspool mailspool.sendmail(from_addr, to_addrs, msg) class js_log(BrowserView): def __call__(self, message): """Javascript sends a string for us to place into the log. """ self.logger.info(message) class js_err(BrowserView): def __call__(self, message): """Javascript sends a string for us to place into the error log """ self.logger.error(message); ModuleSecurityInfo('Products.bika.utils').declarePublic('printfile') def printfile(portal, from_addr, to_addrs, msg): """ set the path, then the cmd 'lpr filepath' temp_path = 'C:/Zope2/Products/Bika/version.txt' os.system('lpr "%s"' %temp_path) """ pass def _cache_key_getUsers(method, context, roles=[], allow_empty=True): key = time() // (60 * 60), roles, allow_empty return key @ram.cache(_cache_key_getUsers) def getUsers(context, roles, allow_empty=True): """ Present a DisplayList containing users in the specified list of roles """ mtool = getToolByName(context, 'portal_membership') pairs = allow_empty and [['', '']] or [] users = mtool.searchForMembers(roles=roles) for user in users: uid = user.getId() fullname = user.getProperty('fullname') if not fullname: fullname = uid pairs.append((uid, fullname)) pairs.sort(lambda x, y: cmp(x[1], y[1])) return DisplayList(pairs) def isActive(obj): """ Check if obj is inactive or cancelled. 
""" wf = getToolByName(obj, 'portal_workflow') if (hasattr(obj, 'inactive_state') and obj.inactive_state == 'inactive') or \ wf.getInfoFor(obj, 'inactive_state', 'active') == 'inactive': return False if (hasattr(obj, 'cancellation_state') and obj.inactive_state == 'cancelled') or \ wf.getInfoFor(obj, 'cancellation_state', 'active') == 'cancelled': return False return True def formatDateQuery(context, date_id): """ Obtain and reformat the from and to dates into a date query construct """ from_date = context.REQUEST.get('%s_fromdate' % date_id, None) if from_date: from_date = from_date + ' 00:00' to_date = context.REQUEST.get('%s_todate' % date_id, None) if to_date: to_date = to_date + ' 23:59' date_query = {} if from_date and to_date: date_query = {'query': [from_date, to_date], 'range': 'min:max'} elif from_date or to_date: date_query = {'query': from_date or to_date, 'range': from_date and 'min' or 'max'} return date_query def formatDateParms(context, date_id): """ Obtain and reformat the from and to dates into a printable date parameter construct """ from_date = context.REQUEST.get('%s_fromdate' % date_id, None) to_date = context.REQUEST.get('%s_todate' % date_id, None) date_parms = {} if from_date and to_date: date_parms = 'from %s to %s' % (from_date, to_date) elif from_date: date_parms = 'from %s' % (from_date) elif to_date: date_parms = 'to %s' % (to_date) return date_parms def formatDuration(context, totminutes): """ Format a time period in a usable manner: eg. 3h24m """ mins = totminutes % 60 hours = (totminutes - mins) / 60 if mins: mins_str = '%sm' % mins else: mins_str = '' if hours: hours_str = '%sh' % hours else: hours_str = '' return '%s%s' % (hours_str, mins_str) def formatDecimalMark(value, decimalmark='.'): """ Dummy method to replace decimal mark from an input string. Assumes that 'value' uses '.' as decimal mark and ',' as thousand mark. """ rawval = value if decimalmark == ',': rawval = rawval.replace('.', '[comma]') rawval = rawval.replace(',', '.') rawval = rawval.replace('[comma]', ',') return rawval # encode_header function copied from roundup's rfc2822 package. 
hqre = re.compile(r'^[A-z0-9!"#$%%&\'()*+,-./:;<=>?@\[\]^_`{|}~ ]+$') ModuleSecurityInfo('Products.bika.utils').declarePublic('encode_header') def encode_header(header, charset='utf-8'): """ Will encode in quoted-printable encoding only if header contains non latin characters """ # Return empty headers unchanged if not header: return header # return plain header if it does not contain non-ascii characters if hqre.match(header): return header quoted = '' # max_encoded = 76 - len(charset) - 7 for c in header: # Space may be represented as _ instead of =20 for readability if c == ' ': quoted += '_' # These characters can be included verbatim elif hqre.match(c): quoted += c # Otherwise, replace with hex value like =E2 else: quoted += "=%02X" % ord(c) return '=?%s?q?%s?=' % (charset, quoted) def zero_fill(matchobj): return matchobj.group().zfill(8) num_sort_regex = re.compile('\d+') ModuleSecurityInfo('Products.bika.utils').declarePublic('sortable_title') def sortable_title(portal, title): """Convert title to sortable title """ if not title: return '' def_charset = portal.plone_utils.getSiteEncoding() sortabletitle = title.lower().strip() # Replace numbers with zero filled numbers sortabletitle = num_sort_regex.sub(zero_fill, sortabletitle) # Truncate to prevent bloat for charset in [def_charset, 'latin-1', 'utf-8']: try: sortabletitle = safe_unicode(sortabletitle, charset)[:30] sortabletitle = sortabletitle.encode(def_charset or 'utf-8') break except UnicodeError: pass except TypeError: # If we get a TypeError if we already have a unicode string sortabletitle = sortabletitle[:30] break return sortabletitle def logged_in_client(context, member=None): if not member: membership_tool = getToolByName(context, 'portal_membership') member = membership_tool.getAuthenticatedMember() client = None groups_tool = context.portal_groups member_groups = [groups_tool.getGroupById(group.id).getGroupName() for group in groups_tool.getGroupsByUserId(member.id)] if 'Clients' in member_groups: for obj in context.clients.objectValues("Client"): if member.id in obj.users_with_local_role('Owner'): client = obj return client def changeWorkflowState(content, wf_id, state_id, acquire_permissions=False, portal_workflow=None, **kw): """Change the workflow state of an object @param content: Content obj which state will be changed @param state_id: name of the state to put on content @param acquire_permissions: True->All permissions unchecked and on riles and acquired False->Applies new state security map @param portal_workflow: Provide workflow tool (optimisation) if known @param kw: change the values of same name of the state mapping @return: None """ if portal_workflow is None: portal_workflow = getToolByName(content, 'portal_workflow') # Might raise IndexError if no workflow is associated to this type found_wf = 0 for wf_def in portal_workflow.getWorkflowsFor(content): if wf_id == wf_def.getId(): found_wf = 1 break if not found_wf: logger.error("%s: Cannot find workflow id %s" % (content, wf_id)) wf_state = { 'action': None, 'actor': None, 'comments': "Setting state to %s" % state_id, 'review_state': state_id, 'time': DateTime(), } # Updating wf_state from keyword args for k in kw.keys(): # Remove unknown items if k not in wf_state: del kw[k] if 'review_state' in kw: del kw['review_state'] wf_state.update(kw) portal_workflow.setStatusOf(wf_id, content, wf_state) if acquire_permissions: # Acquire all permissions for permission in content.possible_permissions(): content.manage_permission(permission, acquire=1) else: # 
Setting new state permissions wf_def.updateRoleMappingsFor(content) # Map changes to the catalogs content.reindexObject(idxs=['allowedRolesAndUsers', 'review_state']) return def tmpID(): import binascii return binascii.hexlify(os.urandom(16)) def isnumber(s): try: float(s) return True except ValueError: return False def createPdf(htmlreport, outfile=None, css=None): debug_mode = App.config.getConfiguration().debug_mode # XXX css must be a local file - urllib fails under robotframework tests. css_def = '' if css: if css.startswith("http://") or css.startswith("https://"): # Download css file in temp dir u = urllib2.urlopen(css) _cssfile = Globals.INSTANCE_HOME + '/var/' + tmpID() + '.css' localFile = open(_cssfile, 'w') localFile.write(u.read()) localFile.close() else: _cssfile = css cssfile = open(_cssfile, 'r') css_def = cssfile.read() if not outfile: outfile = Globals.INSTANCE_HOME + "/var/" + tmpID() + ".pdf" from weasyprint import HTML, CSS import os if css: HTML(string=htmlreport, encoding='utf-8').write_pdf(outfile, stylesheets=[CSS(string=css_def)]) else: HTML(string=htmlreport, encoding='utf-8').write_pdf(outfile) if debug_mode: htmlfilepath = Globals.INSTANCE_HOME + "/var/" + tmpID() + ".html" htmlfile = open(htmlfilepath, 'w') htmlfile.write(htmlreport) htmlfile.close() return open(outfile, 'r').read(); def attachPdf(mimemultipart, pdfreport, filename=None): part = MIMEBase('application', "application/pdf") part.add_header('Content-Disposition', 'attachment; filename="%s.pdf"' % (filename or tmpID())) part.set_payload(pdfreport) Encoders.encode_base64(part) mimemultipart.attach(part) def get_invoice_item_description(obj): if obj.portal_type == 'AnalysisRequest': sample = obj.getSample() samplepoint = sample.getSamplePoint() samplepoint = samplepoint and samplepoint.Title() or '' sampletype = sample.getSampleType() sampletype = sampletype and sampletype.Title() or '' description = sampletype + ' ' + samplepoint elif obj.portal_type == 'SupplyOrder': products = obj.folderlistingFolderContents() products = [o.getProduct().Title() for o in products] description = ', '.join(products) return description def currency_format(context, locale): locale = locales.getLocale(locale) currency = context.bika_setup.getCurrency() symbol = locale.numbers.currencies[currency].symbol def format(val): return '%s %0.2f' % (symbol, val) return format def getHiddenAttributesForClass(classname): try: registry = queryUtility(IRegistry) hiddenattributes = registry.get('bika.lims.hiddenattributes', ()) if hiddenattributes is not None: for alist in hiddenattributes: if alist[0] == classname: return alist[1:] except: logger.warning( 'Probem accessing optionally hidden attributes in registry') return [] def isAttributeHidden(classname, fieldname): try: registry = queryUtility(IRegistry) hiddenattributes = registry.get('bika.lims.hiddenattributes', ()) if hiddenattributes is not None: for alist in hiddenattributes: if alist[0] == classname: return fieldname in alist[1:] except: logger.warning( 'Probem accessing optionally hidden attributes in registry') return False def dicts_to_dict(dictionaries, key_subfieldname): """Convert a list of dictionaries into a dictionary of dictionaries. key_subfieldname must exist in each Record's subfields and have a value, which will be used as the key for the new dictionary. If a key is duplicated, the earlier value will be overwritten. """ result = {} for d in dictionaries: result[d[key_subfieldname]] = d return result
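Editor's sketch (not part of the module above): the pure helpers can be exercised without a Plone/Zope site. This sketch re-types formatDuration and dicts_to_dict standalone (importing bika.lims would pull in the whole CMF stack) and reproduces the documented '3h24m' style output.

def format_duration(totminutes):
    # mirrors bika.lims.utils.formatDuration
    mins = totminutes % 60
    hours = (totminutes - mins) // 60
    return '%s%s' % ('%sh' % hours if hours else '', '%sm' % mins if mins else '')

def dicts_to_dict(dictionaries, key_subfieldname):
    # mirrors bika.lims.utils.dicts_to_dict: key each record by one of its fields
    return dict((d[key_subfieldname], d) for d in dictionaries)

print(format_duration(204))    # 3h24m
print(dicts_to_dict([{'id': 'a', 'v': 1}, {'id': 'b', 'v': 2}], 'id'))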
agpl-3.0
-5,184,133,056,224,156,000
29.818182
86
0.630189
false
3.817358
false
false
false
zmarvel/slowboy
slowboy/util.py
1
1753
import abc from collections import namedtuple Op = namedtuple('Op', ['function', 'cycles', 'description']) class ClockListener(metaclass=abc.ABCMeta): @abc.abstractmethod def notify(self, clock: int, cycles: int): """Notify the listener that the clock has advanced. :param clock: The new value of the CPU clock. :param cycles: The number of cycles that have passed since the last notification.""" pass def uint8toBCD(uint8): """Convert an 8-bit unsigned integer to binary-coded decimal.""" d1 = uint8 // 10 d0 = uint8 % 10 return (d1 << 4) | d0 def sub_s8(x, y): """Subtract two 8-bit integers stored in two's complement.""" return (x + twoscompl8(y)) & 0xff def sub_s16(x, y): """Subtract two 16-bit integers stored in two's complement.""" return (x + twoscompl16(y)) & 0xffff def add_s8(x, y): """Add two 8-bit integers stored in two's complement.""" return (x + y) & 0xff def add_s16(x, y): """Add two 16-bit integers stored in two's complement.""" return (x + y) & 0xffff def twoscompl8(x): """Returns the reciprocal of 8-bit x in two's complement.""" return ((x ^ 0xff) + 1) & 0xff def twoscompl16(x): """Returns the reciprocal of 16-bit x in two's complement.""" return ((x ^ 0xffff) + 1) & 0xffff def hexdump(bytes, line_len, start=0): line = [] j = 0 for b in bytes: s = '{:02x}'.format(b) if j % line_len == 0 and j > 0: yield '{:04x}: {}'.format(start+j-line_len, ' '.join(line)) line = [] j += 1 line.append(s) yield '{:04x}: {}'.format(start+j-line_len, ' '.join(line)) def print_lines(it): for line in it: print(line)
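Editor's sketch (not part of the module above): a few sanity checks for the arithmetic helpers, assuming the package is importable as slowboy.util (matching the path in this record).

from slowboy.util import add_s8, sub_s8, twoscompl8, uint8toBCD

assert uint8toBCD(42) == 0x42    # decimal 42 encodes as BCD 0x42
assert twoscompl8(1) == 0xff     # -1 in 8-bit two's complement
assert sub_s8(5, 7) == 0xfe      # 5 - 7 wraps to 0xfe (-2)
assert add_s8(0xff, 1) == 0x00   # 255 + 1 wraps around to 0
print('slowboy.util helpers behave as expected')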
mit
8,089,310,398,789,706,000
24.405797
75
0.590416
false
3.240296
false
false
false
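A quick standalone check of the arithmetic helpers in the slowboy/util.py record above; the function bodies are copied from the record so the assertions can run without the package, and the test values are illustrative only.

def twoscompl8(x):
    # 8-bit two's-complement negation.
    return ((x ^ 0xff) + 1) & 0xff

def sub_s8(x, y):
    # 8-bit subtraction implemented as addition of the negated operand.
    return (x + twoscompl8(y)) & 0xff

def uint8toBCD(uint8):
    # Tens digit in the high nibble, ones digit in the low nibble.
    return ((uint8 // 10) << 4) | (uint8 % 10)

assert sub_s8(0x05, 0x07) == 0xfe   # 5 - 7 = -2, i.e. 0xfe in 8-bit two's complement
assert uint8toBCD(42) == 0x42       # decimal 42 encodes as BCD 0x42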
koreiklein/fantasia
ui/render/gl/distances.py
1
1104
# Copyright (C) 2013 Korei Klein <[email protected]> # Constants for gl rendering of basic are collected here. from ui.render.gl import colors epsilon = 0.0001 divider_spacing = 15.0 notThickness = 22.0 notShiftThickness = notThickness + 21.0 # Amount by which to shift the value contained inside a Not. notShiftOffset = [notShiftThickness + 5, notShiftThickness, 0.0] quantifier_variables_spacing = 100.0 variable_binding_spacing = 20.0 quantifier_before_divider_spacing = 10.0 quantifier_after_divider_spacing = 55.0 infixSpacing = 88.0 applySpacing = 16.0 productVariableHorizontalSpacing = 0.0 productVariableBorder = 10.0 symbolBackgroundBorderWidth = 30.0 variableBackgroundBorderWidth = 30.0 holdsSpacing = 60.0 iffSpacing = 35.0 exponential_border_width = 40.0 min_unit_divider_length = 100.0 min_intersect_divider_length = 250.0 unit_width = 20.0 quantifier_divider_width = 20.0 conjunctiveDividerWidth = 20.0 def capLengthOfDividerByLength(length): return min(35.0, length / 7.0) inject_spacing = 8.0 before_dot_spacing = 8.0 after_dot_spacing = 8.0 dotWidth = 15.0
gpl-2.0
-6,752,107,023,383,802,000
19.444444
64
0.764493
false
2.860104
false
false
false
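The only function in the ui/render/gl/distances.py record above is capLengthOfDividerByLength; it is restated here with two illustrative inputs to show the proportional-then-capped behaviour.

def capLengthOfDividerByLength(length):
    # Divider caps grow with length but never exceed 35.0 units.
    return min(35.0, length / 7.0)

assert capLengthOfDividerByLength(70.0) == 10.0    # short divider: proportional cap
assert capLengthOfDividerByLength(1000.0) == 35.0  # long divider: clamped to the maximum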
shakfu/start-vm
default/bin/normalize.py
1
1259
#!/usr/bin/env python import hashlib import os import sys from datetime import datetime HASH = hashlib.md5(str(datetime.now())).hexdigest() def normalize(path, file_func=None, dir_func=None): ''' recursive normalization of directory and file names applies the following changes to directory and filenames: - lowercasing - converts spaces to '-' ''' norm_func = lambda x: x.lower().replace(' ', '-') if not file_func: file_func = norm_func if not dir_func: dir_func = norm_func for root, dirs, files in os.walk(path, topdown=False): for name in files: f = os.path.join(root, name) print(file_func(f)) for name in dirs: d = os.path.join(root, name) #print(dir_func(d)) def norm_func(path): entry = os.path.basename(path) parent = os.path.dirname(path) entry_norm = entry.lower().replace(' ', '-') p = os.path.join(parent, entry_norm)+HASH os.rename(path, p) new = p.strip(HASH) os.rename(p, new) return new def norm_path(path=None): if not path: path = sys.argv[1] normalize(path, norm_func) #normalize(path, None, norm_func) if __name__ == '__main__': norm_path()
mit
-1,232,118,386,050,333,700
23.211538
65
0.597299
false
3.449315
false
false
false
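The normalization rule in the default/bin/normalize.py record above lowercases names and converts spaces to '-'; below is a filesystem-free sketch of that rule applied to hypothetical names (the norm_name wrapper is not part of the script).

def norm_name(name):
    # The same transformation the script applies to each path component.
    return name.lower().replace(' ', '-')

assert norm_name('My Documents') == 'my-documents'
assert norm_name('Photo Album 2019') == 'photo-album-2019'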
luci/luci-py
appengine/components/components/auth/change_log_test.py
2
45674
#!/usr/bin/env vpython # Copyright 2014 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. import datetime import sys import unittest from test_support import test_env test_env.setup_test_env() from google.appengine.ext import ndb from components import utils from components.auth import change_log from components.auth import model from components.auth.proto import realms_pb2 from components.auth.proto import security_config_pb2 from test_support import test_case class MakeInitialSnapshotTest(test_case.TestCase): """Tests for ensure_initial_snapshot function.""" def test_works(self): # Initial entities. Don't call 'record_revision' to imitate "old" # application without history related code. @ndb.transactional def make_auth_db(): model.AuthGlobalConfig(key=model.root_key()).put() model.AuthIPWhitelistAssignments( key=model.ip_whitelist_assignments_key()).put() model.AuthGroup(key=model.group_key('A group')).put() model.AuthIPWhitelist(key=model.ip_whitelist_key('A whitelist')).put() model.replicate_auth_db() make_auth_db() # Bump auth_db once more to avoid hitting trivial case of "processing first # revision ever". auth_db_rev = ndb.transaction(model.replicate_auth_db) self.assertEqual(2, auth_db_rev) # Now do the work. change_log.ensure_initial_snapshot(auth_db_rev) # Generated new AuthDB rev with updated entities. self.assertEqual(3, model.get_auth_db_revision()) # Check all *History entities exist now. p = model.historical_revision_key(3) self.assertIsNotNone( ndb.Key('AuthGlobalConfigHistory', 'root', parent=p).get()) self.assertIsNotNone( ndb.Key( 'AuthIPWhitelistAssignmentsHistory', 'default', parent=p).get()) self.assertIsNotNone(ndb.Key('AuthGroupHistory', 'A group', parent=p).get()) self.assertIsNotNone( ndb.Key('AuthIPWhitelistHistory', 'A whitelist', parent=p).get()) # Call again, should be noop (marker is set). change_log.ensure_initial_snapshot(3) self.assertEqual(3, model.get_auth_db_revision()) ident = lambda x: model.Identity.from_bytes('user:' + x) glob = lambda x: model.IdentityGlob.from_bytes('user:' + x) def make_group(name, comment, **kwargs): group = model.AuthGroup(key=model.group_key(name), **kwargs) group.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment=comment) group.put() def make_ip_whitelist(name, comment, **kwargs): wl = model.AuthIPWhitelist(key=model.ip_whitelist_key(name), **kwargs) wl.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment=comment) wl.put() def security_config(regexps): msg = security_config_pb2.SecurityConfig(internal_service_regexp=regexps) return msg.SerializeToString() class GenerateChangesTest(test_case.TestCase): """Tests for generate_changes function.""" def setUp(self): super(GenerateChangesTest, self).setUp() self.mock(change_log, 'enqueue_process_change_task', lambda _: None) self.mock_now(datetime.datetime(2015, 1, 2, 3, 4, 5)) def auth_db_transaction(self, callback): """Imitates AuthDB change and subsequent 'process-change' task. Returns parent entity of entity subgroup with all generated changes. 
""" @ndb.transactional def run(): callback() return model.replicate_auth_db() auth_db_rev = run() change_log.process_change(auth_db_rev) return change_log.change_log_revision_key(auth_db_rev) def grab_all(self, ancestor): """Returns dicts with all entities under given ancestor.""" entities = {} def cb(key): # Skip AuthDBLogRev itself, it's not interesting. if key == ancestor: return as_str = [] k = key while k and k != ancestor: as_str.append('%s:%s' % (k.kind(), k.id())) k = k.parent() entities['/'.join(as_str)] = { prop: val for prop, val in key.get().to_dict().items() if val } ndb.Query(ancestor=ancestor).map(cb, keys_only=True) return entities def test_works(self): # Touch all kinds of entities at once. More thorough tests for per-entity # changes are below. def touch_all(): make_group( name='A group', members=[ident('[email protected]'), ident('[email protected]')], description='Blah', comment='New group') make_ip_whitelist( name='An IP whitelist', subnets=['127.0.0.1/32'], description='Bluh', comment='New IP whitelist') a = model.AuthIPWhitelistAssignments( key=model.ip_whitelist_assignments_key(), assignments=[ model.AuthIPWhitelistAssignments.Assignment( identity=ident('[email protected]'), ip_whitelist='An IP whitelist') ]) a.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='New assignment') a.put() c = model.AuthGlobalConfig( key=model.root_key(), oauth_client_id='client_id', oauth_client_secret='client_secret', oauth_additional_client_ids=['1', '2']) c.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Config change') c.put() r = model.AuthRealmsGlobals( key=model.realms_globals_key(), permissions=[realms_pb2.Permission(name='luci.dev.p1')]) r.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='New permission') r.put() p = model.AuthProjectRealms( key=model.project_realms_key('proj1'), realms=realms_pb2.Realms(api_version=1234), config_rev='config_rev', perms_rev='prems_rev') p.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='New project') p.put() changes = self.grab_all(self.auth_db_transaction(touch_all)) self.assertEqual({ 'AuthDBChange:AuthGlobalConfig$root!7000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_CONF_OAUTH_CLIENT_CHANGED, 'class_': [u'AuthDBChange', u'AuthDBConfigChange'], 'comment': u'Config change', 'oauth_client_id': u'client_id', 'oauth_client_secret': u'client_secret', 'target': u'AuthGlobalConfig$root', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGlobalConfig$root!7100': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBConfigChange'], 'comment': u'Config change', 'oauth_additional_client_ids': [u'1', u'2'], 'target': u'AuthGlobalConfig$root', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_CREATED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'New group', 'description': u'Blah', 'owners': u'administrators', 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), 
}, 'AuthDBChange:AuthGroup$A group!1200': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'New group', 'members': [ model.Identity(kind='user', name='[email protected]'), model.Identity(kind='user', name='[email protected]'), ], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelist$An IP whitelist!3000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_IPWL_CREATED, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'], 'comment': u'New IP whitelist', 'description': u'Bluh', 'target': u'AuthIPWhitelist$An IP whitelist', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelist$An IP whitelist!3200': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'], 'comment': u'New IP whitelist', 'subnets': [u'127.0.0.1/32'], 'target': u'AuthIPWhitelist$An IP whitelist', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelistAssignments' '$default$user:[email protected]!5000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'], 'comment': u'New assignment', 'identity': model.Identity(kind='user', name='[email protected]'), 'ip_whitelist': u'An IP whitelist', 'target': u'AuthIPWhitelistAssignments$default$user:[email protected]', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]') }, 'AuthDBChange:AuthProjectRealms$proj1!10000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CREATED, 'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'], 'comment': u'New project', 'config_rev_new': u'config_rev', 'perms_rev_new': u'prems_rev', 'target': u'AuthProjectRealms$proj1', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]') }, 'AuthDBChange:AuthRealmsGlobals$globals!9000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_REALMS_GLOBALS_CHANGED, 'class_': [u'AuthDBChange', u'AuthRealmsGlobalsChange'], 'comment': u'New permission', 'permissions_added': [u'luci.dev.p1'], 'target': u'AuthRealmsGlobals$globals', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]') }, }, changes) def test_groups_diff(self): def create(): make_group( name='A group', members=[ident('[email protected]'), ident('[email protected]')], globs=[glob('*@example.com'), glob('*@other.com')], nested=['A', 'B'], description='Blah', comment='New group') changes = self.grab_all(self.auth_db_transaction(create)) self.assertEqual({ 'AuthDBChange:AuthGroup$A group!1000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_CREATED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'New group', 'description': u'Blah', 'owners': u'administrators', 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': 
model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1200': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'New group', 'members': [ model.Identity(kind='user', name='[email protected]'), model.Identity(kind='user', name='[email protected]'), ], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1400': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'New group', 'globs': [ model.IdentityGlob(kind='user', pattern='*@example.com'), model.IdentityGlob(kind='user', pattern='*@other.com'), ], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1600': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_ADDED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'New group', 'nested': [u'A', u'B'], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def modify(): g = model.group_key('A group').get() g.members = [ident('[email protected]'), ident('[email protected]')] g.globs = [glob('*@example.com'), glob('*@blah.com')] g.nested = ['A', 'C'] g.description = 'Another blah' g.owners = 'another-owners' g.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Changed') g.put() changes = self.grab_all(self.auth_db_transaction(modify)) self.assertEqual({ 'AuthDBChange:AuthGroup$A group!1100': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_DESCRIPTION_CHANGED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Changed', 'description': u'Another blah', 'old_description': u'Blah', 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1150': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_OWNERS_CHANGED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Changed', 'old_owners': u'administrators', 'owners': u'another-owners', 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1200': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Changed', 'members': [model.Identity(kind='user', name='[email protected]')], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1300': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_REMOVED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Changed', 'members': [model.Identity(kind='user', name='[email protected]')], 'target': u'AuthGroup$A group', 'when': 
datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1400': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Changed', 'globs': [model.IdentityGlob(kind='user', pattern='*@blah.com')], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1500': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_REMOVED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Changed', 'globs': [model.IdentityGlob(kind='user', pattern='*@other.com')], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1600': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_ADDED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Changed', 'nested': [u'C'], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1700': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_REMOVED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Changed', 'nested': [u'B'], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def delete(): g = model.group_key('A group').get() g.record_deletion( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Deleted') g.key.delete() changes = self.grab_all(self.auth_db_transaction(delete)) self.assertEqual({ 'AuthDBChange:AuthGroup$A group!1300': { 'app_version': u'v1a', 'auth_db_rev': 3, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_REMOVED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Deleted', 'members': [ model.Identity(kind='user', name='[email protected]'), model.Identity(kind='user', name='[email protected]'), ], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1500': { 'app_version': u'v1a', 'auth_db_rev': 3, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_GLOBS_REMOVED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Deleted', 'globs': [ model.IdentityGlob(kind='user', pattern='*@example.com'), model.IdentityGlob(kind='user', pattern='*@blah.com'), ], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1700': { 'app_version': u'v1a', 'auth_db_rev': 3, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_NESTED_REMOVED, 'class_': [u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Deleted', 'nested': [u'A', u'C'], 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGroup$A group!1800': { 'app_version': u'v1a', 'auth_db_rev': 3, 'change_type': change_log.AuthDBChange.CHANGE_GROUP_DELETED, 'class_': 
[u'AuthDBChange', u'AuthDBGroupChange'], 'comment': u'Deleted', 'old_description': u'Another blah', 'old_owners': u'another-owners', 'target': u'AuthGroup$A group', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def test_ip_whitelists_diff(self): def create(): make_ip_whitelist( name='A list', subnets=['127.0.0.1/32', '127.0.0.2/32'], description='Blah', comment='New list') changes = self.grab_all(self.auth_db_transaction(create)) self.assertEqual({ 'AuthDBChange:AuthIPWhitelist$A list!3000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_IPWL_CREATED, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'], 'comment': u'New list', 'description': u'Blah', 'target': u'AuthIPWhitelist$A list', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelist$A list!3200': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'], 'comment': u'New list', 'subnets': [u'127.0.0.1/32', u'127.0.0.2/32'], 'target': u'AuthIPWhitelist$A list', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def modify(): l = model.ip_whitelist_key('A list').get() l.subnets = ['127.0.0.1/32', '127.0.0.3/32'] l.description = 'Another blah' l.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Changed') l.put() changes = self.grab_all(self.auth_db_transaction(modify)) self.assertEqual({ 'AuthDBChange:AuthIPWhitelist$A list!3100': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_IPWL_DESCRIPTION_CHANGED, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'], 'comment': u'Changed', 'description': u'Another blah', 'old_description': u'Blah', 'target': u'AuthIPWhitelist$A list', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelist$A list!3200': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'], 'comment': u'Changed', 'subnets': [u'127.0.0.3/32'], 'target': u'AuthIPWhitelist$A list', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelist$A list!3300': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_REMOVED, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'], 'comment': u'Changed', 'subnets': [u'127.0.0.2/32'], 'target': u'AuthIPWhitelist$A list', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def delete(): l = model.ip_whitelist_key('A list').get() l.record_deletion( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Deleted') l.key.delete() changes = self.grab_all(self.auth_db_transaction(delete)) self.assertEqual({ 'AuthDBChange:AuthIPWhitelist$A list!3300': { 'app_version': u'v1a', 'auth_db_rev': 3, 'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_REMOVED, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'], 'comment': u'Deleted', 'subnets': [u'127.0.0.1/32', u'127.0.0.3/32'], 'target': 
u'AuthIPWhitelist$A list', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelist$A list!3400': { 'app_version': u'v1a', 'auth_db_rev': 3, 'change_type': change_log.AuthDBChange.CHANGE_IPWL_DELETED, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'], 'comment': u'Deleted', 'old_description': u'Another blah', 'target': u'AuthIPWhitelist$A list', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def test_ip_wl_assignments_diff(self): def create(): a = model.AuthIPWhitelistAssignments( key=model.ip_whitelist_assignments_key(), assignments=[ model.AuthIPWhitelistAssignments.Assignment( identity=ident('[email protected]'), ip_whitelist='An IP whitelist'), model.AuthIPWhitelistAssignments.Assignment( identity=ident('[email protected]'), ip_whitelist='Another IP whitelist'), ]) a.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='New assignment') a.put() changes = self.grab_all(self.auth_db_transaction(create)) self.assertEqual({ 'AuthDBChange:AuthIPWhitelistAssignments$' 'default$user:[email protected]!5000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'], 'comment': u'New assignment', 'identity': model.Identity(kind='user', name='[email protected]'), 'ip_whitelist': u'An IP whitelist', 'target': u'AuthIPWhitelistAssignments$default$user:[email protected]', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelistAssignments$' 'default$user:[email protected]!5000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'], 'comment': u'New assignment', 'identity': model.Identity(kind='user', name='[email protected]'), 'ip_whitelist': u'Another IP whitelist', 'target': u'AuthIPWhitelistAssignments$default$user:[email protected]', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def change(): a = model.ip_whitelist_assignments_key().get() a.assignments=[ model.AuthIPWhitelistAssignments.Assignment( identity=ident('[email protected]'), ip_whitelist='Another IP whitelist'), model.AuthIPWhitelistAssignments.Assignment( identity=ident('[email protected]'), ip_whitelist='IP whitelist'), ] a.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='change') a.put() changes = self.grab_all(self.auth_db_transaction(change)) self.assertEqual({ 'AuthDBChange:AuthIPWhitelistAssignments$' 'default$user:[email protected]!5000': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'], 'comment': u'change', 'identity': model.Identity(kind='user', name='[email protected]'), 'ip_whitelist': u'Another IP whitelist', 'target': u'AuthIPWhitelistAssignments$default$user:[email protected]', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelistAssignments$' 'default$user:[email protected]!5100': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': 
change_log.AuthDBChange.CHANGE_IPWLASSIGN_UNSET, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'], 'comment': u'change', 'identity': model.Identity(kind='user', name='[email protected]'), 'ip_whitelist': u'Another IP whitelist', 'target': u'AuthIPWhitelistAssignments$default$user:[email protected]', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthIPWhitelistAssignments$' 'default$user:[email protected]!5000': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET, 'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'], 'comment': u'change', 'identity': model.Identity(kind='user', name='[email protected]'), 'ip_whitelist': u'IP whitelist', 'target': u'AuthIPWhitelistAssignments$default$user:[email protected]', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def test_global_config_diff(self): def create(): c = model.AuthGlobalConfig( key=model.root_key(), oauth_client_id='client_id', oauth_client_secret='client_secret', oauth_additional_client_ids=['1', '2']) c.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Config change') c.put() changes = self.grab_all(self.auth_db_transaction(create)) self.assertEqual({ 'AuthDBChange:AuthGlobalConfig$root!7000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_CONF_OAUTH_CLIENT_CHANGED, 'class_': [u'AuthDBChange', u'AuthDBConfigChange'], 'comment': u'Config change', 'oauth_client_id': u'client_id', 'oauth_client_secret': u'client_secret', 'target': u'AuthGlobalConfig$root', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGlobalConfig$root!7100': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBConfigChange'], 'comment': u'Config change', 'oauth_additional_client_ids': [u'1', u'2'], 'target': u'AuthGlobalConfig$root', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def modify(): c = model.root_key().get() c.oauth_additional_client_ids = ['1', '3'] c.token_server_url = 'https://token-server' c.security_config = security_config(['hi']) c.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Config change') c.put() changes = self.grab_all(self.auth_db_transaction(modify)) self.assertEqual({ 'AuthDBChange:AuthGlobalConfig$root!7100': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED, 'class_': [u'AuthDBChange', u'AuthDBConfigChange'], 'comment': u'Config change', 'oauth_additional_client_ids': [u'3'], 'target': u'AuthGlobalConfig$root', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGlobalConfig$root!7200': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_REMOVED, 'class_': [u'AuthDBChange', u'AuthDBConfigChange'], 'comment': u'Config change', 'oauth_additional_client_ids': [u'2'], 'target': u'AuthGlobalConfig$root', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 
'AuthDBChange:AuthGlobalConfig$root!7300': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_CONF_TOKEN_SERVER_URL_CHANGED, 'class_': [u'AuthDBChange', u'AuthDBConfigChange'], 'comment': u'Config change', 'target': u'AuthGlobalConfig$root', 'token_server_url_new': u'https://token-server', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthGlobalConfig$root!7400': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_CONF_SECURITY_CONFIG_CHANGED, 'class_': [u'AuthDBChange', u'AuthDBConfigChange'], 'comment': u'Config change', 'security_config_new': security_config(['hi']), 'target': u'AuthGlobalConfig$root', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def test_realms_globals_diff(self): def create(): c = model.AuthRealmsGlobals( key=model.realms_globals_key(), permissions=[ realms_pb2.Permission(name='luci.dev.p1'), realms_pb2.Permission(name='luci.dev.p2'), realms_pb2.Permission(name='luci.dev.p3'), ]) c.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='New realms config') c.put() self.auth_db_transaction(create) def modify(): ent = model.realms_globals_key().get() ent.permissions = [ realms_pb2.Permission(name='luci.dev.p1'), realms_pb2.Permission(name='luci.dev.p3'), realms_pb2.Permission(name='luci.dev.p4'), ] ent.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Realms config change') ent.put() changes = self.grab_all(self.auth_db_transaction(modify)) self.assertEqual({ 'AuthDBChange:AuthRealmsGlobals$globals!9000': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_REALMS_GLOBALS_CHANGED, 'class_': [u'AuthDBChange', u'AuthRealmsGlobalsChange'], 'comment': u'Realms config change', 'permissions_added': [u'luci.dev.p4'], 'permissions_removed': [u'luci.dev.p2'], 'target': u'AuthRealmsGlobals$globals', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def test_project_realms_diff(self): # Note: in reality Realms.api_version is fixed. We change it in this test # since it is the simplest field to change. def create(): p = model.AuthProjectRealms( key=model.project_realms_key('proj1'), realms=realms_pb2.Realms(api_version=123), config_rev='config_rev1', perms_rev='perms_rev1') p.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Created') p.put() changes = self.grab_all(self.auth_db_transaction(create)) self.assertEqual({ 'AuthDBChange:AuthProjectRealms$proj1!10000': { 'app_version': u'v1a', 'auth_db_rev': 1, 'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CREATED, 'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'], 'comment': u'Created', 'config_rev_new': u'config_rev1', 'perms_rev_new': u'perms_rev1', 'target': u'AuthProjectRealms$proj1', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) def update(api_version, config_rev, perms_rev): p = model.project_realms_key('proj1').get() p.realms = realms_pb2.Realms(api_version=api_version) p.config_rev = config_rev p.perms_rev = perms_rev p.record_revision( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Updated') p.put() # Update everything. 
changes = self.grab_all(self.auth_db_transaction( lambda: update(1234, 'config_rev2', 'perms_rev2'))) self.assertEqual({ 'AuthDBChange:AuthProjectRealms$proj1!10100': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CHANGED, 'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'], 'comment': u'Updated', 'config_rev_new': u'config_rev2', 'config_rev_old': u'config_rev1', 'target': u'AuthProjectRealms$proj1', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, 'AuthDBChange:AuthProjectRealms$proj1!10200': { 'app_version': u'v1a', 'auth_db_rev': 2, 'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_REEVALUATED, 'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'], 'comment': u'Updated', 'perms_rev_new': u'perms_rev2', 'perms_rev_old': u'perms_rev1', 'target': u'AuthProjectRealms$proj1', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) # Update realms_pb2.Realms, but do not change revisions. changes = self.grab_all(self.auth_db_transaction( lambda: update(12345, 'config_rev2', 'perms_rev2'))) self.assertEqual({ 'AuthDBChange:AuthProjectRealms$proj1!10100': { 'app_version': u'v1a', 'auth_db_rev': 3, 'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CHANGED, 'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'], 'comment': u'Updated', 'config_rev_new': u'config_rev2', 'config_rev_old': u'config_rev2', 'target': u'AuthProjectRealms$proj1', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) # Update revisions, but don't actually touch realms. changes = self.grab_all(self.auth_db_transaction( lambda: update(12345, 'config_rev3', 'perms_rev3'))) self.assertEqual({}, changes) def delete(): p = model.project_realms_key('proj1').get() p.record_deletion( modified_by=ident('[email protected]'), modified_ts=utils.utcnow(), comment='Deleted') p.key.delete() changes = self.grab_all(self.auth_db_transaction(delete)) self.assertEqual({ 'AuthDBChange:AuthProjectRealms$proj1!10300': { 'app_version': u'v1a', 'auth_db_rev': 5, 'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_REMOVED, 'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'], 'comment': u'Deleted', 'config_rev_old': u'config_rev3', 'perms_rev_old': u'perms_rev3', 'target': u'AuthProjectRealms$proj1', 'when': datetime.datetime(2015, 1, 2, 3, 4, 5), 'who': model.Identity(kind='user', name='[email protected]'), }, }, changes) class AuthDBChangeTest(test_case.TestCase): # Test to_jsonish for AuthDBGroupChange and AuthDBIPWhitelistAssignmentChange, # the rest are trivial. 
def test_group_change_to_jsonish(self): c = change_log.AuthDBGroupChange( change_type=change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED, target='AuthGroup$abc', auth_db_rev=123, who=ident('[email protected]'), when=datetime.datetime(2015, 1, 2, 3, 4, 5), comment='A comment', app_version='v123', description='abc', members=[ident('[email protected]')], globs=[glob('*@a.com')], nested=['A'], owners='abc', old_owners='def') self.assertEqual({ 'app_version': 'v123', 'auth_db_rev': 123, 'change_type': 'GROUP_MEMBERS_ADDED', 'comment': 'A comment', 'description': 'abc', 'globs': ['user:*@a.com'], 'members': ['user:[email protected]'], 'nested': ['A'], 'old_description': None, 'old_owners': 'def', 'owners': 'abc', 'target': 'AuthGroup$abc', 'when': 1420167845000000, 'who': 'user:[email protected]', }, c.to_jsonish()) def test_wl_assignment_to_jsonish(self): c = change_log.AuthDBIPWhitelistAssignmentChange( change_type=change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED, target='AuthIPWhitelistAssignments$default', auth_db_rev=123, who=ident('[email protected]'), when=datetime.datetime(2015, 1, 2, 3, 4, 5), comment='A comment', app_version='v123', identity=ident('[email protected]'), ip_whitelist='whitelist') self.assertEqual({ 'app_version': 'v123', 'auth_db_rev': 123, 'change_type': 'GROUP_MEMBERS_ADDED', 'comment': 'A comment', 'identity': 'user:[email protected]', 'ip_whitelist': 'whitelist', 'target': 'AuthIPWhitelistAssignments$default', 'when': 1420167845000000, 'who': 'user:[email protected]', }, c.to_jsonish()) def test_security_config_change_to_jsonish(self): c = change_log.AuthDBConfigChange( change_type=change_log.AuthDBChange.CHANGE_CONF_SECURITY_CONFIG_CHANGED, target='AuthGlobalConfig$default', auth_db_rev=123, who=ident('[email protected]'), when=datetime.datetime(2015, 1, 2, 3, 4, 5), comment='A comment', app_version='v123', security_config_old=None, security_config_new=security_config(['hi'])) self.assertEqual({ 'app_version': 'v123', 'auth_db_rev': 123, 'change_type': 'CONF_SECURITY_CONFIG_CHANGED', 'comment': 'A comment', 'oauth_additional_client_ids': [], 'oauth_client_id': None, 'oauth_client_secret': None, 'security_config_new': {'internal_service_regexp': [u'hi']}, 'security_config_old': None, 'target': 'AuthGlobalConfig$default', 'token_server_url_new': None, 'token_server_url_old': None, 'when': 1420167845000000, 'who': 'user:[email protected]', }, c.to_jsonish()) class ChangeLogQueryTest(test_case.TestCase): # We know that some indexes are required. But component can't declare them, # so don't check them. SKIP_INDEX_YAML_CHECK = True def test_is_changle_log_indexed(self): self.assertTrue(change_log.is_changle_log_indexed()) def test_make_change_log_query(self): def mk_ch(tp, rev, target): ch = change_log.AuthDBChange( change_type=getattr(change_log.AuthDBChange, 'CHANGE_%s' % tp), auth_db_rev=rev, target=target) ch.key = change_log.make_change_key(ch) ch.put() def key(c): return '%s/%s' % (c.key.parent().id(), c.key.id()) mk_ch('GROUP_CREATED', 1, 'AuthGroup$abc') mk_ch('GROUP_MEMBERS_ADDED', 1, 'AuthGroup$abc') mk_ch('GROUP_CREATED', 1, 'AuthGroup$another') mk_ch('GROUP_DELETED', 2, 'AuthGroup$abc') mk_ch('GROUP_MEMBERS_ADDED', 2, 'AuthGroup$another') # All. Most recent first. Largest even types first. q = change_log.make_change_log_query() self.assertEqual([ '2/AuthGroup$another!1200', '2/AuthGroup$abc!1800', '1/AuthGroup$another!1000', '1/AuthGroup$abc!1200', '1/AuthGroup$abc!1000', ], map(key, q.fetch())) # Single revision only. 
q = change_log.make_change_log_query(auth_db_rev=1) self.assertEqual([ '1/AuthGroup$another!1000', '1/AuthGroup$abc!1200', '1/AuthGroup$abc!1000', ], map(key, q.fetch())) # Single target only. q = change_log.make_change_log_query(target='AuthGroup$another') self.assertEqual([ '2/AuthGroup$another!1200', '1/AuthGroup$another!1000', ], map(key, q.fetch())) # Single revision and single target. q = change_log.make_change_log_query( auth_db_rev=1, target='AuthGroup$another') self.assertEqual(['1/AuthGroup$another!1000'], map(key, q.fetch())) if __name__ == '__main__': if '-v' in sys.argv: unittest.TestCase.maxDiff = None unittest.main()
apache-2.0
-6,992,933,365,418,927,000
38.238832
80
0.593073
false
3.291108
true
false
false
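The to_jsonish expectations in the change_log_test.py record above encode 'when' as microseconds since the Unix epoch (1420167845000000 for 2015-01-02 03:04:05); the helper below is a standalone illustration of that conversion, not the components.utils implementation used by the tests.

import calendar
import datetime

def to_epoch_usec(dt):
    # Microseconds since the Unix epoch for a naive UTC datetime.
    return calendar.timegm(dt.timetuple()) * 1000000 + dt.microsecond

assert to_epoch_usec(datetime.datetime(2015, 1, 2, 3, 4, 5)) == 1420167845000000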
HydrelioxGitHub/PiDDL
ZTPAGE.py
1
2847
# coding: utf-8 from urllib2 import urlopen import urllib2 import bs4 as BeautifulSoup class ZTPage: def __init__(self, url): self.url = url self.update() def update(self): self.update_content() self.parse_type() self.parse_infos() self.parse_links() def update_content(self): req = urllib2.Request(self.url, headers={'User-Agent': "Magic Browser"}) html = urlopen(req).read() soup = BeautifulSoup.BeautifulSoup(html, "html5lib") self.content = soup.find('div', class_="maincont") def parse_type(self): if "series" in self.url: self.type = "Show" if "films" in self.url: self.type = "Movie" def parse_links(self): liste = {} host = 'error' html = self.content.find('div', class_="contentl").find_all(["span", "a"]) for elem in html: if ('span' == elem.name) and (unicode(elem.string) != 'None'): host = elem.string liste[host] = {} if elem.name == 'a': elem.string = elem.string.replace("Episode", '').replace('Final', '').strip() episode_number = int(elem.string) liste[host][episode_number] = elem.attrs['href'] self.links = liste def parse_infos(self): # Retreive Title title = self.content.find('div', class_="titrearticles").h1.string if self.type == "Show": title = title.split("-") self.title = title[0].strip() # Retreive Season for TV Shows self.season = int(title[1].replace("Saison", "").replace('[Complete]', '').strip()) if self.type == "Movie": self.title = title.strip() # Retreive Language, Format, Codec ... info = self.content.find('div', class_="corps").div.span.span.b.strong.string first_part = info.split('|')[0] second_part = info.split('|')[1] self.language = first_part.split(' ')[1].strip() self.currentEpisode = first_part.split(' ')[0].strip() self.currentEpisode = self.currentEpisode.replace('[', '') self.currentEpisode = int(self.currentEpisode.split('/')[0]) # Pb encodage ... quality = second_part.replace("Qualit", '').strip() quality = quality[1:] # ... self.quality = quality.strip() def get_available_hosts(self): return self.links.keys() def get_tvshow_link(self, host, episodenumber): alllinks = self.links[host] link = alllinks[episodenumber] return link def print_report(self): print self.url print self.title print self.season print self.quality print self.language print self.currentEpisode print self.links
gpl-2.0
8,344,752,789,403,362,000
32.892857
95
0.556024
false
3.842105
false
false
false
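A standalone sketch of the episode-label clean-up performed in ZTPage.parse_links in the record above; the sample label is hypothetical.

label = 'Episode 07 Final'
cleaned = label.replace('Episode', '').replace('Final', '').strip()
assert int(cleaned) == 7   # the remaining digits become the episode number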
darknao/piOClock
ssd1351.py
1
13500
#!/bin/env python # -*- coding: UTF-8 -*- # ---------------------------------------------------------------------- # ssd1351.py from https://github.com/guyc/py-gaugette # ported by Jason Porritt, # and reworked by darknao, # based on original work by Guy Carpenter for display.py # # This library works with # Adafruit's 128x96 SPI color OLED http://www.adafruit.com/products/1673 # # The code is based heavily on Adafruit's Arduino library # https://github.com/adafruit/Adafruit_SSD1351 # written by Limor Fried/Ladyada for Adafruit Industries. # # It has the following dependencies: # wiringpi2 for GPIO # spidev for SPI # PIL for easy drawing capabilities # numpy for fast RGB888 to RGB565 convertion # ---------------------------------------------------------------------- # NEED HEAVY CLEANING ! import wiringpi2 import spidev import time import sys from PIL import Image, ImageDraw, ImageFont import logging import numpy as np import tools class SSD1351: # SSD1351 Commands EXTERNAL_VCC = 0x1 SWITCH_CAP_VCC = 0x2 MEMORY_MODE_HORIZ = 0x00 MEMORY_MODE_VERT = 0x01 CMD_SETCOLUMN = 0x15 CMD_SETROW = 0x75 CMD_WRITERAM = 0x5C CMD_READRAM = 0x5D CMD_SETREMAP = 0xA0 CMD_STARTLINE = 0xA1 CMD_DISPLAYOFFSET = 0xA2 CMD_DISPLAYALLOFF = 0xA4 CMD_DISPLAYALLON = 0xA5 CMD_NORMALDISPLAY = 0xA6 CMD_INVERTDISPLAY = 0xA7 CMD_FUNCTIONSELECT = 0xAB CMD_DISPLAYOFF = 0xAE CMD_DISPLAYON = 0xAF CMD_PRECHARGE = 0xB1 CMD_DISPLAYENHANCE = 0xB2 CMD_CLOCKDIV = 0xB3 CMD_SETVSL = 0xB4 CMD_SETGPIO = 0xB5 CMD_PRECHARGE2 = 0xB6 CMD_SETGRAY = 0xB8 CMD_USELUT = 0xB9 CMD_PRECHARGELEVEL = 0xBB CMD_VCOMH = 0xBE CMD_CONTRASTABC = 0xC1 CMD_CONTRASTMASTER = 0xC7 CMD_MUXRATIO = 0xCA CMD_COMMANDLOCK = 0xFD CMD_HORIZSCROLL = 0x96 CMD_STOPSCROLL = 0x9E CMD_STARTSCROLL = 0x9F # Device name will be /dev/spidev-{bus}.{device} # dc_pin is the data/commmand pin. This line is HIGH for data, LOW for command. 
# We will keep d/c low and bump it high only for commands with data # reset is normally HIGH, and pulled LOW to reset the display def __init__(self, bus=0, device=0, dc_pin="P9_15", reset_pin="P9_13", rows=128, cols=128): self.cols = cols self.rows = rows self.dc_pin = dc_pin self.reset_pin = reset_pin # SPI self.spi = spidev.SpiDev(bus, device) self.spi.max_speed_hz = 16000000 # 16Mhz # GPIO self.gpio = wiringpi2.GPIO(wiringpi2.GPIO.WPI_MODE_PINS) self.gpio.pinMode(self.reset_pin, self.gpio.OUTPUT) self.gpio.digitalWrite(self.reset_pin, self.gpio.HIGH) self.gpio.pinMode(self.dc_pin, self.gpio.OUTPUT) self.gpio.digitalWrite(self.dc_pin, self.gpio.LOW) # Drawing tools self.im = Image.new("RGB", (cols, rows), 'black') self.draw = ImageDraw.Draw(self.im) # logging self.log = logging.getLogger(self.__class__.__name__) self.log.setLevel(logging.INFO) self.contrast = 15 def reset(self): self.gpio.digitalWrite(self.reset_pin, self.gpio.LOW) time.sleep(0.010) # 10ms self.gpio.digitalWrite(self.reset_pin, self.gpio.HIGH) def command(self, cmd, cmddata=None): # already low # self.gpio.digitalWrite(self.dc_pin, self.gpio.LOW) if type(cmd) == list: self.spi.writebytes(cmd) else: self.spi.writebytes([cmd]) if cmddata is not None: if type(cmddata) == list: self.data(cmddata) else: self.data([cmddata]) def data(self, bytes): self.gpio.digitalWrite(self.dc_pin, self.gpio.HIGH) max_xfer = 1024 start = 0 remaining = len(bytes) while remaining>0: count = remaining if remaining <= max_xfer else max_xfer remaining -= count self.spi.writebytes(bytes[start:start+count]) start += count self.gpio.digitalWrite(self.dc_pin, self.gpio.LOW) def begin(self, vcc_state = SWITCH_CAP_VCC): time.sleep(0.001) # 1ms self.reset() self.command(self.CMD_COMMANDLOCK, 0x12) self.command(self.CMD_COMMANDLOCK, 0xB1) self.command(self.CMD_DISPLAYOFF) self.command(self.CMD_CLOCKDIV, 0xF1) # support for 128x128 line mode self.command(self.CMD_MUXRATIO, 127) self.command(self.CMD_SETREMAP, 0x74) self.command(self.CMD_SETCOLUMN, [0x00, self.cols-1]) self.command(self.CMD_SETROW, [0x00, self.rows-1]) # TODO Support 96-row display self.command(self.CMD_STARTLINE, 96) self.command(self.CMD_DISPLAYOFFSET, 0x00) self.command(self.CMD_SETGPIO, 0x00) self.command(self.CMD_FUNCTIONSELECT, 0x01) self.command(self.CMD_PRECHARGE, 0x32) self.command(self.CMD_VCOMH, 0x05) self.command(self.CMD_NORMALDISPLAY) self.set_contrast(200) # c8 -> 200 self.set_master_contrast(10) self.command(self.CMD_SETVSL, [0xA0, 0xB5, 0x55]) self.command(self.CMD_PRECHARGE2, 0x01) self.command(self.CMD_DISPLAYON) def set_master_contrast(self, level): # 0 to 15 level &= 0x0F self.command(self.CMD_CONTRASTMASTER, level) def set_contrast(self, level): # 0 to 255 level &= 0xFF self.command(self.CMD_CONTRASTABC, [level, level, level]) self.contrast = level def invert_display(self): self.command(self.CMD_INVERTDISPLAY) def normal_display(self): self.command(self.CMD_NORMALDISPLAY) def scale(self, x, inLow, inHigh, outLow, outHigh): return ((x - inLow) / float(inHigh) * outHigh) + outLow def encode_color(self, color): red = (color >> 16) & 0xFF green = (color >> 8) & 0xFF blue = color & 0xFF redScaled = int(self.scale(red, 0, 0xFF, 0, 0x1F)) greenScaled = int(self.scale(green, 0, 0xFF, 0, 0x3F)) blueScaled = int(self.scale(blue, 0, 0xFF, 0, 0x1F)) return (((redScaled << 6) | greenScaled) << 5) | blueScaled def color565(self, r, g, b): # 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 # r r r r r g g g g g g b b b b b # r = 31 g = 63 b = 31 redScaled = int(self.scale(r, 0, 0xFF, 0, 0x1F)) 
greenScaled = int(self.scale(g, 0, 0xFF, 0, 0x3F)) blueScaled = int(self.scale(b, 0, 0xFF, 0, 0x1F)) return (((redScaled << 6) | greenScaled) << 5) | blueScaled def goTo(self, x, y): if x >= self.cols or y >= self.rows: return # set x and y coordinate self.command(self.CMD_SETCOLUMN, [x, self.cols-1]) self.command(self.CMD_SETROW, [y, self.rows-1]) self.command(self.CMD_WRITERAM) def drawPixel(self, x, y, color): if x >= self.cols or y >= self.rows: return if x < 0 or y < 0: return color = self.encode_color(color) # set location self.goTo(x, y) self.data([color >> 8, color & 0xFF]) def clear(self): """Clear display buffer""" self.im = Image.new("RGB", (self.cols, self.rows), 'black') self.draw = ImageDraw.Draw(self.im) def text_center(self, string, color, font=None, size=10): if font is None: font = ImageFont.truetype("/usr/share/fonts/truetype/droid/DroidSansMono.ttf", size) text_size = self.draw.textsize(string, font=font) text_x = max((self.cols-text_size[0])/2, 0) text_y = max((self.rows-text_size[1])/2, 0) self.draw_text(text_x, text_y, string, color, font=font, size=size) return text_x, text_y def text_center_y(self, text_y, string, color, font=None, size=10): if font is None: font = ImageFont.truetype("/usr/share/fonts/truetype/droid/DroidSansMono.ttf", size) text_size = self.draw.textsize(string, font=font) text_x = max((self.cols-text_size[0])/2, 0) self.draw_text(text_x, text_y, string, color, font=font, size=size) return text_x, text_y def draw_text(self, x, y, string, color, font=None, size=10): if font is None: font = ImageFont.truetype("/usr/share/fonts/truetype/droid/DroidSansMono.ttf", size) self.draw.text((x, y), string, font=font, fill=color) return self.draw.textsize(string, font=font) def fillScreen(self, fillcolor): self.rawFillRect(0, 0, self.cols, self.rows, fillcolor) def rawFillRect(self, x, y, w, h, fillcolor): self.log.debug("fillScreen start") # Bounds check if (x >= self.cols) or (y >= self.rows): return # Y bounds check if y+h > self.rows: h = self.rows - y - 1 # X bounds check if x+w > self.cols: w = self.cols - x - 1 self.setDisplay(x, y, x+(w-1), y+(h-1)) color = self.encode_color(fillcolor) self.data([color >> 8, color & 0xFF] * w*h) self.log.debug("fillScreen end") def setDisplay(self, startx, starty, endx, endy): if startx >= self.cols or starty >= self.rows: return # Y bounds check if endx > self.cols - 1: endx = self.cols - 1 # X bounds check if endy > self.rows - 1: endy = self.rows - 1 # set x and y coordinate # print "x:%d y:%d endx:%d endy:%d" % (startx, starty, endx, endy) self.command(self.CMD_SETCOLUMN, [startx, endx]) self.command(self.CMD_SETROW, [starty, endy]) self.command(self.CMD_WRITERAM) def im2list(self): """Convert PIL RGB888 Image to SSD1351 RAM buffer""" image = np.array(self.im).reshape(-1, 3) image[:,0] *= 0.121 image[:,1] *= 0.247 image[:,2] *= 0.121 d = np.left_shift(image, [11, 5, 0]).sum(axis=1) data =np.dstack(((d>>8)&0xff, d&0xff)).flatten() return data.tolist() def display(self, x=0, y=0, w=None, h=None): """Send display buffer to the device""" self.log.debug("disp in") if h is None: h = self.rows if w is None: w = self.cols x = max(x, 0) y = max(y, 0) w = min(w, self.cols) h = min(h, self.rows) if w-x < 0: return self.log.debug("set display") self.setDisplay(x, y, w-1, h-1) self.log.debug("set display end") data = [] start = y * self.cols + x end = h * self.cols + w self.log.debug("get data") self.data(self.im2list()) self.log.debug("disp out") @tools.timed def dump_disp(self): """Dump display buffer on screen, for 
debugging purpose""" image = np.array(self.im).reshape(-1, 3) for r in range(0, self.rows,2): txt = [None,] * self.cols start = r*self.cols end = start + self.cols * 2 line = image[start:end] for c in range(len(line)): idx = c % self.cols if line[c].sum() > 0: if txt[idx] is None: txt[idx] = '▀' elif txt[idx] == '▀': txt[idx] = '█' else: txt[idx] = '▄' else: if txt[idx] is None: txt[idx] = ' ' print ''.join(txt) + '║' @tools.timed def dump_disp2(self): #image = list(self.im.convert("I").getdata()) image = np.array(self.im) for row, r in enumerate(image): if row % 2 == 0: txt = [None,] * self.cols for idx, c in enumerate(r): if c.sum() > 0: if txt[idx] is None: txt[idx] = '▀' elif txt[idx] == '▀': txt[idx] = '█' else: txt[idx] = '▄' else: if txt[idx] is None: txt[idx] = ' ' print ''.join(txt) + '║' if __name__ == '__main__': import datetime import time import ssd1351 import random from PIL import ImageFont import psutil import logging import os log = logging.getLogger("clock") logging.basicConfig( format='%(asctime)-23s - %(levelname)-7s - %(name)s - %(message)s') log.setLevel(logging.INFO) RESET_PIN = 15 DC_PIN = 16 led = ssd1351.SSD1351(reset_pin=15, dc_pin=16, rows=96) led.begin() led.fillScreen(0) color = 0x000000 bands = 10 color_step = 0xFF / bands color_width = led.cols / 3 for x in range(0, led.rows, led.rows/bands): led.rawFillRect(0, x, color_width, bands, color&0xff0000) led.rawFillRect(color_width, x, color_width*2, bands, color&0xff00) led.rawFillRect(color_width*2, x, color_width*3, bands, color&0xff) color = (color + (color_step << 16) + (color_step << 8) + (color_step)) & 0xFFFFFF
gpl-3.0
4,144,339,368,829,440,500
32.039216
96
0.548368
false
3.222568
false
false
false
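A compact check of the RGB888-to-RGB565 packing used by SSD1351.color565 and encode_color in the record above; the scale helper is simplified to the zero-based case the driver actually uses, and the test colours are illustrative.

def scale(x, in_high, out_high):
    # Zero-based linear rescale, e.g. 0..255 down to 0..31.
    return int(x / float(in_high) * out_high)

def color565(r, g, b):
    # 5 bits red, 6 bits green, 5 bits blue packed into 16 bits.
    return (((scale(r, 0xFF, 0x1F) << 6) | scale(g, 0xFF, 0x3F)) << 5) | scale(b, 0xFF, 0x1F)

assert color565(255, 255, 255) == 0xFFFF  # white: all colour bits set
assert color565(0, 0, 0) == 0x0000        # black: all colour bits clear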
bnx05/pytest-selenium
test_parameters.py
1
2603
#!/usr/bin/env python # -*- coding: utf-8 -*- import pytest import time from selenium import webdriver sample_email_address = "[email protected]" sample_password = "Password123" email_addresses = ["invalid_email", "another_invalid_email@", "not_another_invalid_email@blah"] passwords = ["weak_password", "generic_password", "bleep_password"] browser = webdriver.Firefox() browser.maximize_window() # this test checks the maxlength attribute of the login and password fields @pytest.mark.parametrize("field_name, maxlength", [ ("login", "75"), ("password", "128"), ]) def test_assert_field_maxlength(field_name, maxlength): browser.get("https://start.engagespark.com/sign-in/") time.sleep(5) browser.find_element_by_name(field_name).get_attribute("maxlength") == maxlength # this test asserts the string length of values entered in the login and # password fields @pytest.mark.parametrize("field_name, sample_string, string_length", [ ("login", sample_email_address, 20), ("password", sample_password, 11), ]) def test_assert_email_and_password_length(field_name, sample_string, string_length): browser.get("https://start.engagespark.com/sign-in/") time.sleep(5) browser.find_element_by_name(field_name).click() browser.find_element_by_name(field_name).send_keys(sample_string) assert len(browser.find_element_by_name(field_name).get_attribute("value")) == string_length # this test checks if the login button is enabled after entering different # combinations of invalid values in the email and password fields @pytest.mark.parametrize("email", email_addresses) @pytest.mark.parametrize("password", passwords) def test_assert_login_button_enabled(email, password): browser.get("https://start.engagespark.com/sign-in/") time.sleep(5) browser.find_element_by_name("login").click() browser.find_element_by_name("login").send_keys(email) browser.find_element_by_name("password").click() browser.find_element_by_name("password").send_keys(password) assert browser.find_element_by_xpath("//button[contains(text(), 'Login')]").is_enabled() # this test checks if the values entered into the email field contain '@' @pytest.mark.parametrize("email", [ "[email protected]", "[email protected]", "blah", ]) def test_assert_valid_email_entry(email): browser.get("https://start.engagespark.com/sign-in/") time.sleep(5) browser.find_element_by_name("login").click() browser.find_element_by_name("login").send_keys(email) assert "@" in browser.find_element_by_name("login").get_attribute("value")
mit
-6,535,209,611,040,000,000
36.724638
96
0.717633
false
3.443122
true
false
false
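The stacked @pytest.mark.parametrize decorators in the test_parameters.py record above expand into the full cross product of the email and password lists; a plain-Python sketch of the resulting grid size.

email_addresses = ["invalid_email", "another_invalid_email@", "not_another_invalid_email@blah"]
passwords = ["weak_password", "generic_password", "bleep_password"]

grid = [(email, password) for email in email_addresses for password in passwords]
assert len(grid) == 9   # 3 emails x 3 passwords = 9 generated test cases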
DonaldTrumpHasTinyHands/tiny_hands_pac
documents_gallery/models.py
1
4091
from django.db import models from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from wagtail.wagtailcore.fields import RichTextField from wagtail.wagtailcore.models import Page from wagtail.wagtailadmin.edit_handlers import FieldPanel, MultiFieldPanel from wagtail.wagtaildocs.models import Document from wagtail.wagtailimages.edit_handlers import ImageChooserPanel from wagtail.wagtailsearch import index from modelcluster.fields import ParentalKey from modelcluster.tags import ClusterTaggableManager from taggit.models import TaggedItemBase, Tag class DocumentsIndexPage(Page): """ This is the index page for the Documents Gallery. It contains the links to Gallery pages. Gallery Page displays the gallery documents according to tags defined. """ intro = RichTextField(blank=True) search_fields = Page.search_fields + ( index.SearchField('intro'), ) feed_image = models.ForeignKey( 'wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+' ) @property def children(self): return self.get_children().live() def get_context(self, request): # Get list of live Gallery pages that are descendants of this page pages = DocumentsPage.objects.live().descendant_of(self) # Update template context context = super(DocumentsIndexPage, self).get_context(request) context['pages'] = pages return context class Meta: verbose_name = "Documents Index Page" DocumentsIndexPage.content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('intro', classname="full") ] DocumentsIndexPage.promote_panels = [ MultiFieldPanel(Page.promote_panels, "SEO and metadata fields"), ImageChooserPanel('feed_image'), ] class DocumentsPageTag(TaggedItemBase): content_object = ParentalKey('documents_gallery.DocumentsPage', related_name='tagged_items') class DocumentsPage(Page): """ This is the Documents page. It takes tag names which you have assigned to your documents. It gets the document objects according to tags defined by you. Your document gallery will be created as per tags. """ tags = ClusterTaggableManager(through=DocumentsPageTag, blank=True) feed_image = models.ForeignKey( 'wagtailimages.Image', null=True, blank=True, on_delete=models.SET_NULL, related_name='+' ) @property def gallery_index(self): # Find closest ancestor which is a Gallery index return self.get_ancestors().type(GalleryIndexPage).last() def get_context(self, request): # Get tags and convert them into list so we can iterate over them tags = self.tags.values_list('name', flat=True) # Creating empty Queryset from Wagtail Document model documents = Document.objects.none() # Populating the empty documents Queryset with documents of all tags in tags list. if tags: len_tags = len(tags) for i in range(0, len_tags): doc = Document.objects.filter(tags__name=tags[i]) documents = documents | doc # Pagination page = request.GET.get('page') paginator = Paginator(documents, 25) # Show 25 documents per page try: documents = paginator.page(page) except PageNotAnInteger: documents = paginator.page(1) except EmptyPage: documents = paginator.page(paginator.num_pages) # Update template context context = super(DocumentsPage, self).get_context(request) context['documents'] = documents return context class Meta: verbose_name = "Documents Page" DocumentsPage.content_panels = [ FieldPanel('title', classname="full title"), FieldPanel('tags'), ] DocumentsPage.promote_panels = [ MultiFieldPanel(Page.promote_panels, "SEO and metadata fields"), ImageChooserPanel('feed_image'), ]
mit
3,715,756,117,222,630,000
29.088235
104
0.676363
false
4.279289
false
false
false
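A compressed sketch of the tag-union-plus-pagination idea used in DocumentsPage.get_context above, pulled out as a plain helper; it assumes the same Wagtail Document model and taggit-style tags__name lookup, and the helper name is illustrative.

from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtaildocs.models import Document

def documents_for_tags(tag_names, page_number, per_page=25):
    # OR the per-tag querysets together, as DocumentsPage.get_context does.
    documents = Document.objects.none()
    for name in tag_names:
        documents = documents | Document.objects.filter(tags__name=name)

    paginator = Paginator(documents.distinct(), per_page)  # 25 per page, as above
    try:
        return paginator.page(page_number)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)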
mrocklin/into
into/backends/sql_csv.py
1
2811
from ..regex import RegexDispatcher
from ..append import append
from .csv import CSV
import os
import datashape
import sqlalchemy
import subprocess

copy_command = RegexDispatcher('copy_command')
execute_copy = RegexDispatcher('execute_copy')


@copy_command.register('.*sqlite')
def copy_sqlite(dialect, tbl, csv):
    abspath = os.path.abspath(csv.path)
    tblname = tbl.name
    dbpath = str(tbl.bind.url).split('///')[-1]

    statement = """
        (echo '.mode csv'; echo '.import {abspath} {tblname}';) | sqlite3 {dbpath}
    """
    return statement.format(**locals())


@execute_copy.register('sqlite')
def execute_copy_sqlite(dialect, engine, statement):
    ps = subprocess.Popen(statement, shell=True, stdout=subprocess.PIPE)
    return ps.stdout.read()


@copy_command.register('postgresql')
def copy_postgres(dialect, tbl, csv):
    abspath = os.path.abspath(csv.path)
    tblname = tbl.name
    format_str = 'csv'
    delimiter = csv.dialect.get('delimiter', ',')
    na_value = ''
    quotechar = csv.dialect.get('quotechar', '"')
    escapechar = csv.dialect.get('escapechar', '\\')
    header = not not csv.has_header
    encoding = csv.encoding or 'utf-8'

    statement = """
        COPY {tblname} FROM '{abspath}'
            (FORMAT {format_str},
             DELIMITER E'{delimiter}',
             NULL '{na_value}',
             QUOTE '{quotechar}',
             ESCAPE '{escapechar}',
             HEADER {header},
             ENCODING '{encoding}');"""
    return statement.format(**locals())


@copy_command.register('mysql.*')
def copy_mysql(dialect, tbl, csv):
    mysql_local = ''
    abspath = os.path.abspath(csv.path)
    tblname = tbl.name
    delimiter = csv.dialect.get('delimiter', ',')
    quotechar = csv.dialect.get('quotechar', '"')
    escapechar = csv.dialect.get('escapechar', '\\')
    lineterminator = csv.dialect.get('lineterminator', r'\n\r')
    skiprows = 1 if csv.has_header else 0
    encoding = csv.encoding or 'utf-8'

    statement = u"""
            LOAD DATA {mysql_local} INFILE '{abspath}'
            INTO TABLE {tblname}
            CHARACTER SET {encoding}
            FIELDS
                TERMINATED BY '{delimiter}'
                ENCLOSED BY '{quotechar}'
                ESCAPED BY '{escapechar}'
            LINES TERMINATED by '{lineterminator}'
            IGNORE {skiprows} LINES;
        """
    return statement.format(**locals())


@execute_copy.register('.*', priority=9)
def execute_copy_all(dialect, engine, statement):
    conn = engine.raw_connection()
    cursor = conn.cursor()
    cursor.execute(statement)
    conn.commit()


@append.register(sqlalchemy.Table, CSV)
def append_csv_to_sql_table(tbl, csv, **kwargs):
    statement = copy_command(tbl.bind.dialect.name, tbl, csv)
    execute_copy(tbl.bind.dialect.name, tbl.bind, statement)
    return tbl
bsd-3-clause
-432,463,007,566,880,260
27.683673
82
0.630736
false
3.655397
false
false
false
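A rough usage sketch for the CSV-to-SQL fast path above: the table/engine setup is ordinary bound-metadata SQLAlchemy (older style, matching the tbl.bind usage), the CSV constructor arguments are assumed rather than taken from its docs, and the sqlite route additionally shells out to the sqlite3 command-line tool.

import sqlalchemy

engine = sqlalchemy.create_engine('sqlite:///accounts.db')
metadata = sqlalchemy.MetaData(bind=engine)          # bound so tbl.bind is set
accounts = sqlalchemy.Table('accounts', metadata,
                            sqlalchemy.Column('name', sqlalchemy.String),
                            sqlalchemy.Column('balance', sqlalchemy.Integer))
metadata.create_all()

csv_file = CSV('accounts.csv', has_header=True)      # CSV from .csv above; args assumed
append_csv_to_sql_table(accounts, csv_file)          # dispatcher picks copy_sqlite here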
blenderben/lolstatbot
lolstatbot.py
1
22559
# Leauge of Legends Statistics Chat Bot # A chat bot written in Python that provides match statistics right to your Twitch chat. # 2015 Benjamin Chu - https://github.com/blenderben import socket # imports module allowing connection to IRC import threading # imports module allowing timing functions import requests # imports module allowing requests import json import time import calendar # imports module allowing epoch time import ConfigParser # imports module allowing reading of .ini files import os # for relative pathing import string # for string manipulation # from routes import API_ROUTES class API_ROUTES: # summoner-v1.4 - get summoner id data summoner_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.4/summoner/by-name/{summonername}?api_key={key}' # summoner-v1.4 - summoner mastery data summonermastery_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.4/summoner/{summonerid}/masteries?api_key={key}' # league-v2.5 - summoner league data summonerleague_url = 'https://{region}.api.pvp.net/api/lol/{region}/v2.5/league/by-summoner/{summonerid}/entry?api_key={key}' # lol-static-data-v1.2 - static champion data championstaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/champion/{championid}?champData=all&api_key={key}' # lol-static-data-v1.2 - static rune data runestaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/rune/{runeid}?runeData=all&api_key={key}' # lol-static-data-v1.2 - static mastery data masterystaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/mastery/{masteryid}?masteryData=all&api_key={key}' # lol-static-data-v1.2 - static spell data spellstaticdata_url = 'https://global.api.pvp.net/api/lol/static-data/{region}/v1.2/summoner-spell/{spellid}?api_key={key}' # current-game-v1.0 - current game data current_url = 'https://{region}.api.pvp.net/observer-mode/rest/consumer/getSpectatorGameInfo/{region_upper}1/{summonerid}?api_key={key}' # game-v1.3 - historic game data last_url = 'https://{region}.api.pvp.net/api/lol/{region}/v1.3/game/by-summoner/{summonerid}/recent?api_key={key}' # op.gg opgg_url = 'http://{region}.op.gg/summoner/userName={summonername}' opgg_masteries_url = 'http://{region}.op.gg/summoner/mastery/userName={summonername}' opgg_runes_url = 'http://{region}.op.gg/summoner/rune/userName={summonername}' opgg_matches_url = 'http://{region}.op.gg/summoner/matches/userName={summonername}' opgg_leagues_url = 'http://{region}.op.gg/summoner/league/userName={summonername}' opgg_champions_url = 'http://{region}.op.gg/summoner/champions/userName={summonername}' # LoLNexus lolnexus_url = 'http://www.lolnexus.com/{region}/search?name={summonername}&server={region}' # LoLKing lolking_url = 'http://www.lolking.net/summoner/{region}/{summonerid}' # LoLSkill lolskill_url = 'http://www.lolskill.net/summoner/{region}/{summonername}' # ====== READ CONFIG ====== Config = ConfigParser.ConfigParser() Config.read(os.path.dirname(os.path.abspath(__file__)) + '/config.ini') def ConfigSectionMap(section): temp_dict = {} options = Config.options(section) for option in options: try: temp_dict[option] = Config.get(section, option) if temp_dict[option] == -1: DebugPrint('skip: %s' % option) except: print('exception on %s!' 
% option) temp_dict[option] = None return temp_dict # ====== CONNECTION INFO ====== # Set variables for connection botOwner = ConfigSectionMap('settings')['botowner'] nick = ConfigSectionMap('settings')['nick'] channel = '#' + ConfigSectionMap('settings')['channel'] server = ConfigSectionMap('settings')['server'] port = int(ConfigSectionMap('settings')['port']) password = ConfigSectionMap('settings')['oauth'] # ====== RIOT API PRELIM DATA ====== api_key = ConfigSectionMap('settings')['api'] # Riot API Information summonerName = ConfigSectionMap('settings')['summonername'].lower() summonerName = summonerName.replace(" ", "") region = ConfigSectionMap('settings')['region'] summoner_url = API_ROUTES.summoner_url.format(region=region, summonername=summonerName, key=api_key) # Initial Data Load // Get Summoner ID and Level summonerName_dict = requests.get(summoner_url).json() summonerID = str(summonerName_dict[summonerName]['id']) summonerLevel = str(summonerName_dict[summonerName]['summonerLevel']) # ====== RIOT API FUNCTIONS ====== def about(ircname): return 'Hello ' + ircname + '! I am a League of Legends statistics chat bot. My creator is blenderben [ https://github.com/blenderben/LoLStatBot ].'\ + ' I am currently assigned to summoner ' + summonerName.upper() + ' [ID:' + getSummonerID() + '].' def getCommands(): return 'Available commands: ['\ + ' !about, !summoner, !league, !last, !current, !runes, !mastery, !opgg, !lolnexus, !lolking, !lolskill ]' def getSummonerInfo(): return summonerName.upper() + ' is summoner level ' + getSummonerLevel() + ', playing in Region: ' + region.upper() + ' // ' + opgg('') def opgg(details): if details == 'runes': return API_ROUTES.opgg_runes_url.format(region=region, summonername=summonerName) elif details == 'masteries': return API_ROUTES.opgg_masteries_url.format(region=region, summonername=summonerName) elif details == 'matches': return API_ROUTES.opgg_matches_url.format(region=region, summonername=summonerName) elif details == 'leagues': return API_ROUTES.opgg_leagues_url.format(region=region, summonername=summonerName) elif details == 'champions': return API_ROUTES.opgg_champions_url.format(region=region, summonername=summonerName) else: return API_ROUTES.opgg_url.format(region=region, summonername=summonerName) def lolnexus(): return API_ROUTES.lolnexus_url.format(region=region, summonername=summonerName) def lolking(details): if details == 'runes': return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#runes' elif details == 'masteries': return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#masteries' elif details == 'matches': return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#matches' elif details == 'rankedstats': return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#ranked-stats' elif details == 'leagues': return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) + '#leagues' else: return API_ROUTES.lolking_url.format(region=region, summonerid=summonerID) def lolskill(details): if details == 'runes': return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/runes' elif details == 'masteries': return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/masteries' elif details == 'matches': return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/matches' elif details == 'stats': return 
API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/stats' elif details == 'champions': return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) + '/champions' else: return API_ROUTES.lolskill_url.format(region=region.upper(), summonername=summonerName) def getTeamColor(teamid): if teamid == 100: return 'Blue Team' elif teamid == 200: return 'Purple Team' else: return 'No Team' def getWinLoss(win): if win == True: return 'WON' elif win == False: return 'LOST' else: return 'TIED' def getTimePlayed(time): if time > 3600: hours = time / 3600 minutes = time % 3600 / 60 seconds = time % 3600 % 60 if hours > 1: return str(hours) + ' hours & ' + str(minutes) + ' minutes & ' + str(seconds) + ' seconds' else: return str(hours) + ' hour & ' + str(minutes) + ' minutes & ' + str(seconds) + ' seconds' elif time > 60: minutes = time / 60 seconds = time % 60 return str(minutes) + ' minutes & ' + str(seconds) + ' seconds' else: return str(time) + ' seconds' def getKDA(kills, deaths, assists): if deaths < 1: return 'PERFECT' else: kda = float(kills) + float(assists) / (float(deaths)) kda = round(kda, 2) return str(kda) + ':1' def getChampionbyID(championid): tempDict = requests.get(API_ROUTES.championstaticdata_url.format(region=region, championid=int(championid), key=api_key)).json() name = tempDict['name'] + " " + tempDict['title'] return name def getSpellbyID(spellid): tempDict = requests.get(API_ROUTES.spellstaticdata_url.format(region=region, spellid=int(spellid), key=api_key)).json() spellName = tempDict['name'] return spellName # Refresh / Get Summoner ID def getSummonerID(): global summonerID try: tempDict = requests.get(summoner_url).json() summonerID = str(tempDict[summonerName]['id']) return summonerID except: print 'Riot API Down' return 1 # Refresh / Get Summoner Level def getSummonerLevel(): global summonerLevel tempDict = requests.get(summoner_url).json() summonerLevel = str(tempDict[summonerName]['summonerLevel']) return summonerLevel def getWinRatio(win, loss): total = float(win) + float(loss) ratio = win / total ratioPercent = round(ratio * 100, 1) return str(ratioPercent) + '%' def getStats(): # Function to eventually get statistics, avg kills, etc, for now, output Stats page from Lolskill return lolskill('stats') def getSummonerMastery(): tempDict = requests.get(API_ROUTES.summonermastery_url.format(region=region, summonerid=summonerID, key=api_key)).json() i = 0 masteryIDList = [] masteryRank = [] for pages in tempDict[summonerID]['pages']: if bool(pages.get('current')) == True: pageName = tempDict[summonerID]['pages'][i]['name'] for mastery in tempDict[summonerID]['pages'][i]['masteries']: masteryIDList.append(mastery.get('id')) masteryRank.append(mastery.get('rank')) else: i += 1 return getCurrentMastery(masteryIDList, masteryRank) + ' // Mastery Name: ' + pageName def getLeagueInfo(): try: tempDict = requests.get(API_ROUTES.summonerleague_url.format(region=region, summonerid=summonerID, key=api_key)).json() LEAGUE_TIER = string.capwords(tempDict[summonerID][0]['tier']) LEAGUE_QUEUE = tempDict[summonerID][0]['queue'].replace('_', ' ') LEAGUE_DIVISION = tempDict[summonerID][0]['entries'][0]['division'] LEAGUE_WINS = tempDict[summonerID][0]['entries'][0]['wins'] LEAGUE_LOSSES = tempDict[summonerID][0]['entries'][0]['losses'] LEAGUE_POINTS = tempDict[summonerID][0]['entries'][0]['leaguePoints'] # LEAGUE_ISVETERAN = tempDict[summonerID][0]['entries'][0]['isHotStreak'] # LEAGUE_ISHOTSTREAK = 
tempDict[summonerID][0]['entries'][0]['isVeteran'] # LEAGUE_ISFRESHBLOOD = tempDict[summonerID][0]['entries'][0]['isFreshBlood'] # LEAGUE_ISINACTIVE = tempDict[summonerID][0]['entries'][0]['isInactive'] return summonerName.upper() + ' is ' + LEAGUE_TIER + ' ' + LEAGUE_DIVISION + ' in ' + LEAGUE_QUEUE\ + ' // ' + str(LEAGUE_WINS) + 'W / ' + str(LEAGUE_LOSSES) + 'L (Win Ratio ' + getWinRatio(LEAGUE_WINS, LEAGUE_LOSSES) + ')'\ + ' // LP: ' + str(LEAGUE_POINTS)\ + ' // ' + lolking('leagues') except: return 'Summoner ' + summonerName.upper() + ' has not played any Ranked Solo 5x5 matches'\ + ' // ' + lolking('leagues') # Get Current Match Stats def getCurrent(details): try: current_api_url = API_ROUTES.current_url.format(region=region, region_upper=region.upper(), summonerid=summonerID, key=api_key) tempDict = requests.get(current_api_url).json() CURRENT_GAMEMODE = tempDict['gameMode'] CURRENT_GAMELENGTH = tempDict['gameLength'] CURRENT_GAMETYPE = tempDict['gameType'].replace('_', ' ') CURRENT_TIME = calendar.timegm(time.gmtime()) CURRENT_EPOCHTIME = tempDict['gameStartTime'] / 1000 if CURRENT_EPOCHTIME <= 0: CURRENT_TIMEDIFF = 0 else: CURRENT_TIMEDIFF = CURRENT_TIME - CURRENT_EPOCHTIME if CURRENT_TIMEDIFF < 0: CURRENT_TIMEDIFF = 0 runeIDList = [] runeCount = [] masteryIDList = [] masteryRank = [] i = 0 for participant in tempDict['participants']: if int(summonerID) == int(participant.get('summonerId')): CURRENT_TEAM = participant.get('teamId') CURRENT_CHAMPION = participant.get('championId') CURRENT_SPELL1 = participant.get('spell1Id') CURRENT_SPELL2 = participant.get('spell2Id') for rune in tempDict['participants'][i]['runes']: runeIDList.append(rune.get('runeId')) runeCount.append(rune.get('count')) for mastery in tempDict['participants'][i]['masteries']: masteryIDList.append(mastery.get('masteryId')) masteryRank.append(mastery.get('rank')) else: i += 1 runeCountOutput = '' runeBonusOutput = '' for x in range(len(runeIDList)): runeCountOutput += ' [' + getCurrentRuneTotal(runeIDList[x], runeCount[x]) + '] ' runeBonusOutput += ' [' + getCurrentRuneBonusTotal(runeIDList[x], runeCount[x]) + '] ' masteryOutput = getCurrentMastery(masteryIDList, masteryRank) if details == 'runes': return 'Current Runes: ' + runeCountOutput\ + ' // Rune Bonuses: ' + runeBonusOutput\ + ' // ' + lolskill('runes') elif details == 'masteries': return 'Current Mastery Distribution: ' + masteryOutput\ + ' // ' + lolskill('masteries') else: return summonerName.upper()\ + ' is currently playing ' + CURRENT_GAMEMODE + ' ' + CURRENT_GAMETYPE\ + ' with ' + getChampionbyID(CURRENT_CHAMPION)\ + ' on the ' + getTeamColor(CURRENT_TEAM)\ + ' // Elapsed Time: ' + getTimePlayed(CURRENT_TIMEDIFF)\ + ' // Spells Chosen: ' + getSpellbyID(CURRENT_SPELL1) + ' & ' + getSpellbyID(CURRENT_SPELL2)\ + ' // Mastery Distribution: ' + masteryOutput\ + ' // Rune Bonuses: ' + runeBonusOutput\ + ' // ' + lolnexus() except: if details == 'runes': return 'Summoner ' + summonerName.upper() + ' needs to currently be in a game for current Rune data to display'\ + ' // ' + lolking('runes') elif details == 'masteries': return 'Current Mastery Distribution: ' + getSummonerMastery() + ' // ' + lolskill('masteries') else: return 'The summoner ' + summonerName.upper() + ' is not currently in a game.' 
def getCurrentMastery(masteryidlist, masteryrank): offense = 0 defense = 0 utility = 0 for x in range(len(masteryidlist)): masteryID = masteryidlist[x] tempDict = requests.get(API_ROUTES.masterystaticdata_url.format(region=region, masteryid=masteryID, key=api_key)).json() masteryTree = tempDict['masteryTree'] ranks = int(masteryrank[x]) if masteryTree == 'Offense': offense += ranks elif masteryTree == 'Defense': defense += ranks else: utility += ranks return '(' + str(offense) + '/' + str(defense) + '/' + str(utility) + ')' def getCurrentRuneTotal(runeid, count): tempDict = requests.get(API_ROUTES.runestaticdata_url.format(region=region, runeid=runeid, key=api_key)).json() runeName = tempDict['name'] return str(count) + 'x ' + runeName def getCurrentRuneBonusTotal(runeid, count): tempDict = requests.get(API_ROUTES.runestaticdata_url.format(region=region, runeid=runeid, key=api_key)).json() runeBonus = tempDict['description'] try: runeBonus.split('/')[1] except IndexError: # Single Bonus value = runeBonus.split()[0] value = value.replace('+', '').replace('%', '').replace('-', '') valueCount = float(value) * float(count) valueCount = round(valueCount, 2) description = tempDict['description'].split(' (', 1)[0] description = string.capwords(description) description = description.replace(value, str(valueCount)) return description else: # Hybrid Bonus value = runeBonus.split()[0] value = value.replace('+', '').replace('%', '').replace('-', '') valueCount = float(value) * float(count) valueCount = round(valueCount, 2) firstDescription = runeBonus.split('/')[0].strip() firstDescription = firstDescription.split(' (', 1)[0] firstDescription = string.capwords(firstDescription) firstDescription = firstDescription.replace(value, str(valueCount)) value = runeBonus.split('/')[1].strip() if value.split()[1] == 'sec.': return firstDescription + ' / 5 Sec.' 
else: value = value.split()[0] value = value.replace('+', '').replace('%', '').replace('-', '') valueCount = float(value) * float(count) valueCount = round(valueCount, 2) secondDescription = runeBonus.split('/')[1].strip() secondDescription = secondDescription.split(' (', 1)[0] secondDescription = string.capwords(secondDescription) secondDescription = secondDescription.replace(value, str(valueCount)) return firstDescription + ' / ' + secondDescription # Get Last Match Stats def getLast(): tempDict = requests.get(API_ROUTES.last_url.format(region=region, summonerid=summonerID, key=api_key)).json() LAST_GAMEID = tempDict['games'][0]['gameId'] # LAST_GAMEMODE = tempDict['games'][0]['gameMode'] LAST_SUBTYPE = tempDict['games'][0]['subType'].replace('_', ' ') LAST_GAMETYPE = tempDict['games'][0]['gameType'].replace('_GAME', '') LAST_TIMEPLAYED = tempDict['games'][0]['stats']['timePlayed'] LAST_WIN = tempDict['games'][0]['stats']['win'] LAST_GOLDSPENT = tempDict['games'][0]['stats']['goldSpent'] LAST_GOLDEARNED = tempDict['games'][0]['stats']['goldEarned'] LAST_CHAMPION_ID = str(tempDict['games'][0]['championId']) LAST_IPEARNED = str(tempDict['games'][0]['ipEarned']) LAST_LEVEL = str(tempDict['games'][0]['stats']['level']) LAST_SPELL1 = tempDict['games'][0]['spell1'] LAST_SPELL2 = tempDict['games'][0]['spell2'] LAST_CHAMPIONSKILLED = str(tempDict['games'][0]['stats'].get('championsKilled', 0)) LAST_NUMDEATHS = str(tempDict['games'][0]['stats'].get('numDeaths' , 0)) LAST_ASSISTS = str(tempDict['games'][0]['stats'].get('assists', 0)) LAST_TOTALDAMAGECHAMPIONS = str(tempDict['games'][0]['stats']['totalDamageDealtToChampions']) LAST_MINIONSKILLED = str(tempDict['games'][0]['stats']['minionsKilled']) LAST_WARDSPLACED = str(tempDict['games'][0]['stats'].get('wardPlaced', 0)) output = summonerName.upper() + ' ' + getWinLoss(LAST_WIN)\ + ' the last ' + LAST_GAMETYPE + ' ' + LAST_SUBTYPE\ + ' GAME using ' + getChampionbyID(LAST_CHAMPION_ID)\ + ' // The game took ' + getTimePlayed(LAST_TIMEPLAYED)\ + ' // ' + getKDA(LAST_CHAMPIONSKILLED, LAST_NUMDEATHS, LAST_ASSISTS) + ' KDA (' + LAST_CHAMPIONSKILLED + '/' + LAST_NUMDEATHS + '/' + LAST_ASSISTS + ')'\ + ' // ' + getSpellbyID(LAST_SPELL1) + ' & ' + getSpellbyID(LAST_SPELL2) + ' spells were chosen'\ + ' // ' + LAST_TOTALDAMAGECHAMPIONS + ' damage was dealt to champions'\ + ' // ' + LAST_MINIONSKILLED + ' minions were killed'\ + ' // ' + LAST_WARDSPLACED + ' wards were placed'\ + ' // Spent ' + str(round(float(LAST_GOLDSPENT) / float(LAST_GOLDEARNED)*100, 1)) + '% of Gold earned [' + str(LAST_GOLDSPENT) + '/' + str(LAST_GOLDEARNED) + ']'\ + ' // ' + LAST_IPEARNED + ' IP was earned' # add Official League Match history here return output # ====== IRC FUNCTIONS ====== # Extract Nickname def getNick(data): nick = data.split('!')[0] nick = nick.replace(':', ' ') nick = nick.replace(' ', '') nick = nick.strip(' \t\n\r') return nick def getMessage(data): if data.find('PRIVMSG'): try: message = data.split(channel, 1)[1][2:] return message except IndexError: return 'Index Error' except: return 'No message' else: return 'Not a message' # ====== TIMER FUNCTIONS ====== def printit(): threading.Timer(60.0, printit).start() print "Hello World" # =============================== # queue = 13 #sets variable for anti-spam queue functionality # Connect to server print '\nConnecting to: ' + server + ' over port ' + str(port) irc = socket.socket() irc.connect((server, port)) # Send variables for connection to Twitch chat irc.send('PASS ' + password + '\r\n') irc.send('USER ' + 
nick + ' 0 * :' + botOwner + '\r\n') irc.send('NICK ' + nick + '\r\n') irc.send('JOIN ' + channel + '\r\n') printit() # Main Program Loop while True: ircdata = irc.recv(4096) # gets output from IRC server ircuser = ircdata.split(':')[1] ircuser = ircuser.split('!')[0] # determines the sender of the messages # Check messages for any banned words against banned.txt list f = open(os.path.dirname(os.path.abspath(__file__)) + '/banned.txt', 'r') banned = f.readlines() message = getMessage(ircdata).lower().strip(' \t\n\r') for i in range(len(banned)): if message.find(banned[i].strip(' \t\n\r')) != -1: irc.send('PRIVMSG ' + channel + ' :' + getNick(ircdata) + ', banned words are not allowed. A timeout has been issued.' + '\r\n') # irc.send('PRIVMSG ' + channel + ' :\/timeout ' + getNick(ircdata) + ' 5\r\n') break else: pass print 'DEBUG: ' + ircdata.strip(' \t\n\r') print 'USER: ' + getNick(ircdata).strip(' \t\n\r') print 'MESSAGE: ' + getMessage(ircdata).strip(' \t\n\r') print '=======================' # About if ircdata.find(':!about') != -1: irc.send('PRIVMSG ' + channel + ' :' + about(getNick(ircdata)) + '\r\n') # Commands if ircdata.find(':!commands') != -1: irc.send('PRIVMSG ' + channel + ' :' + getCommands() + '\r\n') # Last if ircdata.find(':!last') != -1: irc.send('PRIVMSG ' + channel + ' :' + getLast() + '\r\n') # Current if ircdata.find(':!current') != -1: irc.send('PRIVMSG ' + channel + ' :' + getCurrent('games') + '\r\n') # Current Runes if ircdata.find(':!runes') != -1 or ircdata.find(':!rune') != -1: irc.send('PRIVMSG ' + channel + ' :' + getCurrent('runes') + '\r\n') # Current Mastery if ircdata.find(':!mastery') != -1 or ircdata.find(':!masteries') != -1: irc.send('PRIVMSG ' + channel + ' :' + getCurrent('masteries') + '\r\n') # Basic Summoner Data if ircdata.find(':!summoner') != -1: irc.send('PRIVMSG ' + channel + ' :' + getSummonerInfo() + '\r\n') # Seaonal League Rank Data if ircdata.find(':!league') != -1: irc.send('PRIVMSG ' + channel + ' :' + getLeagueInfo() + '\r\n') # Stats if ircdata.find(':!stats') != -1: irc.send('PRIVMSG ' + channel + ' :' + getStats() + '\r\n') # Return op.gg if ircdata.find(':!opgg') != -1: irc.send('PRIVMSG ' + channel + ' :' + opgg('') + '\r\n') # Return lolnexus if ircdata.find(':!lolnexus') != -1: irc.send('PRIVMSG ' + channel + ' :' + lolnexus() + '\r\n') # Return lolking if ircdata.find(':!lolking') != -1: irc.send('PRIVMSG ' + channel + ' :' + lolking('') + '\r\n') # Return lolskill if ircdata.find(':!lolskill') != -1: irc.send('PRIVMSG ' + channel + ' :' + lolskill('') + '\r\n') # Keep Alive if ircdata.find('PING') != -1: irc.send('PONG ' + ircdata.split()[1] + '\r\n')
mit
362,198,703,597,034,500
37.894828
164
0.675074
false
2.769335
true
false
false
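Stripped to its IRC plumbing, the bot above boils down to the following skeleton (Python 2 style to match; the server, credentials and channel are placeholders that the real script reads from config.ini):

import socket

irc = socket.socket()
irc.connect(('irc.twitch.tv', 6667))          # placeholder server/port
irc.send('PASS oauth:xxxxxxxxxxxx\r\n')       # Twitch OAuth token
irc.send('NICK lolstatbot\r\n')
irc.send('JOIN #somechannel\r\n')

while True:
    data = irc.recv(4096)
    if data.startswith('PING'):               # keep-alive, as at the end of the main loop
        irc.send('PONG ' + data.split()[1] + '\r\n')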
pacpac1992/mymockup
src/widgets/tab.py
1
3754
#!/usr/bin/python # -*- coding: utf-8 -*- import wx import wx.lib.ogl as ogl class Tab_dialog(wx.Dialog): def __init__(self, parent, title): super(Tab_dialog, self).__init__(parent, title=title,size=(410,220)) self.parent = parent self.nombre = wx.TextCtrl(self,-1, pos=(10,10), size=(200,30),style=wx.TE_PROCESS_ENTER) wx.StaticText(self,-1,'Activo: ',pos=(10,55)) self.lbl_selection = wx.StaticText(self,-1,'',(60, 55),(150, -1)) btn = wx.Button(self,-1,'Aceptar',pos=(10,100)) self.listBox = wx.ListBox(self, -1, (220, 10), (90, 170), [], wx.LB_SINGLE) up = wx.Button(self,-1,'Arriba',pos=(320,10)) down = wx.Button(self,-1,'Abajo',pos=(320,50)) delete = wx.Button(self,-1,'Eliminar',pos=(320,90)) btn.Bind(wx.EVT_BUTTON,self.crear_tabs) up.Bind(wx.EVT_BUTTON,self.up) down.Bind(wx.EVT_BUTTON,self.down) delete.Bind(wx.EVT_BUTTON,self.delete) self.nombre.Bind(wx.EVT_TEXT_ENTER, self.add_list) self.Bind(wx.EVT_LISTBOX, self.onListBox, self.listBox) def crear_tabs(self,evt): if self.lbl_selection.GetLabel() != '': lista = {} for i in range(0,self.listBox.GetCount()): lista[i] = self.listBox.GetString(i) self.parent.draw_tab(None,self.lbl_selection.GetLabel(),lista,False) self.Destroy() else: wx.MessageBox("Seleccione un item", "Message" ,wx.OK | wx.ICON_ERROR) def add_list(self,evt): n = self.nombre.GetValue() self.listBox.Append(n) self.nombre.SetValue('') def up(self,evt): n = self.listBox.GetCount() r = 0 for i in range(0,n): if self.listBox.GetString(i) == self.listBox.GetStringSelection(): r = i dato = self.listBox.GetStringSelection() if r != 0: r = r - 1 d = self.listBox.GetString(r) self.listBox.SetString(r,dato) self.listBox.SetString(r+1,d) def down(self,evt): try: n = self.listBox.GetCount() r = 0 for i in range(0,n): if self.listBox.GetString(i) == self.listBox.GetStringSelection(): r = i dato = self.listBox.GetStringSelection() if r <= (n-1): r = r + 1 d = self.listBox.GetString(r) self.listBox.SetString(r,dato) self.listBox.SetString(r-1,d) except Exception as e: print(e) def delete(self,evt): n = self.listBox.GetCount() r = 0 for i in range(0,n): if self.listBox.GetString(i) == self.listBox.GetStringSelection(): r = i self.listBox.Delete(r) def onListBox(self,evt): self.lbl_selection.SetLabel(evt.GetEventObject().GetStringSelection()) class Tab(ogl.DrawnShape): def __init__(self,lista,active): ogl.DrawnShape.__init__(self) n = len(lista) self.diccionario = lista i = self.buscarElemento(lista,active) r = (int(n) * 70 + ((int(n)-1))*4)+50 self.calculate_size(r) self.tabs(n,r,i) self.labels(n,r) self.CalculateSize() def calculate_size(self,r): w = r/2 self.SetDrawnPen(wx.BLACK_PEN) self.SetDrawnBrush(wx.WHITE_BRUSH) return self.DrawPolygon([(w, 100), (-w,100),(-w,-70),(w,-70),(w,100)]) def tabs(self,n,r,i): w = r / 2 cp4 = 0 for x in range(0,n): sp = 70 self.SetDrawnPen(wx.BLACK_PEN) if x == i: self.SetDrawnBrush(wx.Brush(wx.Colour(240, 240, 240))) else: self.SetDrawnBrush(wx.Brush(wx.Colour(155, 155, 155))) self.DrawPolygon([((-w + cp4),-70),((-w + cp4),-100),(((-w+cp4)+sp),-100),(((-w+cp4)+sp),-70)]) cp4 = cp4 + 74 def labels(self,items,r): w = r / 2 ran = 0 for x in xrange(0,items): self.SetDrawnTextColour(wx.BLACK) self.SetDrawnFont(wx.Font(10, wx.SWISS, wx.NORMAL, wx.NORMAL)) name = self.diccionario[x] self.DrawText(str(name), (-w+ran+10, -90)) ran = ran + 74 def buscarElemento(self,lista, elemento): for i in range(0,len(lista)): if(lista[i] == elemento): return i
mit
5,063,618,514,684,932,000
23.86755
98
0.633724
false
2.453595
false
false
false
vegeclic/django-regularcom
blog/migrations/0001_initial.py
1
14127
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'TaggedItem' db.create_table('blog_taggeditem', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('tag', self.gf('django.db.models.fields.SlugField')(max_length=50)), ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], related_name='blog_tags')), ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()), )) db.send_create_signal('blog', ['TaggedItem']) # Adding model 'CategoryTranslation' db.create_table('blog_category_translation', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=200)), ('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)), ('master', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Category'], related_name='translations', null=True)), )) db.send_create_signal('blog', ['CategoryTranslation']) # Adding unique constraint on 'CategoryTranslation', fields ['language_code', 'master'] db.create_unique('blog_category_translation', ['language_code', 'master_id']) # Adding model 'Category' db.create_table('blog_category', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('date_created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)), ('date_last_modified', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)), )) db.send_create_signal('blog', ['Category']) # Adding model 'ArticleTranslation' db.create_table('blog_article_translation', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=200)), ('body', self.gf('django.db.models.fields.TextField')()), ('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)), ('master', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Article'], related_name='translations', null=True)), )) db.send_create_signal('blog', ['ArticleTranslation']) # Adding unique constraint on 'ArticleTranslation', fields ['language_code', 'master'] db.create_unique('blog_article_translation', ['language_code', 'master_id']) # Adding model 'Article' db.create_table('blog_article', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=200)), ('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)), ('date_created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)), ('date_last_modified', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)), ('main_image', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, to=orm['common.Image'], related_name='blog_article_main_image', unique=True, null=True)), ('title_image', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, to=orm['common.Image'], related_name='blog_article_title_image', unique=True, null=True)), ('thumb_image', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, to=orm['common.Image'], related_name='blog_article_thumb_image', unique=True, null=True)), )) db.send_create_signal('blog', ['Article']) # Adding M2M table for field authors on 'Article' 
m2m_table_name = db.shorten_name('blog_article_authors') db.create_table(m2m_table_name, ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('article', models.ForeignKey(orm['blog.article'], null=False)), ('author', models.ForeignKey(orm['accounts.author'], null=False)) )) db.create_unique(m2m_table_name, ['article_id', 'author_id']) # Adding M2M table for field categories on 'Article' m2m_table_name = db.shorten_name('blog_article_categories') db.create_table(m2m_table_name, ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('article', models.ForeignKey(orm['blog.article'], null=False)), ('category', models.ForeignKey(orm['blog.category'], null=False)) )) db.create_unique(m2m_table_name, ['article_id', 'category_id']) # Adding model 'Comment' db.create_table('blog_comment', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('participant', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, to=orm['accounts.Account'], null=True)), ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['blog.Article'])), ('body', self.gf('django.db.models.fields.TextField')()), ('date_created', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now_add=True)), ('date_last_modified', self.gf('django.db.models.fields.DateTimeField')(blank=True, auto_now=True)), )) db.send_create_signal('blog', ['Comment']) def backwards(self, orm): # Removing unique constraint on 'ArticleTranslation', fields ['language_code', 'master'] db.delete_unique('blog_article_translation', ['language_code', 'master_id']) # Removing unique constraint on 'CategoryTranslation', fields ['language_code', 'master'] db.delete_unique('blog_category_translation', ['language_code', 'master_id']) # Deleting model 'TaggedItem' db.delete_table('blog_taggeditem') # Deleting model 'CategoryTranslation' db.delete_table('blog_category_translation') # Deleting model 'Category' db.delete_table('blog_category') # Deleting model 'ArticleTranslation' db.delete_table('blog_article_translation') # Deleting model 'Article' db.delete_table('blog_article') # Removing M2M table for field authors on 'Article' db.delete_table(db.shorten_name('blog_article_authors')) # Removing M2M table for field categories on 'Article' db.delete_table(db.shorten_name('blog_article_categories')) # Deleting model 'Comment' db.delete_table('blog_comment') models = { 'accounts.account': { 'Meta': {'object_name': 'Account'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}), 'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'unique': 'True', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'accounts.author': { 'Meta': {'object_name': 'Author'}, 'account': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['accounts.Account']", 'unique': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'main_image': 
('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'+'", 'unique': 'True', 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'blank': 'True', 'max_length': '30'}) }, 'blog.article': { 'Meta': {'object_name': 'Article'}, 'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['accounts.Author']", 'symmetrical': 'False', 'related_name': "'blog_article_authors'"}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'to': "orm['blog.Category']", 'symmetrical': 'False', 'related_name': "'blog_article_categories'", 'null': 'True'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}), 'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}), 'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'main_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'blog_article_main_image'", 'unique': 'True', 'null': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '200'}), 'thumb_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'blog_article_thumb_image'", 'unique': 'True', 'null': 'True'}), 'title_image': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'to': "orm['common.Image']", 'related_name': "'blog_article_title_image'", 'unique': 'True', 'null': 'True'}) }, 'blog.articletranslation': { 'Meta': {'object_name': 'ArticleTranslation', 'unique_together': "[('language_code', 'master')]", 'db_table': "'blog_article_translation'"}, 'body': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'master': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Article']", 'related_name': "'translations'", 'null': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'blog.category': { 'Meta': {'object_name': 'Category'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 'True'}), 'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'blog.categorytranslation': { 'Meta': {'object_name': 'CategoryTranslation', 'unique_together': "[('language_code', 'master')]", 'db_table': "'blog_category_translation'"}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), 'master': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Category']", 'related_name': "'translations'", 'null': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}) }, 'blog.comment': { 'Meta': {'object_name': 'Comment'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blog.Article']"}), 'body': ('django.db.models.fields.TextField', [], {}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now_add': 
'True'}), 'date_last_modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True', 'auto_now': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'participant': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'to': "orm['accounts.Account']", 'null': 'True'}) }, 'blog.taggeditem': { 'Meta': {'object_name': 'TaggedItem'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'related_name': "'blog_tags'"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'tag': ('django.db.models.fields.SlugField', [], {'max_length': '50'}) }, 'common.image': { 'Meta': {'object_name': 'Image'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'related_name': "'+'"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '200'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'", 'object_name': 'ContentType'}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['blog']
agpl-3.0
-4,488,898,404,940,486,700
65.328638
216
0.590784
false
3.72253
false
false
false
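For orientation, the schema that forwards() creates corresponds roughly to Django 1.x-era model definitions like these (a trimmed, hand-written equivalent, not the app's actual models.py; the *_translation tables, TaggedItem and the title/thumb image fields are omitted):

from django.db import models

class Category(models.Model):
    date_created = models.DateTimeField(auto_now_add=True, blank=True)
    date_last_modified = models.DateTimeField(auto_now=True, blank=True)

class Article(models.Model):
    slug = models.SlugField(max_length=200)
    enabled = models.BooleanField(default=True)
    date_created = models.DateTimeField(auto_now_add=True, blank=True)
    date_last_modified = models.DateTimeField(auto_now=True, blank=True)
    main_image = models.OneToOneField('common.Image', null=True, blank=True,
                                      related_name='blog_article_main_image')
    authors = models.ManyToManyField('accounts.Author',
                                     related_name='blog_article_authors')
    categories = models.ManyToManyField(Category, blank=True, null=True,
                                        related_name='blog_article_categories')

class Comment(models.Model):
    participant = models.ForeignKey('accounts.Account', null=True, blank=True)
    article = models.ForeignKey(Article)
    body = models.TextField()
    date_created = models.DateTimeField(auto_now_add=True, blank=True)
    date_last_modified = models.DateTimeField(auto_now=True, blank=True)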
Ultimaker/Cura
cura/XRayPass.py
1
1577
# Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.

import os.path

from UM.Resources import Resources
from UM.Application import Application
from UM.PluginRegistry import PluginRegistry
from UM.View.RenderPass import RenderPass
from UM.View.RenderBatch import RenderBatch
from UM.View.GL.OpenGL import OpenGL

from cura.Scene.CuraSceneNode import CuraSceneNode

from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator


class XRayPass(RenderPass):
    def __init__(self, width, height):
        super().__init__("xray", width, height)

        self._shader = None
        self._gl = OpenGL.getInstance().getBindingsObject()
        self._scene = Application.getInstance().getController().getScene()

    def render(self):
        if not self._shader:
            self._shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "xray.shader"))

        batch = RenderBatch(self._shader, type = RenderBatch.RenderType.NoType, backface_cull = False, blend_mode = RenderBatch.BlendMode.Additive)
        for node in DepthFirstIterator(self._scene.getRoot()):
            if isinstance(node, CuraSceneNode) and node.getMeshData() and node.isVisible():
                batch.addItem(node.getWorldTransformation(copy = False), node.getMeshData(), normal_transformation=node.getCachedNormalMatrix())

        self.bind()

        self._gl.glDisable(self._gl.GL_DEPTH_TEST)
        batch.render(self._scene.getActiveCamera())
        self._gl.glEnable(self._gl.GL_DEPTH_TEST)

        self.release()
lgpl-3.0
-1,777,091,487,277,248,500
38.425
147
0.715916
false
3.754762
false
false
false
the-nick-of-time/DnD
DnD/modules/resourceModule.py
1
2471
import tkinter as tk
from typing import Union

import lib.components as gui
import lib.resourceLib as res
import lib.settingsLib as settings


class ResourceDisplay(gui.Section):
    """Displays a resource like sorcery points or Hit Dice."""

    def __init__(self, container: Union[tk.BaseWidget, tk.Tk], resource: res.Resource,
                 lockMax=False, **kwargs):
        super().__init__(container, **kwargs)
        self.resource = resource
        self.numbers = tk.Frame(self.f)
        self.current = gui.NumericEntry(self.numbers, self.resource.number, self.set_current, width=5)
        self.max = gui.NumericEntry(self.numbers, self.resource.maxnumber, self.set_max, width=5)
        if lockMax:
            self.max.disable()
        self.value = tk.Label(self.numbers, text='*' + str(self.resource.value))
        self.buttonFrame = tk.Frame(self.f)
        self.use = tk.Button(self.buttonFrame, text='-', command=self.increment)
        self.regain = tk.Button(self.buttonFrame, text='+', command=self.decrement)
        self.display = tk.Label(self.buttonFrame, width=3)
        self.reset_ = tk.Button(self.buttonFrame, text='Reset', command=self.reset)
        self._draw()

    def _draw(self):
        tk.Label(self.f, text=self.resource.name).grid(row=0, column=0)
        self.numbers.grid(row=1, column=0)
        self.current.grid(1, 0)
        tk.Label(self.numbers, text='/').grid(row=1, column=1)
        self.max.grid(1, 2)
        self.value.grid(row=1, column=4)
        self.buttonFrame.grid(row=2, column=0, columnspan=3)
        self.display.grid(row=0, column=0)
        self.regain.grid(row=0, column=1)
        self.use.grid(row=0, column=2)
        self.reset_.grid(row=0, column=3)

    def update_view(self):
        self.max.set(self.resource.maxnumber)
        self.current.set(self.resource.number)

    def set_current(self, value):
        self.resource.number = value

    def set_max(self, value):
        self.resource.maxnumber = value

    def increment(self):
        self.resource.regain(1)
        self.update_view()

    def decrement(self):
        val = self.resource.use(1)
        self.display.config(text=str(val))
        self.update_view()

    def reset(self):
        self.resource.reset()
        self.update_view()

    def rest(self, which: settings.RestLength):
        self.resource.rest(which)
        self.update_view()
gpl-2.0
771,843,469,293,149,600
34.811594
93
0.613112
false
3.490113
false
false
false
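A rough way to exercise ResourceDisplay above on its own (run from within the module above, or import ResourceDisplay from it). SimpleResource is a hypothetical stand-in exposing only the attributes and methods the widget touches; the real class lives in lib.resourceLib and may differ. The final grid call assumes gui.Section exposes a grid(row, column) helper, as the NumericEntry calls above suggest.

import tkinter as tk

class SimpleResource:
    # Hypothetical stand-in for lib.resourceLib.Resource.
    def __init__(self, name, number, maxnumber, value):
        self.name = name
        self.number = number          # current amount
        self.maxnumber = maxnumber    # maximum amount
        self.value = value            # per-use value, e.g. a die size

    def use(self, n):
        self.number -= n
        return self.value

    def regain(self, n):
        self.number = min(self.maxnumber, self.number + n)

    def reset(self):
        self.number = self.maxnumber

root = tk.Tk()
display = ResourceDisplay(root, SimpleResource('Hit Dice', 3, 5, 8))
display.grid(0, 0)
root.mainloop()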
lptorres/noah-inasafe
web_api/third_party/simplejson/decoder.py
1
14670
"""Implementation of JSONDecoder """ from __future__ import absolute_import import re import sys import struct from .compat import fromhex, b, u, text_type, binary_type, PY3, unichr from .scanner import make_scanner, JSONDecodeError def _import_c_scanstring(): try: from ._speedups import scanstring return scanstring except ImportError: return None c_scanstring = _import_c_scanstring() # NOTE (3.1.0): JSONDecodeError may still be imported from this module for # compatibility, but it was never in the __all__ __all__ = ['JSONDecoder'] FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL def _floatconstants(): _BYTES = fromhex('7FF80000000000007FF0000000000000') # The struct module in Python 2.4 would get frexp() out of range here # when an endian is specified in the format string. Fixed in Python 2.5+ if sys.byteorder != 'big': _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1] nan, inf = struct.unpack('dd', _BYTES) return nan, inf, -inf NaN, PosInf, NegInf = _floatconstants() _CONSTANTS = { '-Infinity': NegInf, 'Infinity': PosInf, 'NaN': NaN, } STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS) BACKSLASH = { '"': u('"'), '\\': u('\u005c'), '/': u('/'), 'b': u('\b'), 'f': u('\f'), 'n': u('\n'), 'r': u('\r'), 't': u('\t'), } DEFAULT_ENCODING = "utf-8" def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join, _PY3=PY3, _maxunicode=sys.maxunicode): """Scan the string s for a JSON string. End is the index of the character in s after the quote that started the JSON string. Unescapes all valid JSON string escape sequences and raises ValueError on attempt to decode an invalid string. If strict is False then literal control characters are allowed in the string. Returns a tuple of the decoded string and the index of the character in s after the end quote.""" if encoding is None: encoding = DEFAULT_ENCODING chunks = [] _append = chunks.append begin = end - 1 while 1: chunk = _m(s, end) if chunk is None: raise JSONDecodeError( "Unterminated string starting at", s, begin) end = chunk.end() content, terminator = chunk.groups() # Content is contains zero or more unescaped string characters if content: if not _PY3 and not isinstance(content, text_type): content = text_type(content, encoding) _append(content) # Terminator is the end of string, a literal control character, # or a backslash denoting that an escape sequence follows if terminator == '"': break elif terminator != '\\': if strict: msg = "Invalid control character %r at" raise JSONDecodeError(msg, s, end) else: _append(terminator) continue try: esc = s[end] except IndexError: raise JSONDecodeError( "Unterminated string starting at", s, begin) # If not a unicode escape sequence, must be in the lookup table if esc != 'u': try: char = _b[esc] except KeyError: msg = "Invalid \\X escape sequence %r" raise JSONDecodeError(msg, s, end) end += 1 else: # Unicode escape sequence msg = "Invalid \\uXXXX escape sequence" esc = s[end + 1:end + 5] escX = esc[1:2] if len(esc) != 4 or escX == 'x' or escX == 'X': raise JSONDecodeError(msg, s, end - 1) try: uni = int(esc, 16) except ValueError: raise JSONDecodeError(msg, s, end - 1) end += 5 # Check for surrogate pair on UCS-4 systems # Note that this will join high/low surrogate pairs # but will also pass unpaired surrogates through if (_maxunicode > 65535 and uni & 0xfc00 == 0xd800 and s[end:end + 2] == '\\u'): esc2 = s[end + 2:end + 6] escX = esc2[1:2] if len(esc2) == 4 and not (escX == 'x' or escX == 'X'): try: uni2 = int(esc2, 16) except ValueError: raise 
JSONDecodeError(msg, s, end) if uni2 & 0xfc00 == 0xdc00: uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00)) end += 6 char = unichr(uni) # Append the unescaped character _append(char) return _join(chunks), end # Use speedup if available scanstring = c_scanstring or py_scanstring WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS) WHITESPACE_STR = ' \t\n\r' def JSONObject(state, encoding, strict, scan_once, object_hook, object_pairs_hook, memo=None, _w=WHITESPACE.match, _ws=WHITESPACE_STR): (s, end) = state # Backwards compatibility if memo is None: memo = {} memo_get = memo.setdefault pairs = [] # Use a slice to prevent IndexError from being raised, the following # check will raise a more specific ValueError if the string is empty nextchar = s[end:end + 1] # Normally we expect nextchar == '"' if nextchar != '"': if nextchar in _ws: end = _w(s, end).end() nextchar = s[end:end + 1] # Trivial empty object if nextchar == '}': if object_pairs_hook is not None: result = object_pairs_hook(pairs) return result, end + 1 pairs = {} if object_hook is not None: pairs = object_hook(pairs) return pairs, end + 1 elif nextchar != '"': raise JSONDecodeError( "Expecting property name enclosed in double quotes", s, end) end += 1 while True: key, end = scanstring(s, end, encoding, strict) key = memo_get(key, key) # To skip some function call overhead we optimize the fast paths where # the JSON key separator is ": " or just ":". if s[end:end + 1] != ':': end = _w(s, end).end() if s[end:end + 1] != ':': raise JSONDecodeError("Expecting ':' delimiter", s, end) end += 1 try: if s[end] in _ws: end += 1 if s[end] in _ws: end = _w(s, end + 1).end() except IndexError: pass value, end = scan_once(s, end) pairs.append((key, value)) try: nextchar = s[end] if nextchar in _ws: end = _w(s, end + 1).end() nextchar = s[end] except IndexError: nextchar = '' end += 1 if nextchar == '}': break elif nextchar != ',': raise JSONDecodeError("Expecting ',' delimiter or '}'", s, end - 1) try: nextchar = s[end] if nextchar in _ws: end += 1 nextchar = s[end] if nextchar in _ws: end = _w(s, end + 1).end() nextchar = s[end] except IndexError: nextchar = '' end += 1 if nextchar != '"': raise JSONDecodeError( "Expecting property name enclosed in double quotes", s, end - 1) if object_pairs_hook is not None: result = object_pairs_hook(pairs) return result, end pairs = dict(pairs) if object_hook is not None: pairs = object_hook(pairs) return pairs, end def JSONArray(state, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR): (s, end) = state values = [] nextchar = s[end:end + 1] if nextchar in _ws: end = _w(s, end + 1).end() nextchar = s[end:end + 1] # Look-ahead for trivial empty array if nextchar == ']': return values, end + 1 elif nextchar == '': raise JSONDecodeError("Expecting value or ']'", s, end) _append = values.append while True: value, end = scan_once(s, end) _append(value) nextchar = s[end:end + 1] if nextchar in _ws: end = _w(s, end + 1).end() nextchar = s[end:end + 1] end += 1 if nextchar == ']': break elif nextchar != ',': raise JSONDecodeError("Expecting ',' delimiter or ']'", s, end - 1) try: if s[end] in _ws: end += 1 if s[end] in _ws: end = _w(s, end + 1).end() except IndexError: pass return values, end class JSONDecoder(object): """Simple JSON <http://json.org> decoder Performs the following translations in decoding by default: +---------------+-------------------+ | JSON | Python | +===============+===================+ | object | dict | +---------------+-------------------+ | array | list | 
+---------------+-------------------+ | string | unicode | +---------------+-------------------+ | number (int) | int, long | +---------------+-------------------+ | number (real) | float | +---------------+-------------------+ | true | True | +---------------+-------------------+ | false | False | +---------------+-------------------+ | null | None | +---------------+-------------------+ It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as their corresponding ``float`` values, which is outside the JSON spec. """ def __init__(self, encoding=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, strict=True, object_pairs_hook=None): """ *encoding* determines the encoding used to interpret any :class:`str` objects decoded by this instance (``'utf-8'`` by default). It has no effect when decoding :class:`unicode` objects. Note that currently only encodings that are a superset of ASCII work, strings of other encodings should be passed in as :class:`unicode`. *object_hook*, if specified, will be called with the result of every JSON object decoded and its return value will be used in place of the given :class:`dict`. This can be used to provide custom deserializations (e.g. to support JSON-RPC class hinting). *object_pairs_hook* is an optional function that will be called with the result of any object literal decode with an ordered list of pairs. The return value of *object_pairs_hook* will be used instead of the :class:`dict`. This feature can be used to implement custom decoders that rely on the order that the key and value pairs are decoded (for example, :func:`collections.OrderedDict` will remember the order of insertion). If *object_hook* is also defined, the *object_pairs_hook* takes priority. *parse_float*, if specified, will be called with the string of every JSON float to be decoded. By default, this is equivalent to ``float(num_str)``. This can be used to use another datatype or parser for JSON floats (e.g. :class:`decimal.Decimal`). *parse_int*, if specified, will be called with the string of every JSON int to be decoded. By default, this is equivalent to ``int(num_str)``. This can be used to use another datatype or parser for JSON integers (e.g. :class:`float`). *parse_constant*, if specified, will be called with one of the following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This can be used to raise an exception if invalid JSON numbers are encountered. *strict* controls the parser's behavior when it encounters an invalid control character in a string. The default setting of ``True`` means that unescaped control characters are parse errors, if ``False`` then control characters will be allowed in strings. 
""" if encoding is None: encoding = DEFAULT_ENCODING self.encoding = encoding self.object_hook = object_hook self.object_pairs_hook = object_pairs_hook self.parse_float = parse_float or float self.parse_int = parse_int or int self.parse_constant = parse_constant or _CONSTANTS.__getitem__ self.strict = strict self.parse_object = JSONObject self.parse_array = JSONArray self.parse_string = scanstring self.memo = {} self.scan_once = make_scanner(self) def decode(self, s, _w=WHITESPACE.match, _PY3=PY3): """Return the Python representation of ``s`` (a ``str`` or ``unicode`` instance containing a JSON document) """ if _PY3 and isinstance(s, binary_type): s = s.decode(self.encoding) obj, end = self.raw_decode(s) end = _w(s, end).end() if end != len(s): raise JSONDecodeError("Extra data", s, end, len(s)) return obj def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3): """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning with a JSON document) and return a 2-tuple of the Python representation and the index in ``s`` where the document ended. Optionally, ``idx`` can be used to specify an offset in ``s`` where the JSON document begins. This can be used to decode a JSON document from a string that may have extraneous data at the end. """ if _PY3 and not isinstance(s, text_type): raise TypeError("Input string must be text, not bytes") return self.scan_once(s, idx=_w(s, idx).end())
gpl-3.0
8,479,624,750,722,993,000
35.712082
79
0.519632
false
4.300792
false
false
false
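The decoder above is normally reached through the package-level loads(), but it can also be driven directly; a small sketch of the hooks described in its docstring (the import path assumes the third_party directory is on sys.path so the package imports as simplejson):

from collections import OrderedDict
from decimal import Decimal

from simplejson.decoder import JSONDecoder

decoder = JSONDecoder(object_pairs_hook=OrderedDict, parse_float=Decimal)
doc = decoder.decode('{"price": 1.10, "qty": 3}')
# doc == OrderedDict([('price', Decimal('1.10')), ('qty', 3)])

obj, end = decoder.raw_decode('{"a": 1} trailing data is left alone here')
# obj == OrderedDict([('a', 1)]); end is the index just past the closing brace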
miaoski/bsideslv-plc-home
hmi.py
1
1699
# -*- coding: utf8 -*-
# This trivial HMI is decoupled from ModBus server

import gevent
from flask import Flask, render_template
from flask_sockets import Sockets
from pymodbus.client.sync import ModbusTcpClient
from time import sleep
import sys

app = Flask(__name__)
sockets = Sockets(app)

try:
    myip = sys.argv[1]
except IndexError:
    print 'Usage python hmi.py 192.168.42.1'
    sys.exit(1)

client = ModbusTcpClient(myip)

def read_di(num = 20):
    rr = client.read_discrete_inputs(1, num).bits[:num]
    di = ['1' if x else '0' for x in rr]
    return di

def read_co(num = 20):
    rr = client.read_coils(1, num).bits[:num]
    di = ['1' if x else '0' for x in rr]
    return di

def read_ir(num = 5):
    rr = client.read_input_registers(1, num).registers[:num]
    di = map(str, rr)
    return di

def read_hr(num = 5):
    rr = client.read_holding_registers(1, num).registers[:num]
    di = map(str, rr)
    return di

@sockets.route('/data')
def read_data(ws):
    while not ws.closed:
        try:
            di = read_di()
            co = read_co()
            ir = read_ir()
            hr = read_hr()
        except:
            print 'Exception. Wait for next run.'
            gevent.sleep(1)
            continue
        ws.send('\n'.join((','.join(di), ','.join(co), ','.join(ir), ','.join(hr))))
        gevent.sleep(0.3)
    print "Connection Closed!!!"

@app.route('/')
def homepage():
    return render_template('hmi.html')

# main
if __name__ == "__main__":
    from gevent import pywsgi
    from geventwebsocket.handler import WebSocketHandler
    server = pywsgi.WSGIServer((myip, 8000), app, handler_class=WebSocketHandler)
    server.serve_forever()
gpl-2.0
4,138,471,727,828,687,000
23.623188
84
0.608005
false
3.248566
false
false
false
houshengbo/nova_vmware_compute_driver
nova/virt/hyperv/vmops.py
1
25971
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for basic VM operations. """ import os import uuid from nova.api.metadata import base as instance_metadata from nova import exception from nova.openstack.common import cfg from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova import utils from nova.virt import configdrive from nova.virt.hyperv import baseops from nova.virt.hyperv import constants from nova.virt.hyperv import vmutils LOG = logging.getLogger(__name__) hyperv_opts = [ cfg.StrOpt('vswitch_name', default=None, help='Default vSwitch Name, ' 'if none provided first external is used'), cfg.BoolOpt('limit_cpu_features', default=False, help='Required for live migration among ' 'hosts with different CPU features'), cfg.BoolOpt('config_drive_inject_password', default=False, help='Sets the admin password in the config drive image'), cfg.StrOpt('qemu_img_cmd', default="qemu-img.exe", help='qemu-img is used to convert between ' 'different image types'), cfg.BoolOpt('config_drive_cdrom', default=False, help='Attaches the Config Drive image as a cdrom drive ' 'instead of a disk drive') ] CONF = cfg.CONF CONF.register_opts(hyperv_opts) CONF.import_opt('use_cow_images', 'nova.config') class VMOps(baseops.BaseOps): def __init__(self, volumeops): super(VMOps, self).__init__() self._vmutils = vmutils.VMUtils() self._volumeops = volumeops def list_instances(self): """ Return the names of all the instances known to Hyper-V. 
""" vms = [v.ElementName for v in self._conn.Msvm_ComputerSystem(['ElementName'], Caption="Virtual Machine")] return vms def get_info(self, instance): """Get information about the VM""" LOG.debug(_("get_info called for instance"), instance=instance) return self._get_info(instance['name']) def _get_info(self, instance_name): vm = self._vmutils.lookup(self._conn, instance_name) if vm is None: raise exception.InstanceNotFound(instance=instance_name) vm = self._conn.Msvm_ComputerSystem( ElementName=instance_name)[0] vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] vmsettings = vm.associators( wmi_association_class='Msvm_SettingsDefineState', wmi_result_class='Msvm_VirtualSystemSettingData') settings_paths = [v.path_() for v in vmsettings] #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx summary_info = vs_man_svc.GetSummaryInformation( [constants.VM_SUMMARY_NUM_PROCS, constants.VM_SUMMARY_ENABLED_STATE, constants.VM_SUMMARY_MEMORY_USAGE, constants.VM_SUMMARY_UPTIME], settings_paths)[1] info = summary_info[0] LOG.debug(_("hyperv vm state: %s"), info.EnabledState) state = constants.HYPERV_POWER_STATE[info.EnabledState] memusage = str(info.MemoryUsage) numprocs = str(info.NumberOfProcessors) uptime = str(info.UpTime) LOG.debug(_("Got Info for vm %(instance_name)s: state=%(state)d," " mem=%(memusage)s, num_cpu=%(numprocs)s," " uptime=%(uptime)s"), locals()) return {'state': state, 'max_mem': info.MemoryUsage, 'mem': info.MemoryUsage, 'num_cpu': info.NumberOfProcessors, 'cpu_time': info.UpTime} def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info, block_device_info=None): """ Create a new VM and start it.""" vm = self._vmutils.lookup(self._conn, instance['name']) if vm is not None: raise exception.InstanceExists(name=instance['name']) ebs_root = self._volumeops.volume_in_mapping( self._volumeops.get_default_root_device(), block_device_info) #If is not a boot from volume spawn if not (ebs_root): #Fetch the file, assume it is a VHD file. 
vhdfile = self._vmutils.get_vhd_path(instance['name']) try: self._cache_image(fn=self._vmutils.fetch_image, context=context, target=vhdfile, fname=instance['image_ref'], image_id=instance['image_ref'], user=instance['user_id'], project=instance['project_id'], cow=CONF.use_cow_images) except Exception as exn: LOG.exception(_('cache image failed: %s'), exn) self.destroy(instance) try: self._create_vm(instance) if not ebs_root: self._attach_ide_drive(instance['name'], vhdfile, 0, 0, constants.IDE_DISK) else: self._volumeops.attach_boot_volume(block_device_info, instance['name']) #A SCSI controller for volumes connection is created self._create_scsi_controller(instance['name']) for vif in network_info: mac_address = vif['address'].replace(':', '') self._create_nic(instance['name'], mac_address) if configdrive.required_by(instance): self._create_config_drive(instance, injected_files, admin_password) LOG.debug(_('Starting VM %s '), instance['name']) self._set_vm_state(instance['name'], 'Enabled') LOG.info(_('Started VM %s '), instance['name']) except Exception as exn: LOG.exception(_('spawn vm failed: %s'), exn) self.destroy(instance) raise exn def _create_config_drive(self, instance, injected_files, admin_password): if CONF.config_drive_format != 'iso9660': vmutils.HyperVException(_('Invalid config_drive_format "%s"') % CONF.config_drive_format) LOG.info(_('Using config drive'), instance=instance) extra_md = {} if admin_password and CONF.config_drive_inject_password: extra_md['admin_pass'] = admin_password inst_md = instance_metadata.InstanceMetadata(instance, content=injected_files, extra_md=extra_md) instance_path = self._vmutils.get_instance_path( instance['name']) configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso') LOG.info(_('Creating config drive at %(path)s'), {'path': configdrive_path_iso}, instance=instance) with configdrive.config_drive_helper(instance_md=inst_md) as cdb: try: cdb.make_drive(configdrive_path_iso) except exception.ProcessExecutionError, e: LOG.error(_('Creating config drive failed with error: %s'), e, instance=instance) raise if not CONF.config_drive_cdrom: drive_type = constants.IDE_DISK configdrive_path = os.path.join(instance_path, 'configdrive.vhd') utils.execute(CONF.qemu_img_cmd, 'convert', '-f', 'raw', '-O', 'vpc', configdrive_path_iso, configdrive_path, attempts=1) os.remove(configdrive_path_iso) else: drive_type = constants.IDE_DVD configdrive_path = configdrive_path_iso self._attach_ide_drive(instance['name'], configdrive_path, 1, 0, drive_type) def _create_vm(self, instance): """Create a VM but don't start it. """ vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() vs_gs_data.ElementName = instance["name"] (job, ret_val) = vs_man_svc.DefineVirtualSystem( [], None, vs_gs_data.GetText_(1))[1:] if ret_val == constants.WMI_JOB_STATUS_STARTED: success = self._vmutils.check_job_status(job) else: success = (ret_val == 0) if not success: raise vmutils.HyperVException(_('Failed to create VM %s') % instance["name"]) LOG.debug(_('Created VM %s...'), instance["name"]) vm = self._conn.Msvm_ComputerSystem(ElementName=instance["name"])[0] vmsettings = vm.associators( wmi_result_class='Msvm_VirtualSystemSettingData') vmsetting = [s for s in vmsettings if s.SettingType == 3][0] # avoid snapshots memsetting = vmsetting.associators( wmi_result_class='Msvm_MemorySettingData')[0] #No Dynamic Memory, so reservation, limit and quantity are identical. 
mem = long(str(instance['memory_mb'])) memsetting.VirtualQuantity = mem memsetting.Reservation = mem memsetting.Limit = mem (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( vm.path_(), [memsetting.GetText_(1)]) LOG.debug(_('Set memory for vm %s...'), instance["name"]) procsetting = vmsetting.associators( wmi_result_class='Msvm_ProcessorSettingData')[0] vcpus = long(instance['vcpus']) procsetting.VirtualQuantity = vcpus procsetting.Reservation = vcpus procsetting.Limit = 100000 # static assignment to 100% if CONF.limit_cpu_features: procsetting.LimitProcessorFeatures = True (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( vm.path_(), [procsetting.GetText_(1)]) LOG.debug(_('Set vcpus for vm %s...'), instance["name"]) def _create_scsi_controller(self, vm_name): """ Create an iscsi controller ready to mount volumes """ LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume ' 'attaching') % locals()) vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) vm = vms[0] scsicontrldefault = self._conn.query( "SELECT * FROM Msvm_ResourceAllocationSettingData \ WHERE ResourceSubType = 'Microsoft Synthetic SCSI Controller'\ AND InstanceID LIKE '%Default%'")[0] if scsicontrldefault is None: raise vmutils.HyperVException(_('Controller not found')) scsicontrl = self._vmutils.clone_wmi_obj(self._conn, 'Msvm_ResourceAllocationSettingData', scsicontrldefault) scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] scsiresource = self._vmutils.add_virt_resource(self._conn, scsicontrl, vm) if scsiresource is None: raise vmutils.HyperVException( _('Failed to add scsi controller to VM %s') % vm_name) def _get_ide_controller(self, vm, ctrller_addr): #Find the IDE controller for the vm. vmsettings = vm.associators( wmi_result_class='Msvm_VirtualSystemSettingData') rasds = vmsettings[0].associators( wmi_result_class='MSVM_ResourceAllocationSettingData') ctrller = [r for r in rasds if r.ResourceSubType == 'Microsoft Emulated IDE Controller' and r.Address == str(ctrller_addr)] return ctrller def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr, drive_type=constants.IDE_DISK): """Create an IDE drive and attach it to the vm""" LOG.debug(_('Creating disk for %(vm_name)s by attaching' ' disk file %(path)s') % locals()) vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) vm = vms[0] ctrller = self._get_ide_controller(vm, ctrller_addr) if drive_type == constants.IDE_DISK: resSubType = 'Microsoft Synthetic Disk Drive' elif drive_type == constants.IDE_DVD: resSubType = 'Microsoft Synthetic DVD Drive' #Find the default disk drive object for the vm and clone it. drivedflt = self._conn.query( "SELECT * FROM Msvm_ResourceAllocationSettingData \ WHERE ResourceSubType LIKE '%(resSubType)s'\ AND InstanceID LIKE '%%Default%%'" % locals())[0] drive = self._vmutils.clone_wmi_obj(self._conn, 'Msvm_ResourceAllocationSettingData', drivedflt) #Set the IDE ctrller as parent. drive.Parent = ctrller[0].path_() drive.Address = drive_addr #Add the cloned disk drive object to the vm. new_resources = self._vmutils.add_virt_resource(self._conn, drive, vm) if new_resources is None: raise vmutils.HyperVException( _('Failed to add drive to VM %s') % vm_name) drive_path = new_resources[0] LOG.debug(_('New %(drive_type)s drive path is %(drive_path)s') % locals()) if drive_type == constants.IDE_DISK: resSubType = 'Microsoft Virtual Hard Disk' elif drive_type == constants.IDE_DVD: resSubType = 'Microsoft Virtual CD/DVD Disk' #Find the default VHD disk object. 
drivedefault = self._conn.query( "SELECT * FROM Msvm_ResourceAllocationSettingData \ WHERE ResourceSubType LIKE '%(resSubType)s' AND \ InstanceID LIKE '%%Default%%' " % locals())[0] #Clone the default and point it to the image file. res = self._vmutils.clone_wmi_obj(self._conn, 'Msvm_ResourceAllocationSettingData', drivedefault) #Set the new drive as the parent. res.Parent = drive_path res.Connection = [path] #Add the new vhd object as a virtual hard disk to the vm. new_resources = self._vmutils.add_virt_resource(self._conn, res, vm) if new_resources is None: raise vmutils.HyperVException( _('Failed to add %(drive_type)s image to VM %(vm_name)s') % locals()) LOG.info(_('Created drive type %(drive_type)s for %(vm_name)s') % locals()) def _create_nic(self, vm_name, mac): """Create a (synthetic) nic and attach it to the vm""" LOG.debug(_('Creating nic for %s '), vm_name) #Find the vswitch that is connected to the physical nic. vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) extswitch = self._find_external_network() if extswitch is None: raise vmutils.HyperVException(_('Cannot find vSwitch')) vm = vms[0] switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] #Find the default nic and clone it to create a new nic for the vm. #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with #Linux Integration Components installed. syntheticnics_data = self._conn.Msvm_SyntheticEthernetPortSettingData() default_nic_data = [n for n in syntheticnics_data if n.InstanceID.rfind('Default') > 0] new_nic_data = self._vmutils.clone_wmi_obj(self._conn, 'Msvm_SyntheticEthernetPortSettingData', default_nic_data[0]) #Create a port on the vswitch. (new_port, ret_val) = switch_svc.CreateSwitchPort( Name=str(uuid.uuid4()), FriendlyName=vm_name, ScopeOfResidence="", VirtualSwitch=extswitch.path_()) if ret_val != 0: LOG.error(_('Failed creating a port on the external vswitch')) raise vmutils.HyperVException(_('Failed creating port for %s') % vm_name) ext_path = extswitch.path_() LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s") % locals()) #Connect the new nic to the new port. new_nic_data.Connection = [new_port] new_nic_data.ElementName = vm_name + ' nic' new_nic_data.Address = mac new_nic_data.StaticMacAddress = 'True' new_nic_data.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}'] #Add the new nic to the vm. new_resources = self._vmutils.add_virt_resource(self._conn, new_nic_data, vm) if new_resources is None: raise vmutils.HyperVException(_('Failed to add nic to VM %s') % vm_name) LOG.info(_("Created nic for %s "), vm_name) def _find_external_network(self): """Find the vswitch that is connected to the physical nic. Assumes only one physical nic on the host """ #If there are no physical nics connected to networks, return. 
LOG.debug(_("Attempting to bind NIC to %s ") % CONF.vswitch_name) if CONF.vswitch_name: LOG.debug(_("Attempting to bind NIC to %s ") % CONF.vswitch_name) bound = self._conn.Msvm_VirtualSwitch( ElementName=CONF.vswitch_name) else: LOG.debug(_("No vSwitch specified, attaching to default")) self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') if len(bound) == 0: return None if CONF.vswitch_name: return self._conn.Msvm_VirtualSwitch( ElementName=CONF.vswitch_name)[0]\ .associators(wmi_result_class='Msvm_SwitchPort')[0]\ .associators(wmi_result_class='Msvm_VirtualSwitch')[0] else: return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')\ .associators(wmi_result_class='Msvm_SwitchPort')[0]\ .associators(wmi_result_class='Msvm_VirtualSwitch')[0] def reboot(self, instance, network_info, reboot_type): """Reboot the specified instance.""" vm = self._vmutils.lookup(self._conn, instance['name']) if vm is None: raise exception.InstanceNotFound(instance_id=instance["id"]) self._set_vm_state(instance['name'], 'Reboot') def destroy(self, instance, network_info=None, cleanup=True): """Destroy the VM. Also destroy the associated VHD disk files""" LOG.debug(_("Got request to destroy vm %s"), instance['name']) vm = self._vmutils.lookup(self._conn, instance['name']) if vm is None: return vm = self._conn.Msvm_ComputerSystem(ElementName=instance['name'])[0] vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] #Stop the VM first. self._set_vm_state(instance['name'], 'Disabled') vmsettings = vm.associators( wmi_result_class='Msvm_VirtualSystemSettingData') rasds = vmsettings[0].associators( wmi_result_class='MSVM_ResourceAllocationSettingData') disks = [r for r in rasds if r.ResourceSubType == 'Microsoft Virtual Hard Disk'] disk_files = [] volumes = [r for r in rasds if r.ResourceSubType == 'Microsoft Physical Disk Drive'] volumes_drives_list = [] #collect the volumes information before destroying the VM. for volume in volumes: hostResources = volume.HostResource drive_path = hostResources[0] #Appending the Msvm_Disk path volumes_drives_list.append(drive_path) #Collect disk file information before destroying the VM. for disk in disks: disk_files.extend([c for c in disk.Connection]) #Nuke the VM. Does not destroy disks. (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) if ret_val == constants.WMI_JOB_STATUS_STARTED: success = self._vmutils.check_job_status(job) elif ret_val == 0: success = True if not success: raise vmutils.HyperVException(_('Failed to destroy vm %s') % instance['name']) #Disconnect volumes for volume_drive in volumes_drives_list: self._volumeops.disconnect_volume(volume_drive) #Delete associated vhd disk files. 
for disk in disk_files: vhdfile = self._conn_cimv2.query( "Select * from CIM_DataFile where Name = '" + disk.replace("'", "''") + "'")[0] LOG.debug(_("Del: disk %(vhdfile)s vm %(name)s") % {'vhdfile': vhdfile, 'name': instance['name']}) vhdfile.Delete() def pause(self, instance): """Pause VM instance.""" LOG.debug(_("Pause instance"), instance=instance) self._set_vm_state(instance["name"], 'Paused') def unpause(self, instance): """Unpause paused VM instance.""" LOG.debug(_("Unpause instance"), instance=instance) self._set_vm_state(instance["name"], 'Enabled') def suspend(self, instance): """Suspend the specified instance.""" print instance LOG.debug(_("Suspend instance"), instance=instance) self._set_vm_state(instance["name"], 'Suspended') def resume(self, instance): """Resume the suspended VM instance.""" LOG.debug(_("Resume instance"), instance=instance) self._set_vm_state(instance["name"], 'Enabled') def power_off(self, instance): """Power off the specified instance.""" LOG.debug(_("Power off instance"), instance=instance) self._set_vm_state(instance["name"], 'Disabled') def power_on(self, instance): """Power on the specified instance""" LOG.debug(_("Power on instance"), instance=instance) self._set_vm_state(instance["name"], 'Enabled') def _set_vm_state(self, vm_name, req_state): """Set the desired state of the VM""" vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) if len(vms) == 0: return False (job, ret_val) = vms[0].RequestStateChange( constants.REQ_POWER_STATE[req_state]) success = False if ret_val == constants.WMI_JOB_STATUS_STARTED: success = self._vmutils.check_job_status(job) elif ret_val == 0: success = True elif ret_val == 32775: #Invalid state for current operation. Typically means it is #already in the state requested success = True if success: LOG.info(_("Successfully changed vm state of %(vm_name)s" " to %(req_state)s") % locals()) else: msg = _("Failed to change vm state of %(vm_name)s" " to %(req_state)s") % locals() LOG.error(msg) raise vmutils.HyperVException(msg) def _cache_image(self, fn, target, fname, cow=False, Size=None, *args, **kwargs): """Wrapper for a method that creates an image that caches the image. This wrapper will save the image into a common store and create a copy for use by the hypervisor. The underlying method should specify a kwarg of target representing where the image will be saved. fname is used as the filename of the base image. The filename needs to be unique to a given image. If cow is True, it will make a CoW image instead of a copy. """ @lockutils.synchronized(fname, 'nova-') def call_if_not_exists(path, fn, *args, **kwargs): if not os.path.exists(path): fn(target=path, *args, **kwargs) if not os.path.exists(target): LOG.debug(_("use_cow_image:%s"), cow) if cow: base = self._vmutils.get_base_vhd_path(fname) call_if_not_exists(base, fn, *args, **kwargs) image_service = self._conn.query( "Select * from Msvm_ImageManagementService")[0] (job, ret_val) = \ image_service.CreateDifferencingVirtualHardDisk( Path=target, ParentPath=base) LOG.debug( "Creating difference disk: JobID=%s, Source=%s, Target=%s", job, base, target) if ret_val == constants.WMI_JOB_STATUS_STARTED: success = self._vmutils.check_job_status(job) else: success = (ret_val == 0) if not success: raise vmutils.HyperVException( _('Failed to create Difference Disk from ' '%(base)s to %(target)s') % locals()) else: call_if_not_exists(target, fn, *args, **kwargs)
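The _cache_image docstring above describes a download-once, copy-per-instance pattern. The standalone sketch below restates that idiom in plain Python; the names and the use of threading locks are illustrative assumptions, not part of the Hyper-V driver, which relies on nova's lockutils and VHD differencing disks instead.

# Illustrative sketch of the caching idiom described by ``_cache_image``:
# fetch the base image once under a per-image lock, then derive a per-VM
# copy. Names here are made up for the example.
import os
import shutil
import threading

_locks = {}
_locks_guard = threading.Lock()

def _lock_for(name):
    # One lock per base-image name, so concurrent spawns of the same image
    # do not fetch it twice.
    with _locks_guard:
        return _locks.setdefault(name, threading.Lock())

def cache_image(fetch, base_path, target_path):
    with _lock_for(base_path):
        if not os.path.exists(base_path):
            fetch(base_path)                       # expensive fetch, done once
    if not os.path.exists(target_path):
        shutil.copyfile(base_path, target_path)    # cheap per-instance copy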
apache-2.0
-4,034,157,555,488,214,000
42.141196
79
0.576643
false
4.102844
true
false
false
mganeva/mantid
qt/applications/workbench/workbench/widgets/plotselector/presenter.py
1
15293
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source # & Institut Laue - Langevin # SPDX - License - Identifier: GPL - 3.0 + # This file is part of the mantid workbench. # # from __future__ import absolute_import, print_function import os import re from .model import PlotSelectorModel from .view import PlotSelectorView, Column class PlotSelectorPresenter(object): """ Presenter for the plot selector widget. This class can be responsible for the creation of the model and view, passing in the GlobalFigureManager as an argument, or the presenter and view can be passed as arguments (only intended for testing). """ def __init__(self, global_figure_manager, view=None, model=None): """ Initialise the presenter, creating the view and model, and setting the initial plot list :param global_figure_manager: The GlobalFigureManager class :param view: Optional - a view to use instead of letting the class create one (intended for testing) :param model: Optional - a model to use instead of letting the class create one (intended for testing) """ # Create model and view, or accept mocked versions if view is None: self.view = PlotSelectorView(self) else: self.view = view if model is None: self.model = PlotSelectorModel(self, global_figure_manager) else: self.model = model # Make sure the plot list is up to date self.update_plot_list() def get_plot_name_from_number(self, plot_number): return self.model.get_plot_name_from_number(plot_number) # ------------------------ Plot Updates ------------------------ def update_plot_list(self): """ Updates the plot list in the model and the view. Filter text is applied to the updated selection if required. """ plot_list = self.model.get_plot_list() self.view.set_plot_list(plot_list) def append_to_plot_list(self, plot_number): """ Appends the plot name to the end of the plot list :param plot_number: The unique number in GlobalFigureManager """ self.view.append_to_plot_list(plot_number) self.view.set_visibility_icon(plot_number, self.model.is_visible(plot_number)) def remove_from_plot_list(self, plot_number): """ Removes the plot name from the plot list :param plot_number: The unique number in GlobalFigureManager """ self.view.remove_from_plot_list(plot_number) def rename_in_plot_list(self, plot_number, new_name): """ Replaces a name in the plot list :param plot_number: The unique number in GlobalFigureManager :param new_name: The new name for the plot """ self.view.rename_in_plot_list(plot_number, new_name) # ----------------------- Plot Filtering ------------------------ def filter_text_changed(self): """ Called by the view when the filter text is changed (e.g. 
by typing or clearing the text) """ if self.view.get_filter_text(): self.view.filter_plot_list() else: self.view.unhide_all_plots() def is_shown_by_filter(self, plot_number): """ :param plot_number: The unique number in GlobalFigureManager :return: True if shown, or False if filtered out """ filter_text = self.view.get_filter_text() plot_name = self.get_plot_name_from_number(plot_number) return filter_text.lower() in plot_name.lower() # ------------------------ Plot Showing ------------------------ def show_single_selected(self): """ When a list item is double clicked the view calls this method to bring the selected plot to the front """ plot_number = self.view.get_currently_selected_plot_number() self._make_plot_active(plot_number) def show_multiple_selected(self): """ Shows multiple selected plots, e.g. from pressing the 'Show' button with multiple selected plots """ selected_plots = self.view.get_all_selected_plot_numbers() for plot_number in selected_plots: self._make_plot_active(plot_number) def _make_plot_active(self, plot_number): """ Make the plot with the given name active - bring it to the front and make it the choice for overplotting :param plot_number: The unique number in GlobalFigureManager """ try: self.model.show_plot(plot_number) except ValueError as e: print(e) def set_active_font(self, plot_number): """ Set the icon for the active plot to be colored :param plot_number: The unique number in GlobalFigureManager """ active_plot_number = self.view.active_plot_number if active_plot_number > 0: try: self.view.set_active_font(active_plot_number, False) except TypeError: pass # The last active plot could have been closed # already, so there is nothing to do self.view.set_active_font(plot_number, True) self.view.active_plot_number = plot_number # ------------------------ Plot Hiding ------------------------- def hide_selected_plots(self): """ Hide all plots that are selected in the view """ selected_plots = self.view.get_all_selected_plot_numbers() for plot_number in selected_plots: self._hide_plot(plot_number) def _hide_plot(self, plot_number): """ Hides a single plot """ try: self.model.hide_plot(plot_number) except ValueError as e: print(e) def toggle_plot_visibility(self, plot_number): """ Toggles a plot between hidden and shown :param plot_number: The unique number in GlobalFigureManager """ if self.model.is_visible(plot_number): self._hide_plot(plot_number) else: self._make_plot_active(plot_number) self.update_visibility_icon(plot_number) def update_visibility_icon(self, plot_number): """ Updates the icon to indicate a plot as hidden or visible :param plot_number: The unique number in GlobalFigureManager """ try: is_visible = self.model.is_visible(plot_number) self.view.set_visibility_icon(plot_number, is_visible) except ValueError: # There is a chance the plot was closed, which calls an # update to this method. If we can not get the visibility # status it is safe to assume the plot has been closed. 
pass # ------------------------ Plot Renaming ------------------------ def rename_figure(self, plot_number, new_name): """ Replaces a name in the plot list :param plot_number: The unique number in GlobalFigureManager :param new_name: The new plot name """ try: self.model.rename_figure(plot_number, new_name) except ValueError as e: # We need to undo the rename in the view self.view.rename_in_plot_list(plot_number, new_name) print(e) # ------------------------ Plot Closing ------------------------- def close_action_called(self): """ This is called by the view when closing plots is requested (e.g. pressing close or delete). """ selected_plots = self.view.get_all_selected_plot_numbers() self._close_plots(selected_plots) def close_single_plot(self, plot_number): """ This is used to close plots when a close action is called that does not refer to the selected plot(s) :param plot_number: The unique number in GlobalFigureManager """ self._close_plots([plot_number]) def _close_plots(self, list_of_plot_numbers): """ Accepts a list of plot names to close :param list_of_plots: A list of strings containing plot names """ for plot_number in list_of_plot_numbers: try: self.model.close_plot(plot_number) except ValueError as e: print(e) # ----------------------- Plot Sorting -------------------------- def set_sort_order(self, is_ascending): """ Sets the sort order in the view :param is_ascending: If true ascending order, else descending """ self.view.set_sort_order(is_ascending) def set_sort_type(self, sort_type): """ Sets the sort order in the view :param sort_type: A Column enum with the column to sort on """ self.view.set_sort_type(sort_type) self.update_last_active_order() def update_last_active_order(self): """ Update the sort keys in the view. This is only required when changes to the last shown order occur in the model, when renaming the key is set already """ if self.view.sort_type() == Column.LastActive: self._set_last_active_order() def _set_last_active_order(self): """ Set the last shown order in the view. This checks the sorting currently set and then sets the sort keys to the appropriate values """ last_active_values = self.model.last_active_values() self.view.set_last_active_values(last_active_values) def get_initial_last_active_value(self, plot_number): """ Gets the initial last active value for a plot just added, in this case it is assumed to not have been shown :param plot_number: The unique number in GlobalFigureManager :return: A string with the last active value """ return '_' + self.model.get_plot_name_from_number(plot_number) def get_renamed_last_active_value(self, plot_number, old_last_active_value): """ Gets the initial last active value for a plot that was renamed. If the plot had a numeric value, i.e. 
has been shown this is retained, else it is set :param plot_number: The unique number in GlobalFigureManager :param old_last_active_value: The previous last active value """ if old_last_active_value.isdigit(): return old_last_active_value else: return self.get_initial_last_active_value(plot_number) # ---------------------- Plot Exporting ------------------------- def export_plots_called(self, extension): """ Export plots called from the view, then a single or multiple plots exported depending on the number currently selected :param extension: The file extension as a string including a '.', for example '.png' (must be a type supported by matplotlib) """ plot_numbers = self.view.get_all_selected_plot_numbers() if len(plot_numbers) == 1: self._export_single_plot(plot_numbers[0], extension) elif len(plot_numbers) > 1: self._export_multiple_plots(plot_numbers, extension) def _export_single_plot(self, plot_number, extension): """ Called when a single plot is selected to export - prompts for a filename then tries to save the plot :param plot_number: The unique number in GlobalFigureManager :param extension: The file extension as a string including a '.', for example '.png' (must be a type supported by matplotlib) """ absolute_path = self.view.get_file_name_for_saving(extension) if not absolute_path[-4:] == extension: absolute_path += extension try: self.model.export_plot(plot_number, absolute_path) except ValueError as e: print(e) def _export_multiple_plots(self, plot_numbers, extension): """ Export all selected plots in the plot_numbers list, first prompting for a save directory then sanitising plot names to unique, usable file names :param plot_numbers: A list of plot numbers to export :param extension: The file extension as a string including a '.', for example '.png' (must be a type supported by matplotlib) """ dir_name = self.view.get_directory_name_for_saving() # A temporary dictionary holding plot numbers as keys, plot # names as values plots = {} for plot_number in plot_numbers: plot_name = self.model.get_plot_name_from_number(plot_number) plot_name = self._replace_special_characters(plot_name) if plot_name in plots.values(): plot_name = self._make_unique_name(plot_name, plots) plots[plot_number] = plot_name self._export_plot(plot_number, plot_name, dir_name, extension) def _replace_special_characters(self, string): """ Removes any characters that are not valid in file names across all operating systems ('/' for Linux/Mac), more for Windows :param string: The string to replace characters in :return: The string with special characters replace by '-' """ return re.sub(r'[<>:"/|\\?*]', r'-', string) def _make_unique_name(self, name, dictionary): """ Given a name and a dictionary, make a unique name that does not already exist in the dictionary values by appending ' (1)', ' (2)', ' (3)' etc. 
to the end of the name :param name: A string with the non-unique name :param dictionary: A dictionary with string values :return : The unique plot name """ i = 1 while True: plot_name_attempt = name + ' ({})'.format(str(i)) if plot_name_attempt not in dictionary.values(): break i += 1 return plot_name_attempt def _export_plot(self, plot_number, plot_name, dir_name, extension): """ Given a plot number, plot name, directory and extension construct the absolute path name and call the model to save the figure :param plot_number: The unique number in GlobalFigureManager :param plot_name: The name to use for saving :param dir_name: The directory to save to :param extension: The file extension as a string including a '.', for example '.png' (must be a type supported by matplotlib) """ if dir_name: filename = os.path.join(dir_name, plot_name + extension) try: self.model.export_plot(plot_number, filename) except ValueError as e: print(e)
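The export helpers above sanitise plot names and de-duplicate them before saving. A standalone restatement of that logic, convenient for quick experimentation, might look as follows; the function name is illustrative and not part of the presenter's API.

# Standalone sketch of the filename sanitising described above (the helper
# name is made up; the presenter uses _replace_special_characters and
# _make_unique_name internally).
import re

def safe_unique_name(name, taken):
    # Replace characters that are invalid in file names on common platforms,
    # then append ' (1)', ' (2)', ... until the name is unused.
    name = re.sub(r'[<>:"/|\\?*]', '-', name)
    if name not in taken:
        return name
    i = 1
    while '{} ({})'.format(name, i) in taken:
        i += 1
    return '{} ({})'.format(name, i)

print(safe_unique_name('counts: run/42', {'counts- run-42'}))  # -> counts- run-42 (1)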
gpl-3.0
4,486,501,413,745,917,000
36.760494
86
0.589028
false
4.316399
false
false
false
DailyActie/Surrogate-Model
surrogate/sampling/samLatinHypercube.py
1
8477
# MIT License # # Copyright (c) 2016 Daily Actie # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # Author: Quan Pan <[email protected]> # License: MIT License # Create: 2016-12-02 import numpy as np def samLatinHypercube(n, samples=None, criterion=None, iterations=None): """Generate a latin-hypercube design :param n: The number of factors to generate samples for :param samples: The number of samples to generate for each factor (Default: n) :param criterion: Allowable values are "center" or "c", "maximin" or "m", "centermaximin" or "cm", and "correlation" or "corr". If no value given, the design is simply randomized. :param iterations: The number of iterations in the maximin and correlations algorithms (Default: 5). :returns: An n-by-samples design matrix that has been normalized so factor values are uniformly spaced between zero and one. This code was originally published by the following individuals for use with Scilab: - Copyright (C) 2012 - 2013 - Michael Baudin - Copyright (C) 2012 - Maria Christopoulou - Copyright (C) 2010 - 2011 - INRIA - Michael Baudin - Copyright (C) 2009 - Yann Collette - Copyright (C) 2009 - CEA - Jean-Marc Martinez web: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros Much thanks goes to these individuals. It has been converted to Python by Abraham Lee. 
:Example: A 3-factor design (defaults to 3 samples): >>> samLatinHypercube(3) array([[ 0.40069325, 0.08118402, 0.69763298], [ 0.19524568, 0.41383587, 0.29947106], [ 0.85341601, 0.75460699, 0.360024 ]]) A 4-factor design with 6 samples: >>> samLatinHypercube(4, samples=6) array([[ 0.27226812, 0.02811327, 0.62792445, 0.91988196], [ 0.76945538, 0.43501682, 0.01107457, 0.09583358], [ 0.45702981, 0.76073773, 0.90245401, 0.18773015], [ 0.99342115, 0.85814198, 0.16996665, 0.65069309], [ 0.63092013, 0.22148567, 0.33616859, 0.36332478], [ 0.05276917, 0.5819198 , 0.67194243, 0.78703262]]) A 2-factor design with 5 centered samples: >>> samLatinHypercube(2, samples=5, criterion='center') array([[ 0.3, 0.5], [ 0.7, 0.9], [ 0.1, 0.3], [ 0.9, 0.1], [ 0.5, 0.7]]) A 3-factor design with 4 samples where the minimum distance between all samples has been maximized: >>> samLatinHypercube(3, samples=4, criterion='maximin') array([[ 0.02642564, 0.55576963, 0.50261649], [ 0.51606589, 0.88933259, 0.34040838], [ 0.98431735, 0.0380364 , 0.01621717], [ 0.40414671, 0.33339132, 0.84845707]]) A 4-factor design with 5 samples where the samples are as uncorrelated as possible (within 10 iterations): >>> samLatinHypercube(4, samples=5, criterion='correlate', iterations=10) """ H = None if samples is None: samples = n if criterion is not None: assert criterion.lower() in ('center', 'c', 'maximin', 'm', 'centermaximin', 'cm', 'correlation', 'corr'), 'Invalid value for "criterion": {}'.format(criterion) else: H = _lhsclassic(n, samples) if criterion is None: criterion = 'center' if iterations is None: iterations = 5 if H is None: if criterion.lower() in ('center', 'c'): H = _lhscentered(n, samples) elif criterion.lower() in ('maximin', 'm'): H = _lhsmaximin(n, samples, iterations, 'maximin') elif criterion.lower() in ('centermaximin', 'cm'): H = _lhsmaximin(n, samples, iterations, 'centermaximin') elif criterion.lower() in ('correlate', 'corr'): H = _lhscorrelate(n, samples, iterations) return H ################################################################################ def _lhsclassic(n, samples): # Generate the intervals cut = np.linspace(0, 1, samples + 1) # Fill points uniformly in each interval u = np.random.rand(samples, n) a = cut[:samples] b = cut[1:samples + 1] rdpoints = np.zeros_like(u) for j in range(n): rdpoints[:, j] = u[:, j] * (b - a) + a # Make the random pairings H = np.zeros_like(rdpoints) for j in range(n): order = np.random.permutation(range(samples)) H[:, j] = rdpoints[order, j] return H ################################################################################ def _lhscentered(n, samples): # Generate the intervals cut = np.linspace(0, 1, samples + 1) # Fill points uniformly in each interval u = np.random.rand(samples, n) a = cut[:samples] b = cut[1:samples + 1] _center = (a + b) / 2 # Make the random pairings H = np.zeros_like(u) for j in range(n): H[:, j] = np.random.permutation(_center) return H ################################################################################ def _lhsmaximin(n, samples, iterations, lhstype): maxdist = 0 # Maximize the minimum distance between points for i in range(iterations): if lhstype == 'maximin': Hcandidate = _lhsclassic(n, samples) else: Hcandidate = _lhscentered(n, samples) d = _pdist(Hcandidate) if maxdist < np.min(d): maxdist = np.min(d) H = Hcandidate.copy() return H ################################################################################ def _lhscorrelate(n, samples, iterations): mincorr = np.inf # Minimize the components correlation 
coefficients for i in range(iterations): # Generate a random LHS Hcandidate = _lhsclassic(n, samples) R = np.corrcoef(Hcandidate) if np.max(np.abs(R[R != 1])) < mincorr: mincorr = np.max(np.abs(R - np.eye(R.shape[0]))) print('new candidate solution found with max,abs corrcoef = {}'.format(mincorr)) H = Hcandidate.copy() return H ################################################################################ def _pdist(x): """Calculate the pair-wise point distances of a matrix :param x: An m-by-n array of scalars, where there are m points in n dimensions. :type x: 2d-array :returns: d array A 1-by-b array of scalars, where b = m*(m - 1)/2. This array contains all the pair-wise point distances, arranged in the order (1, 0), (2, 0), ..., (m-1, 0), (2, 1), ..., (m-1, 1), ..., (m-1, m-2). :Example: >>> x = np.array([[0.1629447, 0.8616334], ... [0.5811584, 0.3826752], ... [0.2270954, 0.4442068], ... [0.7670017, 0.7264718], ... [0.8253975, 0.1937736]]) >>> _pdist(x) array([ 0.6358488, 0.4223272, 0.6189940, 0.9406808, 0.3593699, 0.3908118, 0.3087661, 0.6092392, 0.6486001, 0.5358894]) """ x = np.atleast_2d(x) assert len(x.shape) == 2, 'Input array must be 2d-dimensional' m, n = x.shape if m < 2: return [] d = [] for i in range(m - 1): for j in range(i + 1, m): d.append((sum((x[j, :] - x[i, :]) ** 2)) ** 0.5) return np.array(d)
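Given the criteria documented in the samLatinHypercube docstring, a typical call pairs the unit-cube design with a per-factor rescaling. A short usage sketch follows; the seed and the bounds are arbitrary examples, not values taken from the module.

# Usage sketch: a centered 5-sample design over three factors, rescaled from
# the unit cube to physical bounds (seed and bounds are arbitrary examples).
import numpy as np

np.random.seed(0)
design = samLatinHypercube(3, samples=5, criterion='center')

lower = np.array([0.0, 10.0, -1.0])
upper = np.array([1.0, 50.0, 1.0])
scaled = lower + design * (upper - lower)   # each column spans its own range
print(scaled)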
mit
210,019,104,960,525,800
33.044177
99
0.572018
false
3.502893
false
false
false
pyannote/pyannote-parser
tests/test_repere.py
1
2075
#!/usr/bin/env python # encoding: utf-8 # The MIT License (MIT) # Copyright (c) 2014-2015 CNRS # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # AUTHORS # Hervé BREDIN - http://herve.niderb.fr from __future__ import print_function import pytest from pyannote.core import Segment from pyannote.parser import REPEREParser import tempfile import os SAMPLE_ANNOTATION = """uri1 1.0 3.5 speech alice uri1 3.0 7.5 speech barbara uri1 6.0 9.0 speech chris """ @pytest.fixture def sample_annotation(request): _, filename = tempfile.mkstemp() with open(filename, 'w') as f: f.write(SAMPLE_ANNOTATION) def delete(): os.remove(filename) request.addfinalizer(delete) return filename def test_load_annotation(sample_annotation): parser = REPEREParser() annotations = parser.read(sample_annotation) speech1 = annotations(uri="uri1", modality="speech") assert list(speech1.itertracks(label=True)) == [ (Segment(1, 3.5), 0, 'alice'), (Segment(3, 7.5), 1, 'barbara'), (Segment(6, 9), 2, 'chris')]
mit
1,854,699,619,890,575,400
31.920635
79
0.729508
false
3.764065
false
false
false
Rdbaker/Mealbound
tests/models/test_transactions.py
1
4543
"""Test the Transaction models.""" from unittest.mock import patch import pytest from ceraon.models.transactions import Transaction @pytest.mark.usefixtures('db') class TestTransaction: """Transaction tests.""" def test_get_by_id(self, meal, host, guest): """Get Transaction by id.""" transaction = Transaction(payer=guest, amount=meal.price, payee=host, meal=meal) transaction.save() retrieved = Transaction.find(transaction.id) assert retrieved == transaction @patch('ceraon.models.transactions.stripe') def test_charge_returns_true_without_error(self, stripe_mock, transaction): """Test that charge() returns True if no stripe error is raised.""" assert transaction.charge() is True @patch('ceraon.models.transactions.stripe') def test_successful_charge_sets_property(self, stripe_mock, transaction): """Test that charge() sets transaction_went_through to True.""" transaction.charge() assert transaction.transaction_went_through is True @patch('ceraon.models.transactions.stripe') def test_failed_charge_returns_false(self, stripe_mock, transaction): """Test that charge() returns false if stripe throws an error.""" stripe_mock.Charge.create.side_effect = RuntimeError('failed charge') assert transaction.charge() is False @patch('ceraon.models.transactions.stripe') def test_failed_charge_doesnt_set_attribute(self, stripe_mock, transaction): """Test that a failed charge() doesn't set transaction_went_through.""" stripe_mock.Charge.create.side_effect = RuntimeError('failed charge') transaction.charge() assert transaction.transaction_went_through is False def test_cancel_sets_canceled(self, transaction): """Test that calling cancel() sets the canceled property.""" transaction.cancel() assert transaction.canceled is True @patch('ceraon.models.transactions.stripe') def test_set_stripe_source_on_user_no_stripe_id(self, stripe_mock, user): """Test that setting the stripe customer ID works.""" customer_id = 'this is the stripe customer id' stripe_mock.Customer.create.return_value.id = customer_id Transaction.set_stripe_source_on_user(user=user, token='some token') assert user.stripe_customer_id == customer_id @patch('ceraon.models.transactions.stripe') def test_set_stripe_source_on_user_returns_true(self, stripe_mock, user): """Test that setting the stripe customer ID returns True.""" customer_id = 'this is the stripe customer id' stripe_mock.Customer.create.return_value.id = customer_id assert Transaction.set_stripe_source_on_user( user=user, token='some token') is True @patch('ceraon.models.transactions.stripe') def test_set_stripe_source_on_user_existing_id(self, stripe_mock, user): """Test that resetting the stripe customer ID works.""" customer_id = 'this is the stripe customer id' assert user.stripe_customer_id is None user.stripe_customer_id = customer_id assert Transaction.set_stripe_source_on_user( user=user, token='some token') is True stripe_mock.Customer.retrieve.assert_called_once() @patch('ceraon.models.transactions.stripe') def test_set_stripe_source_on_user_fail(self, stripe_mock, user): """Test that a stripe failure returns false.""" stripe_mock.Customer.create.side_effect = RuntimeError('stripe error') assert Transaction.set_stripe_source_on_user( user=user, token='some token') is False @pytest.mark.parametrize('amount,expected', [ (5.00, 0.5), (5.05, 0.505), (4.00, 0.5), (90.00, 9), (42.10, 4.21), (2.50, 0.5) ]) def test_operational_overhead_cut(self, transaction, amount, expected): """Test that the operational_overhead_cost is as expected.""" transaction.amount = amount assert 
transaction.operational_overhead_cut == expected @pytest.mark.parametrize('amount,expected', [ (5.00, 4.5), (5.05, 4.545), (4.00, 3.5), (90.00, 81), (42.10, 37.89), (2.50, 2) ]) def test_takehome_amount(self, transaction, amount, expected): """Test that the takehome_amount is as expected.""" transaction.amount = amount assert transaction.takehome_amount == expected
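The two parametrised tests above pin down the fee arithmetic only through examples. Read together, the expectations are consistent with a cut of max($0.50, 10% of the amount) and a takehome equal to the remainder; the sketch below merely replays that inference and is not the model's actual implementation.

# Inference from the parametrised expectations above, not the model's code:
# the overhead cut behaves like max($0.50, 10% of the amount) and the
# takehome amount is whatever remains.
def implied_overhead_cut(amount):
    return max(0.50, 0.10 * amount)

def implied_takehome(amount):
    return amount - implied_overhead_cut(amount)

for amount in (5.00, 5.05, 4.00, 90.00, 42.10, 2.50):
    print(amount, round(implied_overhead_cut(amount), 3),
          round(implied_takehome(amount), 3))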
bsd-3-clause
5,709,859,063,645,114,000
41.064815
80
0.657495
false
3.866383
true
false
false
ptisserand/ansible
lib/ansible/modules/cloud/amazon/cloudfront_distribution.py
1
85955
#!/usr/bin/python # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cloudfront_distribution short_description: create, update and delete aws cloudfront distributions. description: - Allows for easy creation, updating and deletion of CloudFront distributions. requirements: - boto3 >= 1.0.0 - python >= 2.6 version_added: "2.5" author: - Willem van Ketwich (@wilvk) - Will Thames (@willthames) extends_documentation_fragment: - aws - ec2 options: state: description: - The desired state of the distribution present - creates a new distribution or updates an existing distribution. absent - deletes an existing distribution. choices: ['present', 'absent'] default: 'present' distribution_id: description: - The id of the cloudfront distribution. This parameter can be exchanged with I(alias) or I(caller_reference) and is used in conjunction with I(e_tag). e_tag: description: - A unique identifier of a modified or existing distribution. Used in conjunction with I(distribution_id). Is determined automatically if not specified. caller_reference: description: - A unique identifier for creating and updating cloudfront distributions. Each caller reference must be unique across all distributions. e.g. a caller reference used in a web distribution cannot be reused in a streaming distribution. This parameter can be used instead of I(distribution_id) to reference an existing distribution. If not specified, this defaults to a datetime stamp of the format 'YYYY-MM-DDTHH:MM:SS.ffffff'. tags: description: - Should be input as a dict() of key-value pairs. Note that numeric keys or values must be wrapped in quotes. e.g. "Priority:" '1' purge_tags: description: - Specifies whether existing tags will be removed before adding new tags. When I(purge_tags=yes), existing tags are removed and I(tags) are added, if specified. If no tags are specified, it removes all existing tags for the distribution. When I(purge_tags=no), existing tags are kept and I(tags) are added, if specified. default: 'no' choices: ['yes', 'no'] alias: description: - The name of an alias (CNAME) that is used in a distribution. This is used to effectively reference a distribution by its alias as an alias can only be used by one distribution per AWS account. This variable avoids having to provide the I(distribution_id) as well as the I(e_tag), or I(caller_reference) of an existing distribution. aliases: description: - A I(list[]) of domain name aliases (CNAMEs) as strings to be used for the distribution. Each alias must be unique across all distribution for the AWS account. purge_aliases: description: - Specifies whether existing aliases will be removed before adding new aliases. When I(purge_aliases=yes), existing aliases are removed and I(aliases) are added. default: 'no' choices: ['yes', 'no'] default_root_object: description: - A config element that specifies the path to request when the user requests the origin. e.g. if specified as 'index.html', this maps to www.example.com/index.html when www.example.com is called by the user. This prevents the entire distribution origin from being exposed at the root. default_origin_domain_name: description: - The domain name to use for an origin if no I(origins) have been specified. Should only be used on a first run of generating a distribution and not on subsequent runs. 
Should not be used in conjunction with I(distribution_id), I(caller_reference) or I(alias). default_origin_path: description: - The default origin path to specify for an origin if no I(origins) have been specified. Defaults to empty if not specified. origins: description: - A config element that is a I(list[]) of complex origin objects to be specified for the distribution. Used for creating and updating distributions. Each origin item comprises the attributes I(id) I(domain_name) (defaults to default_origin_domain_name if not specified) I(origin_path) (defaults to default_origin_path if not specified) I(custom_headers[]) I(header_name) I(header_value) I(s3_origin_access_identity_enabled) I(custom_origin_config) I(http_port) I(https_port) I(origin_protocol_policy) I(origin_ssl_protocols[]) I(origin_read_timeout) I(origin_keepalive_timeout) purge_origins: description: Whether to remove any origins that aren't listed in I(origins) default: false default_cache_behavior: description: - A config element that is a complex object specifying the default cache behavior of the distribution. If not specified, the I(target_origin_id) is defined as the I(target_origin_id) of the first valid I(cache_behavior) in I(cache_behaviors) with defaults. The default cache behavior comprises the attributes I(target_origin_id) I(forwarded_values) I(query_string) I(cookies) I(forward) I(whitelisted_names) I(headers[]) I(query_string_cache_keys[]) I(trusted_signers) I(enabled) I(items[]) I(viewer_protocol_policy) I(min_ttl) I(allowed_methods) I(items[]) I(cached_methods[]) I(smooth_streaming) I(default_ttl) I(max_ttl) I(compress) I(lambda_function_associations[]) I(lambda_function_arn) I(event_type) cache_behaviors: description: - A config element that is a I(list[]) of complex cache behavior objects to be specified for the distribution. The order of the list is preserved across runs unless C(purge_cache_behavior) is enabled. Each cache behavior comprises the attributes I(path_pattern) I(target_origin_id) I(forwarded_values) I(query_string) I(cookies) I(forward) I(whitelisted_names) I(headers[]) I(query_string_cache_keys[]) I(trusted_signers) I(enabled) I(items[]) I(viewer_protocol_policy) I(min_ttl) I(allowed_methods) I(items[]) I(cached_methods[]) I(smooth_streaming) I(default_ttl) I(max_ttl) I(compress) I(lambda_function_associations[]) purge_cache_behaviors: description: Whether to remove any cache behaviors that aren't listed in I(cache_behaviors). This switch also allows the reordering of cache_behaviors. default: false custom_error_responses: description: - A config element that is a I(list[]) of complex custom error responses to be specified for the distribution. This attribute configures custom http error messages returned to the user. Each custom error response object comprises the attributes I(error_code) I(reponse_page_path) I(response_code) I(error_caching_min_ttl) purge_custom_error_responses: description: Whether to remove any custom error responses that aren't listed in I(custom_error_responses) default: false comment: description: - A comment that describes the cloudfront distribution. If not specified, it defaults to a generic message that it has been created with Ansible, and a datetime stamp. logging: description: - A config element that is a complex object that defines logging for the distribution. The logging object comprises the attributes I(enabled) I(include_cookies) I(bucket) I(prefix) price_class: description: - A string that specifies the pricing class of the distribution. 
As per U(https://aws.amazon.com/cloudfront/pricing/) I(price_class=PriceClass_100) consists of the areas United States Canada Europe I(price_class=PriceClass_200) consists of the areas United States Canada Europe Hong Kong, Philippines, S. Korea, Singapore & Taiwan Japan India I(price_class=PriceClass_All) consists of the areas United States Canada Europe Hong Kong, Philippines, S. Korea, Singapore & Taiwan Japan India South America Australia choices: ['PriceClass_100', 'PriceClass_200', 'PriceClass_All'] default: aws defaults this to 'PriceClass_All' enabled: description: - A boolean value that specifies whether the distribution is enabled or disabled. default: 'yes' choices: ['yes', 'no'] viewer_certificate: description: - A config element that is a complex object that specifies the encryption details of the distribution. Comprises the following attributes I(cloudfront_default_certificate) I(iam_certificate_id) I(acm_certificate_arn) I(ssl_support_method) I(minimum_protocol_version) I(certificate) I(certificate_source) restrictions: description: - A config element that is a complex object that describes how a distribution should restrict it's content. The restriction object comprises the following attributes I(geo_restriction) I(restriction_type) I(items[]) web_acl_id: description: - The id of a Web Application Firewall (WAF) Access Control List (ACL). http_version: description: - The version of the http protocol to use for the distribution. choices: [ 'http1.1', 'http2' ] default: aws defaults this to 'http2' ipv6_enabled: description: - Determines whether IPv6 support is enabled or not. choices: ['yes', 'no'] default: 'no' wait: description: - Specifies whether the module waits until the distribution has completed processing the creation or update. choices: ['yes', 'no'] default: 'no' wait_timeout: description: - Specifies the duration in seconds to wait for a timeout of a cloudfront create or update. Defaults to 1800 seconds (30 minutes). 
default: 1800 ''' EXAMPLES = ''' # create a basic distribution with defaults and tags - cloudfront_distribution: state: present default_origin_domain_name: www.my-cloudfront-origin.com tags: Name: example distribution Project: example project Priority: '1' # update a distribution comment by distribution_id - cloudfront_distribution: state: present distribution_id: E1RP5A2MJ8073O comment: modified by ansible cloudfront.py # update a distribution comment by caller_reference - cloudfront_distribution: state: present caller_reference: my cloudfront distribution 001 comment: modified by ansible cloudfront.py # update a distribution's aliases and comment using the distribution_id as a reference - cloudfront_distribution: state: present distribution_id: E1RP5A2MJ8073O comment: modified by cloudfront.py again aliases: [ 'www.my-distribution-source.com', 'zzz.aaa.io' ] # update a distribution's aliases and comment using an alias as a reference - cloudfront_distribution: state: present caller_reference: my test distribution comment: modified by cloudfront.py again aliases: - www.my-distribution-source.com - zzz.aaa.io # update a distribution's comment and aliases and tags and remove existing tags - cloudfront_distribution: state: present distribution_id: E15BU8SDCGSG57 comment: modified by cloudfront.py again aliases: - tested.com tags: Project: distribution 1.2 purge_tags: yes # create a distribution with an origin, logging and default cache behavior - cloudfront_distribution: state: present caller_reference: unique test distribution id origins: - id: 'my test origin-000111' domain_name: www.example.com origin_path: /production custom_headers: - header_name: MyCustomHeaderName header_value: MyCustomHeaderValue default_cache_behavior: target_origin_id: 'my test origin-000111' forwarded_values: query_string: true cookies: forward: all headers: - '*' viewer_protocol_policy: allow-all smooth_streaming: true compress: true allowed_methods: items: - GET - HEAD cached_methods: - GET - HEAD logging: enabled: true include_cookies: false bucket: mylogbucket.s3.amazonaws.com prefix: myprefix/ enabled: false comment: this is a cloudfront distribution with logging # delete a distribution - cloudfront_distribution: state: absent caller_reference: replaceable distribution ''' RETURN = ''' active_trusted_signers: description: Key pair IDs that CloudFront is aware of for each trusted signer returned: always type: complex contains: enabled: description: Whether trusted signers are in use returned: always type: bool sample: false quantity: description: Number of trusted signers returned: always type: int sample: 1 items: description: Number of trusted signers returned: when there are trusted signers type: list sample: - key_pair_id aliases: description: Aliases that refer to the distribution returned: always type: complex contains: items: description: List of aliases returned: always type: list sample: - test.example.com quantity: description: Number of aliases returned: always type: int sample: 1 arn: description: Amazon Resource Name of the distribution returned: always type: string sample: arn:aws:cloudfront::123456789012:distribution/E1234ABCDEFGHI cache_behaviors: description: Cloudfront cache behaviors returned: always type: complex contains: items: description: List of cache behaviors returned: always type: complex contains: allowed_methods: description: Methods allowed by the cache behavior returned: always type: complex contains: cached_methods: description: Methods cached by the cache behavior returned: 
always type: complex contains: items: description: List of cached methods returned: always type: list sample: - HEAD - GET quantity: description: Count of cached methods returned: always type: int sample: 2 items: description: List of methods allowed by the cache behavior returned: always type: list sample: - HEAD - GET quantity: description: Count of methods allowed by the cache behavior returned: always type: int sample: 2 compress: description: Whether compression is turned on for the cache behavior returned: always type: bool sample: false default_ttl: description: Default Time to Live of the cache behavior returned: always type: int sample: 86400 forwarded_values: description: Values forwarded to the origin for this cache behavior returned: always type: complex contains: cookies: description: Cookies to forward to the origin returned: always type: complex contains: forward: description: Which cookies to forward to the origin for this cache behavior returned: always type: string sample: none whitelisted_names: description: The names of the cookies to forward to the origin for this cache behavior returned: when I(forward) is C(whitelist) type: complex contains: quantity: description: Count of cookies to forward returned: always type: int sample: 1 items: description: List of cookies to forward returned: when list is not empty type: list sample: my_cookie headers: description: Which headers are used to vary on cache retrievals returned: always type: complex contains: quantity: description: Count of headers to vary on returned: always type: int sample: 1 items: description: List of headers to vary on returned: when list is not empty type: list sample: - Host query_string: description: Whether the query string is used in cache lookups returned: always type: bool sample: false query_string_cache_keys: description: Which query string keys to use in cache lookups returned: always type: complex contains: quantity: description: Count of query string cache keys to use in cache lookups returned: always type: int sample: 1 items: description: List of query string cache keys to use in cache lookups returned: when list is not empty type: list sample: lambda_function_associations: description: Lambda function associations for a cache behavior returned: always type: complex contains: quantity: description: Count of lambda function associations returned: always type: int sample: 1 items: description: List of lambda function associations returned: when list is not empty type: list sample: - lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function event_type: viewer-response max_ttl: description: Maximum Time to Live returned: always type: int sample: 31536000 min_ttl: description: Minimum Time to Live returned: always type: int sample: 0 path_pattern: description: Path pattern that determines this cache behavior returned: always type: string sample: /path/to/files/* smooth_streaming: description: Whether smooth streaming is enabled returned: always type: bool sample: false target_origin_id: description: Id of origin reference by this cache behavior returned: always type: string sample: origin_abcd trusted_signers: description: Trusted signers returned: always type: complex contains: enabled: description: Whether trusted signers are enabled for this cache behavior returned: always type: bool sample: false quantity: description: Count of trusted signers returned: always type: int sample: 1 viewer_protocol_policy: description: Policy of how to handle http/https returned: always 
type: string sample: redirect-to-https quantity: description: Count of cache behaviors returned: always type: int sample: 1 caller_reference: description: Idempotency reference given when creating cloudfront distribution returned: always type: string sample: '1484796016700' comment: description: Any comments you want to include about the distribution returned: always type: string sample: 'my first cloudfront distribution' custom_error_responses: description: Custom error responses to use for error handling returned: always type: complex contains: items: description: List of custom error responses returned: always type: complex contains: error_caching_min_ttl: description: Mininum time to cache this error response returned: always type: int sample: 300 error_code: description: Origin response code that triggers this error response returned: always type: int sample: 500 response_code: description: Response code to return to the requester returned: always type: string sample: '500' response_page_path: description: Path that contains the error page to display returned: always type: string sample: /errors/5xx.html quantity: description: Count of custom error response items returned: always type: int sample: 1 default_cache_behavior: description: Default cache behavior returned: always type: complex contains: allowed_methods: description: Methods allowed by the cache behavior returned: always type: complex contains: cached_methods: description: Methods cached by the cache behavior returned: always type: complex contains: items: description: List of cached methods returned: always type: list sample: - HEAD - GET quantity: description: Count of cached methods returned: always type: int sample: 2 items: description: List of methods allowed by the cache behavior returned: always type: list sample: - HEAD - GET quantity: description: Count of methods allowed by the cache behavior returned: always type: int sample: 2 compress: description: Whether compression is turned on for the cache behavior returned: always type: bool sample: false default_ttl: description: Default Time to Live of the cache behavior returned: always type: int sample: 86400 forwarded_values: description: Values forwarded to the origin for this cache behavior returned: always type: complex contains: cookies: description: Cookies to forward to the origin returned: always type: complex contains: forward: description: Which cookies to forward to the origin for this cache behavior returned: always type: string sample: none whitelisted_names: description: The names of the cookies to forward to the origin for this cache behavior returned: when I(forward) is C(whitelist) type: complex contains: quantity: description: Count of cookies to forward returned: always type: int sample: 1 items: description: List of cookies to forward returned: when list is not empty type: list sample: my_cookie headers: description: Which headers are used to vary on cache retrievals returned: always type: complex contains: quantity: description: Count of headers to vary on returned: always type: int sample: 1 items: description: List of headers to vary on returned: when list is not empty type: list sample: - Host query_string: description: Whether the query string is used in cache lookups returned: always type: bool sample: false query_string_cache_keys: description: Which query string keys to use in cache lookups returned: always type: complex contains: quantity: description: Count of query string cache keys to use in cache lookups returned: always type: int 
sample: 1 items: description: List of query string cache keys to use in cache lookups returned: when list is not empty type: list sample: lambda_function_associations: description: Lambda function associations for a cache behavior returned: always type: complex contains: quantity: description: Count of lambda function associations returned: always type: int sample: 1 items: description: List of lambda function associations returned: when list is not empty type: list sample: - lambda_function_arn: arn:aws:lambda:123456789012:us-east-1/lambda/lambda-function event_type: viewer-response max_ttl: description: Maximum Time to Live returned: always type: int sample: 31536000 min_ttl: description: Minimum Time to Live returned: always type: int sample: 0 path_pattern: description: Path pattern that determines this cache behavior returned: always type: string sample: /path/to/files/* smooth_streaming: description: Whether smooth streaming is enabled returned: always type: bool sample: false target_origin_id: description: Id of origin reference by this cache behavior returned: always type: string sample: origin_abcd trusted_signers: description: Trusted signers returned: always type: complex contains: enabled: description: Whether trusted signers are enabled for this cache behavior returned: always type: bool sample: false quantity: description: Count of trusted signers returned: always type: int sample: 1 viewer_protocol_policy: description: Policy of how to handle http/https returned: always type: string sample: redirect-to-https default_root_object: description: The object that you want CloudFront to request from your origin (for example, index.html) when a viewer requests the root URL for your distribution returned: always type: string sample: '' diff: description: Difference between previous configuration and new configuration returned: always type: dict sample: {} domain_name: description: Domain name of cloudfront distribution returned: always type: string sample: d1vz8pzgurxosf.cloudfront.net enabled: description: Whether the cloudfront distribution is enabled or not returned: always type: bool sample: true http_version: description: Version of HTTP supported by the distribution returned: always type: string sample: http2 id: description: Cloudfront distribution ID returned: always type: string sample: E123456ABCDEFG in_progress_invalidation_batches: description: The number of invalidation batches currently in progress returned: always type: int sample: 0 is_ipv6_enabled: description: Whether IPv6 is enabled returned: always type: bool sample: true last_modified_time: description: Date and time distribution was last modified returned: always type: string sample: '2017-10-13T01:51:12.656000+00:00' logging: description: Logging information returned: always type: complex contains: bucket: description: S3 bucket logging destination returned: always type: string sample: logs-example-com.s3.amazonaws.com enabled: description: Whether logging is enabled returned: always type: bool sample: true include_cookies: description: Whether to log cookies returned: always type: bool sample: false prefix: description: Prefix added to logging object names returned: always type: string sample: cloudfront/test origins: description: Origins in the cloudfront distribution returned: always type: complex contains: items: description: List of origins returned: always type: complex contains: custom_headers: description: Custom headers passed to the origin returned: always type: complex contains: quantity: 
description: Count of headers returned: always type: int sample: 1 custom_origin_config: description: Configuration of the origin returned: always type: complex contains: http_port: description: Port on which HTTP is listening returned: always type: int sample: 80 https_port: description: Port on which HTTPS is listening returned: always type: int sample: 443 origin_keepalive_timeout: description: Keep-alive timeout returned: always type: int sample: 5 origin_protocol_policy: description: Policy of which protocols are supported returned: always type: string sample: https-only origin_read_timeout: description: Timeout for reads to the origin returned: always type: int sample: 30 origin_ssl_protocols: description: SSL protocols allowed by the origin returned: always type: complex contains: items: description: List of SSL protocols returned: always type: list sample: - TLSv1 - TLSv1.1 - TLSv1.2 quantity: description: Count of SSL protocols returned: always type: int sample: 3 domain_name: description: Domain name of the origin returned: always type: string sample: test-origin.example.com id: description: ID of the origin returned: always type: string sample: test-origin.example.com origin_path: description: Subdirectory to prefix the request from the S3 or HTTP origin returned: always type: string sample: '' quantity: description: Count of origins returned: always type: int sample: 1 price_class: description: Price class of cloudfront distribution returned: always type: string sample: PriceClass_All restrictions: description: Restrictions in use by Cloudfront returned: always type: complex contains: geo_restriction: description: Controls the countries in which your content is distributed. returned: always type: complex contains: quantity: description: Count of restrictions returned: always type: int sample: 1 items: description: List of country codes allowed or disallowed returned: always type: list sample: xy restriction_type: description: Type of restriction returned: always type: string sample: blacklist status: description: Status of the cloudfront distribution returned: always type: string sample: InProgress tags: description: Distribution tags returned: always type: dict sample: Hello: World viewer_certificate: description: Certificate used by cloudfront distribution returned: always type: complex contains: acm_certificate_arn: description: ARN of ACM certificate returned: when certificate comes from ACM type: string sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef certificate: description: Reference to certificate returned: always type: string sample: arn:aws:acm:us-east-1:123456789012:certificate/abcd1234-1234-1234-abcd-123456abcdef certificate_source: description: Where certificate comes from returned: always type: string sample: acm minimum_protocol_version: description: Minimum SSL/TLS protocol supported by this distribution returned: always type: string sample: TLSv1 ssl_support_method: description: Support for pre-SNI browsers or not returned: always type: string sample: sni-only web_acl_id: description: ID of Web Access Control List (from WAF service) returned: always type: string sample: abcd1234-1234-abcd-abcd-abcd12345678 ''' from ansible.module_utils._text import to_text, to_native from ansible.module_utils.aws.core import AnsibleAWSModule from ansible.module_utils.aws.cloudfront_facts import CloudFrontFactsServiceManager from ansible.module_utils.ec2 import get_aws_connection_info from ansible.module_utils.ec2 import 
ec2_argument_spec, boto3_conn, compare_aws_tags from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ansible_dict_to_boto3_tag_list from ansible.module_utils.ec2 import snake_dict_to_camel_dict, boto3_tag_list_to_ansible_dict import datetime try: from collections import OrderedDict except ImportError: try: from ordereddict import OrderedDict except ImportError: pass # caught by AnsibleAWSModule (as python 2.6 + boto3 => ordereddict is installed) try: import botocore except ImportError: pass def change_dict_key_name(dictionary, old_key, new_key): if old_key in dictionary: dictionary[new_key] = dictionary.get(old_key) dictionary.pop(old_key, None) return dictionary def merge_validation_into_config(config, validated_node, node_name): if validated_node is not None: if isinstance(validated_node, dict): config_node = config.get(node_name) if config_node is not None: config_node_items = list(config_node.items()) else: config_node_items = [] config[node_name] = dict(config_node_items + list(validated_node.items())) if isinstance(validated_node, list): config[node_name] = list(set(config.get(node_name) + validated_node)) return config def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True): if list_items is None: list_items = [] if not isinstance(list_items, list): raise ValueError('Expected a list, got a {0} with value {1}'.format(type(list_items).__name__, str(list_items))) result = {} if include_quantity: result['quantity'] = len(list_items) if len(list_items) > 0: result['items'] = list_items return result def recursive_diff(dict1, dict2): left = dict((k, v) for (k, v) in dict1.items() if k not in dict2) right = dict((k, v) for (k, v) in dict2.items() if k not in dict1) for k in (set(dict1.keys()) & set(dict2.keys())): if isinstance(dict1[k], dict) and isinstance(dict2[k], dict): result = recursive_diff(dict1[k], dict2[k]) if result: left[k] = result[0] right[k] = result[1] elif dict1[k] != dict2[k]: left[k] = dict1[k] right[k] = dict2[k] if left or right: return left, right else: return None def create_distribution(client, module, config, tags): try: if not tags: return client.create_distribution(DistributionConfig=config)['Distribution'] else: distribution_config_with_tags = { 'DistributionConfig': config, 'Tags': { 'Items': tags } } return client.create_distribution_with_tags(DistributionConfigWithTags=distribution_config_with_tags)['Distribution'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error creating distribution") def delete_distribution(client, module, distribution): try: return client.delete_distribution(Id=distribution['Distribution']['Id'], IfMatch=distribution['ETag']) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution['Distribution'])) def update_distribution(client, module, config, distribution_id, e_tag): try: return client.update_distribution(DistributionConfig=config, Id=distribution_id, IfMatch=e_tag)['Distribution'] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config)) def tag_resource(client, module, arn, tags): try: return client.tag_resource(Resource=arn, Tags=dict(Items=tags)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error tagging resource") def untag_resource(client, module, 
arn, tag_keys): try: return client.untag_resource(Resource=arn, TagKeys=dict(Items=tag_keys)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error untagging resource") def list_tags_for_resource(client, module, arn): try: response = client.list_tags_for_resource(Resource=arn) return boto3_tag_list_to_ansible_dict(response.get('Tags').get('Items')) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error listing tags for resource") def update_tags(client, module, existing_tags, valid_tags, purge_tags, arn): changed = False to_add, to_remove = compare_aws_tags(existing_tags, valid_tags, purge_tags) if to_remove: untag_resource(client, module, arn, to_remove) changed = True if to_add: tag_resource(client, module, arn, ansible_dict_to_boto3_tag_list(to_add)) changed = True return changed class CloudFrontValidationManager(object): """ Manages Cloudfront validations """ def __init__(self, module): self.__cloudfront_facts_mgr = CloudFrontFactsServiceManager(module) self.module = module self.__default_distribution_enabled = True self.__default_http_port = 80 self.__default_https_port = 443 self.__default_ipv6_enabled = False self.__default_origin_ssl_protocols = [ 'TLSv1', 'TLSv1.1', 'TLSv1.2' ] self.__default_custom_origin_protocol_policy = 'match-viewer' self.__default_custom_origin_read_timeout = 30 self.__default_custom_origin_keepalive_timeout = 5 self.__default_datetime_string = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f') self.__default_cache_behavior_min_ttl = 0 self.__default_cache_behavior_max_ttl = 31536000 self.__default_cache_behavior_default_ttl = 86400 self.__default_cache_behavior_compress = False self.__default_cache_behavior_viewer_protocol_policy = 'allow-all' self.__default_cache_behavior_smooth_streaming = False self.__default_cache_behavior_forwarded_values_forward_cookies = 'none' self.__default_cache_behavior_forwarded_values_query_string = True self.__default_trusted_signers_enabled = False self.__valid_price_classes = set([ 'PriceClass_100', 'PriceClass_200', 'PriceClass_All' ]) self.__valid_origin_protocol_policies = set([ 'http-only', 'match-viewer', 'https-only' ]) self.__valid_origin_ssl_protocols = set([ 'SSLv3', 'TLSv1', 'TLSv1.1', 'TLSv1.2' ]) self.__valid_cookie_forwarding = set([ 'none', 'whitelist', 'all' ]) self.__valid_viewer_protocol_policies = set([ 'allow-all', 'https-only', 'redirect-to-https' ]) self.__valid_methods = set([ 'GET', 'HEAD', 'POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE' ]) self.__valid_methods_cached_methods = [ set([ 'GET', 'HEAD' ]), set([ 'GET', 'HEAD', 'OPTIONS' ]) ] self.__valid_methods_allowed_methods = [ self.__valid_methods_cached_methods[0], self.__valid_methods_cached_methods[1], self.__valid_methods ] self.__valid_lambda_function_association_event_types = set([ 'viewer-request', 'viewer-response', 'origin-request', 'origin-response' ]) self.__valid_viewer_certificate_ssl_support_methods = set([ 'sni-only', 'vip' ]) self.__valid_viewer_certificate_minimum_protocol_versions = set([ 'SSLv3', 'TLSv1' ]) self.__valid_viewer_certificate_certificate_sources = set([ 'cloudfront', 'iam', 'acm' ]) self.__valid_http_versions = set([ 'http1.1', 'http2' ]) self.__s3_bucket_domain_identifier = '.s3.amazonaws.com' def add_missing_key(self, dict_object, key_to_set, value_to_set): if key_to_set not in dict_object and value_to_set is not None: dict_object[key_to_set] = value_to_set return dict_object def 
add_key_else_change_dict_key(self, dict_object, old_key, new_key, value_to_set): if old_key not in dict_object and value_to_set is not None: dict_object[new_key] = value_to_set else: dict_object = change_dict_key_name(dict_object, old_key, new_key) return dict_object def add_key_else_validate(self, dict_object, key_name, attribute_name, value_to_set, valid_values, to_aws_list=False): if key_name in dict_object: self.validate_attribute_with_allowed_values(value_to_set, attribute_name, valid_values) else: if to_aws_list: dict_object[key_name] = ansible_list_to_cloudfront_list(value_to_set) elif value_to_set is not None: dict_object[key_name] = value_to_set return dict_object def validate_logging(self, logging): try: if logging is None: return None valid_logging = {} if logging and not set(['enabled', 'include_cookies', 'bucket', 'prefix']).issubset(logging): self.module.fail_json(msg="The logging parameters enabled, include_cookies, bucket and prefix must be specified.") valid_logging['include_cookies'] = logging.get('include_cookies') valid_logging['enabled'] = logging.get('enabled') valid_logging['bucket'] = logging.get('bucket') valid_logging['prefix'] = logging.get('prefix') return valid_logging except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution logging") def validate_is_list(self, list_to_validate, list_name): if not isinstance(list_to_validate, list): self.module.fail_json(msg='%s is of type %s. Must be a list.' % (list_name, type(list_to_validate).__name__)) def validate_required_key(self, key_name, full_key_name, dict_object): if key_name not in dict_object: self.module.fail_json(msg="%s must be specified." % full_key_name) def validate_origins(self, client, config, origins, default_origin_domain_name, default_origin_path, create_distribution, purge_origins=False): try: if origins is None: if default_origin_domain_name is None and not create_distribution: if purge_origins: return None else: return ansible_list_to_cloudfront_list(config) if default_origin_domain_name is not None: origins = [{ 'domain_name': default_origin_domain_name, 'origin_path': default_origin_path or '' }] else: origins = [] self.validate_is_list(origins, 'origins') if not origins and default_origin_domain_name is None and create_distribution: self.module.fail_json(msg="Both origins[] and default_origin_domain_name have not been specified. 
Please specify at least one.") all_origins = OrderedDict() new_domains = list() for origin in config: all_origins[origin.get('domain_name')] = origin for origin in origins: origin = self.validate_origin(client, all_origins.get(origin.get('domain_name'), {}), origin, default_origin_path) all_origins[origin['domain_name']] = origin new_domains.append(origin['domain_name']) if purge_origins: for domain in list(all_origins.keys()): if domain not in new_domains: del(all_origins[domain]) return ansible_list_to_cloudfront_list(list(all_origins.values())) except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution origins") def validate_s3_origin_configuration(self, client, existing_config, origin): if origin['s3_origin_access_identity_enabled'] and existing_config.get('s3_origin_config', {}).get('origin_access_identity'): return existing_config['s3_origin_config']['origin_access_identity'] if not origin['s3_origin_access_identity_enabled']: return None try: comment = "access-identity-by-ansible-%s-%s" % (origin.get('domain_name'), self.__default_datetime_string) cfoai_config = dict(CloudFrontOriginAccessIdentityConfig=dict(CallerReference=self.__default_datetime_string, Comment=comment)) oai = client.create_cloud_front_origin_access_identity(**cfoai_config)['CloudFrontOriginAccessIdentity']['Id'] except Exception as e: self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin['id']) return "origin-access-identity/cloudfront/%s" % oai def validate_origin(self, client, existing_config, origin, default_origin_path): try: origin = self.add_missing_key(origin, 'origin_path', existing_config.get('origin_path', default_origin_path or '')) self.validate_required_key('origin_path', 'origins[].origin_path', origin) origin = self.add_missing_key(origin, 'id', existing_config.get('id', self.__default_datetime_string)) if 'custom_headers' in origin and len(origin.get('custom_headers')) > 0: for custom_header in origin.get('custom_headers'): if 'header_name' not in custom_header or 'header_value' not in custom_header: self.module.fail_json(msg="Both origins[].custom_headers.header_name and origins[].custom_headers.header_value must be specified.") origin['custom_headers'] = ansible_list_to_cloudfront_list(origin.get('custom_headers')) else: origin['custom_headers'] = ansible_list_to_cloudfront_list() if self.__s3_bucket_domain_identifier in origin.get('domain_name').lower(): if origin.get("s3_origin_access_identity_enabled") is not None: s3_origin_config = self.validate_s3_origin_configuration(client, existing_config, origin) if s3_origin_config: oai = s3_origin_config else: oai = "" origin["s3_origin_config"] = dict(origin_access_identity=oai) del(origin["s3_origin_access_identity_enabled"]) if 'custom_origin_config' in origin: self.module.fail_json(msg="s3_origin_access_identity_enabled and custom_origin_config are mutually exclusive") else: origin = self.add_missing_key(origin, 'custom_origin_config', existing_config.get('custom_origin_config', {})) custom_origin_config = origin.get('custom_origin_config') custom_origin_config = self.add_key_else_validate(custom_origin_config, 'origin_protocol_policy', 'origins[].custom_origin_config.origin_protocol_policy', self.__default_custom_origin_protocol_policy, self.__valid_origin_protocol_policies) custom_origin_config = self.add_missing_key(custom_origin_config, 'origin_read_timeout', self.__default_custom_origin_read_timeout) custom_origin_config = self.add_missing_key(custom_origin_config, 
'origin_keepalive_timeout', self.__default_custom_origin_keepalive_timeout) custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'http_port', 'h_t_t_p_port', self.__default_http_port) custom_origin_config = self.add_key_else_change_dict_key(custom_origin_config, 'https_port', 'h_t_t_p_s_port', self.__default_https_port) if custom_origin_config.get('origin_ssl_protocols', {}).get('items'): custom_origin_config['origin_ssl_protocols'] = custom_origin_config['origin_ssl_protocols']['items'] if custom_origin_config.get('origin_ssl_protocols'): self.validate_attribute_list_with_allowed_list(custom_origin_config['origin_ssl_protocols'], 'origins[].origin_ssl_protocols', self.__valid_origin_ssl_protocols) else: custom_origin_config['origin_ssl_protocols'] = self.__default_origin_ssl_protocols custom_origin_config['origin_ssl_protocols'] = ansible_list_to_cloudfront_list(custom_origin_config['origin_ssl_protocols']) return origin except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error validating distribution origin") def validate_cache_behaviors(self, config, cache_behaviors, valid_origins, purge_cache_behaviors=False): try: if cache_behaviors is None and valid_origins is not None and purge_cache_behaviors is False: return ansible_list_to_cloudfront_list(config) all_cache_behaviors = OrderedDict() # cache behaviors are order dependent so we don't preserve the existing ordering when purge_cache_behaviors # is true (if purge_cache_behaviors is not true, we can't really know the full new order) if not purge_cache_behaviors: for behavior in config: all_cache_behaviors[behavior['path_pattern']] = behavior for cache_behavior in cache_behaviors: valid_cache_behavior = self.validate_cache_behavior(all_cache_behaviors.get(cache_behavior.get('path_pattern'), {}), cache_behavior, valid_origins) all_cache_behaviors[cache_behavior['path_pattern']] = valid_cache_behavior if purge_cache_behaviors: for target_origin_id in set(all_cache_behaviors.keys()) - set([cb['path_pattern'] for cb in cache_behaviors]): del(all_cache_behaviors[target_origin_id]) return ansible_list_to_cloudfront_list(list(all_cache_behaviors.values())) except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution cache behaviors") def validate_cache_behavior(self, config, cache_behavior, valid_origins, is_default_cache=False): if is_default_cache and cache_behavior is None: cache_behavior = {} if cache_behavior is None and valid_origins is not None: return config cache_behavior = self.validate_cache_behavior_first_level_keys(config, cache_behavior, valid_origins, is_default_cache) cache_behavior = self.validate_forwarded_values(config, cache_behavior.get('forwarded_values'), cache_behavior) cache_behavior = self.validate_allowed_methods(config, cache_behavior.get('allowed_methods'), cache_behavior) cache_behavior = self.validate_lambda_function_associations(config, cache_behavior.get('lambda_function_associations'), cache_behavior) cache_behavior = self.validate_trusted_signers(config, cache_behavior.get('trusted_signers'), cache_behavior) return cache_behavior def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid_origins, is_default_cache): try: cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'min_ttl', 'min_t_t_l', config.get('min_t_t_l', self.__default_cache_behavior_min_ttl)) cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'max_ttl', 'max_t_t_l', 
config.get('max_t_t_l', self.__default_cache_behavior_max_ttl)) cache_behavior = self.add_key_else_change_dict_key(cache_behavior, 'default_ttl', 'default_t_t_l', config.get('default_t_t_l', self.__default_cache_behavior_default_ttl)) cache_behavior = self.add_missing_key(cache_behavior, 'compress', config.get('compress', self.__default_cache_behavior_compress)) target_origin_id = cache_behavior.get('target_origin_id', config.get('target_origin_id')) if not target_origin_id: target_origin_id = self.get_first_origin_id_for_default_cache_behavior(valid_origins) if target_origin_id not in [origin['id'] for origin in valid_origins.get('items', [])]: if is_default_cache: cache_behavior_name = 'Default cache behavior' else: cache_behavior_name = 'Cache behavior for path %s' % cache_behavior['path_pattern'] self.module.fail_json(msg="%s has target_origin_id pointing to an origin that does not exist." % cache_behavior_name) cache_behavior['target_origin_id'] = target_origin_id cache_behavior = self.add_key_else_validate(cache_behavior, 'viewer_protocol_policy', 'cache_behavior.viewer_protocol_policy', config.get('viewer_protocol_policy', self.__default_cache_behavior_viewer_protocol_policy), self.__valid_viewer_protocol_policies) cache_behavior = self.add_missing_key(cache_behavior, 'smooth_streaming', config.get('smooth_streaming', self.__default_cache_behavior_smooth_streaming)) return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution cache behavior first level keys") def validate_forwarded_values(self, config, forwarded_values, cache_behavior): try: if not forwarded_values: forwarded_values = dict() existing_config = config.get('forwarded_values', {}) headers = forwarded_values.get('headers', existing_config.get('headers', {}).get('items')) if headers: headers.sort() forwarded_values['headers'] = ansible_list_to_cloudfront_list(headers) if 'cookies' not in forwarded_values: forward = existing_config.get('cookies', {}).get('forward', self.__default_cache_behavior_forwarded_values_forward_cookies) forwarded_values['cookies'] = {'forward': forward} else: existing_whitelist = existing_config.get('cookies', {}).get('whitelisted_names', {}).get('items') whitelist = forwarded_values.get('cookies').get('whitelisted_names', existing_whitelist) if whitelist: self.validate_is_list(whitelist, 'forwarded_values.whitelisted_names') forwarded_values['cookies']['whitelisted_names'] = ansible_list_to_cloudfront_list(whitelist) cookie_forwarding = forwarded_values.get('cookies').get('forward', existing_config.get('cookies', {}).get('forward')) self.validate_attribute_with_allowed_values(cookie_forwarding, 'cache_behavior.forwarded_values.cookies.forward', self.__valid_cookie_forwarding) forwarded_values['cookies']['forward'] = cookie_forwarding query_string_cache_keys = forwarded_values.get('query_string_cache_keys', existing_config.get('query_string_cache_keys', {}).get('items', [])) self.validate_is_list(query_string_cache_keys, 'forwarded_values.query_string_cache_keys') forwarded_values['query_string_cache_keys'] = ansible_list_to_cloudfront_list(query_string_cache_keys) forwarded_values = self.add_missing_key(forwarded_values, 'query_string', existing_config.get('query_string', self.__default_cache_behavior_forwarded_values_query_string)) cache_behavior['forwarded_values'] = forwarded_values return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating forwarded values") def validate_lambda_function_associations(self, 
config, lambda_function_associations, cache_behavior): try: if lambda_function_associations is not None: self.validate_is_list(lambda_function_associations, 'lambda_function_associations') for association in lambda_function_associations: association = change_dict_key_name(association, 'lambda_function_arn', 'lambda_function_a_r_n') self.validate_attribute_with_allowed_values(association.get('event_type'), 'cache_behaviors[].lambda_function_associations.event_type', self.__valid_lambda_function_association_event_types) cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list(lambda_function_associations) else: if 'lambda_function_associations' in config: cache_behavior['lambda_function_associations'] = config.get('lambda_function_associations') else: cache_behavior['lambda_function_associations'] = ansible_list_to_cloudfront_list([]) return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating lambda function associations") def validate_allowed_methods(self, config, allowed_methods, cache_behavior): try: if allowed_methods is not None: self.validate_required_key('items', 'cache_behavior.allowed_methods.items[]', allowed_methods) temp_allowed_items = allowed_methods.get('items') self.validate_is_list(temp_allowed_items, 'cache_behavior.allowed_methods.items') self.validate_attribute_list_with_allowed_list(temp_allowed_items, 'cache_behavior.allowed_methods.items[]', self.__valid_methods_allowed_methods) cached_items = allowed_methods.get('cached_methods') if 'cached_methods' in allowed_methods: self.validate_is_list(cached_items, 'cache_behavior.allowed_methods.cached_methods') self.validate_attribute_list_with_allowed_list(cached_items, 'cache_behavior.allowed_items.cached_methods[]', self.__valid_methods_cached_methods) # we don't care if the order of how cloudfront stores the methods differs - preserving existing # order reduces likelihood of making unnecessary changes if 'allowed_methods' in config and set(config['allowed_methods']['items']) == set(temp_allowed_items): cache_behavior['allowed_methods'] = config['allowed_methods'] else: cache_behavior['allowed_methods'] = ansible_list_to_cloudfront_list(temp_allowed_items) if cached_items and set(cached_items) == set(config.get('allowed_methods', {}).get('cached_methods', {}).get('items', [])): cache_behavior['allowed_methods']['cached_methods'] = config['allowed_methods']['cached_methods'] else: cache_behavior['allowed_methods']['cached_methods'] = ansible_list_to_cloudfront_list(cached_items) else: if 'allowed_methods' in config: cache_behavior['allowed_methods'] = config.get('allowed_methods') return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating allowed methods") def validate_trusted_signers(self, config, trusted_signers, cache_behavior): try: if trusted_signers is None: trusted_signers = {} if 'items' in trusted_signers: valid_trusted_signers = ansible_list_to_cloudfront_list(trusted_signers.get('items')) else: valid_trusted_signers = dict(quantity=config.get('quantity', 0)) if 'items' in config: valid_trusted_signers = dict(items=config['items']) valid_trusted_signers['enabled'] = trusted_signers.get('enabled', config.get('enabled', self.__default_trusted_signers_enabled)) cache_behavior['trusted_signers'] = valid_trusted_signers return cache_behavior except Exception as e: self.module.fail_json_aws(e, msg="Error validating trusted signers") def validate_viewer_certificate(self, viewer_certificate): try: if viewer_certificate is 
None: return None if viewer_certificate.get('cloudfront_default_certificate') and viewer_certificate.get('ssl_support_method') is not None: self.module.fail_json(msg="viewer_certificate.ssl_support_method should not be specified with viewer_certificate_cloudfront_default" + "_certificate set to true.") self.validate_attribute_with_allowed_values(viewer_certificate.get('ssl_support_method'), 'viewer_certificate.ssl_support_method', self.__valid_viewer_certificate_ssl_support_methods) self.validate_attribute_with_allowed_values(viewer_certificate.get('minimum_protocol_version'), 'viewer_certificate.minimum_protocol_version', self.__valid_viewer_certificate_minimum_protocol_versions) self.validate_attribute_with_allowed_values(viewer_certificate.get('certificate_source'), 'viewer_certificate.certificate_source', self.__valid_viewer_certificate_certificate_sources) viewer_certificate = change_dict_key_name(viewer_certificate, 'cloudfront_default_certificate', 'cloud_front_default_certificate') viewer_certificate = change_dict_key_name(viewer_certificate, 'ssl_support_method', 's_s_l_support_method') viewer_certificate = change_dict_key_name(viewer_certificate, 'iam_certificate_id', 'i_a_m_certificate_id') viewer_certificate = change_dict_key_name(viewer_certificate, 'acm_certificate_arn', 'a_c_m_certificate_arn') return viewer_certificate except Exception as e: self.module.fail_json_aws(e, msg="Error validating viewer certificate") def validate_custom_error_responses(self, config, custom_error_responses, purge_custom_error_responses): try: if custom_error_responses is None and not purge_custom_error_responses: return ansible_list_to_cloudfront_list(config) self.validate_is_list(custom_error_responses, 'custom_error_responses') result = list() existing_responses = dict((response['error_code'], response) for response in custom_error_responses) for custom_error_response in custom_error_responses: self.validate_required_key('error_code', 'custom_error_responses[].error_code', custom_error_response) custom_error_response = change_dict_key_name(custom_error_response, 'error_caching_min_ttl', 'error_caching_min_t_t_l') if 'response_code' in custom_error_response: custom_error_response['response_code'] = str(custom_error_response['response_code']) if custom_error_response['error_code'] in existing_responses: del(existing_responses[custom_error_response['error_code']]) result.append(custom_error_response) if not purge_custom_error_responses: result.extend(existing_responses.values()) return ansible_list_to_cloudfront_list(result) except Exception as e: self.module.fail_json_aws(e, msg="Error validating custom error responses") def validate_restrictions(self, config, restrictions, purge_restrictions=False): try: if restrictions is None: if purge_restrictions: return None else: return config self.validate_required_key('geo_restriction', 'restrictions.geo_restriction', restrictions) geo_restriction = restrictions.get('geo_restriction') self.validate_required_key('restriction_type', 'restrictions.geo_restriction.restriction_type', geo_restriction) existing_restrictions = config.get('geo_restriction', {}).get(geo_restriction['restriction_type'], {}).get('items', []) geo_restriction_items = geo_restriction.get('items') if not purge_restrictions: geo_restriction_items.extend([rest for rest in existing_restrictions if rest not in geo_restriction_items]) valid_restrictions = ansible_list_to_cloudfront_list(geo_restriction_items) valid_restrictions['restriction_type'] = geo_restriction.get('restriction_type') 
return valid_restrictions except Exception as e: self.module.fail_json_aws(e, msg="Error validating restrictions") def validate_distribution_config_parameters(self, config, default_root_object, ipv6_enabled, http_version, web_acl_id): try: config['default_root_object'] = default_root_object or config.get('default_root_object', '') config['is_i_p_v_6_enabled'] = ipv6_enabled or config.get('i_p_v_6_enabled', self.__default_ipv6_enabled) if http_version is not None or config.get('http_version'): self.validate_attribute_with_allowed_values(http_version, 'http_version', self.__valid_http_versions) config['http_version'] = http_version or config.get('http_version') if web_acl_id or config.get('web_a_c_l_id'): config['web_a_c_l_id'] = web_acl_id or config.get('web_a_c_l_id') return config except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution config parameters") def validate_common_distribution_parameters(self, config, enabled, aliases, logging, price_class, purge_aliases=False): try: if config is None: config = {} if aliases is not None: if not purge_aliases: aliases.extend([alias for alias in config.get('aliases', {}).get('items', []) if alias not in aliases]) config['aliases'] = ansible_list_to_cloudfront_list(aliases) if logging is not None: config['logging'] = self.validate_logging(logging) config['enabled'] = enabled or config.get('enabled', self.__default_distribution_enabled) if price_class is not None: self.validate_attribute_with_allowed_values(price_class, 'price_class', self.__valid_price_classes) config['price_class'] = price_class return config except Exception as e: self.module.fail_json_aws(e, msg="Error validating common distribution parameters") def validate_comment(self, config, comment): config['comment'] = comment or config.get('comment', "Distribution created by Ansible with datetime stamp " + self.__default_datetime_string) return config def validate_caller_reference(self, caller_reference): return caller_reference or self.__default_datetime_string def get_first_origin_id_for_default_cache_behavior(self, valid_origins): try: if valid_origins is not None: valid_origins_list = valid_origins.get('items') if valid_origins_list is not None and isinstance(valid_origins_list, list) and len(valid_origins_list) > 0: return str(valid_origins_list[0].get('id')) self.module.fail_json(msg="There are no valid origins from which to specify a target_origin_id for the default_cache_behavior configuration.") except Exception as e: self.module.fail_json_aws(e, msg="Error getting first origin_id for default cache behavior") def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_list_name, allowed_list): try: self.validate_is_list(attribute_list, attribute_list_name) if (isinstance(allowed_list, list) and set(attribute_list) not in allowed_list or isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list)): self.module.fail_json(msg='The attribute list {0} must be one of [{1}]'.format(attribute_list_name, ' '.join(str(a) for a in allowed_list))) except Exception as e: self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list") def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list): if attribute is not None and attribute not in allowed_list: self.module.fail_json(msg='The attribute {0} must be one of [{1}]'.format(attribute_name, ' '.join(str(a) for a in allowed_list))) def validate_distribution_from_caller_reference(self, caller_reference): try: 
distributions = self.__cloudfront_facts_mgr.list_distributions(False) distribution_name = 'Distribution' distribution_config_name = 'DistributionConfig' distribution_ids = [dist.get('Id') for dist in distributions] for distribution_id in distribution_ids: config = self.__cloudfront_facts_mgr.get_distribution(distribution_id) distribution = config.get(distribution_name) if distribution is not None: distribution_config = distribution.get(distribution_config_name) if distribution_config is not None and distribution_config.get('CallerReference') == caller_reference: distribution['DistributionConfig'] = distribution_config return distribution except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution from caller reference") def validate_distribution_from_aliases_caller_reference(self, distribution_id, aliases, caller_reference): try: if caller_reference is not None: return self.validate_distribution_from_caller_reference(caller_reference) else: if aliases: distribution_id = self.validate_distribution_id_from_alias(aliases) if distribution_id: return self.__cloudfront_facts_mgr.get_distribution(distribution_id) return None except Exception as e: self.module.fail_json_aws(e, msg="Error validating distribution_id from alias, aliases and caller reference") def validate_distribution_id_from_alias(self, aliases): distributions = self.__cloudfront_facts_mgr.list_distributions(False) if distributions: for distribution in distributions: distribution_aliases = distribution.get('Aliases', {}).get('Items', []) if set(aliases) & set(distribution_aliases): return distribution['Id'] return None def wait_until_processed(self, client, wait_timeout, distribution_id, caller_reference): if distribution_id is None: distribution_id = self.validate_distribution_id_from_caller_reference(caller_reference=caller_reference) try: waiter = client.get_waiter('distribution_deployed') attempts = 1 + int(wait_timeout / 60) waiter.wait(Id=distribution_id, WaiterConfig={'MaxAttempts': attempts}) except botocore.exceptions.WaiterError as e: self.module.fail_json(msg="Timeout waiting for cloudfront action. Waited for {0} seconds before timeout. 
" "Error: {1}".format(to_text(wait_timeout), to_native(e))) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id)) def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( state=dict(choices=['present', 'absent'], default='present'), caller_reference=dict(), comment=dict(), distribution_id=dict(), e_tag=dict(), tags=dict(type='dict', default={}), purge_tags=dict(type='bool', default=False), alias=dict(), aliases=dict(type='list', default=[]), purge_aliases=dict(type='bool', default=False), default_root_object=dict(), origins=dict(type='list'), purge_origins=dict(type='bool', default=False), default_cache_behavior=dict(type='dict'), cache_behaviors=dict(type='list'), purge_cache_behaviors=dict(type='bool', default=False), custom_error_responses=dict(type='list'), purge_custom_error_responses=dict(type='bool', default=False), logging=dict(type='dict'), price_class=dict(), enabled=dict(type='bool'), viewer_certificate=dict(type='dict'), restrictions=dict(type='dict'), web_acl_id=dict(), http_version=dict(), ipv6_enabled=dict(type='bool'), default_origin_domain_name=dict(), default_origin_path=dict(), wait=dict(default=False, type='bool'), wait_timeout=dict(default=1800, type='int') )) result = {} changed = True module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=False, mutually_exclusive=[ ['distribution_id', 'alias'], ['default_origin_domain_name', 'distribution_id'], ['default_origin_domain_name', 'alias'], ] ) region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) client = boto3_conn(module, conn_type='client', resource='cloudfront', region=region, endpoint=ec2_url, **aws_connect_kwargs) validation_mgr = CloudFrontValidationManager(module) state = module.params.get('state') caller_reference = module.params.get('caller_reference') comment = module.params.get('comment') e_tag = module.params.get('e_tag') tags = module.params.get('tags') purge_tags = module.params.get('purge_tags') distribution_id = module.params.get('distribution_id') alias = module.params.get('alias') aliases = module.params.get('aliases') purge_aliases = module.params.get('purge_aliases') default_root_object = module.params.get('default_root_object') origins = module.params.get('origins') purge_origins = module.params.get('purge_origins') default_cache_behavior = module.params.get('default_cache_behavior') cache_behaviors = module.params.get('cache_behaviors') purge_cache_behaviors = module.params.get('purge_cache_behaviors') custom_error_responses = module.params.get('custom_error_responses') purge_custom_error_responses = module.params.get('purge_custom_error_responses') logging = module.params.get('logging') price_class = module.params.get('price_class') enabled = module.params.get('enabled') viewer_certificate = module.params.get('viewer_certificate') restrictions = module.params.get('restrictions') purge_restrictions = module.params.get('purge_restrictions') web_acl_id = module.params.get('web_acl_id') http_version = module.params.get('http_version') ipv6_enabled = module.params.get('ipv6_enabled') default_origin_domain_name = module.params.get('default_origin_domain_name') default_origin_path = module.params.get('default_origin_path') wait = module.params.get('wait') wait_timeout = module.params.get('wait_timeout') if alias and alias not in aliases: aliases.append(alias) distribution = 
validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference) update = state == 'present' and distribution create = state == 'present' and not distribution delete = state == 'absent' and distribution if not (update or create or delete): module.exit_json(changed=False) if update or delete: config = distribution['Distribution']['DistributionConfig'] e_tag = distribution['ETag'] distribution_id = distribution['Distribution']['Id'] else: config = dict() if update: config = camel_dict_to_snake_dict(config, reversible=True) if create or update: config = validation_mgr.validate_common_distribution_parameters(config, enabled, aliases, logging, price_class, purge_aliases) config = validation_mgr.validate_distribution_config_parameters(config, default_root_object, ipv6_enabled, http_version, web_acl_id) config['origins'] = validation_mgr.validate_origins(client, config.get('origins', {}).get('items', []), origins, default_origin_domain_name, default_origin_path, create, purge_origins) config['cache_behaviors'] = validation_mgr.validate_cache_behaviors(config.get('cache_behaviors', {}).get('items', []), cache_behaviors, config['origins'], purge_cache_behaviors) config['default_cache_behavior'] = validation_mgr.validate_cache_behavior(config.get('default_cache_behavior', {}), default_cache_behavior, config['origins'], True) config['custom_error_responses'] = validation_mgr.validate_custom_error_responses(config.get('custom_error_responses', {}).get('items', []), custom_error_responses, purge_custom_error_responses) valid_restrictions = validation_mgr.validate_restrictions(config.get('restrictions', {}), restrictions, purge_restrictions) if valid_restrictions: config['restrictions'] = valid_restrictions valid_viewer_certificate = validation_mgr.validate_viewer_certificate(viewer_certificate) config = merge_validation_into_config(config, valid_viewer_certificate, 'viewer_certificate') config = validation_mgr.validate_comment(config, comment) config = snake_dict_to_camel_dict(config, capitalize_first=True) if create: config['CallerReference'] = validation_mgr.validate_caller_reference(caller_reference) result = create_distribution(client, module, config, ansible_dict_to_boto3_tag_list(tags)) result = camel_dict_to_snake_dict(result) result['tags'] = list_tags_for_resource(client, module, result['arn']) if delete: if config['Enabled']: config['Enabled'] = False result = update_distribution(client, module, config, distribution_id, e_tag) validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference')) distribution = validation_mgr.validate_distribution_from_aliases_caller_reference(distribution_id, aliases, caller_reference) # e_tag = distribution['ETag'] result = delete_distribution(client, module, distribution) if update: changed = config != distribution['Distribution']['DistributionConfig'] if changed: result = update_distribution(client, module, config, distribution_id, e_tag) else: result = distribution['Distribution'] existing_tags = list_tags_for_resource(client, module, result['ARN']) distribution['Distribution']['DistributionConfig']['tags'] = existing_tags changed |= update_tags(client, module, existing_tags, tags, purge_tags, result['ARN']) result = camel_dict_to_snake_dict(result) result['distribution_config']['tags'] = config['tags'] = list_tags_for_resource(client, module, result['arn']) result['diff'] = dict() diff = recursive_diff(distribution['Distribution']['DistributionConfig'], config) if diff: 
result['diff']['before'] = diff[0] result['diff']['after'] = diff[1] if wait and (create or update): validation_mgr.wait_until_processed(client, wait_timeout, distribution_id, config.get('CallerReference')) if 'distribution_config' in result: result.update(result['distribution_config']) del(result['distribution_config']) module.exit_json(changed=changed, **result) if __name__ == '__main__': main()
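A simplified standalone sketch of the two pure helpers used by the module above — ansible_list_to_cloudfront_list, which wraps a plain list in CloudFront's {'quantity': ..., 'items': [...]} shape, and recursive_diff, which compares the existing and desired DistributionConfig so that main() can decide whether anything changed. This is illustrative only: the sample dictionaries in the usage section are hypothetical, and the input validation done by the original helpers is omitted here.

# Illustrative sketch; mirrors the helpers defined in the module above.

def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True):
    # CloudFront represents most list parameters as {'quantity': N, 'items': [...]}.
    if list_items is None:
        list_items = []
    result = {}
    if include_quantity:
        result['quantity'] = len(list_items)
    if list_items:
        result['items'] = list_items
    return result


def recursive_diff(dict1, dict2):
    # Returns a (left_only, right_only) pair for nested dicts, or None when equal.
    left = dict((k, v) for (k, v) in dict1.items() if k not in dict2)
    right = dict((k, v) for (k, v) in dict2.items() if k not in dict1)
    for k in set(dict1) & set(dict2):
        if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
            result = recursive_diff(dict1[k], dict2[k])
            if result:
                left[k], right[k] = result
        elif dict1[k] != dict2[k]:
            left[k] = dict1[k]
            right[k] = dict2[k]
    return (left, right) if left or right else None


if __name__ == '__main__':
    print(ansible_list_to_cloudfront_list(['www.example.com']))
    # {'quantity': 1, 'items': ['www.example.com']}
    before = {'comment': 'old comment', 'aliases': {'quantity': 1}}
    after = {'comment': 'new comment', 'aliases': {'quantity': 1}}
    print(recursive_diff(before, after))
    # ({'comment': 'old comment'}, {'comment': 'new comment'})

The pair returned by recursive_diff is what populates result['diff']['before'] and result['diff']['after'] in main(), and it is what keeps the update path idempotent: when the validated config matches the existing DistributionConfig, update_distribution is skipped and only tag differences (if any) are applied.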
gpl-3.0
7,483,290,099,521,737,000
42.302267
159
0.610645
false
4.467051
true
false
false
40223232/w16b_test
wsgi.py
1
27797
#@+leo-ver=5-thin #@+node:2014fall.20141212095015.1775: * @file wsgi.py # coding=utf-8 # 上面的程式內容編碼必須在程式的第一或者第二行才會有作用 ################# (1) 模組導入區 # 導入 cherrypy 模組, 為了在 OpenShift 平台上使用 cherrypy 模組, 必須透過 setup.py 安裝 #@@language python #@@tabwidth -4 #@+<<declarations>> #@+node:2014fall.20141212095015.1776: ** <<declarations>> (wsgi) import cherrypy # 導入 Python 內建的 os 模組, 因為 os 模組為 Python 內建, 所以無需透過 setup.py 安裝 import os # 導入 random 模組 import random # 導入 gear 模組 import gear ################# (2) 廣域變數設定區 # 確定程式檔案所在目錄, 在 Windows 下有最後的反斜線 _curdir = os.path.join(os.getcwd(), os.path.dirname(__file__)) # 設定在雲端與近端的資料儲存目錄 if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # 表示程式在雲端執行 download_root_dir = os.environ['OPENSHIFT_DATA_DIR'] data_dir = os.environ['OPENSHIFT_DATA_DIR'] else: # 表示程式在近端執行 download_root_dir = _curdir + "/local_data/" data_dir = _curdir + "/local_data/" '''以下為近端 input() 與 for 迴圈應用的程式碼, 若要將程式送到 OpenShift 執行, 除了採用 CherryPy 網際框架外, 還要轉為 html 列印 # 利用 input() 取得的資料型別為字串 toprint = input("要印甚麼內容?") # 若要將 input() 取得的字串轉為整數使用, 必須利用 int() 轉換 repeat_no = int(input("重複列印幾次?")) for i in range(repeat_no): print(toprint) ''' #@-<<declarations>> #@+others #@+node:2014fall.20141212095015.1777: ** class Hello ################# (3) 程式類別定義區 # 以下改用 CherryPy 網際框架程式架構 # 以下為 Hello 類別的設計內容, 其中的 object 使用, 表示 Hello 類別繼承 object 的所有特性, 包括方法與屬性設計 class Hello(object): # Hello 類別的啟動設定 _cp_config = { 'tools.encode.encoding': 'utf-8', 'tools.sessions.on' : True, 'tools.sessions.storage_type' : 'file', #'tools.sessions.locking' : 'explicit', # session 以檔案儲存, 而且位於 data_dir 下的 tmp 目錄 'tools.sessions.storage_path' : data_dir+'/tmp', # session 有效時間設為 60 分鐘 'tools.sessions.timeout' : 60 } #@+others #@+node:2014fall.20141212095015.2004: *3* __init__ def __init__(self): # 配合透過案例啟始建立所需的目錄 if not os.path.isdir(data_dir+'/tmp'): os.mkdir(data_dir+'/tmp') if not os.path.isdir(data_dir+"/downloads"): os.mkdir(data_dir+"/downloads") if not os.path.isdir(data_dir+"/images"): os.mkdir(data_dir+"/images") #@+node:2014fall.20141212095015.1778: *3* index_orig # 以 @ 開頭的 cherrypy.expose 為 decorator, 用來表示隨後的成員方法, 可以直接讓使用者以 URL 連結執行 @cherrypy.expose # index 方法為 CherryPy 各類別成員方法中的內建(default)方法, 當使用者執行時未指定方法, 系統將會優先執行 index 方法 # 有 self 的方法為類別中的成員方法, Python 程式透過此一 self 在各成員方法間傳遞物件內容 def index_orig(self, toprint="Hello World!"): return toprint #@+node:2014fall.20141212095015.1779: *3* hello @cherrypy.expose def hello(self, toprint="Hello World!"): return toprint #@+node:2014fall.20141215194146.1791: *3* index @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def index(self): outstring = ''' <!DOCTYPE html> <html> <head> 40223232 </head> <body> <br /><a href ="index">index</a><br /> </body> </html> ''' return outstring #@+node:2015.20150330144929.1713: *3* twoDgear @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def twoDgear(self, N=20, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> <script src="/static/Cango2D.js" type="text/javascript"></script> <script src="/static/gearUtils-04.js" type="text/javascript"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <form method=POST action=do2Dgear> 齒數:<input type=text name=N><br /> 模數:<input type=text name=M><br /> 壓力角:<input type=text name=P><br /> <input type=submit value=send> </form> </body> </html> ''' return outstring #@+node:2015.20150331094055.1733: *3* threeDgear @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 
def threeDgear(self, N=20, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> <script src="/static/Cango2D.js" type="text/javascript"></script> <script src="/static/gearUtils-04.js" type="text/javascript"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <form method=POST action=do3Dgear> 齒數:<input type=text name=N><br /> 模數:<input type=text name=M><br /> 壓力角:<input type=text name=P><br /> <input type=submit value=send> </form> </body> </html> ''' return outstring #@+node:2015.20150330144929.1762: *3* do2Dgear @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def do2Dgear(self, N=20, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> <script src="/static/Cango2D.js" type="text/javascript"></script> <script src="/static/gearUtils-04.js" type="text/javascript"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <!-- 以下為 canvas 畫圖程式 --> <script type="text/python"> # 從 browser 導入 document from browser import document import math # 畫布指定在名稱為 plotarea 的 canvas 上 canvas = document["plotarea"] ctx = canvas.getContext("2d") # 用紅色畫一條直線 ctx.beginPath() ctx.lineWidth = 3 ''' outstring += ''' ctx.moveTo('''+str(N)+","+str(M)+")" outstring += ''' ctx.lineTo(0, 500) ctx.strokeStyle = "red" ctx.stroke() # 用藍色再畫一條直線 ctx.beginPath() ctx.lineWidth = 3 ctx.moveTo(0, 0) ctx.lineTo(500, 0) ctx.strokeStyle = "blue" ctx.stroke() # 用綠色再畫一條直線 ctx.beginPath() ctx.lineWidth = 3 ctx.moveTo(0, 0) ctx.lineTo(500, 500) ctx.strokeStyle = "green" ctx.stroke() # 用黑色畫一個圓 ctx.beginPath() ctx.lineWidth = 3 ctx.strokeStyle = "black" ctx.arc(250,250,50,0,2*math.pi) ctx.stroke() </script> <canvas id="plotarea" width="800" height="600"></canvas> </body> </html> ''' return outstring #@+node:2015.20150331094055.1735: *3* do3Dgear @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def do3Dgear(self, N=20, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> <script src="/static/Cango2D.js" type="text/javascript"></script> <script src="/static/gearUtils-04.js" type="text/javascript"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <!-- 以下為 canvas 畫圖程式 --> <script type="text/python"> # 從 browser 導入 document from browser import document import math # 畫布指定在名稱為 plotarea 的 canvas 上 canvas = document["plotarea"] ctx = canvas.getContext("2d") # 用紅色畫一條直線 ctx.beginPath() ctx.lineWidth = 3 ''' outstring += ''' ctx.moveTo('''+str(N)+","+str(M)+")" outstring += ''' ctx.lineTo(0, 500) ctx.strokeStyle = "red" ctx.stroke() # 用藍色再畫一條直線 ctx.beginPath() ctx.lineWidth = 3 ctx.moveTo(0, 0) ctx.lineTo(500, 0) ctx.strokeStyle = "blue" ctx.stroke() # 用綠色再畫一條直線 ctx.beginPath() ctx.lineWidth = 3 ctx.moveTo(0, 0) ctx.lineTo(500, 500) ctx.strokeStyle = "green" ctx.stroke() # 用黑色畫一個圓 ctx.beginPath() ctx.lineWidth = 3 ctx.strokeStyle = "black" ctx.arc(250,250,50,0,2*math.pi) ctx.stroke() </script> <canvas id="plotarea" width="800" height="600"></canvas> </body> </html> ''' return outstring #@+node:2015.20150330144929.1765: *3* mygeartest @cherrypy.expose # N 為齒數, M 為模數, P 
為壓力角 def mygeartest(self, N=20, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> <script src="/static/Cango2D.js" type="text/javascript"></script> <script src="/static/gearUtils-04.js" type="text/javascript"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <!-- 以下為 canvas 畫圖程式 --> <script type="text/python"> # 從 browser 導入 document from browser import document from math import * # 準備在 id="plotarea" 的 canvas 中繪圖 canvas = document["plotarea"] ctx = canvas.getContext("2d") def create_line(x1, y1, x2, y2, width=3, fill="red"): ctx.beginPath() ctx.lineWidth = width ctx.moveTo(x1, y1) ctx.lineTo(x2, y2) ctx.strokeStyle = fill ctx.stroke() # 導入數學函式後, 圓周率為 pi # deg 為角度轉為徑度的轉換因子 deg = pi/180. # # 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖 # # 定義一個繪正齒輪的繪圖函式 # midx 為齒輪圓心 x 座標 # midy 為齒輪圓心 y 座標 # rp 為節圓半徑, n 為齒數 def 齒輪(midx, midy, rp, n, 顏色): # 將角度轉換因子設為全域變數 global deg # 齒輪漸開線分成 15 線段繪製 imax = 15 # 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線 create_line(midx, midy, midx, midy-rp) # 畫出 rp 圓, 畫圓函式尚未定義 #create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2) # a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數 # 模數也就是齒冠大小 a=2*rp/n # d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍 d=2.5*rp/n # ra 為齒輪的外圍半徑 ra=rp+a print("ra:", ra) # 畫出 ra 圓, 畫圓函式尚未定義 #create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1) # rb 則為齒輪的基圓半徑 # 基圓為漸開線長齒之基準圓 rb=rp*cos(20*deg) print("rp:", rp) print("rb:", rb) # 畫出 rb 圓 (基圓), 畫圓函式尚未定義 #create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1) # rd 為齒根圓半徑 rd=rp-d # 當 rd 大於 rb 時 print("rd:", rd) # 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義 #create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1) # dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小 # 將圓弧分成 imax 段來繪製漸開線 dr=(ra-rb)/imax # tan(20*deg)-20*deg 為漸開線函數 sigma=pi/(2*n)+tan(20*deg)-20*deg for j in range(n): ang=-2.*j*pi/n+sigma ang2=2.*j*pi/n+sigma lxd=midx+rd*sin(ang2-2.*pi/n) lyd=midy-rd*cos(ang2-2.*pi/n) #for(i=0;i<=imax;i++): for i in range(imax+1): r=rb+i*dr theta=sqrt((r*r)/(rb*rb)-1.) alpha=theta-atan(theta) xpt=r*sin(alpha-ang) ypt=r*cos(alpha-ang) xd=rd*sin(-ang) yd=rd*cos(-ang) # i=0 時, 繪線起點由齒根圓上的點, 作為起點 if(i==0): last_x = midx+xd last_y = midy-yd # 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點 create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色) # 最後一點, 則為齒頂圓 if(i==imax): lfx=midx+xpt lfy=midy-ypt last_x = midx+xpt last_y = midy-ypt # the line from last end of dedendum point to the recent # end of dedendum point # lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標 # 下列為齒根圓上用來近似圓弧的直線 create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色) #for(i=0;i<=imax;i++): for i in range(imax+1): r=rb+i*dr theta=sqrt((r*r)/(rb*rb)-1.) 
alpha=theta-atan(theta) xpt=r*sin(ang2-alpha) ypt=r*cos(ang2-alpha) xd=rd*sin(ang2) yd=rd*cos(ang2) # i=0 時, 繪線起點由齒根圓上的點, 作為起點 if(i==0): last_x = midx+xd last_y = midy-yd # 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點 create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色) # 最後一點, 則為齒頂圓 if(i==imax): rfx=midx+xpt rfy=midy-ypt last_x = midx+xpt last_y = midy-ypt # lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標 # 下列為齒頂圓上用來近似圓弧的直線 create_line(lfx,lfy,rfx,rfy,fill=顏色) 齒輪(400,400,300,41,"blue") </script> <canvas id="plotarea" width="800" height="800"></canvas> </body> </html> ''' return outstring #@+node:amd.20150415215023.1: *3* mygeartest2 @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def mygeartest2(self, N=20, M=5, P=15): outstring = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> <script src="/static/Cango2D.js" type="text/javascript"></script> <script src="/static/gearUtils-04.js" type="text/javascript"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <!-- 以下為 canvas 畫圖程式 --> <script type="text/python"> # 從 browser 導入 document from browser import document from math import * # 請注意, 這裡導入位於 Lib/site-packages 目錄下的 spur.py 檔案 import spur # 準備在 id="plotarea" 的 canvas 中繪圖 canvas = document["plotarea"] ctx = canvas.getContext("2d") # 以下利用 spur.py 程式進行繪圖, 接下來的協同設計運算必須要配合使用者的需求進行設計運算與繪圖 # 其中並將工作分配給其他組員建立類似 spur.py 的相關零件繪圖模組 # midx, midy 為齒輪圓心座標, rp 為節圓半徑, n 為齒數, pa 為壓力角, color 為線的顏色 # Gear(midx, midy, rp, n=20, pa=20, color="black"): # 模數決定齒的尺寸大小, 囓合齒輪組必須有相同的模數與壓力角 # 壓力角 pa 單位為角度 pa = 20 # m 為模數 m = 20 # 第1齒輪齒數 n_g1 = 17 # 第2齒輪齒數 n_g2 = 11 # 第3齒輪齒數 n_g3 = 13 # 計算兩齒輪的節圓半徑 rp_g1 = m*n_g1/2 rp_g2 = m*n_g2/2 rp_g3 = m*n_g3/2 # 繪圖第1齒輪的圓心座標 x_g1 = 400 y_g1 = 400 # 第2齒輪的圓心座標, 假設排列成水平, 表示各齒輪圓心 y 座標相同 x_g2 = x_g1 + rp_g1 + rp_g2 y_g2 = y_g1 # 第3齒輪的圓心座標 x_g3 = x_g1 + rp_g1 + 2*rp_g2 + rp_g3 y_g3 = y_g1 # 將第1齒輪順時鐘轉 90 度 # 使用 ctx.save() 與 ctx.restore() 以確保各齒輪以相對座標進行旋轉繪圖 ctx.save() # translate to the origin of second gear ctx.translate(x_g1, y_g1) # rotate to engage ctx.rotate(pi/2) # put it back ctx.translate(-x_g1, -y_g1) spur.Spur(ctx).Gear(x_g1, y_g1, rp_g1, n_g1, pa, "blue") ctx.restore() # 將第2齒輪逆時鐘轉 90 度之後, 再多轉一齒, 以便與第1齒輪進行囓合 ctx.save() # translate to the origin of second gear ctx.translate(x_g2, y_g2) # rotate to engage ctx.rotate(-pi/2-pi/n_g2) # put it back ctx.translate(-x_g2, -y_g2) spur.Spur(ctx).Gear(x_g2, y_g2, rp_g2, n_g2, pa, "black") ctx.restore() # 將第3齒輪逆時鐘轉 90 度之後, 再往回轉第2齒輪定位帶動轉角, 然後再逆時鐘多轉一齒, 以便與第2齒輪進行囓合 ctx.save() # translate to the origin of second gear ctx.translate(x_g3, y_g3) # rotate to engage # pi+pi/n_g2 為第2齒輪從順時鐘轉 90 度之後, 必須配合目前的標記線所作的齒輪 2 轉動角度, 要轉換到齒輪3 的轉動角度 # 必須乘上兩齒輪齒數的比例, 若齒輪2 大, 則齒輪3 會轉動較快 # 第1個 -pi/2 為將原先垂直的第3齒輪定位線逆時鐘旋轉 90 度 # -pi/n_g3 則是第3齒與第2齒定位線重合後, 必須再逆時鐘多轉一齒的轉角, 以便進行囓合 # (pi+pi/n_g2)*n_g2/n_g3 則是第2齒原定位線為順時鐘轉動 90 度, # 但是第2齒輪為了與第1齒輪囓合, 已經距離定位線, 多轉了 180 度, 再加上第2齒輪的一齒角度, 因為要帶動第3齒輪定位, # 這個修正角度必須要再配合第2齒與第3齒的轉速比加以轉換成第3齒輪的轉角, 因此乘上 n_g2/n_g3 ctx.rotate(-pi/2-pi/n_g3+(pi+pi/n_g2)*n_g2/n_g3) # put it back ctx.translate(-x_g3, -y_g3) spur.Spur(ctx).Gear(x_g3, y_g3, rp_g3, n_g3, pa, "red") ctx.restore() # 按照上面三個正齒輪的囓合轉角運算, 隨後的傳動齒輪轉角便可依此類推, 完成6個齒輪的囓合繪圖 </script> <canvas id="plotarea" width="1200" height="1200"></canvas> </body> </html> ''' return outstring #@+node:2015.20150331094055.1737: *3* my3Dgeartest @cherrypy.expose # N 為齒數, M 為模數, P 為壓力角 def my3Dgeartest(self, N=20, M=5, P=15): outstring = ''' 
<!DOCTYPE html> <html> <head> <meta http-equiv="content-type" content="text/html;charset=utf-8"> <!-- 載入 brython.js --> <script type="text/javascript" src="/static/Brython3.1.1-20150328-091302/brython.js"></script> <script src="/static/Cango2D.js" type="text/javascript"></script> <script src="/static/gearUtils-04.js" type="text/javascript"></script> </head> <!-- 啟動 brython() --> <body onload="brython()"> <!-- 以下為 canvas 畫圖程式 --> <script type="text/python"> # 從 browser 導入 document from browser import document from math import * # 準備在 id="plotarea" 的 canvas 中繪圖 canvas = document["plotarea"] ctx = canvas.getContext("2d") def create_line(x1, y1, x2, y2, width=3, fill="red"): ctx.beginPath() ctx.lineWidth = width ctx.moveTo(x1, y1) ctx.lineTo(x2, y2) ctx.strokeStyle = fill ctx.stroke() # 導入數學函式後, 圓周率為 pi # deg 為角度轉為徑度的轉換因子 deg = pi/180. # # 以下分別為正齒輪繪圖與主 tkinter 畫布繪圖 # # 定義一個繪正齒輪的繪圖函式 # midx 為齒輪圓心 x 座標 # midy 為齒輪圓心 y 座標 # rp 為節圓半徑, n 為齒數 def gear(midx, midy, rp, n, 顏色): # 將角度轉換因子設為全域變數 global deg # 齒輪漸開線分成 15 線段繪製 imax = 15 # 在輸入的畫布上繪製直線, 由圓心到節圓 y 軸頂點畫一直線 create_line(midx, midy, midx, midy-rp) # 畫出 rp 圓, 畫圓函式尚未定義 #create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2) # a 為模數 (代表公制中齒的大小), 模數為節圓直徑(稱為節徑)除以齒數 # 模數也就是齒冠大小 a=2*rp/n # d 為齒根大小, 為模數的 1.157 或 1.25倍, 這裡採 1.25 倍 d=2.5*rp/n # ra 為齒輪的外圍半徑 ra=rp+a print("ra:", ra) # 畫出 ra 圓, 畫圓函式尚未定義 #create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1) # rb 則為齒輪的基圓半徑 # 基圓為漸開線長齒之基準圓 rb=rp*cos(20*deg) print("rp:", rp) print("rb:", rb) # 畫出 rb 圓 (基圓), 畫圓函式尚未定義 #create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1) # rd 為齒根圓半徑 rd=rp-d # 當 rd 大於 rb 時 print("rd:", rd) # 畫出 rd 圓 (齒根圓), 畫圓函式尚未定義 #create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1) # dr 則為基圓到齒頂圓半徑分成 imax 段後的每段半徑增量大小 # 將圓弧分成 imax 段來繪製漸開線 dr=(ra-rb)/imax # tan(20*deg)-20*deg 為漸開線函數 sigma=pi/(2*n)+tan(20*deg)-20*deg for j in range(n): ang=-2.*j*pi/n+sigma ang2=2.*j*pi/n+sigma lxd=midx+rd*sin(ang2-2.*pi/n) lyd=midy-rd*cos(ang2-2.*pi/n) #for(i=0;i<=imax;i++): for i in range(imax+1): r=rb+i*dr theta=sqrt((r*r)/(rb*rb)-1.) alpha=theta-atan(theta) xpt=r*sin(alpha-ang) ypt=r*cos(alpha-ang) xd=rd*sin(-ang) yd=rd*cos(-ang) # i=0 時, 繪線起點由齒根圓上的點, 作為起點 if(i==0): last_x = midx+xd last_y = midy-yd # 由左側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點 create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色) # 最後一點, 則為齒頂圓 if(i==imax): lfx=midx+xpt lfy=midy-ypt last_x = midx+xpt last_y = midy-ypt # the line from last end of dedendum point to the recent # end of dedendum point # lxd 為齒根圓上的左側 x 座標, lyd 則為 y 座標 # 下列為齒根圓上用來近似圓弧的直線 create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=顏色) #for(i=0;i<=imax;i++): for i in range(imax+1): r=rb+i*dr theta=sqrt((r*r)/(rb*rb)-1.) 
alpha=theta-atan(theta) xpt=r*sin(ang2-alpha) ypt=r*cos(ang2-alpha) xd=rd*sin(ang2) yd=rd*cos(ang2) # i=0 時, 繪線起點由齒根圓上的點, 作為起點 if(i==0): last_x = midx+xd last_y = midy-yd # 由右側齒根圓作為起點, 除第一點 (xd,yd) 齒根圓上的起點外, 其餘的 (xpt,ypt)則為漸開線上的分段點 create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=顏色) # 最後一點, 則為齒頂圓 if(i==imax): rfx=midx+xpt rfy=midy-ypt last_x = midx+xpt last_y = midy-ypt # lfx 為齒頂圓上的左側 x 座標, lfy 則為 y 座標 # 下列為齒頂圓上用來近似圓弧的直線 create_line(lfx,lfy,rfx,rfy,fill=顏色) gear(400,400,300,41,"blue") </script> <canvas id="plotarea" width="800" height="800"></canvas> </body> </html> ''' return outstring #@+node:2014fall.20141215194146.1793: *3* doCheck @cherrypy.expose def doCheck(self, guess=None): # 假如使用者直接執行 doCheck, 則設法轉回根方法 if guess is None: raise cherrypy.HTTPRedirect("/") # 從 session 取出 answer 對應資料, 且處理直接執行 doCheck 時無法取 session 值情況 try: theanswer = int(cherrypy.session.get('answer')) except: raise cherrypy.HTTPRedirect("/") # 經由表單所取得的 guess 資料型別為 string try: theguess = int(guess) except: return "error " + self.guessform() # 每執行 doCheck 一次,次數增量一次 cherrypy.session['count'] += 1 # 答案與所猜數字進行比對 if theanswer < theguess: return "big " + self.guessform() elif theanswer > theguess: return "small " + self.guessform() else: # 已經猜對, 從 session 取出累計猜測次數 thecount = cherrypy.session.get('count') return "exact: <a href=''>再猜</a>" #@+node:2014fall.20141215194146.1789: *3* guessform def guessform(self): # 印出讓使用者輸入的超文件表單 outstring = str(cherrypy.session.get('answer')) + "/" + str(cherrypy.session.get('count')) + '''<form method=POST action=doCheck> 請輸入您所猜的整數:<input type=text name=guess><br /> <input type=submit value=send> </form>''' return outstring #@-others #@-others ################# (4) 程式啟動區 # 配合程式檔案所在目錄設定靜態目錄或靜態檔案 application_conf = {'/static':{ 'tools.staticdir.on': True, # 程式執行目錄下, 必須自行建立 static 目錄 'tools.staticdir.dir': _curdir+"/static"}, '/downloads':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/downloads"}, '/images':{ 'tools.staticdir.on': True, 'tools.staticdir.dir': data_dir+"/images"} } root = Hello() root.gear = gear.Gear() if 'OPENSHIFT_REPO_DIR' in os.environ.keys(): # 表示在 OpenSfhit 執行 application = cherrypy.Application(root, config=application_conf) else: # 表示在近端執行 cherrypy.quickstart(root, config=application_conf) #@-leo
gpl-3.0
421,274,124,816,849,400
29.535904
137
0.554152
false
2.049902
false
false
false
NeerajM999/recap-python
LearnPython/data_structures/binary_tree.py
1
1761
class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None


class BinaryTree(object):
    def __init__(self, root_val):
        self.root = Node(root_val)

    def preorder_traversal(self, start, traversal):
        """ Root -> left -> right """
        if start:
            traversal += (str(start.value) + "-")
            traversal = self.preorder_traversal(start.left, traversal)
            traversal = self.preorder_traversal(start.right, traversal)
        return traversal

    def inorder_traversal(self, start, traversal):
        """ left -> root -> right """
        if start:
            traversal = self.inorder_traversal(start.left, traversal)
            traversal += (str(start.value) + "-")
            traversal = self.inorder_traversal(start.right, traversal)
        return traversal

    def postorder_traversal(self, start, traversal):
        """ left -> right -> root """
        if start:
            traversal = self.postorder_traversal(start.left, traversal)
            traversal = self.postorder_traversal(start.right, traversal)
            traversal += (str(start.value) + "-")
        return traversal


if __name__ == "__main__":
    """
            1
           / \
          2   3
         / \ / \
        4  5 6  7
    """
    tree = BinaryTree(1)
    tree.root.left = Node(2)
    tree.root.right = Node(3)
    tree.root.left.left = Node(4)
    tree.root.left.right = Node(5)
    tree.root.right.left = Node(6)
    tree.root.right.right = Node(7)

    print("preorder-traversal: ", tree.preorder_traversal(tree.root, ""))
    print("inorder-traversal: ", tree.inorder_traversal(tree.root, ""))
    print("postorder-traversal: ", tree.postorder_traversal(tree.root, ""))
gpl-3.0
1,749,836,215,305,637,600
27.885246
75
0.571266
false
3.6841
false
false
false
kret0s/gnuhealth-live
tryton/server/trytond-3.8.3/trytond/model/fields/one2one.py
1
2080
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from types import NoneType

from trytond.model.fields.field import Field
from trytond.model.fields.many2many import Many2Many
from trytond.pool import Pool


class One2One(Many2Many):
    '''
    Define one2one field (``int``).
    '''
    _type = 'one2one'

    def get(self, ids, model, name, values=None):
        '''
        Return target record.

        :param ids: a list of ids
        :param model: a string with the name of the model
        :param name: a string with the name of the field
        :param values: a dictionary with the read values
        :return: a dictionary with ids as key and target id as value
        '''
        res = super(One2One, self).get(ids, model, name, values=values)
        for i, vals in res.iteritems():
            res[i] = vals[0] if vals else None
        return res

    def set(self, Model, name, ids, value, *args):
        '''
        Set the values.
        '''
        pool = Pool()
        Relation = pool.get(self.relation_name)
        to_delete = []
        to_create = []
        args = iter((ids, value) + args)
        for ids, value in zip(args, args):
            relations = Relation.search([
                    (self.origin, 'in', ids),
                    ])
            to_delete.extend(relations)
            if value:
                for record_id in ids:
                    to_create.append({
                            self.origin: record_id,
                            self.target: value,
                            })
        if to_delete:
            Relation.delete(to_delete)
        if to_create:
            Relation.create(to_create)

    def __set__(self, inst, value):
        Target = self.get_target()
        if isinstance(value, dict):
            value = Target(*value)
        elif isinstance(value, (int, long)):
            value = Target(value)
        assert isinstance(value, (Target, NoneType))
        Field.__set__(self, inst, value)
gpl-3.0
4,749,240,343,853,297,000
32.015873
72
0.546635
false
4.16
false
false
false
Metonimie/Beaglebone
programs/server.py
1
3147
#!/usr/bin/env python
"""
A very simple server in python used to control gpio pins
on the beaglebone black.
The server listens for POST requests on port 6410.
It has no security at all, which means that it accepts
post-data from everyone.

Send a GET request::
    curl http://localhost

Send a POST request::
    curl -d "foo=bar&bin=baz" http://localhost

Usage:
    nohup python3 server.py &
"""
# TODO: Add basic security
# TODO: Use dictionary for gpio name : file
import http.server
import urllib

PORT = 6410
gpio_path = "/sys/class/gpio/"
# If the param name is in here then we handle the value.
authorized_gpio = ["gpio60"]


class Server(http.server.BaseHTTPRequestHandler):

    def prepare_response(self, code):
        """
        Prepares the response that will be send back to the requester,
        along with the code.
        """
        self.send_response(code)
        self.send_header("Content-type", "text/html")
        self.send_header("Access-Control-Allow-Origin", "*")
        self.end_headers()

    def handle_gpio(self, key, value):
        """
        Very basic gpio handling, converts the value into an int
        and then it writes it to the file.
        """
        try:
            clean_value = int(value)
            with open("{}{}/value".format(gpio_path, key), mode="w") as file:
                file.write(str(clean_value))
            return False
        except ValueError as e:
            print(e)
        except Exception as e:
            print("Exception: {}".format(e))
        return True

    def unsupported(self):
        self.wfile.write("Go Away!\n".encode())

    def do_GET(self):
        self.unsupported()

    def do_HEAD(self):
        self.unsupported()

    def do_POST(self):
        """
        Handles the post request.
        If error is True then the handling has failed or the request
        is invalid
        """
        error = False
        try:
            # The length of the request, in bytes.
            length = int(self.headers['content-length'])
            # Dictionary containing keys and values from the request.
            postvars = urllib.parse.parse_qs(self.rfile.read(length))

            for key, value in postvars.items():
                clean_key = key.decode()
                clean_value = value[0].decode()
                print("Received: " + clean_key + " : " + clean_value)

                if clean_key in authorized_gpio:
                    error = self.handle_gpio(clean_key, clean_value)
                else:
                    error = True
        except Exception as e:
            print(e)
            error = True

        response = None
        if not error:
            self.prepare_response(200)
            response = "Operation authorized.\n"
        else:
            self.prepare_response(403)
            response = "Go away!\n"

        # Write response to the client.
        self.wfile.write(response.encode())


if __name__ == "__main__":
    server_address = ('', PORT)
    httpd = http.server.HTTPServer(server_address, Server)
    print('Starting server')
    httpd.serve_forever()
gpl-3.0
3,281,151,376,845,631,500
28.688679
77
0.571973
false
4.157199
false
false
false
arbrandes/edx-configuration
playbooks/roles/supervisor/files/pre_supervisor_checks.py
1
12593
import argparse import boto.ec2 from boto.utils import get_instance_metadata, get_instance_identity from boto.exception import AWSConnectionError import hipchat import os import subprocess import traceback import socket import time # Services that should be checked for migrations. MIGRATION_COMMANDS = { 'lms': "/edx/bin/edxapp-migrate-lms --noinput --list", 'cms': "/edx/bin/edxapp-migrate-cms --noinput --list", 'xqueue': ". {env_file}; sudo -E -u xqueue {python} {code_dir}/manage.py showmigrations", 'ecommerce': ". {env_file}; sudo -E -u ecommerce {python} {code_dir}/manage.py showmigrations", 'insights': ". {env_file}; sudo -E -u insights {python} {code_dir}/manage.py showmigrations", 'analytics_api': ". {env_file}; sudo -E -u analytics_api {python} {code_dir}/manage.py showmigrations", 'credentials': ". {env_file}; sudo -E -u credentials {python} {code_dir}/manage.py showmigrations", 'discovery': ". {env_file}; sudo -E -u discovery {python} {code_dir}/manage.py showmigrations", } HIPCHAT_USER = "PreSupervisor" # Max amount of time to wait for tags to be applied. MAX_BACKOFF = 120 INITIAL_BACKOFF = 1 REGION = get_instance_identity()['document']['region'] def services_for_instance(instance_id): """ Get the list of all services named by the services tag in this instance's tags. """ ec2 = boto.ec2.connect_to_region(REGION) reservations = ec2.get_all_instances(instance_ids=[instance_id]) for reservation in reservations: for instance in reservation.instances: if instance.id == instance_id: try: services = instance.tags['services'].split(',') except KeyError as ke: msg = "Tag named 'services' not found on this instance({})".format(instance_id) raise Exception(msg) for service in services: yield service def edp_for_instance(instance_id): ec2 = boto.ec2.connect_to_region(REGION) reservations = ec2.get_all_instances(instance_ids=[instance_id]) for reservation in reservations: for instance in reservation.instances: if instance.id == instance_id: try: environment = instance.tags['environment'] deployment = instance.tags['deployment'] play = instance.tags['play'] except KeyError as ke: msg = "{} tag not found on this instance({})".format(ke.message, instance_id) raise Exception(msg) return (environment, deployment, play) if __name__ == '__main__': parser = argparse.ArgumentParser( description="Enable all services that are in the services tag of this ec2 instance.") parser.add_argument("-a","--available", help="The location of the available services.") parser.add_argument("-e","--enabled", help="The location of the enabled services.") migration_args = parser.add_argument_group("edxapp_migrations", "Args for running edxapp migration checks.") migration_args.add_argument("--edxapp-code-dir", help="Location of the edx-platform code.") migration_args.add_argument("--edxapp-python", help="Path to python to use for executing migration check.") migration_args.add_argument("--edxapp-env", help="Location of the edxapp environment file.") xq_migration_args = parser.add_argument_group("xqueue_migrations", "Args for running xqueue migration checks.") xq_migration_args.add_argument("--xqueue-code-dir", help="Location of the xqueue code.") xq_migration_args.add_argument("--xqueue-python", help="Path to python to use for executing migration check.") migration_args.add_argument("--xqueue-env", help="Location of the xqueue environment file.") ecom_migration_args = parser.add_argument_group("ecommerce_migrations", "Args for running ecommerce migration checks.") ecom_migration_args.add_argument("--ecommerce-python", 
help="Path to python to use for executing migration check.") ecom_migration_args.add_argument("--ecommerce-env", help="Location of the ecommerce environment file.") ecom_migration_args.add_argument("--ecommerce-code-dir", help="Location of the ecommerce code.") credentials_migration_args = parser.add_argument_group("credentials_migrations", "Args for running credentials migration checks.") credentials_migration_args.add_argument("--credentials-python", help="Path to python to use for executing migration check.") credentials_migration_args.add_argument("--credentials-env", help="Location of the credentials environment file.") credentials_migration_args.add_argument("--credentials-code-dir", help="Location of the credentials code.") discovery_migration_args = parser.add_argument_group("discovery_migrations", "Args for running discovery migration checks.") discovery_migration_args.add_argument("--discovery-python", help="Path to python to use for executing migration check.") discovery_migration_args.add_argument("--discovery-env", help="Location of the discovery environment file.") discovery_migration_args.add_argument("--discovery-code-dir", help="Location of the discovery code.") insights_migration_args = parser.add_argument_group("insights_migrations", "Args for running insights migration checks.") insights_migration_args.add_argument("--insights-python", help="Path to python to use for executing migration check.") insights_migration_args.add_argument("--insights-env", help="Location of the insights environment file.") insights_migration_args.add_argument("--insights-code-dir", help="Location of the insights code.") analyticsapi_migration_args = parser.add_argument_group("analytics_api_migrations", "Args for running analytics_api migration checks.") analyticsapi_migration_args.add_argument("--analytics-api-python", help="Path to python to use for executing migration check.") analyticsapi_migration_args.add_argument("--analytics-api-env", help="Location of the analytics_api environment file.") analyticsapi_migration_args.add_argument("--analytics-api-code-dir", help="Location of the analytics_api code.") hipchat_args = parser.add_argument_group("hipchat", "Args for hipchat notification.") hipchat_args.add_argument("-c","--hipchat-api-key", help="Hipchat token if you want to receive notifications via hipchat.") hipchat_args.add_argument("-r","--hipchat-room", help="Room to send messages to.") args = parser.parse_args() report = [] prefix = None notify = None try: if args.hipchat_api_key: hc = hipchat.HipChat(token=args.hipchat_api_key) notify = lambda message: hc.message_room(room_id=args.hipchat_room, message_from=HIPCHAT_USER, message=message) except Exception as e: print("Failed to initialize hipchat, {}".format(e)) traceback.print_exc() instance_id = get_instance_metadata()['instance-id'] prefix = instance_id ec2 = boto.ec2.connect_to_region(REGION) reservations = ec2.get_all_instances(instance_ids=[instance_id]) instance = reservations[0].instances[0] if instance.instance_profile['arn'].endswith('/abbey'): print("Running an abbey build. Not starting any services.") # Needs to exit with 1 instead of 0 to prevent # services from starting. 
exit(1) time_left = MAX_BACKOFF backoff = INITIAL_BACKOFF environment = None deployment = None play = None while time_left > 0: try: environment, deployment, play = edp_for_instance(instance_id) prefix = "{environment}-{deployment}-{play}-{instance_id}".format( environment=environment, deployment=deployment, play=play, instance_id=instance_id) break except Exception as e: print("Failed to get EDP for {}: {}".format(instance_id, str(e))) # With the time limit being 2 minutes we will # try 5 times before giving up. time.sleep(backoff) time_left -= backoff backoff = backoff * 2 if environment is None or deployment is None or play is None: msg = "Unable to retrieve environment, deployment, or play tag." print(msg) if notify: notify("{} : {}".format(prefix, msg)) exit(0) #get the hostname of the sandbox hostname = socket.gethostname() try: #get the list of the volumes, that are attached to the instance volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id}) for volume in volumes: volume.add_tags({"hostname": hostname, "environment": environment, "deployment": deployment, "cluster": play, "instance-id": instance_id, "created": volume.create_time }) except Exception as e: msg = "Failed to tag volumes associated with {}: {}".format(instance_id, str(e)) print(msg) if notify: notify(msg) try: for service in services_for_instance(instance_id): if service in MIGRATION_COMMANDS: services = { "lms": {'python': args.edxapp_python, 'env_file': args.edxapp_env, 'code_dir': args.edxapp_code_dir}, "cms": {'python': args.edxapp_python, 'env_file': args.edxapp_env, 'code_dir': args.edxapp_code_dir}, "ecommerce": {'python': args.ecommerce_python, 'env_file': args.ecommerce_env, 'code_dir': args.ecommerce_code_dir}, "credentials": {'python': args.credentials_python, 'env_file': args.credentials_env, 'code_dir': args.credentials_code_dir}, "discovery": {'python': args.discovery_python, 'env_file': args.discovery_env, 'code_dir': args.discovery_code_dir}, "insights": {'python': args.insights_python, 'env_file': args.insights_env, 'code_dir': args.insights_code_dir}, "analytics_api": {'python': args.analytics_api_python, 'env_file': args.analytics_api_env, 'code_dir': args.analytics_api_code_dir}, "xqueue": {'python': args.xqueue_python, 'env_file': args.xqueue_env, 'code_dir': args.xqueue_code_dir}, } if service in services and all(arg!=None for arg in services[service].values()) and service in MIGRATION_COMMANDS: serv_vars = services[service] cmd = MIGRATION_COMMANDS[service].format(**serv_vars) if os.path.exists(serv_vars['code_dir']): os.chdir(serv_vars['code_dir']) # Run migration check command. output = subprocess.check_output(cmd, shell=True, ) if '[ ]' in output: raise Exception("Migrations have not been run for {}".format(service)) # Link to available service. 
available_file = os.path.join(args.available, "{}.conf".format(service)) link_location = os.path.join(args.enabled, "{}.conf".format(service)) if os.path.exists(available_file): subprocess.call("sudo -u supervisor ln -sf {} {}".format(available_file, link_location), shell=True) report.append("Enabling service: {}".format(service)) else: raise Exception("No conf available for service: {}".format(link_location)) except AWSConnectionError as ae: msg = "{}: ERROR : {}".format(prefix, ae) if notify: notify(msg) notify(traceback.format_exc()) raise ae except Exception as e: msg = "{}: ERROR : {}".format(prefix, e) print(msg) if notify: notify(msg) traceback.print_exc() raise e else: msg = "{}: {}".format(prefix, " | ".join(report)) print(msg) if notify: notify(msg)
agpl-3.0
-2,843,304,026,179,480,000
45.640741
152
0.615262
false
4.182331
false
false
false
cyanogen/uchroma
uchroma/traits.py
1
11759
# # uchroma - Copyright (C) 2021 Stefanie Kondik # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU Lesser General Public License as published # by the Free Software Foundation, version 3. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public # License for more details. # # pylint: disable=protected-access, invalid-name, no-member import enum import importlib import sys from argparse import ArgumentParser from typing import Iterable from traitlets import CaselessStrEnum, Container, Dict, Enum, Int, HasTraits, \ List, TraitType, Undefined, UseEnum from frozendict import frozendict from uchroma.color import to_color from uchroma.util import ArgsDict class ColorTrait(TraitType): """ A traitlet which encapsulates a grapefruit.Color and performs type coercion as needed. """ info_text = "a color" allow_none = True default_value = 'black' def __init__(self, *args, **kwargs): super(ColorTrait, self).__init__(*args, **kwargs) def validate(self, obj, value): try: if value is not None: value = to_color(value) except: self.error(obj, value) return value class ColorSchemeTrait(List): """ A list of ColorTraits which comprise a scheme """ info_text = 'a list of colors' def __init__(self, trait=ColorTrait(), default_value=(), minlen=0, maxlen=sys.maxsize, **kwargs): super(ColorSchemeTrait, self).__init__(trait=trait, default_value=default_value, minlen=minlen, maxlen=maxlen, **kwargs) class ColorPresetTrait(UseEnum): """ A trait which represents a group of color schemes defined as a Python Enum. """ info_text = 'a predefined color scheme' def __init__(self, enum_class, default_value=None, **kwargs): super(ColorPresetTrait, self).__init__(enum_class, default_value=default_value, **kwargs) class WriteOnceMixin(object): """ Mixin for traits which cannot be changed after an initial value has been set. """ write_once = True def validate(self, obj, value): if self.name not in obj._trait_values or \ obj._trait_values[self.name] == self.default_value: return super().validate(obj, value) self.error(obj, value) class WriteOnceInt(WriteOnceMixin, Int): """ Subclass of Int which may only be written once """ pass class FrozenDict(WriteOnceMixin, Dict): """ Subclass of Dict which converts the value to a frozendict on the first setting. """ def validate(self, obj, value): return frozendict(super().validate(obj, value)) class UseEnumCaseless(UseEnum): """ Subclass of UseEnum which allows selection of values using case insensitive strings """ def select_by_name(self, value, default=Undefined): if value.startswith(self.name_prefix): # -- SUPPORT SCOPED-NAMES, like: "Color.red" => "red" value = value.replace(self.name_prefix, "", 1) keys = [x.lower() for x in self.enum_class.__members__.keys()] idx = keys.index(value.lower()) if idx < 0: return Undefined return self.enum_class[list(self.enum_class.__members__.keys())[idx]] class WriteOnceUseEnumCaseless(WriteOnceMixin, UseEnumCaseless): """ Subclass of UseEnumCaseless which may only be written once. 
""" pass class DefaultCaselessStrEnum(CaselessStrEnum): """ Extension of CaselessStrEnum which handles default values better """ def validate(self, obj, value): if self.default_value and (value is None or value == ''): value = self.default_value return super().validate(obj, value) def is_trait_writable(trait: TraitType) -> bool: """ Test if a trait is writable :param trait: the trait to be tested :return: True if the trait is writable """ if trait.read_only: return False if hasattr(trait, 'write_once') and trait.write_once: return False return True def trait_as_dict(trait: TraitType) -> dict: """ Convert a trait to a dict for sending over D-Bus or the like :param trait: the trait to be converted :return: dict representing this trait """ cls = trait.__class__ tdict = {} for k, v in vars(trait).items(): if k.startswith('__') or k == 'this_class': continue if hasattr(cls, k) and getattr(cls, k) == v: continue if isinstance(v, Iterable) and len(v) == 0: continue if k.startswith('_'): tdict[k[1:]] = v else: tdict[k] = v if isinstance(trait, UseEnum): cls = CaselessStrEnum tdict['values'] = tuple(trait.enum_class.__members__.keys()) if 'enum_class' in tdict: del tdict['enum_class'] for k, v in tdict.items(): if isinstance(v, TraitType): tdict[k] = trait_as_dict(v) if isinstance(v, enum.Enum): tdict[k] = v.name if isinstance(v, type): tdict[k] = '%s.%s' % (v.__module__, v.__name__) tdict['__class__'] = (cls.__module__, cls.__name__) return tdict def class_traits_as_dict(obj: HasTraits, values: dict=None) -> dict: """ Create a dict which represents all traits of the given object. This dict itself can be inspected in a generic API, or it may be converted back to a (stub) instance of HasTraits. This facilitates the sending of configurable object properties over an interface such as D-Bus. :param obj: an instance of HasTraits :param value: optional dict of trait values (pulled from obj by default) :return: dict representing all traits in obj """ cls_dt = {} if isinstance(obj, type) and hasattr(obj, 'class_traits'): traits = obj.class_traits() elif isinstance(obj, dict): traits = obj elif isinstance(obj, HasTraits): traits = obj.traits() values = obj._trait_values else: raise TypeError("Object does not support traits") for k, v in traits.items(): dt = trait_as_dict(v) if dt is None: continue if values is not None and k in values: dt['__value__'] = values[k] cls_dt[k] = dt return cls_dt def dict_as_trait(obj: dict) -> TraitType: """ Create a trait from a dict (trait_as_dict). """ if '__class__' not in obj: raise ValueError("No module and class attribute present") tobj = obj.copy() module_name, trait_class = tobj.pop('__class__') module = importlib.import_module(module_name) if not hasattr(module, trait_class): raise TypeError("Unknown class: %s" % trait_class) cls = getattr(module, trait_class) if 'trait' in tobj: tobj['trait'] = dict_as_trait(tobj.pop('trait')) metadata = {} if 'metadata' in tobj: metadata.update(tobj.pop('metadata')) if issubclass(cls, Enum): trait = cls(tobj.pop('values'), **tobj) else: trait = cls(**tobj) for k in list(metadata.keys()): if k in ('name', 'default_args', 'default_kwargs'): setattr(trait, k, metadata.pop(k)) trait.metadata = metadata return trait def dict_as_class_traits(obj: dict) -> HasTraits: """ Convert a dict of unpacked traits to a HasTraits instance. Useful for remote parameter inspection and validation. 
:param obj: dict of unpacked traits :return: the stub HasTraits instance """ if not isinstance(obj, dict): raise TypeError("Object must be a dict (was: %s)" % obj) traits = {} values = {} for k, v in obj.items(): if '__value__' in v: values[k] = v.pop('__value__') trait = dict_as_trait(v) if trait is None: continue traits[k] = trait cls = HasTraits() cls.add_traits(**traits) for k, v in values.items(): setattr(cls, k, v) return cls def get_args_dict(obj: HasTraits, incl_all=False): """ Return a dict of user-configurable traits for an object :param obj: an instance of HasTraits :param incl_all: If all items should be included, regardless of RO status :return: dict of arguments """ argsdict = ArgsDict() for k in sorted(obj._trait_values.keys()): v = obj._trait_values[k] trait = obj.traits()[k] if incl_all or (not trait.get_metadata('hidden') and is_trait_writable(trait)): argsdict[k] = v return argsdict def add_traits_to_argparse(obj: HasTraits, parser: ArgumentParser, prefix: str=None): """ Add all traits from the given object to the argparse context. :param obj: an instance of HasTraits :param parser: argparse parser :param prefix: string to prefix keys with """ for key, trait in obj.traits().items(): if trait.get_metadata('config') is not True: continue argname = '--%s' % key if prefix is not None: argname = '--%s.%s' % (prefix, key) if isinstance(trait, Container): parser.add_argument(argname, nargs='+', help=trait.info_text) elif isinstance(trait, Enum): parser.add_argument(argname, type=str.lower, choices=[x.lower() for x in trait.values], help=trait.info_text) else: argtype = str if hasattr(trait, 'default_value'): argtype = type(trait.default_value) parser.add_argument(argname, type=argtype, help=trait.info_text) def apply_from_argparse(args, traits=None, target: HasTraits=None) -> dict: """ Applies arguments added via add_traits_to_argparse to a target object which implements HasTraits. If a target is not known, a dict of traits may be passed instead. Will throw TraitError if validation fails. :param args: Parsed args from argparse :param traits: Dictionary of traits (optional) :param target: Target object (optional) :return: Dict of the arguments which actually changed """ # apply the traits to an empty object, which will run # the validators on the client if isinstance(traits, HasTraits): traits = traits.traits() traits = traits.copy() for k, v in traits.items(): if not isinstance(v, TraitType): if isinstance(v, dict): k[v] = dict_as_trait(v) else: raise TypeError("A dict or trait object must be supplied") if target is None: if traits is None: raise ValueError("Either traits or target must be specified") target = HasTraits() target.add_traits(**traits) # determine what should actually be changed argkeys = [k for k, v in vars(args).items() if v is not None] intersect = set(target.traits().keys()).intersection(set(argkeys)) # apply the argparse flags to the target object for key in intersect: if target.traits()[key].get_metadata('config') is not True: raise ValueError("Trait is not marked as configurable: %s" % key) setattr(target, key, getattr(args, key)) # if all validators passed, return a dict of the changed args changed = {} for key in intersect: changed[key] = target._trait_values[key] return changed
lgpl-3.0
468,926,082,809,482,900
28.619647
97
0.615188
false
3.944649
false
false
false
cardmagic/PyAMF
pyamf/adapters/_django_db_models_base.py
1
8476
# Copyright (c) 2007-2009 The PyAMF Project. # See LICENSE.txt for details. """ `django.db.models` adapter module. :see: `Django Project <http://www.djangoproject.com>`_ :since: 0.4.1 """ from django.db.models.base import Model from django.db.models import fields from django.db.models.fields import related, files import datetime import pyamf from pyamf.util import imports class DjangoReferenceCollection(dict): """ This helper class holds a dict of klass to pk/objects loaded from the underlying db. :since: 0.5 """ def _getClass(self, klass): if klass not in self.keys(): self[klass] = {} return self[klass] def getClassKey(self, klass, key): """ Return an instance based on klass/key. If an instance cannot be found then `KeyError` is raised. :param klass: The class of the instance. :param key: The primary_key of the instance. :return: The instance linked to the `klass`/`key`. :rtype: Instance of `klass`. """ d = self._getClass(klass) return d[key] def addClassKey(self, klass, key, obj): """ Adds an object to the collection, based on klass and key. :param klass: The class of the object. :param key: The datastore key of the object. :param obj: The loaded instance from the datastore. """ d = self._getClass(klass) d[key] = obj class DjangoClassAlias(pyamf.ClassAlias): def getCustomProperties(self): self.fields = {} self.relations = {} self.columns = [] self.meta = self.klass._meta for name in self.meta.get_all_field_names(): x = self.meta.get_field_by_name(name)[0] if isinstance(x, files.FileField): self.readonly_attrs.update([name]) if isinstance(x, related.RelatedObject): continue if not isinstance(x, related.ForeignKey): self.fields[name] = x else: self.relations[name] = x for k, v in self.klass.__dict__.iteritems(): if isinstance(v, related.ReverseManyRelatedObjectsDescriptor): self.fields[k] = v.field parent_fields = [] for field in self.meta.parents.values(): parent_fields.append(field.attname) del self.relations[field.name] self.exclude_attrs.update(parent_fields) props = self.fields.keys() self.encodable_properties.update(props) self.decodable_properties.update(props) def _compile_base_class(self, klass): if klass is Model: return pyamf.ClassAlias._compile_base_class(self, klass) def _encodeValue(self, field, value): if value is fields.NOT_PROVIDED: return pyamf.Undefined if value is None: return value # deal with dates .. 
if isinstance(field, fields.DateTimeField): return value elif isinstance(field, fields.DateField): return datetime.datetime(value.year, value.month, value.day, 0, 0, 0) elif isinstance(field, fields.TimeField): return datetime.datetime(1970, 1, 1, value.hour, value.minute, value.second, value.microsecond) elif isinstance(value, files.FieldFile): return value.name return value def _decodeValue(self, field, value): if value is pyamf.Undefined: return fields.NOT_PROVIDED if isinstance(field, fields.AutoField) and value == 0: return None elif isinstance(field, fields.DateTimeField): # deal with dates return value elif isinstance(field, fields.DateField): if not value: return None return datetime.date(value.year, value.month, value.day) elif isinstance(field, fields.TimeField): if not value: return None return datetime.time(value.hour, value.minute, value.second, value.microsecond) return value def getEncodableAttributes(self, obj, **kwargs): attrs = pyamf.ClassAlias.getEncodableAttributes(self, obj, **kwargs) if not attrs: attrs = {} for name, prop in self.fields.iteritems(): if name not in attrs.keys(): continue if isinstance(prop, related.ManyToManyField): attrs[name] = [x for x in getattr(obj, name).all()] else: attrs[name] = self._encodeValue(prop, getattr(obj, name)) keys = attrs.keys() for key in keys: if key.startswith('_'): del attrs[key] for name, relation in self.relations.iteritems(): if '_%s_cache' % name in obj.__dict__: attrs[name] = getattr(obj, name) del attrs[relation.column] if not attrs: attrs = None return attrs def getDecodableAttributes(self, obj, attrs, **kwargs): attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, **kwargs) for n in self.decodable_properties: if n in self.relations: continue f = self.fields[n] attrs[f.attname] = self._decodeValue(f, attrs[n]) # primary key of django object must always be set first for # relationships with other model objects to work properly # and dict.iteritems() does not guarantee order # # django also forces the use only one attribute as primary key, so # our obj._meta.pk.attname check is sufficient) try: setattr(obj, obj._meta.pk.attname, attrs[obj._meta.pk.attname]) del attrs[obj._meta.pk.attname] except KeyError: pass return attrs def getDjangoObjects(context): """ Returns a reference to the `django_objects` on the context. If it doesn't exist then it is created. :param context: The context to load the `django_objects` index from. :type context: Instance of :class:`pyamf.BaseContext` :return: The `django_objects` index reference. :rtype: Instance of :class:`DjangoReferenceCollection` :since: 0.5 """ if not hasattr(context, 'django_objects'): context.django_objects = DjangoReferenceCollection() return context.django_objects def writeDjangoObject(self, obj, *args, **kwargs): """ The Django ORM creates new instances of objects for each db request. This is a problem for PyAMF as it uses the id(obj) of the object to do reference checking. We could just ignore the problem, but the objects are conceptually the same so the effort should be made to attempt to resolve references for a given object graph. We create a new map on the encoder context object which contains a dict of C{object.__class__: {key1: object1, key2: object2, .., keyn: objectn}}. We use the primary key to do the reference checking. 
:since: 0.5 """ if not isinstance(obj, Model): self.writeNonDjangoObject(obj, *args, **kwargs) return context = self.context kls = obj.__class__ s = obj.pk if s is None: self.writeNonDjangoObject(obj, *args, **kwargs) return django_objects = getDjangoObjects(context) try: referenced_object = django_objects.getClassKey(kls, s) except KeyError: referenced_object = obj django_objects.addClassKey(kls, s, obj) self.writeNonDjangoObject(referenced_object, *args, **kwargs) def install_django_reference_model_hook(mod): """ Called when :module:`pyamf.amf0` or :module:`pyamf.amf3` are imported. Attaches the :func:`writeDjangoObject` method to the `Encoder` class in that module. :param mod: The module imported. :since: 0.4.1 """ if not hasattr(mod.Encoder, 'writeNonDjangoObject'): mod.Encoder.writeNonDjangoObject = mod.Encoder.writeObject mod.Encoder.writeObject = writeDjangoObject # initialise the module here: hook into pyamf pyamf.register_alias_type(DjangoClassAlias, Model) # hook the L{writeDjangobject} method to the Encoder class on import imports.when_imported('pyamf.amf0', install_django_reference_model_hook) imports.when_imported('pyamf.amf3', install_django_reference_model_hook)
mit
-6,217,738,496,913,844,000
28.430556
91
0.61975
false
4.080886
false
false
false
ICOS-Carbon-Portal/data
src/main/python/update-restheart/Restheart.py
1
2242
import requests


class Restheart(object):

    def __init__(self):
        # self._baseUrl = 'http://127.0.0.1:8088/db/'  # localhost
        self._baseUrl = 'https://restheart.icos-cp.eu/db/'  # production
        self._verfify = True if self._baseUrl.__contains__('restheart') else False

    def get_records_to_update(self, op, pagesize, collection):
        resp = None
        try:
            url = self.get_url(op, pagesize, collection)
            resp = requests.get(url, timeout=10, verify=self._verfify)
            if resp.status_code != 200:
                print(resp.status_code, resp.reason, resp.json())
            return resp.json()
        except:
            print(resp)

    def update_record(self, id, record, collection):
        url = self._baseUrl + collection + '/' + id
        headers = {"Content-Type": "application/json"}
        resp = None
        try:
            resp = requests.patch(url, headers=headers, json=record, timeout=5, verify=self._verfify)
            if resp.status_code != 200:
                print(resp.status_code, resp.reason)
        except:
            print(resp)

    def get_url(self, op, pagesize, collection):
        if op == 'geo':
            if collection == 'portaluse':
                return self._baseUrl + collection + '?filter={"city":{"$exists":0}}&np&pagesize=' + str(pagesize)
            elif collection == 'dobjdls':
                return self._baseUrl + collection + '?filter={"$and":[{"ip":{"$exists":1}},{"city":{"$exists":0}}]}&np&pagesize=' + str(pagesize)
            else:
                raise ValueError("Unknown collection: " + collection)
        elif op == 'label':
            if collection == 'portaluse':
                return self._baseUrl + collection + '?np&pagesize=' + str(pagesize)
                # return self._baseUrl + collection + '?filter={"_id":{"$oid":"5bb21519f17df4d065e9c53c"}}&np&pagesize=' + str(pagesize)
                # return self._baseUrl + collection + '?filter={"filterChange":{"$exists":1}}&np&pagesize=' + str(pagesize)
                # return self._baseUrl + collection + '?filter={"previewNetCDF":{"$exists":1}}&np&pagesize=' + str(pagesize)
                # return self._baseUrl + collection + '?filter={"previewTimeserie":{"$exists":1}}&np&pagesize=' + str(pagesize)
                # return self._baseUrl + collection + '?filter={"$and":[{"filterChange":{"$exists":0}},{"previewNetCDF":{"$exists":0}},{"previewTimeserie":{"$exists":0}}]}&np&pagesize=' + str(pagesize)
            else:
                raise ValueError("Unknown collection: " + collection)
gpl-3.0
5,153,336,162,370,622,000
37.655172
189
0.650758
false
3.058663
false
false
false
domecraft/Games
RPG/classes.py
1
3530
class character:
    def __init__(self, name, gender, health, race, role, status, strength, defense, magic, bounty, income, reputation):
        self.name = name
        self.health = health
        self.status = status
        self.strength = strength
        self.defense = defense
        self.race = race
        self.role = role
        self.bounty = bounty
        self.magic = magic
        self.gender = gender
        self.income = income
        self.reputation = reputation
        self.inventory = []

    def modify_health(self, amount):
        self.health += amount

    def set_health(self, amount):
        self.health = amount

    def set_status(self, status):
        self.status = status

    def modify_str(self, amount):
        self.strength += amount

    def modify_def(self, amount):
        self.defense += amount

    def add_item(self, item):
        self.inventory.append(item)

    def remove_item(self, item):
        if item in self.inventory:
            self.inventory.remove(item)
        else:
            print item + " is not in your inventory!"

    def set_race(self, race):
        self.race = race

    def modify_bounty(self, amount):
        self.bounty += amount

    def checkDead(self, health):
        if self.health <= 0:
            self.status = "dead"
            return "dead"
        else:
            self.status = "alive"
            return "alive"

    def modify_income(self, amount):
        self.income += amount

    def modify_reputation(self, amount):
        self.reputation += amount


# The following class is used for random npcs that I don't really develop in the storyline.
class basicCharacter:
    def __init__(self, name, gender, income, status):
        self.name = name
        self.gender = gender
        self.income = income
        self.status = status

    def set_status(self, status):
        self.status = status


class store:
    def __init__(self, name="General Store", owner="Store Owner", alliance="Rebellion"):
        self.name = name
        self.store_owner = owner
        self.alliance = alliance
        self.stock = {
            'longsword': {'cost': 10, 'speed': 3, 'strength': 7, 'defense': 2},
            'shortsword': {'cost': 8, 'speed': 5, 'strength': 4, 'defense': 2},
            'bronze_armor': {'cost': 10, 'speed': -2, 'strength': 1, 'defense': 6},
            'silver_armor': {'cost': 20, 'speed': -5, 'strength': 2, 'defense': 12},
            'platinum_armor': {'cost': 35, 'speed': -8, 'strength': 4, 'defense': 20}
        }


class town:
    def __init__(self, name, ruler, alliance, income, population):
        self.name = name
        self.ruler = ruler
        self.alliance = alliance
        self.income = income
        self.population = population

    def set_ruler(self, ruler):
        self.ruler = ruler

    def set_name(self, name):
        self.name = name

    def set_alliance(self, alliance):
        self.alliance = alliance

    def modify_income(self, amount):
        self.income += amount

    def modify_pop(self, population):
        self.population += population


class bar:
    def __init__(self, name, owner, income):
        self.name = name
        self.owner = owner
        self.income = income

    def set_owner(self, owner):
        self.owner = owner

    def modify_income(self, amount):
        self.income += amount
gpl-2.0
3,359,393,522,367,290,000
29.964912
119
0.545326
false
3.904867
false
false
false
fbergmann/libSEDML
examples/python/create_sedml.py
1
5521
#!/usr/bin/env python ## ## @file create_sedml.py ## @brief cerates a SED-ML document. ## @author Frank T. Bergmann ## ## <!-------------------------------------------------------------------------- ## This file is part of libSEDML. Please visit http://sed-ml.org for more ## information about SEDML, and the latest version of libSEDML. ## ## Copyright (c) 2013, Frank T. Bergmann ## All rights reserved. ## ## Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## 1. Redistributions of source code must retain the above copyright notice, this ## list of conditions and the following disclaimer. ## 2. Redistributions in binary form must reproduce the above copyright notice, ## this list of conditions and the following disclaimer in the documentation ## and/or other materials provided with the distribution. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ## ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ## LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ## SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ## ------------------------------------------------------------------------ -. ## import sys import os.path import libsedml def main (args): """Usage: create_sedml output-filename """ if (len(args) != 2): print(main.__doc__) sys.exit(1); # create the document doc = libsedml.SedDocument(); doc.setLevel(1); doc.setVersion(1); # create a first model referencing an sbml file model = doc.createModel(); model.setId("model1"); model.setSource("file.xml"); model.setLanguage("urn:sedml:language:sbml"); # create a second model modifying a variable of that other sbml file model = doc.createModel(); model.setId("model2"); model.setSource("model1"); model.setLanguage("urn:sedml:sbml"); # change a paramerter 'k' to 0.1 change = model.createChangeAttribute(); change.setTarget("/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='k']/@value"); change.setNewValue("0.1"); # remove species 's1' remove = model.createRemoveXML(); remove.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S1']"); # now for something tricky we want to update the initialConcentration of 'S2' to be # half what it was in the original model compute = model.createComputeChange(); compute.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id=&quot;S2&quot;]/@initialConcentration"); variable = compute.createVariable(); variable.setId("S2"); variable.setModelReference("model1"); variable.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S2']"); compute.setMath(libsedml.parseFormula("S2 / 2")); # create simulation tc = doc.createUniformTimeCourse(); tc.setId("sim1"); tc.setInitialTime(0.0); tc.setOutputStartTime(0.0); tc.setOutputEndTime(10.0); tc.setNumberOfPoints(1000); # need to set the correct KISAO Term alg = tc.createAlgorithm(); alg.setKisaoID("KISAO:0000019"); # create a task that uses the simulation 
and the model above task = doc.createTask(); task.setId("task1"); task.setModelReference("model1"); task.setSimulationReference("sim1"); # add a DataGenerator to hold the output for time dg = doc.createDataGenerator(); dg.setId("time"); dg.setName("time"); var = dg.createVariable(); var.setId("v0"); var.setName("time"); var.setTaskReference("task1"); var.setSymbol("urn:sedml:symbol:time"); dg.setMath(libsedml.parseFormula("v0")); # and one for S1 dg = doc.createDataGenerator(); dg.setId("S1"); dg.setName("S1"); var = dg.createVariable(); var.setId("v1"); var.setName("S1"); var.setTaskReference("task1"); var.setTarget("/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='S1']"); dg.setMath(libsedml.parseFormula("v1")); # add a report report = doc.createReport(); report.setId("r1"); report.setName("report 1"); set = report.createDataSet(); set.setId("ds1"); set.setLabel("time"); set.setDataReference("time"); set = report.createDataSet(); set.setId("ds2"); set.setLabel("S1"); set.setDataReference("S1"); # add a 2d plot plot = doc.createPlot2D(); plot.setId("p1"); plot.setName("S1 Timecourse"); curve = plot.createCurve(); curve.setId("c1"); curve.setName("S1"); curve.setLogX(False); curve.setLogY(False); curve.setXDataReference("time"); curve.setYDataReference("S1"); # add a 3D Plot plot2 = doc.createPlot3D(); plot2.setId("p2"); plot2.setName("dunno"); surf = plot2.createSurface(); surf.setId("surf1"); surf.setName("S1"); surf.setLogX(False); surf.setLogY(False); surf.setLogZ(False); surf.setXDataReference("time"); surf.setYDataReference("S1"); surf.setZDataReference("S1"); # write the document libsedml.writeSedML(doc, args[1]); if __name__ == '__main__': main(sys.argv)
bsd-2-clause
3,096,274,935,878,346,000
32.05988
119
0.685926
false
3.393362
false
false
false
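A natural follow-up to the generator script in the record above is reading the written SED-ML file back and checking that it parsed cleanly. The sketch below is illustrative only and not part of the record: the output filename is a placeholder, and the reader-side calls (readSedML, getNumErrors, getNumModels, getNumTasks) are assumed to mirror the writer API used above rather than verified against a particular libsedml release.

import libsedml

# Hypothetical round-trip check; 'create_sedml_output.xml' is a placeholder filename.
doc = libsedml.readSedML('create_sedml_output.xml')
print('read errors:', doc.getNumErrors())
print('models:', doc.getNumModels(), 'tasks:', doc.getNumTasks())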
molmod/yaff
yaff/pes/colvar.py
1
13249
# -*- coding: utf-8 -*- # YAFF is yet another force-field code. # Copyright (C) 2011 Toon Verstraelen <[email protected]>, # Louis Vanduyfhuys <[email protected]>, Center for Molecular Modeling # (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise # stated. # # This file is part of YAFF. # # YAFF is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 3 # of the License, or (at your option) any later version. # # YAFF is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/> # # -- '''Collective variables This module implements the computation of collective variables and their derivatives, typically used in advanced sampling methods such as umbrella sampling or metadynamics. The ``CollectiveVariable`` class is the main item in this module, which is normally used in conjuction with an instance of the ``Bias`` class. Note that many collective variables such as bond lengths, bending angles, improper angles, ... are already implemented by the :mod:`yaff.pes.iclist` module, so no separate implementation needs to be provided here. ''' from __future__ import division import numpy as np from yaff.log import log from yaff.pes.dlist import DeltaList from yaff.pes.iclist import InternalCoordinateList from yaff.sampling.utils import cell_lower __all__ = [ 'CollectiveVariable', 'CVVolume', 'CVCOMProjection','CVInternalCoordinate', 'CVLinCombIC', ] class CollectiveVariable(object): '''Base class for collective variables.''' def __init__(self, name, system): """ **Arguments:** name A name for the collective variable. system The system for the collective variable. """ self.name = name self.system = system self.value = np.nan self.gpos = np.zeros((system.natom, 3), float) self.vtens = np.zeros((3, 3), float) def get_conversion(self): '''Auxiliary routine that allows base classes the specify the unit conversion associated with the internal coordinate. ''' raise NotImplementedError def get_log(self): '''Describe the internal coordinate in a format that is suitable for screen logging. ''' return '%s' % (self.__class__.__name__) def compute(self, gpos=None, vtens=None): """Compute the collective variable and optionally some derivatives The only variable inputs for the compute routine are the atomic positions and the cell vectors. **Optional arguments:** gpos The derivatives of the collective variable towards the Cartesian coordinates of the atoms. ('g' stands for gradient and 'pos' for positions.) This must be a writeable numpy array with shape (N, 3) where N is the number of atoms. vtens The force contribution to the pressure tensor. This is also known as the virial tensor. It represents the derivative of the energy towards uniform deformations, including changes in the shape of the unit cell. (v stands for virial and 'tens' stands for tensor.) This must be a writeable numpy array with shape (3, 3). The collective variable value is returned. The optional arguments are Fortran-style output arguments. When they are present, the corresponding results are computed and **stored** to the current contents of the array. """ #Subclasses implement their compute code here. 
raise NotImplementedError def get_last_computed_value(self): """Return the last value that was computed. It is not assured that this value reflects the value for the current state of the system. This is merely a convenience method to obtain the value without performing an actual computation. """ return self.value class CVInternalCoordinate(CollectiveVariable): ''' An InternalCoordinate disguised as a CollectiveVariable so that it can be used together with a BiasPotential. This is less efficient than using the InternalCoordinate with a ValenceTerm, so the latter is preferred if it is possible. ''' def __init__(self, system, ic, comlist=None): self.system = system self.ic = ic self.comlist = comlist self.dlist = DeltaList(system if comlist is None else comlist) self.iclist = InternalCoordinateList(self.dlist) self.iclist.add_ic(ic) def get_conversion(self): return self.ic.get_conversion() def compute(self, gpos=None, vtens=None): if self.comlist is not None: self.comlist.forward() self.dlist.forward() self.iclist.forward() self.value = self.iclist.ictab[0]['value'] if gpos is not None: gpos[:] = 0.0 if vtens is not None: vtens[:] = 0.0 if not ((gpos is None) and (vtens is None)): self.iclist.ictab[0]['grad'] = 1.0 self.iclist.back() if self.comlist is None: self.dlist.back(gpos, vtens) else: self.comlist.gpos[:] = 0.0 self.dlist.back(self.comlist.gpos, vtens) self.comlist.back(gpos) return self.value class CVVolume(CollectiveVariable): '''The volume of the simulation cell.''' def __init__(self, system): ''' **Arguments:** system An instance of the ``System`` class. ''' if system.cell.nvec == 0: raise TypeError('Can not compute volume of a non-periodic system.') CollectiveVariable.__init__(self, 'CVVolume', system) def get_conversion(self): return np.power(log.length.conversion, self.system.cell.nvec) def compute(self, gpos=None, vtens=None): self.value = self.system.cell.volume if gpos is not None: # No dependence on atomic positions gpos[:] = 0.0 if vtens is not None: vtens[:] = np.identity(3)*self.value return self.value class CVCOMProjection(CollectiveVariable): '''Compute the vector connecting two centers of masses and return the projection along a selected vector. 
cv=(r_{COM}^{B}-r_{COM}^{A})[index] and r_{COM} is a vector with centers of mass of groups A and B: * first component: projected onto ``a`` vector of cell * second component: projected onto vector perpendicular to ``a`` and in the plane spanned by ``a`` and ``b`` * third component: projected onto vector perpendicular to ``a`` and ``b`` Note that periodic boundary conditions are NOT taken into account * the centers of mass are computed using absolute positions; this is most likely the desired behavior * the center of mass difference can in principle be periodic, but the periodicity is not the same as the periodicity of the system, because of the projection on a selected vector ''' def __init__(self, system, groups, index): ''' **Arguments:** system An instance of the ``System`` class groups List of 2 arrays, each array containing atomic indexes used to compute one of the centers of mass index Selected projection vector: * if index==0, projection onto ``a`` vector of cell * if index==1, projection onto vector perpendicular to ``a`` and in the plane spanned by ``a`` and ``b`` * if index==2, projection onto vector perpendicular to ``a`` and ``b`` ''' CollectiveVariable.__init__(self, 'CVCOMProjection', system) self.index = index # Safety checks assert len(groups)==2, "Exactly 2 groups need to be defined" assert system.cell.nvec==3, "Only 3D periodic systems are supported" assert self.index in [0,1,2], "Index should be one of 0,1,2" # Masses need to be defined in order to compute centers of mass if self.system.masses is None: self.system.set_standard_masses() # Define weights w_i such that difference of centers of mass can be # computed as sum_i w_i r_i self.weights = np.zeros((system.natom)) self.weights[groups[0]] = -self.system.masses[groups[0]]/np.sum(self.system.masses[groups[0]]) self.weights[groups[1]] = self.system.masses[groups[1]]/np.sum(self.system.masses[groups[1]]) def get_conversion(self): return log.length.conversion def compute(self, gpos=None, vtens=None): ''' Consider a rotation of the entire system such that the ``a`` vector is aligned with the X-axis, the ``b`` vector is in the XY-plane, and the ``c`` vector chosen such that a right-handed basis is formed. The rotated cell is lower-diagonal in the Yaff notation. In this rotated system, it is fairly simple to compute the required projections and derivatives, because the projections are simply the Cartesian components. Values obtained in the rotated system are then transformed back to the original system. ''' # Compute rotation that makes cell lower diagonal _, R = cell_lower(self.system.cell.rvecs) # The projected vector of centers of mass difference (aka the # collective variable) in the rotated system cv_orig = np.sum(self.weights.reshape((-1,1))*self.system.pos, axis=0) # Transform back to the original system cv = np.dot(R, cv_orig) self.value = cv[self.index] if gpos is not None: gpos[:] = 0.0 gpos[:,self.index] = self.weights # Forces (vector) need to be rotated back to original system gpos[:] = np.einsum('ij,kj', gpos, R.T) if vtens is not None: vtens[:] = 0.0 vtens[self.index,self.index:] = cv[self.index:] vtens[self.index:,self.index] = cv[self.index:] # Virial (tensor) needs to be rotated back to original system vtens[:] = np.dot(R.T,np.dot(vtens[:],R)) return self.value class CVLinCombIC(CollectiveVariable): ''' A linear combination of InternalCoordinates: cv = w0*ic0 + w1*ic1 + ... ''' def __init__(self, system, ics, weights, comlist=None): ''' **Arguments:** system An instance of the ``System`` class. 
ics A list of InternalCoordinate instances. weights A list defining the weight of each InternalCoordinate that is used when computing the linear combination. **Optional arguments:** comlist An instance COMList; if provided, this is used instead of the normal DeltaList to compute the InternalCoordinates ''' assert len(weights)==len(ics) self.system = system self.ics = ics self.comlist = comlist self.dlist = DeltaList(system if comlist is None else comlist) self.iclist = InternalCoordinateList(self.dlist) for ic in self.ics: self.iclist.add_ic(ic) self.weights = weights def get_conversion(self): # Units depend on the particular linear combination of internal # coordinates return 1.0 def compute(self, gpos=None, vtens=None): if self.comlist is not None: self.comlist.forward() self.dlist.forward() self.iclist.forward() self.value = 0.0 for iic in range(len(self.ics)): self.value += self.weights[iic]*self.iclist.ictab[iic]['value'] if gpos is not None: gpos[:] = 0.0 if vtens is not None: vtens[:] = 0.0 if not ((gpos is None) and (vtens is None)): for iic in range(len(self.ics)): # Derivative of the linear combination to this particular # internal coordinate self.iclist.ictab[iic]['grad'] = self.weights[iic] self.iclist.back() if self.comlist is None: self.dlist.back(gpos, vtens) else: self.comlist.gpos[:] = 0.0 self.dlist.back(self.comlist.gpos, vtens) self.comlist.back(gpos) return self.value
gpl-3.0
-3,113,228,627,627,625,000
37.853372
102
0.611669
false
4.174228
false
false
false
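For orientation, here is a minimal sketch of how a collective variable such as CVVolume from the module above is typically evaluated. The System construction is an assumption (the checkpoint filename is a placeholder); the output buffers follow the Fortran-style convention described in the compute() docstring, where the arrays are overwritten and the value is returned.

import numpy as np
from yaff.system import System
from yaff.pes.colvar import CVVolume

# Placeholder checkpoint; any periodic yaff System would do.
system = System.from_file('init.chk')
cv = CVVolume(system)
gpos = np.zeros((system.natom, 3), float)   # gradient w.r.t. atomic positions
vtens = np.zeros((3, 3), float)             # virial tensor contribution
value = cv.compute(gpos=gpos, vtens=vtens)  # buffers are overwritten, the value is returned
print(value / cv.get_conversion())          # report in the log unit system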
russorat/savage-leads
api/models/lead.py
1
2649
from elasticsearch import Elasticsearch,RequestsHttpConnection,NotFoundError from flask import url_for import config import json class Lead(object): es = Elasticsearch(config.ES_HOSTS,connection_class=RequestsHttpConnection) @staticmethod def create_lead(lead_data): try: results = Lead.es.create(index='leads', doc_type='leads', body=lead_data ) if results['created']: return { 'status': 'success', 'message': '', 'created_id': results['_id'] } else: return { 'status': 'failure', 'message': 'failed to create new lead.', 'created_id': '' } except Exception as e: print e return { 'status': 'failure', 'message': 'unknown error', 'created_id': '' } @staticmethod def delete_lead(lead_id): try : Lead.es.delete(index='leads', doc_type='leads', id=lead_id ) return { 'status': 'success', 'message': '' } except NotFoundError as e: return { 'status': 'failure', 'message': 'id not found' } except Exception as e: print e return { 'status': 'failure', 'message': 'unknown error' } @staticmethod def get_lead(lead_id): try: results = Lead.es.get( index='leads', doc_type='leads', id='%s'%(lead_id), ignore=404 ) if results and results['found'] : return {'status':'success','message':'','results':[Lead.from_es_hit(results)]} return {'status':'success','message':'','results':[]} except NotFoundError as e: return { 'status': 'failure', 'message': 'id not found', 'results': [] } except Exception as e: print e return { 'status': 'failure', 'message': 'unknown exception', 'results': [] } @staticmethod def get_leads(size,page,search): try: results = Lead.es.search( index='leads', doc_type='leads', size=size, q=search or "*", sort='last_name:ASC,first_name:ASC' ) retVal = [] if results and results['hits']['total'] > 0 : for hit in results['hits']['hits']: retVal.append(Lead.from_es_hit(hit)) return {'status':'success','message':'','results':retVal} except Exception as e: print e return {'status':'failure','message':'unknown error','results':[]} @staticmethod def from_es_hit(hit): lead = {} lead['id'] = hit['_id'] for key,val in hit['_source'].items(): lead[key] = val lead['uri'] = url_for('get_lead', lead_id=lead['id'], _external=True) return lead
apache-2.0
-1,660,902,958,443,782,400
29.102273
86
0.559079
false
3.800574
false
false
false
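A hypothetical round trip through the Lead model above: it assumes a reachable Elasticsearch cluster configured through config.ES_HOSTS, and that the module imports as api.models.lead (inferred from the repository path, not verified). The field names in the document are invented for illustration.

from api.models.lead import Lead   # import path inferred from the repo layout

created = Lead.create_lead({'first_name': 'Ada', 'last_name': 'Lovelace'})
if created['status'] == 'success':
    fetched = Lead.get_lead(created['created_id'])
    print(fetched['results'])
    print(Lead.get_leads(size=10, page=0, search='Lovelace'))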
jaantollander/CrowdDynamics
crowddynamics/core/tests/test_interactions_benchmark.py
1
1239
import numpy as np
import pytest

from crowddynamics.core.interactions import agent_agent_block_list
from crowddynamics.core.vector2D import unit_vector
from crowddynamics.simulation.agents import Agents, Circular, ThreeCircle, \
    AgentGroup


def attributes():
    orientation = np.random.uniform(-np.pi, np.pi)
    return dict(body_type='adult',
                orientation=orientation,
                velocity=np.random.uniform(0.0, 1.3, 2),
                angular_velocity=np.random.uniform(-1.0, 1.0),
                target_direction=unit_vector(orientation),
                target_orientation=orientation)


@pytest.mark.parametrize('size', (200, 500, 1000))
@pytest.mark.parametrize('agent_type', (Circular, ThreeCircle))
def test_agent_agent_block_list(benchmark, size, agent_type, algorithm):
    # Grow the area with size. Keeps agent density constant.
    area_size = np.sqrt(2 * size)
    agents = Agents(agent_type=agent_type)
    group = AgentGroup(
        agent_type=agent_type,
        size=size,
        attributes=attributes)
    agents.add_non_overlapping_group(
        group,
        position_gen=lambda: np.random.uniform(-area_size, area_size, 2))

    benchmark(agent_agent_block_list, agents.array)
    assert True
gpl-3.0
4,276,068,190,012,276,700
36.545455
80
0.684423
false
3.580925
false
false
false
myshkov/bnn-analysis
models/bbb_sampler.py
1
4851
""" This module implements Bayes By Backprop -based sampler for NNs. http://jmlr.org/proceedings/papers/v37/blundell15.pdf """ import numpy as np from keras.models import Sequential from keras.layers.core import Activation from keras import backend as K from keras.engine.topology import Layer from sampler import Sampler, SampleStats class BBBSampler(Sampler): """ BBB sampler for NNs. """ def __init__(self, model=None, batch_size=None, n_epochs=None, **kwargs): """ Creates a new BBBSampler object. """ super().__init__(**kwargs) self.sampler_type = 'BBB' self.model = model self.batch_size = batch_size if batch_size is not None else self.train_set_size self.n_epochs = n_epochs def __repr__(self): s = super().__repr__() return s def _fit(self, n_epochs=None, verbose=0, **kwargs): """ Fits the model before sampling. """ n_epochs = n_epochs if n_epochs is not None else self.n_epochs self.model.fit(self.train_x, self.train_y, batch_size=self.batch_size, nb_epoch=n_epochs, verbose=verbose) def _sample_predictive(self, test_x=None, return_stats=False, **kwargs): """ Draws a new sample from the model. """ sample = self.model.predict(test_x, batch_size=self.batch_size) stats = None if return_stats: stats = SampleStats(time=self._running_time()) return [sample], [stats] @classmethod def model_from_description(cls, layers, noise_std, weights_std, batch_size, train_size): """ Creates a BBB model from the specified parameters. """ n_batches = int(train_size / batch_size) step = .01 class BBBLayer(Layer): def __init__(self, output_dim, **kwargs): self.output_dim = output_dim super().__init__(**kwargs) def build(self, input_shape): input_dim = input_shape[1] shape = [input_dim, self.output_dim] eps_std = step # weights self.eps_w = K.random_normal([input_shape[0]] + shape, std=eps_std) self.mu_w = K.variable(np.random.normal(0., 10. * step, size=shape), name='mu_w') self.rho_w = K.variable(np.random.normal(0., 10. * step, size=shape), name='rho_w') self.W = self.mu_w + self.eps_w * K.log(1.0 + K.exp(self.rho_w)) self.eps_b = K.random_normal([self.output_dim], std=eps_std) self.mu_b = K.variable(np.random.normal(0., 10. * step, size=[self.output_dim]), name='mu_b') self.rho_b = K.variable(np.random.normal(0., 10. * step, size=[self.output_dim]), name='rho_b') self.b = self.mu_b + self.eps_b * K.log(1.0 + K.exp(self.rho_b)) self.trainable_weights = [self.mu_w, self.rho_w, self.mu_b, self.rho_b] def call(self, x, mask=None): return K.squeeze(K.batch_dot(K.expand_dims(x, dim=1), self.W), axis=1) + self.b def get_output_shape_for(self, input_shape): return (input_shape[0], self.output_dim) def log_gaussian(x, mean, std): return -K.log(std) - (x - mean) ** 2 / (2. * std ** 2) def sigma_from_rho(rho): return K.log(1. + K.exp(rho)) / step def variational_objective(model, noise_std, weights_std, batch_size, nb_batches): def loss(y, fx): log_pw = K.variable(0.) log_qw = K.variable(0.) 
for layer in model.layers: if type(layer) is BBBLayer: log_pw += K.sum(log_gaussian(layer.W, 0., weights_std)) log_pw += K.sum(log_gaussian(layer.b, 0., weights_std)) log_qw += K.sum(log_gaussian(layer.W, layer.mu_w, sigma_from_rho(layer.rho_w))) log_qw += K.sum(log_gaussian(layer.b, layer.mu_b, sigma_from_rho(layer.rho_b))) log_likelihood = K.sum(log_gaussian(y, fx, noise_std)) return K.sum((log_qw - log_pw) / nb_batches - log_likelihood) / batch_size return loss model = Sequential() in_shape = [batch_size, layers[0][0]] # input model.add(BBBLayer(layers[1][0], batch_input_shape=in_shape)) model.add(Activation('relu')) # hidden layers for l in range(2, len(layers) - 1): model.add(BBBLayer(layers[l - 1][0])) model.add(Activation('relu')) # output layer model.add(BBBLayer(1)) loss = variational_objective(model, noise_std, weights_std, batch_size, n_batches) model.compile(loss=loss, optimizer='adam', metrics=['accuracy']) return model
mit
-8,254,993,371,662,850,000
35.201493
111
0.556999
false
3.433121
false
false
false
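A rough sketch of wiring up the helper above. Layer sizes, prior widths and data sizes are invented, and only the model-construction step is shown because the Sampler base-class arguments are not visible in this record; treat the module path as an assumption.

from models.bbb_sampler import BBBSampler   # module path assumed from the repo layout

# One input, two hidden layers of 50 units, one output, as (size,) tuples.
layers = [(1,), (50,), (50,), (1,)]
model = BBBSampler.model_from_description(
    layers, noise_std=0.1, weights_std=1.0, batch_size=100, train_size=1000)
model.summary()   # standard Keras Sequential API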
felixbr/nosql-rest-preprocessor
nosql_rest_preprocessor/models.py
1
5131
from __future__ import absolute_import, unicode_literals, print_function, division from nosql_rest_preprocessor import exceptions from nosql_rest_preprocessor.utils import non_mutating class BaseModel(object): required_attributes = set() optional_attributes = None immutable_attributes = set() private_attributes = set() sub_models = {} resolved_attributes = {} @classmethod def validate(cls, obj): cls._check_required_attributes(obj) cls._check_allowed_attributes(obj) # recurse for sub models for attr, sub_model in cls.sub_models.items(): if attr in obj.keys(): sub_model.validate(obj[attr]) return obj @classmethod @non_mutating def prepare_response(cls, obj): # remove non-public attrs for attr in cls.private_attributes: obj.pop(attr, None) # recurse for sub models for attr, sub_model in cls.sub_models.items(): if attr in obj.keys(): obj[attr] = sub_model.prepare_response(obj[attr]) return obj @classmethod def merge_updated(cls, db_obj, new_obj): cls.validate(new_obj) merged_obj = {} # check if previously present immutable attributes should be deleted for key in cls.immutable_attributes: if key in db_obj and key not in new_obj: raise exceptions.ChangingImmutableAttributeError() # copy attributes into merged_obj for key, value in new_obj.items(): cls._check_immutable_attrs_on_update(key, value, db_obj) if key in cls.resolved_attributes and isinstance(value, dict): # ignore resolved attributes in update merged_obj[key] = db_obj[key] else: merged_obj[key] = value # recurse for sub models for attr, sub_model in cls.sub_models.items(): merged_obj[attr] = sub_model.merge_updated(db_obj[attr], new_obj[attr]) return merged_obj @classmethod def _check_immutable_attrs_on_update(cls, key, value, db_obj): # check if immutable attributes should be changed if key in cls.immutable_attributes: if db_obj[key] != value: raise exceptions.ChangingImmutableAttributeError() @classmethod def _check_required_attributes(cls, obj): for attr in cls.required_attributes: if isinstance(attr, tuple): set_wanted = set(attr[1]) set_contained = set(obj.keys()) if attr[0] == 'one_of': if len(set_wanted & set_contained) < 1: raise exceptions.ValidationError() elif attr[0] == 'either_of': if len(set_wanted & set_contained) != 1: raise exceptions.ValidationError() else: raise exceptions.ConfigurationError() else: if attr not in obj.keys(): raise exceptions.ValidationError() @classmethod def _check_allowed_attributes(cls, obj): if cls.optional_attributes is not None: required = cls._required_attributes() for attr in obj.keys(): if attr in required: continue allowed = False for opt_attr in cls.optional_attributes: if attr == opt_attr: allowed = True break elif isinstance(opt_attr, tuple): if opt_attr[0] == 'all_of': if attr in opt_attr[1]: # if one of these is in obj.keys()... if not set(opt_attr[1]).issubset(obj.keys()): # ...all of them have to be there raise exceptions.ValidationError() else: allowed = True break elif opt_attr[0] == 'either_of': if attr in opt_attr[1]: # if one of these is in obj.keys()... 
if next((key for key in opt_attr[1] if key != attr and key in obj.keys()), None): # ...no other key may be present in obj.keys() raise exceptions.ValidationError() else: allowed = True break else: raise exceptions.ConfigurationError() if not allowed: # if we haven't found attr anywhere in cls.optional_attributes raise exceptions.ValidationError() @classmethod def _required_attributes(cls): required = set() for attr in cls.required_attributes: if isinstance(attr, tuple): required = required | set(attr[1]) else: required.add(attr) return required
mit
7,771,158,724,342,860,000
33.213333
161
0.524069
false
4.80881
false
false
false
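To make the hook methods above concrete, here is an invented subclass and a short validation round trip; the attribute names are illustrative only and not taken from the project.

from nosql_rest_preprocessor.models import BaseModel

class UserModel(BaseModel):
    required_attributes = {'name', 'email'}
    immutable_attributes = {'email'}
    private_attributes = {'password_hash'}

obj = {'name': 'Ada', 'email': 'ada@example.com', 'password_hash': 'x'}
UserModel.validate(obj)                    # raises ValidationError if 'name' or 'email' is missing
public = UserModel.prepare_response(obj)   # copy of obj without 'password_hash'
merged = UserModel.merge_updated(obj, {'name': 'Ada L.', 'email': 'ada@example.com'})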
math-a3k/django-ai
tests/test_models/migrations/0011_add_is_inferred_and_minor_tweaks.py
1
2196
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-20 15:34
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('test_models', '0010_myunsupervisedlearningtechnique'),
    ]

    operations = [
        migrations.AddField(
            model_name='mystatisticalmodel',
            name='is_inferred',
            field=models.BooleanField(
                default=False, verbose_name='Is Inferred?'),
        ),
        migrations.AddField(
            model_name='mysupervisedlearningtechnique',
            name='is_inferred',
            field=models.BooleanField(
                default=False, verbose_name='Is Inferred?'),
        ),
        migrations.AddField(
            model_name='myunsupervisedlearningtechnique',
            name='is_inferred',
            field=models.BooleanField(
                default=False, verbose_name='Is Inferred?'),
        ),
        migrations.AlterField(
            model_name='mystatisticalmodel',
            name='sm_type',
            field=models.SmallIntegerField(blank=True, choices=[
                (0, 'General / System'), (1, 'Classification'),
                (2, 'Regression')], default=0, null=True,
                verbose_name='Statistical Technique Type'),
        ),
        migrations.AlterField(
            model_name='mysupervisedlearningtechnique',
            name='sm_type',
            field=models.SmallIntegerField(blank=True, choices=[
                (0, 'General / System'), (1, 'Classification'),
                (2, 'Regression')], default=0, null=True,
                verbose_name='Statistical Technique Type'),
        ),
        migrations.AlterField(
            model_name='myunsupervisedlearningtechnique',
            name='sm_type',
            field=models.SmallIntegerField(blank=True, choices=[
                (0, 'General / System'), (1, 'Classification'),
                (2, 'Regression')], default=0, null=True,
                verbose_name='Statistical Technique Type'),
        ),
    ]
lgpl-3.0
-7,274,611,177,119,370
33.857143
64
0.536885
false
4.565489
false
false
false
Makeystreet/makeystreet
woot/apps/catalog/views/review.py
1
5983
from django.conf import settings from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect, Http404 from django.shortcuts import render from django.utils import timezone from woot.apps.catalog.forms import CreateProductReviewForm,\ CreateShopReviewForm, CreateSpaceReviewForm from woot.apps.catalog.models.core import Product, Shop, Space, NewProduct from woot.apps.catalog.models.review import ProductReview, ShopReview,\ SpaceReview from .helper import get_user_details_json static_blob = settings.STATIC_BLOB def all_reviews(request): product_reviews = ProductReview.objects.all() shop_reviews = ShopReview.objects.all() space_reviews = SpaceReview.objects.all() context = { 'static_blob': static_blob, 'user_details': get_user_details_json(request), 'product_reviews': product_reviews, 'shop_reviews': shop_reviews, 'space_reviews': space_reviews, } return render(request, 'catalog/all_reviews.html', context) def store_review(request, review_id): try: user_details = get_user_details_json(request) review = ShopReview.objects.get(id=review_id) review.upvotes = review.voteshopreview_set.filter(vote=True) context = { 'static_blob': static_blob, 'user_details': user_details, 'review': review, } return render(request, 'catalog/store_review.html', context) except ShopReview.DoesNotExist: raise Http404 def product_review(request, review_id): try: user_details = get_user_details_json(request) review = ProductReview.objects.get(id=review_id) review.upvotes = review.voteproductreview_set.filter(vote=True) context = { 'static_blob': static_blob, 'user_details': user_details, 'review': review, } return render(request, 'catalog/product_review.html', context) except ProductReview.DoesNotExist: raise Http404 def space_review(request, review_id): try: user_details = get_user_details_json(request) review = SpaceReview.objects.get(id=review_id) review.upvotes = review.votespacereview_set.filter(vote=True) context = { 'static_blob': static_blob, 'user_details': user_details, 'review': review, } return render(request, 'catalog/space_review.html', context) except SpaceReview.DoesNotExist: raise Http404 def create_review(request): if request.method == "POST": if request.POST.get('val_type', '') == 'PART': form = CreateProductReviewForm(request.POST) if form.is_valid(): r = ProductReview() r.title = form.cleaned_data['val_title'] r.review = form.cleaned_data['val_review'] r.user = request.user r.rating = form.cleaned_data['val_rating'] r.added_time = timezone.now() product_data_split = form.cleaned_data['val_part'].split('_') product_type = product_data_split[0] product_id = int(product_data_split[1]) if product_type == 'old': product = Product.objects.get(id=product_id) r.product = product elif product_type == 'new': product = NewProduct.objects.get(id=product_id) r.product = product r.save() return HttpResponseRedirect(reverse('catalog:all_reviews')) else: print(form.errors) elif request.POST.get('val_type', '') == 'SHOP': form = CreateShopReviewForm(request.POST) if form.is_valid(): r = ShopReview() r.title = form.cleaned_data['val_title'] r.review = form.cleaned_data['val_review'] r.user = request.user r.rating = form.cleaned_data['val_rating'] r.added_time = timezone.now() shop_data_split = form.cleaned_data['val_shop'].split('_') shop_type = shop_data_split[0] shop_id = int(shop_data_split[1]) if shop_type == 'old': shop = Shop.objects.get(id=shop_id) r.shop = shop elif shop_type == 'new': shop = NewProduct.objects.get(id=shop_id) r.shop = shop r.save() return 
HttpResponseRedirect(reverse('catalog:all_reviews')) else: print(form.errors) elif request.POST.get('val_type', '') == 'SPACE': form = CreateSpaceReviewForm(request.POST) if form.is_valid(): r = SpaceReview() r.title = form.cleaned_data['val_title'] r.review = form.cleaned_data['val_review'] r.user = request.user r.rating = form.cleaned_data['val_rating'] r.added_time = timezone.now() space_data_split = form.cleaned_data['val_space'].split('_') space_type = space_data_split[0] space_id = int(space_data_split[1]) if space_type == 'old': space = Space.objects.get(id=space_id) r.space = space elif space_type == 'new': space = NewProduct.objects.get(id=space_id) r.space = space r.save() return HttpResponseRedirect(reverse('catalog:all_reviews')) else: print(form.errors) else: pass context = { 'static_blob': static_blob, 'user_details': get_user_details_json(request), } return render(request, 'catalog/create_product_review.html', context)
apache-2.0
661,690,713,215,756,200
33.188571
77
0.563095
false
4.131906
false
false
false
semplea/characters-meta
python/alchemy/examples/alchemy_vision_v1.py
1
1466
import json
from os.path import join, dirname
from watson_developer_cloud import AlchemyVisionV1

alchemy_vision = AlchemyVisionV1(api_key='c851400276c1acbd020210847f8677e6d1577c26')

# Face recognition
with open(join(dirname(__file__), '../resources/face.jpg'), 'rb') as image_file:
    print(json.dumps(alchemy_vision.recognize_faces(image_file, knowledge_graph=True), indent=2))

face_url = 'https://upload.wikimedia.org/wikipedia/commons/9/9d/Barack_Obama.jpg'
print(json.dumps(alchemy_vision.recognize_faces(image_url=face_url, knowledge_graph=True), indent=2))

# Image tagging
with open(join(dirname(__file__), '../resources/test.jpg'), 'rb') as image_file:
    print(json.dumps(alchemy_vision.get_image_keywords(image_file, knowledge_graph=True, force_show_all=True),
                     indent=2))

# Text recognition
with open(join(dirname(__file__), '../resources/text.png'), 'rb') as image_file:
    print(json.dumps(alchemy_vision.get_image_scene_text(image_file), indent=2))

print(json.dumps(alchemy_vision.get_image_keywords(
    image_url='https://upload.wikimedia.org/wikipedia/commons/8/81/Morris-Chair-Ironwood.jpg'), indent=2))

# Image link extraction
print(json.dumps(alchemy_vision.get_image_links(url='http://www.zillow.com/'), indent=2))

with open(join(dirname(__file__), '../resources/example.html'), 'r') as webpage:
    print(json.dumps(alchemy_vision.get_image_links(html=webpage.read()), indent=2))
mit
1,583,828,267,179,628,300
47.866667
106
0.71487
false
3.073375
false
true
false
mmclenna/engine
sky/tools/create_ios_sdk.py
1
1820
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import subprocess
import shutil
import sys
import os


def main():
  parser = argparse.ArgumentParser(description='Creates the Flutter iOS SDK')
  parser.add_argument('--dst', type=str, required=True)
  parser.add_argument('--device-out-dir', type=str, required=True)
  parser.add_argument('--simulator-out-dir', type=str, required=True)

  args = parser.parse_args()

  device_sdk = os.path.join(args.device_out_dir, 'Flutter')
  simulator_sdk = os.path.join(args.simulator_out_dir, 'Flutter')
  flutter_framework_binary = 'Flutter.framework/Flutter'
  device_dylib = os.path.join(args.device_out_dir,
      flutter_framework_binary)
  simulator_dylib = os.path.join(args.simulator_out_dir,
      flutter_framework_binary)

  if not os.path.isdir(device_sdk):
    print 'Cannot find iOS device SDK at', device_sdk
    return 1

  if not os.path.isdir(simulator_sdk):
    print 'Cannot find iOS simulator SDK at', simulator_sdk
    return 1

  if not os.path.isfile(device_dylib):
    print 'Cannot find iOS device dylib at', device_dylib
    return 1

  if not os.path.isfile(simulator_dylib):
    print 'Cannot find iOS device dylib at', simulator_dylib
    return 1

  shutil.rmtree(args.dst, True)
  shutil.copytree(device_sdk, args.dst)

  sim_tools = 'Tools/iphonesimulator'
  shutil.copytree(os.path.join(simulator_sdk, sim_tools),
      os.path.join(args.dst, sim_tools))

  subprocess.call([
    'lipo',
    device_dylib,
    simulator_dylib,
    '-create',
    '-output',
    os.path.join(args.dst, 'Tools/common/Flutter.framework/Flutter')
  ])


if __name__ == '__main__':
  sys.exit(main())
bsd-3-clause
4,522,966,620,628,158,000
26.575758
77
0.697802
false
3.427495
false
false
false
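For reference, a sketch of driving the script above from a build step. The output-directory names are hypothetical; they simply need to contain the Flutter SDK layouts the script checks for before it lipo-merges the device and simulator dylibs.

import subprocess

subprocess.check_call([
    'python', 'sky/tools/create_ios_sdk.py',
    '--dst', 'out/FlutterSDK',                     # hypothetical destination
    '--device-out-dir', 'out/ios_Release',         # hypothetical device build output
    '--simulator-out-dir', 'out/ios_sim_Release',  # hypothetical simulator build output
])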
calinerd/AWS
LAMBDA/Lambda_AutoUpdate_SecurityGroup_to_Allow_inbound_All_CloudFront_IPs_443.py
1
6268
''' Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import boto3 import hashlib import json import urllib2 # Name of the service, as seen in the ip-groups.json file, to extract information for SERVICE = "CLOUDFRONT" # Ports your application uses that need inbound permissions from the service for INGRESS_PORTS = [ 443 ] # Tags which identify the security groups you want to update SECURITY_GROUP_TAGS = { 'Name': 'SG_Allow_CF_IPs_443', 'AutoUpdate': 'true' } def lambda_handler(event, context): print("Received event: " + json.dumps(event, indent=2)) message = json.loads(event['Records'][0]['Sns']['Message']) # Load the ip ranges from the url ip_ranges = json.loads(get_ip_groups_json(message['url'], message['md5'])) # extract the service ranges cf_ranges = get_ranges_for_service(ip_ranges, SERVICE) # update the security groups result = update_security_groups(cf_ranges) return result def get_ip_groups_json(url, expected_hash): print("Updating from " + url) response = urllib2.urlopen(url) ip_json = response.read() m = hashlib.md5() m.update(ip_json) hash = m.hexdigest() if hash != expected_hash: raise Exception('MD5 Mismatch: got ' + hash + ' expected ' + expected_hash) return ip_json def get_ranges_for_service(ranges, service): service_ranges = list() for prefix in ranges['prefixes']: if prefix['service'] == service: print('Found ' + service + ' range: ' + prefix['ip_prefix']) service_ranges.append(prefix['ip_prefix']) return service_ranges def update_security_groups(new_ranges): client = boto3.client('ec2') groups = get_security_groups_for_update(client) print ('Found ' + str(len(groups)) + ' SecurityGroups to update') result = list() updated = 0 for group in groups: if update_security_group(client, group, new_ranges): updated += 1 result.append('Updated ' + group['GroupId']) result.append('Updated ' + str(updated) + ' of ' + str(len(groups)) + ' SecurityGroups') return result def update_security_group(client, group, new_ranges): added = 0 removed = 0 if len(group['IpPermissions']) > 0: for permission in group['IpPermissions']: if INGRESS_PORTS.count(permission['ToPort']) > 0: old_prefixes = list() to_revoke = list() to_add = list() for range in permission['IpRanges']: cidr = range['CidrIp'] old_prefixes.append(cidr) if new_ranges.count(cidr) == 0: to_revoke.append(range) print(group['GroupId'] + ": Revoking " + cidr + ":" + str(permission['ToPort'])) for range in new_ranges: if old_prefixes.count(range) == 0: to_add.append({ 'CidrIp': range }) print(group['GroupId'] + ": Adding " + range + ":" + str(permission['ToPort'])) removed += revoke_permissions(client, group, permission, to_revoke) added += add_permissions(client, group, permission, to_add) else: for port in INGRESS_PORTS: to_add = list() for range in new_ranges: to_add.append({ 'CidrIp': range }) print(group['GroupId'] + ": Adding " + range + ":" + str(port)) permission = { 'ToPort': port, 'FromPort': port, 'IpProtocol': 'tcp'} added += add_permissions(client, group, permission, to_add) print (group['GroupId'] + ": Added " + str(added) + ", Revoked " + 
str(removed)) return (added > 0 or removed > 0) def revoke_permissions(client, group, permission, to_revoke): if len(to_revoke) > 0: revoke_params = { 'ToPort': permission['ToPort'], 'FromPort': permission['FromPort'], 'IpRanges': to_revoke, 'IpProtocol': permission['IpProtocol'] } client.revoke_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[revoke_params]) return len(to_revoke) def add_permissions(client, group, permission, to_add): if len(to_add) > 0: add_params = { 'ToPort': permission['ToPort'], 'FromPort': permission['FromPort'], 'IpRanges': to_add, 'IpProtocol': permission['IpProtocol'] } client.authorize_security_group_ingress(GroupId=group['GroupId'], IpPermissions=[add_params]) return len(to_add) def get_security_groups_for_update(client): filters = list(); for key, value in SECURITY_GROUP_TAGS.iteritems(): filters.extend( [ { 'Name': "tag-key", 'Values': [ key ] }, { 'Name': "tag-value", 'Values': [ value ] } ] ) response = client.describe_security_groups(Filters=filters) return response['SecurityGroups'] ''' Sample Event From SNS: { "Records": [ { "EventVersion": "1.0", "EventSubscriptionArn": "arn:aws:sns:EXAMPLE", "EventSource": "aws:sns", "Sns": { "SignatureVersion": "1", "Timestamp": "1970-01-01T00:00:00.000Z", "Signature": "EXAMPLE", "SigningCertUrl": "EXAMPLE", "MessageId": "95df01b4-ee98-5cb9-9903-4c221d41eb5e", "Message": "{\"create-time\": \"yyyy-mm-ddThh:mm:ss+00:00\", \"synctoken\": \"0123456789\", \"md5\": \"03a8199d0c03ddfec0e542f8bf650ee7\", \"url\": \"https://ip-ranges.amazonaws.com/ip-ranges.json\"}", "Type": "Notification", "UnsubscribeUrl": "EXAMPLE", "TopicArn": "arn:aws:sns:EXAMPLE", "Subject": "TestInvoke" } } ] } '''
unlicense
4,925,142,042,631,168,000
34.619318
266
0.596522
false
3.755542
false
false
false
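A local smoke test of the handler above could look roughly like the following. It reuses the shape of the sample SNS event embedded at the end of the file, assumes AWS credentials plus the tagged security groups exist, and the md5 value must match the currently published ip-ranges.json or get_ip_groups_json() will raise; the module name is an assumption derived from the file name.

import json
import importlib

fn = importlib.import_module(
    'Lambda_AutoUpdate_SecurityGroup_to_Allow_inbound_All_CloudFront_IPs_443')  # module name assumed

event = {'Records': [{'Sns': {'Message': json.dumps({
    'url': 'https://ip-ranges.amazonaws.com/ip-ranges.json',
    'md5': '03a8199d0c03ddfec0e542f8bf650ee7',   # placeholder from the sample event; use the real value
})}}]}
print(fn.lambda_handler(event, None))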
Xdynix/PixivPixie
bundle_cli.py
1
2691
import os import subprocess import sys from pixiv_pixie.cli import main as cli_main, NAME BINARY_PATH = 'lib' DATA_PATH = 'data' def is_packaged(): # Return true if executing from packaged file return hasattr(sys, 'frozen') def get_path(path, package_prefix=DATA_PATH): if os.path.isabs(path) or not is_packaged(): return path else: return os.path.join( sys.prefix, os.path.join(package_prefix, path) ) def build( script, name=None, one_file=False, no_console=False, icon=None, binary_path=BINARY_PATH, addition_binary=None, data_path=DATA_PATH, addition_data=None, hidden_import=None, distpath=None, workpath=None, specpath=None, addition_args=None, ): args = [] if name is not None: args.extend(('-n', name)) if one_file: args.append('-F') if no_console: args.append('-w') if icon is not None: args.extend(('-i', icon)) if addition_args is None: addition_args = [] def add_resource(add_type, path, resources): for resource in resources: args.append('--add-{}'.format(add_type)) if isinstance(resource, tuple) or isinstance(resource, list): src = resource[0] dest = resource[1] args.append(src + os.path.pathsep + os.path.join(path, dest)) else: args.append( resource + os.path.pathsep + os.path.join(path, resource), ) if addition_binary is not None: add_resource( add_type='binary', path=binary_path, resources=addition_binary, ) if addition_data is not None: add_resource( add_type='data', path=data_path, resources=addition_data, ) if hidden_import is not None: for m in hidden_import: args.extend(('--hidden-import', m)) if distpath is not None: args.extend(('--distpath', distpath)) if workpath is not None: args.extend(('--workpath', workpath)) if specpath is not None: args.extend(('--specpath', specpath)) subprocess.call(['pyinstaller'] + args + addition_args + [script]) def main(): if not is_packaged(): build( __file__, name=NAME, one_file=True, addition_binary=[ ('freeimage-3.15.1-win64.dll', '') ], addition_args=[ '-y', '--clean', ], ) else: cli_main() if __name__ == '__main__': main()
apache-2.0
-7,565,490,886,109,477,000
24.628571
78
0.531401
false
3.860832
false
false
false
tortugueta/multilayers
examples/radcenter_distribution.py
1
8087
# -*- coding: utf-8 -*- """ Name : radcenter_distribution Author : Joan Juvert <[email protected]> Version : 1.0 Description : This script calculates the influence of the distribution of : radiative centers in the active layer on the observed : spectrum. Copyright 2012 Joan Juvert This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import multilayers as ml import numpy as np import bphysics as bp import scipy.integrate as integ import argparse as ap import sys import pdb # Argument parsing parser = ap.ArgumentParser( description = "This script calculates the effect of the " + \ "distribution of radiative centers in the active layer on " + \ "the modificator to the spectrum. The observation angle is " + \ "a fixed parameter. Optionally, the output can be plotted " + \ "and output to the standard output or to a file. The matrix " + \ "containing the values of F(z, lambda) can be saved to a file " + \ "and recovered in a following run of the program to avoid " + \ "recalculating it in case we want to calculate the effect of " + \ "different distributions on the same system.") parser.add_argument( "--graph", help = "Plot the results", action = "store_true") parser.add_argument( "-o", "--output", help = "Dump the results to a file") parser.add_argument( "-s", "--savematrix", help = "Save the matrix with the F(z, lambda) values to a file") parser.add_argument( "-l", "--loadmatrix", help = "Load the matrix with the F(z, lambda) values from a file") args = parser.parse_args() # Load the depth distribution of radiative centers. Note that the origin # and units of z must be the same as in the multilayer.The distribution # should be normalized to 1. print("Loading the distribution...") path = "/home/joan/Dropbox/CNM/projectes/simulations_report/figures/" + \ "rcdistributions/" distribution = bp.rdfile(path + "gaussian_m25_s07.dat", usecols = [0, 1])[1] print("Done") print("Checking the distribution...") integral = integ.simps(distribution[:, 1], distribution[:, 0], 0) np.testing.assert_almost_equal(integral, 1, 2) print("Done") # If we load the values of F(z, lambda) calculated in a previous # execution we do not need to build the multilayer and repeat the # calculation of the F function. Notice that the values of z at which # the new distribution is sampled should be the same as the previous # one. if args.loadmatrix: print("Loading matrix...") fmatrix = np.load(args.loadmatrix) zlist = fmatrix['zlist'] np.testing.assert_array_equal(zlist, distribution[:, 0]) wlist = fmatrix['wlist'] angle = fmatrix['angle'] fte = fmatrix['fte'] ftm = fmatrix['ftm'] print("Done") else: # Create the materials print("Loading materials... ") silicon = ml.Medium("silicon.dat") air = ml.Medium("air.dat") sio2 = ml.Medium("sio2.dat") poly = ml.Medium("polysilicon.dat") print("Done") # Set the fixed parameters. angle = np.deg2rad(0) # Create the multilayer print("Building multilayer and allocating memory... 
") thicknesses = [300, 50] multilayer = ml.Multilayer([ air, [poly, thicknesses[0]], [sio2, thicknesses[1]], silicon]) # Define the wavelengths and z coordinates at which F will be calculated # and allocate memory for the results. We will use a structured array to # store the values of F(z, lambda). wstep = 1 wmin = multilayer.getMinMaxWlength()[0] wmax = multilayer.getMinMaxWlength()[1] wlist = np.arange(wmin, wmax, wstep) zlist = distribution[:, 0] ftype = np.dtype([ ('fx', np.complex128), ('fy', np.complex128), ('fz', np.complex128)]) resmatrix = np.empty((zlist.size, wlist.size), dtype = ftype) print("Done") # I(wavelength, theta) = s(wavelength) * F'(wavelength, theta), where # F'(wav, theta) = integral[z](|F|^2 * rcdist(z). Therefore, we # calculate the new spectrum as a modification to the original spectrum. # The modification factor F'(wav, theta) is an integral over z. # First calculate |Fy|^2 for te and |Fx*cos^2 + Fz*sin^2|^2 for tm. We # do fx and fz in one loop and fy in another independent loop to avoid # recalculating the characteristic matrix at every iteration due to the # change of polarization. print("Calculating F...") for (widx, wlength) in enumerate(wlist): percent = (float(widx) / wlist.size) * 100 print("%.2f%%" % percent) for (zidx, z) in enumerate(zlist): resmatrix[zidx][widx]['fx'] = multilayer.calculateFx(z, wlength, angle) resmatrix[zidx][widx]['fz'] = multilayer.calculateFz(z, wlength, angle) for (zidx, z) in enumerate(zlist): resmatrix[zidx][widx]['fy'] = multilayer.calculateFy(z, wlength, angle) # We are probably more interesed on the effect of the multilayer on the # energy rather than the electric field. What we want is |Fy(z)|^2 for # TE waves and |Fx(z) cosA^2 + Fz(z) sinA^2|^2 for TM waves. ftm = np.absolute( resmatrix['fx'] * np.cos(angle) ** 2 + \ resmatrix['fz'] * np.sin(angle) ** 2) ** 2 fte = np.absolute(resmatrix['fy']) ** 2 print("Done") # Notice that until now we have not used the distribution of the # radiative ceneters, but the calculation of ftm and fte is costly. # If requested, we can save fte and ftm to a file. In a following # execution of the script, the matrix can be loaded from the file # instead of recalculated. if args.savematrix: print("Saving matrix...") np.savez(args.savematrix, fte = fte, ftm = ftm, zlist = zlist, wlist = wlist, angle = angle) print("Done") # Build or load the original spectrum. It should be sampled at the same # wavelengths defined in wlist. If we are interested only in the # modificator to the spectrum, not in the modified spectrum, we can # leave it at 1. original_spec = 1 # Multiply each F(z, lambda) by the distribution. 
print("Integrating...") distval = distribution[:, 1].reshape(distribution[:, 1].size, 1) fte_mplied = fte * distval ftm_mplied = ftm * distval fte_int = integ.simps(fte_mplied, zlist, axis = 0) ftm_int = integ.simps(ftm_mplied, zlist, axis = 0) spectrum_modte = original_spec * fte_int spectrum_modtm = original_spec * ftm_int print("Done") # Dump data to file or stdout comments = "# F_TE = |Fy^2|^2\n" + \ "# F_TM = |Fx * cosA^2 + Fz * sinA^2|^2\n" + \ "# Modified spectrum for TE and TM waves for a\n" + \ "# distributions of the radiative centers.\n" + \ "# wlength\tF_TE\tF_TM" if args.output: bp.wdfile(args.output, comments, np.array([wlist, spectrum_modte, spectrum_modtm]).T, '%.6e') else: print(comments) for i in xrange(wlist.size): print("%.6e\t%.6e\t%.6e" % (wlist[i], spectrum_modte[i], spectrum_modtm[i])) # Plot data if requested if args.graph: import matplotlib.pyplot as plt plt.plot(wlist, spectrum_modte, label='TE', color = 'r') plt.plot(wlist, spectrum_modtm, label='TM', color = 'b') plt.xlabel('Wavelength (nm)') plt.ylabel('Energy ratio') plt.grid() plt.legend(loc=2) plt.title('%.1f rad' % angle) plt.show() plt.close()
gpl-3.0
-2,529,649,230,264,011,300
36.967136
83
0.649808
false
3.451558
false
false
false
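Typical invocations of the script above, sketched as subprocess calls; the file names are placeholders. The first run caches F(z, lambda) via --savematrix, and later runs for other radiative-center distributions can reload it with --loadmatrix instead of recomputing.

import subprocess

subprocess.check_call(['python', 'radcenter_distribution.py',
                       '--savematrix', 'fmatrix.npz',
                       '--output', 'spectrum_gaussian.dat'])
subprocess.check_call(['python', 'radcenter_distribution.py',
                       '--loadmatrix', 'fmatrix.npz', '--graph'])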
linaro-technologies/jobserv
jobserv/storage/local_storage.py
1
3989
# Copyright (C) 2017 Linaro Limited # Author: Andy Doan <[email protected]> import hmac import os import mimetypes import shutil from flask import Blueprint, request, send_file, url_for from jobserv.jsend import get_or_404 from jobserv.models import Build, Project, Run from jobserv.settings import INTERNAL_API_KEY, LOCAL_ARTIFACTS_DIR from jobserv.storage.base import BaseStorage blueprint = Blueprint('local_storage', __name__, url_prefix='/local-storage') class Storage(BaseStorage): blueprint = blueprint def __init__(self): super().__init__() self.artifacts = LOCAL_ARTIFACTS_DIR def _get_local(self, storage_path): assert storage_path[0] != '/' path = os.path.join(self.artifacts, storage_path) dirname = os.path.dirname(path) if not os.path.exists(dirname): os.makedirs(dirname) return path def _create_from_string(self, storage_path, contents): path = self._get_local(storage_path) with open(path, 'w') as f: f.write(contents) def _create_from_file(self, storage_path, filename, content_type): path = self._get_local(storage_path) with open(filename, 'rb') as fin, open(path, 'wb') as fout: shutil.copyfileobj(fin, fout) def _get_as_string(self, storage_path): assert storage_path[0] != '/' path = os.path.join(self.artifacts, storage_path) with open(path, 'r') as f: return f.read() def list_artifacts(self, run): path = '%s/%s/%s/' % ( run.build.project.name, run.build.build_id, run.name) path = os.path.join(self.artifacts, path) for base, _, names in os.walk(path): for name in names: if name != '.rundef.json': yield os.path.join(base, name)[len(path):] def get_download_response(self, request, run, path): try: p = os.path.join(self.artifacts, self._get_run_path(run), path) mt = mimetypes.guess_type(p)[0] return send_file(open(p, 'rb'), mimetype=mt) except FileNotFoundError: return 'File not found', 404 def _generate_put_url(self, run, path, expiration, content_type): p = os.path.join(self.artifacts, self._get_run_path(run), path) msg = '%s,%s,%s' % ('PUT', p, content_type) sig = hmac.new(INTERNAL_API_KEY, msg.encode(), 'sha1').hexdigest() return url_for( 'local_storage.run_upload_artifact', sig=sig, proj=run.build.project.name, build_id=run.build.build_id, run=run.name, path=path, _external=True) def _get_run(proj, build_id, run): p = get_or_404(Project.query.filter_by(name=proj)) b = get_or_404(Build.query.filter_by(project=p, build_id=build_id)) return Run.query.filter_by( name=run ).filter( Run.build.has(Build.id == b.id) ).first_or_404() @blueprint.route('/<sig>/<proj>/builds/<int:build_id>/runs/<run>/<path:path>', methods=('PUT',)) def run_upload_artifact(sig, proj, build_id, run, path): run = _get_run(proj, build_id, run) # validate the signature ls = Storage() p = os.path.join(ls.artifacts, ls._get_run_path(run), path) msg = '%s,%s,%s' % (request.method, p, request.headers.get('Content-Type')) computed = hmac.new(INTERNAL_API_KEY, msg.encode(), 'sha1').hexdigest() if not hmac.compare_digest(sig, computed): return 'Invalid signature', 401 dirname = os.path.dirname(p) try: # we could have 2 uploads trying this, so just do it this way to avoid # race conditions os.makedirs(dirname) except FileExistsError: pass # stream the contents to disk with open(p, 'wb') as f: chunk_size = 4096 while True: chunk = request.stream.read(chunk_size) if len(chunk) == 0: break f.write(chunk) return 'ok'
agpl-3.0
-3,963,570,515,246,286,300
33.094017
79
0.603159
false
3.426976
false
false
false
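The upload endpoint above authenticates purely through an HMAC over the request method, the server-side artifact path and the content type. A client-side sketch of producing a matching signature follows; the key and path are placeholders, and INTERNAL_API_KEY must be the same bytes the server loads from its settings.

import hmac

INTERNAL_API_KEY = b'secret'                          # assumed shared secret
local_path = '/srv/artifacts/proj/1/run/console.log'  # placeholder for the path the server derives
content_type = 'text/plain'

msg = '%s,%s,%s' % ('PUT', local_path, content_type)
sig = hmac.new(INTERNAL_API_KEY, msg.encode(), 'sha1').hexdigest()
# A PUT to /local-storage/<sig>/proj/builds/1/runs/run/console.log with that
# Content-Type then passes the hmac.compare_digest() check in run_upload_artifact.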
jadref/buffer_bci
python/echoClient/eventForwarder.py
1
2911
#!/usr/bin/env python3
bufferpath = "../../python/signalProc"
fieldtripPath="../../dataAcq/buffer/python"

import os, sys, random, math, time, socket, struct
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),bufferpath))
import bufhelp
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),fieldtripPath))
import FieldTrip

# Configuration of buffer
buffer1_hostname='localhost'
buffer1_port=1972

# Configuration of forwarding buffer
buffer2_hostname=None
buffer2_port=None

# holder for the buffer2 connection
ftc2=None
# flag to stop running when used from another function
running=True

def connectBuffers(buffer1_hostname,buffer1_port,buffer2_hostname,buffer2_port):
    if buffer1_hostname==buffer2_hostname and buffer1_port==buffer2_port :
        print("WARNING:: forwarding to the same port may result in infinite loops!!!!")

    # Connect to Buffer2 -- do this first so the global state is for ftc1
    print("Connecting to " + buffer2_hostname + ":" + str(buffer2_port))
    (ftc2,hdr2) = bufhelp.connect(buffer2_hostname,buffer2_port)
    print("Connected");
    print(hdr2)

    # Connect to Buffer1
    print("Connecting to " + buffer1_hostname + ":" + str(buffer1_port))
    (ftc1,hdr1) = bufhelp.connect(buffer1_hostname,buffer1_port)
    print("Connected!");
    print(hdr1)
    return (ftc1,ftc2)

# Receive events from the buffer1 and send them to buffer2
def forwardBufferEvents(ftc1,ftc2):
    global running
    global ftc
    ftc=ftc1
    while ( running ):
        events = bufhelp.buffer_newevents()

        for evt in events:
            print(str(evt.sample) + ": " + str(evt))
            evt.sample=-1
            ftc2.putEvents(evt)

def guiGetBuffer2():
    print("GUI info not supported yet!!")
    return;
    import tkinter as tk
    master = tk.Tk()
    tk.Label(master, text="HostName").grid(row=0)
    tk.Label(master, text="Port").grid(row=1)

    e1 = tk.Entry(master)
    e2 = tk.Entry(master)

    e1.grid(row=0, column=1)
    e2.grid(row=1, column=1)

    master.mainloop()

if __name__ == "__main__":
    if len(sys.argv)>1: # called with options, i.e. commandline
        buffer2_hostname = sys.argv[1]
        if len(sys.argv)>2:
            try:
                buffer2_port = int(sys.argv[2])
            except:
                print('Error: second argument (%s) must be a valid (=integer) port number'%sys.argv[2])
                sys.exit(1)

    if buffer2_hostname is None :
        (buffer2_hostname,buffer2_port)=guiGetBuffer2()

    (ftc1,ftc2)=connectBuffers(buffer1_hostname,buffer1_port,buffer2_hostname,buffer2_port)

    forwardBufferEvents(ftc1,ftc2)
gpl-3.0
-8,554,221,728,104,355,000
34.938272
103
0.605634
false
3.602723
false
false
false
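A sketch of reusing the forwarder above from another script rather than the command line; host names and ports are placeholders, and the module is assumed to be importable as eventForwarder from its directory.

import eventForwarder as fwd   # module name assumed

ftc1, ftc2 = fwd.connectBuffers('localhost', 1972, 'remotehost', 1972)
fwd.forwardBufferEvents(ftc1, ftc2)   # blocks; set fwd.running = False from elsewhere to stop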
ozgurakgun/minion
mini-scripts/testallconstraints.py
1
3983
#!/usr/bin/python
# Generate two minion input files, run them then compare dumptree outputs to
# detect bugs in constraint propagators.
import sys, os, getopt
from constraint_test_common import *
from multiprocessing import Pool, Manager
import random
#from sendemail import *
import time

(optargs, other)=getopt.gnu_getopt(sys.argv, "", ["minion=", "numtests=", "email", "fullprop", "64bit", "procs=", "seed=", "conslist="])

if len(other)>1:
    print("Usage: testallconstraints.py [--minion=<location of minion binary>] [--numtests=...] [--email] [--procs=...] [--seed=...] [--conslist=...]")
    sys.exit(1)

# This one tests all the constraints in the following list.
conslist=[]

телом
# equality constraints
conslist+=["diseq", "eq", "gaceq"]

# alldiffs
conslist+=["alldiff", "gacalldiff", "alldiffmatrix"]

# capacity constraints
conslist+=["gcc", "gccweak", "occurrence", "occurrenceleq", "occurrencegeq"]

#element constraints
conslist+=["element", "element_undefzero", "watchelement", "watchelement_undefzero"]
conslist+=["watchelement_one", "element_one"]

# arithmetic constraints
conslist+=["modulo", "modulo_undefzero", "pow", "minuseq", "product", "div", "div_undefzero", "abs"]

conslist+=["watchsumleq", "watchsumgeq", "watchvecneq", "hamming", "not-hamming"]
conslist+=["weightedsumleq", "weightedsumgeq"]
conslist+=["litsumgeq"]
# should test table to test reifytable? and reifyimplytable

conslist+=["sumgeq", "sumleq", "weightedsumleq", "weightedsumgeq"]
conslist+=["ineq"]
conslist+=["difference"]
conslist+=["negativetable", "lighttable"]

# symmetry-breaking constraints
conslist+=["lexleq", "lexless", "lexleq_rv", "lexleq_quick", "lexless_quick"]
conslist+=["max", "min"]
conslist+=["watchneq", "watchless"]
conslist+=["w-inset", "w-inintervalset", "w-notinset", "w-inrange", "w-notinrange", "w-literal", "w-notliteral"]
conslist+=["watchsumgeq", "litsumgeq", "watchneq", "watchless", "not-hamming"]
conslist+=["not-hamming"]
conslist+=["gacschema", "haggisgac", "haggisgac-stable", "str2plus", "shortstr2", "shortctuplestr2", "mddc"]
conslist+=["nvalueleq", "nvaluegeq"]

# add reifyimply variant of all constraints,
# and reify variant of all except those in reifyexceptions
it=conslist[:]
for c in it:
    conslist+=["reifyimply"+c]
    conslist+=["reify"+c]

numtests=100
minionbin="bin/minion"
email=False
fullprop=False   # compare the constraint against itself with fullprop. Needs DEBUG=1.
bit64=False
procs=1
seed=12345

for i in optargs:
    (a1, a2)=i
    if a1=="--minion":
        minionbin=a2
    elif a1=="--numtests":
        numtests=int(a2)
    elif a1=="--email":
        email=True
    elif a1=="--fullprop":
        fullprop=True
    elif a1=="--64bit":
        bit64=True
    elif a1=="--procs":
        procs=int(a2)
    elif a1=="--seed":
        seed=int(a2)
    elif a1=="--conslist":
        conslist=a2.split(",")

def runtest(consname):
    cachename = consname
    starttime=time.time()
    sys.stdout.flush()
    random.seed(seed)
    reify=False
    reifyimply=False
    if consname[0:10]=="reifyimply":
        reifyimply=True
        consname=consname[10:]
    if consname[0:5]=="reify":
        reify=True
        consname=consname[5:]
    consname=consname.replace("-", "__minus__")
    testobj=eval("test"+consname+"()")
    testobj.solver=minionbin
    for testnum in range(numtests):
        options = {'reify': reify, 'reifyimply': reifyimply, 'fullprop': fullprop, 'printcmd': False, 'fixlength':False, 'getsatisfyingassignment':True}
        if not testobj.runtest(options):
            print("Failed when testing %s"%cachename)
            sys.stdout.flush()
            return False
    print("Completed testing %s, duration: %d"%(cachename, time.time()-starttime))
    return True

if __name__ == '__main__':
    p = Pool(procs)
    retval = p.map(runtest, conslist)
    if all(retval):
        print("Success")
        exit(0)
    else:
        print("Failure")
        exit(1)
gpl-2.0
-5,649,680,209,550,508,000
27.654676
152
0.651017
false
3.061491
true
false
false
rabramley/telomere
app/model/batch.py
1
2972
from app import db
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.sql import select, func
from app.model.outstandingError import OutstandingError
import numpy
import decimal

class Batch(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    robot = db.Column(db.String(20))
    temperature = db.Column(db.Numeric(precision=3, scale=1))
    datetime = db.Column(db.DateTime())
    userId = db.Column(db.Integer, db.ForeignKey('user.id'))
    version_id = db.Column(db.Integer, nullable=False)
    plateName = db.Column(db.String(50))
    halfPlate = db.Column(db.String(1))
    humidity = db.Column(db.Integer())
    primerBatch = db.Column(db.Integer())
    enzymeBatch = db.Column(db.Integer())
    rotorGene = db.Column(db.Integer())
    operatorUserId = db.Column(db.Integer, db.ForeignKey('user.id'))
    batchFailureReason = db.Column(db.Integer())
    processType = db.Column(db.String(20))

    __mapper_args__ = {
        "version_id_col": version_id
    }

    def __init__(self, *args, **kwargs):
        self.id = kwargs.get('id')
        self.robot = kwargs.get('robot')
        self.temperature = kwargs.get('temperature')
        self.datetime = kwargs.get('datetime')
        self.userId = kwargs.get('userId')
        self.plateName = kwargs.get('plateName')
        self.halfPlate = kwargs.get('halfPlate')
        self.humidity = kwargs.get('humidity')
        self.primerBatch = kwargs.get('primerBatch')
        self.enzymeBatch = kwargs.get('enzymeBatch')
        self.rotorGene = kwargs.get('rotorGene')
        self.operatorUserId = kwargs.get('operatorUserId')
        self.batchFailureReason = kwargs.get('batchFailureReason')
        self.processType = kwargs.get('processType')

    @hybrid_property
    def outstandingErrorCount(self):
        return len(self.outstandingErrors)

    @outstandingErrorCount.expression
    def outstandingErrorCount(cls):
        return (select([func.count(OutstandingError.id)]).
                where(OutstandingError.batchId == cls.id).
                label("outstandingErrorCount")
                )

    def get_measurements_for_sample_code(self, sampleCode):
        return [m for m in self.measurements if m.sample.sampleCode == sampleCode]

    def has_no_pool_samples(self):
        return not any(m.sample.is_pool_sample() for m in self.measurements)

    def has_no_non_pool_samples(self):
        return not any(not m.sample.is_pool_sample() for m in self.measurements)

    def has_invalid_pool_ts_average(self):
        poolTsValues = [ decimal.Decimal(m.ts) for m in self.measurements if m.ts is not None and m.sample.is_pool_sample()]
        averagePoolTs = numpy.mean(poolTsValues)
        return averagePoolTs < 0.99 or averagePoolTs > 1.01

    def is_duplicate(self):
        return self.processType == "Duplicate"

    def is_replate(self):
        return self.processType == "Re-Plate"

    def is_initial(self):
        return self.processType == "Initial"
mit
7,113,725,852,751,646,000
37.102564
124
0.664536
false
3.580723
false
false
false
DarioGT/OMS-PluginXML
org.modelsphere.sms/lib/jython-2.2.1/Lib/uu.py
1
6092
#! /usr/bin/env python

# Copyright 1994 by Lance Ellinghouse
# Cathedral City, California Republic, United States of America.
#                        All Rights Reserved
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Lance Ellinghouse
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# LANCE ELLINGHOUSE DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS, IN NO EVENT SHALL LANCE ELLINGHOUSE CENTRUM BE LIABLE
# FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# Modified by Jack Jansen, CWI, July 1995:
# - Use binascii module to do the actual line-by-line conversion
#   between ascii and binary. This results in a 1000-fold speedup. The C
#   version is still 5 times faster, though.
# - Arguments more compliant with python standard

"""Implementation of the UUencode and UUdecode functions.

encode(in_file, out_file [,name, mode])
decode(in_file [, out_file, mode])
"""

import binascii
import os
import sys
from types import StringType

__all__ = ["Error", "encode", "decode"]

class Error(Exception):
    pass

def encode(in_file, out_file, name=None, mode=None):
    """Uuencode file"""
    #
    # If in_file is a pathname open it and change defaults
    #
    if in_file == '-':
        in_file = sys.stdin
    elif isinstance(in_file, StringType):
        if name is None:
            name = os.path.basename(in_file)
        if mode is None:
            try:
                mode = os.stat(in_file)[0]
            except AttributeError:
                pass
        in_file = open(in_file, 'rb')
    #
    # Open out_file if it is a pathname
    #
    if out_file == '-':
        out_file = sys.stdout
    elif isinstance(out_file, StringType):
        out_file = open(out_file, 'w')
    #
    # Set defaults for name and mode
    #
    if name is None:
        name = '-'
    if mode is None:
        mode = 0666
    #
    # Write the data
    #
    out_file.write('begin %o %s\n' % ((mode&0777),name))
    str = in_file.read(45)
    while len(str) > 0:
        out_file.write(binascii.b2a_uu(str))
        str = in_file.read(45)
    out_file.write(' \nend\n')

def decode(in_file, out_file=None, mode=None, quiet=0):
    """Decode uuencoded file"""
    #
    # Open the input file, if needed.
    #
    if in_file == '-':
        in_file = sys.stdin
    elif isinstance(in_file, StringType):
        in_file = open(in_file)
    #
    # Read until a begin is encountered or we've exhausted the file
    #
    while 1:
        hdr = in_file.readline()
        if not hdr:
            raise Error, 'No valid begin line found in input file'
        if hdr[:5] != 'begin':
            continue
        hdrfields = hdr.split(" ", 2)
        if len(hdrfields) == 3 and hdrfields[0] == 'begin':
            try:
                int(hdrfields[1], 8)
                break
            except ValueError:
                pass
    if out_file is None:
        out_file = hdrfields[2].rstrip()
        if os.path.exists(out_file):
            raise Error, 'Cannot overwrite existing file: %s' % out_file
    if mode is None:
        mode = int(hdrfields[1], 8)
    #
    # Open the output file
    #
    opened = False
    if out_file == '-':
        out_file = sys.stdout
    elif isinstance(out_file, StringType):
        fp = open(out_file, 'wb')
        try:
            os.path.chmod(out_file, mode)
        except AttributeError:
            pass
        out_file = fp
        opened = True
    #
    # Main decoding loop
    #
    s = in_file.readline()
    while s and s.strip() != 'end':
        try:
            data = binascii.a2b_uu(s)
        except binascii.Error, v:
            # Workaround for broken uuencoders by /Fredrik Lundh
            nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
            data = binascii.a2b_uu(s[:nbytes])
            if not quiet:
                sys.stderr.write("Warning: %s\n" % str(v))
        out_file.write(data)
        s = in_file.readline()
    if not s:
        raise Error, 'Truncated input file'
    if opened:
        out_file.close()

def test():
    """uuencode/uudecode main program"""
    import getopt

    dopt = 0
    topt = 0
    input = sys.stdin
    output = sys.stdout
    ok = 1
    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'dt')
    except getopt.error:
        ok = 0
    if not ok or len(args) > 2:
        print 'Usage:', sys.argv[0], '[-d] [-t] [input [output]]'
        print ' -d: Decode (in stead of encode)'
        print ' -t: data is text, encoded format unix-compatible text'
        sys.exit(1)

    for o, a in optlist:
        if o == '-d': dopt = 1
        if o == '-t': topt = 1

    if len(args) > 0:
        input = args[0]
    if len(args) > 1:
        output = args[1]

    if dopt:
        if topt:
            if isinstance(output, StringType):
                output = open(output, 'w')
            else:
                print sys.argv[0], ': cannot do -t to stdout'
                sys.exit(1)
        decode(input, output)
    else:
        if topt:
            if isinstance(input, StringType):
                input = open(input, 'r')
            else:
                print sys.argv[0], ': cannot do -t from stdin'
                sys.exit(1)
        encode(input, output)

if __name__ == '__main__':
    test()
gpl-3.0
8,905,765,275,347,266,000
29.241026
72
0.559094
false
3.732843
false
false
false
jhogg41/gm-o-matic
gom_server/gom_server/urls.py
1
1187
"""gom_server URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.8/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Add an import: from blog import urls as blog_urls 2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls)) """ from django.conf.urls import include, url from django.contrib import admin from rest_framework import routers import core.router import char_attr.router router = routers.DefaultRouter() core.router.addRoutes(router) char_attr.router.addRoutes(router) urlpatterns = [ url(r'^admin/', include(admin.site.urls)), url(r'^api-auth/', include('rest_framework.urls', namespace='rest-framework')), url(r'^', include(router.urls)), url(r'^rest-auth/', include('rest_auth.urls')), url(r'^rest-auth/registration', include('rest_auth.registration.urls')), ]
bsd-2-clause
-3,051,245,291,478,614,500
36.09375
83
0.708509
false
3.420749
false
false
false
kevin-coder/tensorflow-fork
tensorflow/python/keras/layers/normalization_test.py
1
22900
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for normalization layers.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python import keras from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util as tf_test_util from tensorflow.python.keras import keras_parameterized from tensorflow.python.keras import testing_utils from tensorflow.python.keras.layers import normalization from tensorflow.python.keras.layers import normalization_v2 from tensorflow.python.keras.mixed_precision.experimental import policy from tensorflow.python.platform import test from tensorflow.python.training import gradient_descent class BatchNormalizationTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_basic_batchnorm(self): testing_utils.layer_test( keras.layers.BatchNormalization, kwargs={ 'momentum': 0.9, 'epsilon': 0.1, 'gamma_regularizer': keras.regularizers.l2(0.01), 'beta_regularizer': keras.regularizers.l2(0.01) }, input_shape=(3, 4, 2)) testing_utils.layer_test( keras.layers.BatchNormalization, kwargs={ 'gamma_initializer': 'ones', 'beta_initializer': 'ones', 'moving_mean_initializer': 'zeros', 'moving_variance_initializer': 'ones' }, input_shape=(3, 4, 2)) testing_utils.layer_test( keras.layers.BatchNormalization, kwargs={'scale': False, 'center': False}, input_shape=(3, 3)) @tf_test_util.run_in_graph_and_eager_modes def test_batchnorm_weights(self): layer = keras.layers.BatchNormalization(scale=False, center=False) layer.build((None, 3, 4)) self.assertEqual(len(layer.trainable_weights), 0) self.assertEqual(len(layer.weights), 2) layer = keras.layers.BatchNormalization() layer.build((None, 3, 4)) self.assertEqual(len(layer.trainable_weights), 2) self.assertEqual(len(layer.weights), 4) @tf_test_util.run_in_graph_and_eager_modes def test_batchnorm_regularization(self): layer = keras.layers.BatchNormalization( gamma_regularizer='l1', beta_regularizer='l1') layer.build((None, 3, 4)) self.assertEqual(len(layer.losses), 2) max_norm = keras.constraints.max_norm layer = keras.layers.BatchNormalization( gamma_constraint=max_norm, beta_constraint=max_norm) layer.build((None, 3, 4)) self.assertEqual(layer.gamma.constraint, max_norm) self.assertEqual(layer.beta.constraint, max_norm) @keras_parameterized.run_all_keras_modes def test_batchnorm_convnet(self): if test.is_gpu_available(cuda_only=True): with self.session(use_gpu=True): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( axis=1, input_shape=(3, 4, 4), momentum=0.8) model.add(norm) model.compile(loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, 
size=(1000, 3, 4, 4)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1)) np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1) np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1) @keras_parameterized.run_all_keras_modes def test_batchnorm_convnet_channel_last(self): model = keras.models.Sequential() norm = keras.layers.BatchNormalization( axis=-1, input_shape=(4, 4, 3), momentum=0.8) model.add(norm) model.compile(loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3)) np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1) np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1) @keras_parameterized.run_all_keras_modes def test_batchnorm_correctness(self): _run_batchnorm_correctness_test( normalization.BatchNormalization, dtype='float32') _run_batchnorm_correctness_test( normalization_v2.BatchNormalization, dtype='float32') @keras_parameterized.run_all_keras_modes def test_batchnorm_mixed_precision(self): _run_batchnorm_correctness_test( normalization.BatchNormalization, dtype='float16') _run_batchnorm_correctness_test( normalization_v2.BatchNormalization, dtype='float16') @tf_test_util.run_in_graph_and_eager_modes def test_batchnorm_policy(self): norm = keras.layers.BatchNormalization( axis=-1, input_shape=(4, 4, 3), momentum=0.8, dtype=policy.Policy('infer_float32_vars')) x = np.random.normal(size=(10, 4, 4, 3)).astype('float16') y = norm(x) self.assertEqual(y.dtype, 'float16') self.assertEqual(norm.beta.dtype.base_dtype, 'float32') self.assertEqual(norm.gamma.dtype.base_dtype, 'float32') class BatchNormalizationV1Test(test.TestCase): @tf_test_util.run_in_graph_and_eager_modes def test_v1_fused_attribute(self): norm = normalization.BatchNormalization() inp = keras.layers.Input((4, 4, 4)) norm(inp) self.assertEqual(norm.fused, True) norm = normalization.BatchNormalization(fused=False) self.assertEqual(norm.fused, False) inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, False) norm = normalization.BatchNormalization(virtual_batch_size=2) self.assertEqual(norm.fused, True) inp = keras.layers.Input(shape=(2, 2, 2)) norm(inp) self.assertEqual(norm.fused, False) class BatchNormalizationV2Test(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_basic_batchnorm_v2(self): testing_utils.layer_test( normalization_v2.BatchNormalization, kwargs={'fused': True}, input_shape=(3, 3, 3, 3)) testing_utils.layer_test( normalization_v2.BatchNormalization, kwargs={'fused': None}, input_shape=(3, 3, 3)) @tf_test_util.run_in_graph_and_eager_modes def test_v2_fused_attribute(self): norm = normalization_v2.BatchNormalization() self.assertEqual(norm.fused, None) inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, True) norm = normalization_v2.BatchNormalization() self.assertEqual(norm.fused, None) inp = keras.layers.Input(shape=(4, 4)) norm(inp) self.assertEqual(norm.fused, False) norm = normalization_v2.BatchNormalization(virtual_batch_size=2) self.assertEqual(norm.fused, False) 
inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, False) norm = normalization_v2.BatchNormalization(fused=False) self.assertEqual(norm.fused, False) inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, False) norm = normalization_v2.BatchNormalization(fused=True, axis=[3]) self.assertEqual(norm.fused, True) inp = keras.layers.Input(shape=(4, 4, 4)) norm(inp) self.assertEqual(norm.fused, True) with self.assertRaisesRegexp(ValueError, 'fused.*renorm'): normalization_v2.BatchNormalization(fused=True, renorm=True) with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'): normalization_v2.BatchNormalization(fused=True, axis=2) with self.assertRaisesRegexp(ValueError, 'fused.*when axis is 1 or 3'): normalization_v2.BatchNormalization(fused=True, axis=[1, 3]) with self.assertRaisesRegexp(ValueError, 'fused.*virtual_batch_size'): normalization_v2.BatchNormalization(fused=True, virtual_batch_size=2) with self.assertRaisesRegexp(ValueError, 'fused.*adjustment'): normalization_v2.BatchNormalization(fused=True, adjustment=lambda _: (1, 0)) norm = normalization_v2.BatchNormalization(fused=True) self.assertEqual(norm.fused, True) inp = keras.layers.Input(shape=(4, 4)) with self.assertRaisesRegexp(ValueError, '4D input tensors'): norm(inp) def _run_batchnorm_correctness_test(layer, dtype='float32', fused=False): model = keras.models.Sequential() model.add(keras.Input(shape=(2, 2, 2), dtype=dtype)) norm = layer(momentum=0.8, fused=fused) model.add(norm) if dtype == 'float16': # Keras models require float32 losses. model.add(keras.layers.Lambda(lambda x: keras.backend.cast(x, 'float32'))) model.compile(loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) # centered on 5.0, variance 10.0 x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2)) .astype(dtype)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= keras.backend.eval(norm.beta) out /= keras.backend.eval(norm.gamma) np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1) np.testing.assert_allclose(out.std(), 1.0, atol=1e-1) @parameterized.parameters( [normalization.BatchNormalization, normalization_v2.BatchNormalization]) class NormalizationLayersGraphModeOnlyTest( test.TestCase, parameterized.TestCase): def test_shared_batchnorm(self, layer): """Test that a BN layer can be shared across different data streams.""" with self.cached_session(): # Test single layer reuse bn = layer() x1 = keras.layers.Input(shape=(10,)) _ = bn(x1) x2 = keras.layers.Input(shape=(10,)) y2 = bn(x2) x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10)) model = keras.models.Model(x2, y2) model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') model.train_on_batch(x, x) self.assertEqual(len(bn.updates), 4) self.assertEqual(len(model.updates), 2) self.assertEqual(len(model.get_updates_for(x2)), 2) # Test model-level reuse x3 = keras.layers.Input(shape=(10,)) y3 = model(x3) new_model = keras.models.Model(x3, y3, name='new_model') self.assertEqual(len(new_model.updates), 2) self.assertEqual(len(model.updates), 4) self.assertEqual(len(new_model.get_updates_for(x3)), 2) new_model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') new_model.train_on_batch(x, x) def test_that_trainable_disables_updates(self, layer): with self.cached_session(): val_a = np.random.random((10, 4)) val_out = np.random.random((10, 4)) a = keras.layers.Input(shape=(4,)) layer = layer(input_shape=(4,)) b = layer(a) 
model = keras.models.Model(a, b) model.trainable = False assert not model.updates model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') assert not model.updates x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) model.trainable = True model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') assert model.updates model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) assert np.abs(np.sum(x1 - x2)) > 1e-5 layer.trainable = False model.compile(gradient_descent.GradientDescentOptimizer(0.01), 'mse') assert not model.updates x1 = model.predict(val_a) model.train_on_batch(val_a, val_out) x2 = model.predict(val_a) self.assertAllClose(x1, x2, atol=1e-7) @tf_test_util.run_deprecated_v1 def test_batchnorm_trainable(self, layer): """Tests that batchnorm layer is trainable when learning phase is enabled. Computes mean and std for current inputs then applies batch normalization using them. Args: layer: Either V1 or V2 of BatchNormalization layer. """ # TODO(fchollet): enable in all execution modes when issue with # learning phase setting is resolved. with self.cached_session(): bn_mean = 0.5 bn_std = 10. val_a = np.expand_dims(np.arange(10.), axis=1) def get_model(bn_mean, bn_std): inp = keras.layers.Input(shape=(1,)) x = layer()(inp) model1 = keras.models.Model(inp, x) model1.set_weights([ np.array([1.]), np.array([0.]), np.array([bn_mean]), np.array([bn_std**2]) ]) return model1 # Simulates training-mode with trainable layer. # Should use mini-batch statistics. with keras.backend.learning_phase_scope(1): model = get_model(bn_mean, bn_std) model.compile(loss='mse', optimizer='rmsprop') out = model.predict(val_a) self.assertAllClose( (val_a - np.mean(val_a)) / np.std(val_a), out, atol=1e-3) def _run_layernorm_correctness_test(layer, dtype='float32'): model = keras.models.Sequential() norm = layer(input_shape=(2, 2, 2)) model.add(norm) model.compile(loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) # centered on 5.0, variance 10.0 x = (np.random.normal(loc=5.0, scale=10.0, size=(1000, 2, 2, 2)) .astype(dtype)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= keras.backend.eval(norm.beta) out /= keras.backend.eval(norm.gamma) np.testing.assert_allclose(out.mean(), 0.0, atol=1e-1) np.testing.assert_allclose(out.std(), 1.0, atol=1e-1) class LayerNormalizationTest(keras_parameterized.TestCase): @keras_parameterized.run_all_keras_modes def test_basic_layernorm(self): testing_utils.layer_test( keras.layers.LayerNormalization, kwargs={ 'gamma_regularizer': keras.regularizers.l2(0.01), 'beta_regularizer': keras.regularizers.l2(0.01) }, input_shape=(3, 4, 2)) testing_utils.layer_test( keras.layers.LayerNormalization, kwargs={ 'gamma_initializer': 'ones', 'beta_initializer': 'ones', }, input_shape=(3, 4, 2)) testing_utils.layer_test( keras.layers.LayerNormalization, kwargs={'scale': False, 'center': False}, input_shape=(3, 3)) @tf_test_util.run_in_graph_and_eager_modes def test_layernorm_weights(self): layer = keras.layers.LayerNormalization(scale=False, center=False) layer.build((None, 3, 4)) self.assertEqual(len(layer.trainable_weights), 0) self.assertEqual(len(layer.weights), 0) layer = keras.layers.LayerNormalization() layer.build((None, 3, 4)) self.assertEqual(len(layer.trainable_weights), 2) self.assertEqual(len(layer.weights), 2) @tf_test_util.run_in_graph_and_eager_modes def 
test_layernorm_regularization(self): layer = keras.layers.LayerNormalization( gamma_regularizer='l1', beta_regularizer='l1') layer.build((None, 3, 4)) self.assertEqual(len(layer.losses), 2) max_norm = keras.constraints.max_norm layer = keras.layers.LayerNormalization( gamma_constraint=max_norm, beta_constraint=max_norm) layer.build((None, 3, 4)) self.assertEqual(layer.gamma.constraint, max_norm) self.assertEqual(layer.beta.constraint, max_norm) @keras_parameterized.run_all_keras_modes def test_layernorm_convnet(self): if test.is_gpu_available(cuda_only=True): with self.session(use_gpu=True): model = keras.models.Sequential() norm = keras.layers.LayerNormalization( input_shape=(3, 4, 4), params_axis=1) model.add(norm) model.compile(loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 3, 4, 4)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 3, 1, 1)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 3, 1, 1)) np.testing.assert_allclose(np.mean(out, axis=(0, 2, 3)), 0.0, atol=1e-1) np.testing.assert_allclose(np.std(out, axis=(0, 2, 3)), 1.0, atol=1e-1) @keras_parameterized.run_all_keras_modes def test_layernorm_convnet_channel_last(self): model = keras.models.Sequential() norm = keras.layers.LayerNormalization(input_shape=(4, 4, 3)) model.add(norm) model.compile(loss='mse', optimizer=gradient_descent.GradientDescentOptimizer(0.01), run_eagerly=testing_utils.should_run_eagerly()) # centered on 5.0, variance 10.0 x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 4, 4, 3)) model.fit(x, x, epochs=4, verbose=0) out = model.predict(x) out -= np.reshape(keras.backend.eval(norm.beta), (1, 1, 1, 3)) out /= np.reshape(keras.backend.eval(norm.gamma), (1, 1, 1, 3)) np.testing.assert_allclose(np.mean(out, axis=(0, 1, 2)), 0.0, atol=1e-1) np.testing.assert_allclose(np.std(out, axis=(0, 1, 2)), 1.0, atol=1e-1) @keras_parameterized.run_all_keras_modes def test_layernorm_correctness(self): _run_layernorm_correctness_test( normalization.LayerNormalization, dtype='float32') @keras_parameterized.run_all_keras_modes def test_layernorm_mixed_precision(self): _run_layernorm_correctness_test( normalization.LayerNormalization, dtype='float16') def doOutputTest(self, input_shape, tol=1e-5, norm_axis=None, params_axis=-1, dtype=None): ndim = len(input_shape) if norm_axis is None: moments_axis = range(1, ndim) elif isinstance(norm_axis, int): if norm_axis < 0: moments_axis = [norm_axis + ndim] else: moments_axis = [norm_axis] else: moments_axis = [] for dim in norm_axis: if dim < 0: dim = dim + ndim moments_axis.append(dim) moments_axis = tuple(moments_axis) expected_shape = [] for i in range(ndim): if i not in moments_axis: expected_shape.append(input_shape[i]) expected_mean = np.zeros(expected_shape) expected_var = np.ones(expected_shape) for mu in [0.0, 1e2]: for sigma in [1.0, 0.1]: inputs = np.random.randn(*input_shape) * sigma + mu inputs_t = constant_op.constant(inputs, shape=input_shape) layer = normalization.LayerNormalization( norm_axis=norm_axis, params_axis=params_axis, dtype=dtype) outputs = layer(inputs_t) beta = layer.beta gamma = layer.gamma for weight in layer.weights: self.evaluate(weight.initializer) outputs = self.evaluate(outputs) beta = self.evaluate(beta) gamma = self.evaluate(gamma) # The mean and variance of the output should be close to 0 and 1 # respectively. 
# Make sure that there are no NaNs self.assertFalse(np.isnan(outputs).any()) mean = np.mean(outputs, axis=moments_axis) var = np.var(outputs, axis=moments_axis) # Layer-norm implemented in numpy eps = 1e-12 expected_out = ( (gamma * (inputs - np.mean( inputs, axis=moments_axis, keepdims=True)) / np.sqrt(eps + np.var( inputs, axis=moments_axis, keepdims=True))) + beta) self.assertAllClose(expected_mean, mean, atol=tol, rtol=tol) self.assertAllClose(expected_var, var, atol=tol) # The full computation gets a bigger tolerance self.assertAllClose(expected_out, outputs, atol=5 * tol) @tf_test_util.run_in_graph_and_eager_modes def testOutput2DInput(self): self.doOutputTest((10, 300)) self.doOutputTest((10, 300), norm_axis=[0]) self.doOutputTest((10, 300), params_axis=[0, 1]) @tf_test_util.run_in_graph_and_eager_modes def testOutput2DInputDegenerateNormAxis(self): with self.assertRaisesRegexp(ValueError, r'Invalid axis: 2'): self.doOutputTest((10, 300), norm_axis=2) @tf_test_util.run_in_graph_and_eager_modes def testOutput4DInput(self): self.doOutputTest((100, 10, 10, 3)) @tf_test_util.run_in_graph_and_eager_modes def testOutput4DInputNormOnInnermostAxis(self): # Equivalent tests shape = (100, 10, 10, 3) self.doOutputTest( shape, norm_axis=list(range(3, len(shape))), tol=1e-4, dtype='float64') self.doOutputTest(shape, norm_axis=-1, tol=1e-4, dtype='float64') @tf_test_util.run_in_graph_and_eager_modes def testOutputSmallInput(self): self.doOutputTest((10, 10, 10, 30)) @tf_test_util.run_in_graph_and_eager_modes def testOutputSmallInputNormOnInnermostAxis(self): self.doOutputTest((10, 10, 10, 30), norm_axis=3) @tf_test_util.run_in_graph_and_eager_modes def testOutputSmallInputNormOnMixedAxes(self): self.doOutputTest((10, 10, 10, 30), norm_axis=[0, 3]) self.doOutputTest((10, 10, 10, 30), params_axis=[-2, -1]) self.doOutputTest((10, 10, 10, 30), norm_axis=[0, 3], params_axis=[-3, -2, -1]) @tf_test_util.run_in_graph_and_eager_modes def testOutputBigInput(self): self.doOutputTest((1, 100, 100, 1)) self.doOutputTest((1, 100, 100, 1), norm_axis=[1, 2]) self.doOutputTest((1, 100, 100, 1), norm_axis=[1, 2], params_axis=[-2, -1]) if __name__ == '__main__': test.main()
apache-2.0
2,995,893,033,428,413,000
36.115073
80
0.650524
false
3.394101
true
false
false
meisamhe/GPLshared
Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/s2.py
1
1744
import time
import threading


# @include
class SpellCheckService:
    w_last = closest_to_last_word = None
    lock = threading.Lock()

    @staticmethod
    def service(req, resp):
        w = req.extract_word_to_check_from_request()
        result = None
        with SpellCheckService.lock:
            if w == SpellCheckService.w_last:
                result = SpellCheckService.closest_to_last_word.copy()
        if result is None:
            result = closest_in_dictionary(w)
            with SpellCheckService.lock:
                SpellCheckService.w_last = w
                SpellCheckService.closest_to_last_word = result
        resp.encode_into_response(result)
# @exclude


class ServiceRequest:
    def __init__(self, s):
        self.request = s

    def extract_word_to_check_from_request(self):
        return self.request


class ServiceResponse:
    response = None

    def encode_into_response(self, s):
        self.response = s


def closest_in_dictionary(w):
    time.sleep(0.2)
    return [w + '_result']


class ServiceThread(threading.Thread):
    def __init__(self, data):
        super().__init__()
        self.data = data

    def run(self):
        start_time = time.time()
        req = ServiceRequest(self.data)
        resp = ServiceResponse()
        SpellCheckService.service(req, resp)
        print(self.data, '->', resp.response, '(%.3f sec)' % (time.time() - start_time))


def main():
    i = 0
    while True:
        ServiceThread('req:%d' % (i + 1)).start()
        if i > 0:
            # while req:i+1 is computed we could return req:i from the cache
            ServiceThread('req:%d' % i).start()
        time.sleep(0.5)
        i += 1


if __name__ == '__main__':
    main()
gpl-3.0
-6,428,526,257,127,913,000
22.567568
76
0.575115
false
3.62578
false
false
false
gypsymauro/gestione-cantiere
build/lib.linux-x86_64-2.7/cantiere/admin.py
1
1533
from django.contrib import admin

# Register your models here.
from .models import Squadra
from .models import StatoSegnalazione
from .models import Segnalazione
from .models import StatoIntervento
from .models import Intervento
from .models import Risorsa
from .models import InterventoRisorsa
from .models import Costo
from .models import CentroCosto
from .models import Allegato

class InterventoRisorsaInline(admin.TabularInline):
    model = InterventoRisorsa
    exclude = ['created','created_by','modified','modified_by','deleted','note']

class RisorsaAdmin(admin.ModelAdmin):
    inlines = (InterventoRisorsaInline,)
    exclude = ['created','created_by','modified','modified_by','deleted']

class InterventoAdmin(admin.ModelAdmin):
    inlines = (InterventoRisorsaInline,)
    list_display = ['oggetto','data_inizio','stato','stampa_intervento']
    list_editable = ['stato']
    ordering = ['created']
    exclude = ['created','created_by','modified','modified_by','deleted']
    list_filter = ('stato','data_inizio','centro_costo','responsabile')
    save_on_top = True
    search_fields = ('oggetto','data_inizio')

admin.site.register(Squadra)
admin.site.register(StatoSegnalazione)
admin.site.register(Segnalazione)
admin.site.register(StatoIntervento)
admin.site.register(Intervento,InterventoAdmin)
admin.site.register(Risorsa,RisorsaAdmin)
admin.site.register(Costo)
admin.site.register(CentroCosto)
admin.site.register(Allegato)
#admin.site.register(InterventoMezzo)
#admin.site.register(InterventoPersona)
gpl-2.0
-2,517,305,136,308,949,000
32.326087
80
0.763862
false
3.200418
false
false
false
SembeiNorimaki/Bioinformatics
EulerianCycle.py
1
1903
# Test passed :)
# TODO: split right here before the conditional.

import sys

def handle_input_output():
    # handle input
    graph = {}
    while True:
        try:
            line = sys.stdin.readline().rstrip('\n')
            left, right = line.split(' -> ')
            if left in graph.keys():
                graph[left].append(right)
            else:
                graph[left] = right.split(',')
        except:
            break  # EOF
    #print(graph)

    # Execute main function
    r = EulerianCycle(graph)

    # handle output
    print('->'.join(r))

def EulerianCycle(graph):
    stack = []
    location = None
    circuit = []

    # since it's an Eulerian Cycle we can start at any vertex
    location = list(graph)[0]

    # Repeat until the current vertex has no more out-going edges (neighbors)
    # and the stack is empty.
    while len(graph[location]) > 0 or len(stack) > 0:
        if len(graph[location]) == 0:  # If current vertex has no out-going edges
            circuit.append(location)   # add it to circuit
            location = stack.pop()     # remove the last vertex from the stack and set it as the current one
        else:                          # otherwise
            stack.append(location)     # add the vertex to the stack
            location = graph[location].pop()  # take any of its neighbors
                                              # remove the edge between that vertex and selected neighbor
                                              # and set that neighbor as the current vertex

    # Here we must append the first element at the end to close the cycle
    # but since circuit is reversed, we append the last element at the beginning
    circuit.insert(0, circuit[-1])

    return circuit[::-1]  # return the reversed circuit

if __name__ == '__main__':
    handle_input_output()
mit
-4,300,464,103,968,979,000
33
115
0.553337
false
4.541766
false
false
false
kittiu/account-payment
account_payment_return/models/payment_return.py
1
15028
# Copyright 2011-2012 7 i TRIA <http://www.7itria.cat> # Copyright 2011-2012 Avanzosc <http://www.avanzosc.com> # Copyright 2013 Pedro M. Baeza <[email protected]> # Copyright 2014 Markus Schneider <[email protected]> # Copyright 2016 Carlos Dauden <[email protected]> # Copyright 2017 Luis M. Ontalba <[email protected]> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). from odoo import _, api, fields, models from odoo.exceptions import Warning as UserError import odoo.addons.decimal_precision as dp class PaymentReturn(models.Model): _name = "payment.return" _inherit = ['mail.thread'] _description = 'Payment return' _order = 'date DESC, id DESC' company_id = fields.Many2one( 'res.company', string='Company', required=True, states={'done': [('readonly', True)], 'cancelled': [('readonly', True)]}, default=lambda self: self.env['res.company']._company_default_get( 'account')) date = fields.Date( string='Return date', help="This date will be used as the account entry date.", states={'done': [('readonly', True)], 'cancelled': [('readonly', True)]}, default=lambda x: fields.Date.today()) name = fields.Char( string="Reference", required=True, states={'done': [('readonly', True)], 'cancelled': [('readonly', True)]}, default=lambda self: self.env['ir.sequence'].next_by_code( 'payment.return')) line_ids = fields.One2many( comodel_name='payment.return.line', inverse_name='return_id', states={'done': [('readonly', True)], 'cancelled': [('readonly', True)]}) journal_id = fields.Many2one( comodel_name='account.journal', string='Bank journal', required=True, states={'done': [('readonly', True)], 'cancelled': [('readonly', True)]}) move_id = fields.Many2one( comodel_name='account.move', string='Reference to the created journal entry', states={'done': [('readonly', True)], 'cancelled': [('readonly', True)]}) state = fields.Selection( selection=[('draft', 'Draft'), ('imported', 'Imported'), ('done', 'Done'), ('cancelled', 'Cancelled')], string='State', readonly=True, default='draft', track_visibility='onchange') @api.multi @api.constrains('line_ids') def _check_duplicate_move_line(self): def append_error(error_line): error_list.append( _("Payment Line: %s (%s) in Payment Return: %s") % ( ', '.join(error_line.mapped('move_line_ids.name')), error_line.partner_id.name, error_line.return_id.name ) ) error_list = [] all_move_lines = self.env['account.move.line'] for line in self.mapped('line_ids'): for move_line in line.move_line_ids: if move_line in all_move_lines: append_error(line) all_move_lines |= move_line if (not error_list) and all_move_lines: duplicate_lines = self.env['payment.return.line'].search([ ('move_line_ids', 'in', all_move_lines.ids), ('return_id.state', '=', 'done'), ]) if duplicate_lines: for line in duplicate_lines: append_error(line) if error_list: raise UserError( _("Payment reference must be unique" "\n%s") % '\n'.join(error_list) ) def _get_move_amount(self, return_line): return return_line.amount def _prepare_invoice_returned_vals(self): return {'returned_payment': True} @api.multi def unlink(self): if self.filtered(lambda x: x.state == 'done'): raise UserError(_( "You can not remove a payment return if state is 'Done'")) return super(PaymentReturn, self).unlink() @api.multi def button_match(self): self.mapped('line_ids').filtered(lambda x: ( (not x.move_line_ids) and x.reference))._find_match() self._check_duplicate_move_line() @api.multi def _prepare_return_move_vals(self): """Prepare the values for the journal entry created from the return. 
:return: Dictionary with the record values. """ self.ensure_one() return { 'name': '/', 'ref': _('Return %s') % self.name, 'journal_id': self.journal_id.id, 'date': self.date, 'company_id': self.company_id.id, } @api.multi def action_confirm(self): self.ensure_one() # Check for incomplete lines if self.line_ids.filtered(lambda x: not x.move_line_ids): raise UserError( _("You must input all moves references in the payment " "return.")) invoices = self.env['account.invoice'] move_line_obj = self.env['account.move.line'] move = self.env['account.move'].create( self._prepare_return_move_vals() ) total_amount = 0.0 for return_line in self.line_ids: move_amount = self._get_move_amount(return_line) move_line2 = self.env['account.move.line'].with_context( check_move_validity=False).create({ 'name': move.ref, 'debit': move_amount, 'credit': 0.0, 'account_id': return_line.move_line_ids[0].account_id.id, 'move_id': move.id, 'partner_id': return_line.partner_id.id, 'journal_id': move.journal_id.id, }) total_amount += move_amount for move_line in return_line.move_line_ids: returned_moves = move_line.matched_debit_ids.mapped( 'debit_move_id') invoices |= returned_moves.mapped('invoice_id') move_line.remove_move_reconcile() (move_line | move_line2).reconcile() return_line.move_line_ids.mapped('matched_debit_ids').write( {'origin_returned_move_ids': [(6, 0, returned_moves.ids)]}) if return_line.expense_amount: expense_lines_vals = [] expense_lines_vals.append({ 'name': move.ref, 'move_id': move.id, 'debit': 0.0, 'credit': return_line.expense_amount, 'partner_id': return_line.expense_partner_id.id, 'account_id': (return_line.return_id.journal_id. default_credit_account_id.id), }) expense_lines_vals.append({ 'move_id': move.id, 'debit': return_line.expense_amount, 'name': move.ref, 'credit': 0.0, 'partner_id': return_line.expense_partner_id.id, 'account_id': return_line.expense_account.id, }) for expense_line_vals in expense_lines_vals: move_line_obj.with_context( check_move_validity=False).create(expense_line_vals) extra_lines_vals = return_line._prepare_extra_move_lines(move) for extra_line_vals in extra_lines_vals: move_line_obj.create(extra_line_vals) move_line_obj.create({ 'name': move.ref, 'debit': 0.0, 'credit': total_amount, 'account_id': self.journal_id.default_credit_account_id.id, 'move_id': move.id, 'journal_id': move.journal_id.id, }) # Write directly because we returned payments just now invoices.write(self._prepare_invoice_returned_vals()) move.post() self.write({'state': 'done', 'move_id': move.id}) return True @api.multi def action_cancel(self): invoices = self.env['account.invoice'] for move_line in self.mapped('move_id.line_ids').filtered( lambda x: x.user_type_id.type == 'receivable'): for partial_line in move_line.matched_credit_ids: invoices |= partial_line.origin_returned_move_ids.mapped( 'invoice_id') lines2reconcile = (partial_line.origin_returned_move_ids | partial_line.credit_move_id) partial_line.credit_move_id.remove_move_reconcile() lines2reconcile.reconcile() self.move_id.button_cancel() self.move_id.unlink() self.write({'state': 'cancelled', 'move_id': False}) invoices.check_payment_return() return True @api.multi def action_draft(self): self.write({'state': 'draft'}) return True class PaymentReturnLine(models.Model): _name = "payment.return.line" _description = 'Payment return lines' return_id = fields.Many2one( comodel_name='payment.return', string='Payment return', required=True, ondelete='cascade') concept = fields.Char( string='Concept', help="Read from imported file. 
Only for reference.") reason_id = fields.Many2one( comodel_name='payment.return.reason', oldname="reason", string='Return reason', ) reference = fields.Char( string='Reference', help="Reference to match moves from related documents") move_line_ids = fields.Many2many( comodel_name='account.move.line', string='Payment Reference') date = fields.Date( string='Return date', help="Only for reference", ) partner_name = fields.Char( string='Partner name', readonly=True, help="Read from imported file. Only for reference.") partner_id = fields.Many2one( comodel_name='res.partner', string='Customer', domain="[('customer', '=', True)]") amount = fields.Float( string='Amount', help="Returned amount. Can be different from the move amount", digits=dp.get_precision('Account')) expense_account = fields.Many2one( comodel_name='account.account', string='Charges Account') expense_amount = fields.Float(string='Charges Amount') expense_partner_id = fields.Many2one( comodel_name="res.partner", string="Charges Partner", domain=[('supplier', '=', True)], ) @api.multi def _compute_amount(self): for line in self: line.amount = sum(line.move_line_ids.mapped('credit')) @api.multi def _get_partner_from_move(self): for line in self.filtered(lambda x: not x.partner_id): partners = line.move_line_ids.mapped('partner_id') if len(partners) > 1: raise UserError( _("All payments must be owned by the same partner")) line.partner_id = partners[:1].id line.partner_name = partners[:1].name @api.onchange('move_line_ids') def _onchange_move_line(self): self._compute_amount() @api.onchange('expense_amount') def _onchange_expense_amount(self): if self.expense_amount: journal = self.return_id.journal_id self.expense_account = journal.default_expense_account_id self.expense_partner_id = journal.default_expense_partner_id @api.multi def match_invoice(self): for line in self: domain = line.partner_id and [ ('partner_id', '=', line.partner_id.id)] or [] domain.append(('number', '=', line.reference)) invoice = self.env['account.invoice'].search(domain) if invoice: payments = invoice.payment_move_line_ids if payments: line.move_line_ids = payments[0].ids if not line.concept: line.concept = _('Invoice: %s') % invoice.number @api.multi def match_move_lines(self): for line in self: domain = line.partner_id and [ ('partner_id', '=', line.partner_id.id)] or [] if line.return_id.journal_id: domain.append(('journal_id', '=', line.return_id.journal_id.id)) domain.extend([ ('account_id.internal_type', '=', 'receivable'), ('reconciled', '=', True), '|', ('name', '=', line.reference), ('ref', '=', line.reference), ]) move_lines = self.env['account.move.line'].search(domain) if move_lines: line.move_line_ids = move_lines.ids if not line.concept: line.concept = (_('Move lines: %s') % ', '.join(move_lines.mapped('name'))) @api.multi def match_move(self): for line in self: domain = line.partner_id and [ ('partner_id', '=', line.partner_id.id)] or [] domain.append(('name', '=', line.reference)) move = self.env['account.move'].search(domain) if move: if len(move) > 1: raise UserError( _("More than one matches to move reference: %s") % self.reference) line.move_line_ids = move.line_ids.filtered(lambda l: ( l.user_type_id.type == 'receivable' and l.reconciled )).ids if not line.concept: line.concept = _('Move: %s') % move.ref @api.multi def _find_match(self): # we filter again to remove all ready matched lines in inheritance lines2match = self.filtered(lambda x: ( (not x.move_line_ids) and x.reference)) lines2match.match_invoice() lines2match = 
lines2match.filtered(lambda x: ( (not x.move_line_ids) and x.reference)) lines2match.match_move_lines() lines2match = lines2match.filtered(lambda x: ( (not x.move_line_ids) and x.reference)) lines2match.match_move() self._get_partner_from_move() self.filtered(lambda x: not x.amount)._compute_amount() @api.multi def _prepare_extra_move_lines(self, move): """Include possible extra lines in the return journal entry for other return concepts. :param self: Reference to the payment return line. :param move: Reference to the journal entry created for the return. :return: A list with dictionaries of the extra move lines to add """ self.ensure_one() return []
agpl-3.0
5,260,136,324,633,751,000
39.506739
79
0.544118
false
4.058331
false
false
false
dpshelio/sunpy
examples/units_and_coordinates/planet_locations.py
1
1252
""" =================================== Getting the location of the planets =================================== How to get the position of planetary bodies im the solar system using `astropy's solar system ephemeris <http://docs.astropy.org/en/stable/coordinates/solarsystem.html#solar-system-ephemerides>`__ information and SunPy. """ import matplotlib.pyplot as plt from astropy.time import Time from sunpy.coordinates import get_body_heliographic_stonyhurst ############################################################################## # Lets grab the positions of each of the planets in Heliographic Stonyhurst # coordinates. obstime = Time('2014-05-15T07:54:00.005') planet_list = ['earth', 'venus', 'mars', 'mercury', 'jupiter', 'neptune', 'uranus', 'sun'] planet_coord = [get_body_heliographic_stonyhurst(this_planet, time=obstime) for this_planet in planet_list] ############################################################################## # Let's plot the results. Remember the Sun is at the center of this coordinate # system. ax = plt.subplot(projection='polar') for this_planet, this_coord in zip(planet_list, planet_coord): plt.polar(this_coord.lon.to('rad'), this_coord.radius, 'o', label=this_planet) plt.legend() plt.show()
bsd-2-clause
-3,296,977,724,421,778,000
42.172414
149
0.615815
false
3.639535
false
false
false
Djimmer/obts
Fuzzer/function_scanner.py
1
6412
#!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
import time
import binascii
import os
import sys
from libmich.formats import *
import gsm_um
import smarter_fuzzer_function_def as fuzzer
import itertools
from random import randint
from math import factorial
import logging
from pythonjsonlogger import jsonlogger

# Fill in current mobile device
if len(sys.argv) > 2:
    device = sys.argv[1];
    imsi = sys.argv[2];
else:
    print("ERROR: Device name not found.")
    print("Call the script with: ./smarter_fuzzer #DEVICE #IMSI");
    print("Where #DEVICE is the name and #IMSI is the IMSI of the mobile device.");
    sys.exit(0);

############################################### SETTINGS #############################################
# Default OpenBTS port
TESTCALL_PORT = 28670;

# Log file location
date = str(time.strftime("%Y%m%d-%H%M%S"));
log_all_functions_JSON = "logs/functions/" + device + "_log_" + date + ".json";

# Creat socket
tcsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tcsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tcsock.settimeout(2)

ocsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ocsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
HOST = 'localhost'   # Symbolic name meaning all available interfaces
PORT = 21337         # Arbitrary non-privileged port
ocsock.bind((HOST, PORT))
ocsock.settimeout(20)

# Initialize JSON logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)

# create a file handler
handler = logging.FileHandler(log_all_functions_JSON)
handler.setLevel(logging.INFO)

# create a logging format
formatter = jsonlogger.JsonFormatter()
handler.setFormatter(formatter)

# add the handlers to the logger
logger.addHandler(handler)

logger.info({ "message": "Function Scanner; Device and SIM information", "device": device, "imsi" : imsi});

################################################# LOG ################################################
def log_packets(run, maxRun, packet, parsed_packet, reply, parsed_reply):
    if "ERROR" in parsed_reply:
        parsed_reply = "libmich ERROR";

    logger.info({
        "message": run,
        "maxRun" : maxRun,
        "packet": str(packet).encode("hex"),
        "parsed_packet": parsed_packet,
        "reply": str(reply).encode("hex"),
        "parsed_reply": parsed_reply })

############################################## CHANNEL ###############################################
# Send a restart to OpenBTS to establish a new channel
def establishNewChannel():
    restart = "RESTART";
    print("Channel restart: Establishing a new channel, this may take a second.");
    tcsock.sendto(restart, ('127.0.0.1', TESTCALL_PORT));

    # Wait for OpenBTS to confirm new channel.
    try:
        reply = ocsock.recv(20000)
    except:
        print "Could not establish a new channel.";
        return False;

    print "New channel established, fuzzing will continue.";
    time.sleep(1);
    return True;

def send(tcsock, packet):
    try:
        tcsock.sendto(packet, ('127.0.0.1', TESTCALL_PORT))
        reply = tcsock.recv(1024)
    except socket.timeout:
        print "socket.timeout: Mobile device is not responding";
        return False
    return packetImplemented(reply)

def packetImplemented(reply):
    parsed_reply = repr(L3Mobile.parse_L3(reply));
    print "Received packet: ", str(reply).encode("hex") + "\n";
    print "GSM_UM interpetation: " + '\n' + parsed_reply + "\n\n";

    if "RELEASE_COMPLETE" in parsed_reply:
        return "Restart";
    elif((str(reply).encode("hex") == "786e430200")): #MDL_ERROR_INDICATION
        return "Restart";
    elif((str(reply).encode("hex") == "789ea400")): #MDL_ERROR_INDICATION
        return "Restart";
    elif((str(reply).encode("hex") == "06126100")):
        return "Skip";
    elif "Message type non-existent or not implemented" in parsed_reply:
        return "Skip";
    else:
        return reply;

############################################### UTILS ################################################
def printPacket(packet, currentRun, total_runs):
    print('------------------------------- INPUT -------------------------------' + '\n');
    print('Run ' + str(currentRun) + "/" + str(total_runs) + '\n');

    # Make the packet readable
    if(len(packet) % 2 == 0):
        printable = str(packet).encode("hex");
        print "Current complete packet: " + printable + '\n';

    # Decode printable hex to make it usable for L3Mobile.
    # Adding the \x for the bytes.
    l3msg_input = repr(L3Mobile.parse_L3(str(packet)));
    print "GSM_UM interpetation: \n " + l3msg_input + '\n\n';

    print "------------------------------- OUTPUT -------------------------------" + '\n';

############################################ SMART FUZZER ############################################
# This fuzzer targets fields with variable length
# Tries all different bytes for length byte
# Tries random bytes for a range of lengths
######################################################################################################

# Fuzzer specific settings
maxPacketAttempt = 5;
currentPacketAttempt = 1;

protocols = [3];
currentRun = 1;
total_runs = len(protocols) * 256;
print "Total amount of runs: " + str(total_runs);
time.sleep(1);

for i in protocols:
    firstByte = "{0:0{1}x}".format(i,2);
    n = 1;
    while n < 256:
        secondByte = "{0:0{1}x}".format(n,2);
        if(i == 5 and n == 17):
            # Skip because the packet 0511 is a Authentication Reject
            # and disconnects the mobile device
            secondByte = "{0:0{1}x}".format(n+1,2);

        packet = "\\x" + str(firstByte) + "\\x" + str(secondByte);
        packet = packet.replace('\\x', '').decode('hex');
        print "Packet: " + str(packet).encode("hex");

        printPacket(packet, currentRun, total_runs);

        # Send packet to the mobile device.
        result = send(tcsock, packet);

        if(result == "Restart" or result == False):
            currentPacketAttempt = currentPacketAttempt + 1;
            establishNewChannel();
            if(currentPacketAttempt >= maxPacketAttempt):
                parsed_packet = repr(L3Mobile.parse_L3(packet));
                log_packets(currentRun, total_runs, packet, parsed_packet, "None", "None");
                currentRun = currentRun + 1;
                n = n + 1;
        elif(result =="Skip"):
            currentRun = currentRun + 1;
            currentPacketAttempt = 0;
            n = n + 1;
        else:
            parsed_result = repr(L3Mobile.parse_L3(result));
            parsed_packet = repr(L3Mobile.parse_L3(packet));
            log_packets(currentRun, total_runs, packet, parsed_packet, result, parsed_result);
            currentRun = currentRun + 1;
            currentPacketAttempt = 0;
            n = n + 1;
agpl-3.0
7,928,618,940,592,154,000
29.980676
102
0.611822
false
3.385428
false
false
false
gyurisc/stackjobs
clean_data.py
1
1758
# Ad-hoc fixing of mongo database
from datetime import datetime
import pymongo

client = pymongo.MongoClient('localhost', 27017)
db = client['stackoverflow']
jobs = db['jobs']

# total jobs
total_jobs = jobs.count()
print "Total jobs: %s" % total_jobs

print "=== Fixing Date Stamp ==="
date_stamp = datetime(2016, 6, 1, 7, 01, 01)
jobs.update_many({ "date" : { "$exists" : False}}, {"$set" : {"date" : date_stamp}})

count = 0
for job in jobs.find( { "date" : { "$exists" : False}}):
    count = count + 1
    # print(job)

print "=== Fixing Date Stamp ==="
print "Number of jobs with no date is %s." % count

count = 0
for job in jobs.find( { "date" : date_stamp}):
    count = count + 1
    # print(job)

print "Number of jobs with default date is %s." % count

# Week number
print "=== Fixing Week Number ==="
wkcount = jobs.find( {"weeknum" : {"$exists" : True}}).count()
print "Week number exists with %s and missing for %s jobs." % (wkcount, total_jobs - wkcount)

for job in jobs.find({"weeknum" : {"$exists": False}}):
    d = datetime.strptime(job["date"], '%Y-%m-%d')
    wk = d.isocalendar()[1]
    jobs.update({"_id" : job["_id"]}, {"$set" : {"weeknum" : wk}})

# Employee and Location Whitespace
print "=== Fixing Employee & Location ==="
print "Striping strings from white space in employer and location strings"
for job in jobs.find():
    _emp = job["employer"].strip()
    _loc = job["location"].strip()
    jobs.update({"_id" : job["_id"]}, {"$set" : {"employer" : _emp, "location" : _loc}})

print "Stripping strings from whitespace where salary exists"
for job in jobs.find({ "salary" : { "$exists" : True }}):
    _salary = job["salary"].strip()
    jobs.update({"_id" : job["_id"]}, {"$set" : {"salary" : _salary}})
mit
6,723,297,113,947,829,000
31.555556
93
0.610353
false
3.106007
false
false
false
rodo/ansible-tsung
ec2tool.py
1
5117
#!/usr/bin/env python
import boto.ec2
import jinja2
import sys
import json
import yaml


class Tsing(boto.ec2.instance.Instance):

    def shortname(self):
        return self.private_dns_name.split('.')[0]

    @property
    def private_short_name(self):
        return self.private_dns_name.split('.')[0]


def get_specs(instance, region, data):
    """
    instance (string) : the instance type, e.g. "m3.large"
    region (string) : the region name
    data (dict) : parsed AWS pricing data
    """
    datas = get_data_region(region, data)
    instance_spec = get_instance(instance, datas)
    return instance_spec


def get_instance(instance, data):
    """
    instance (string)
    data (dict)
    """
    result = None
    for inst in data['instanceTypes']:
        for size in inst['sizes']:
            if instance == size['size']:
                result = size
                break
    return result


def get_data_region(region, data):
    """
    region (string) : the region name
    data (dict)
    """
    config = data['config']
    ec2_regions = {"us-east-1": "us-east",
                   "us-west-1": "us-west",
                   "us-west-2": "us-west-2",
                   "eu-west-1": "eu-ireland",
                   "ap-southeast-1": "apac-sin",
                   "ap-southeast-2": "apac-syd",
                   "ap-northeast-1": "apac-tokyo",
                   "sa-east-1": "sa-east-1"}
    for reg in config['regions']:
        if reg['region'] == ec2_regions[region]:
            return reg


def write_nodes(controller, injectors, region, data):
    """
    controller (Tsing instance)
    injectors (list of Tsing instances)
    region (string) : the region name
    data (dict) : parsed AWS pricing data
    """
    hosts = open("playbooks/roles/tsung/vars/nodes.yml", 'w')
    hosts.write("---\n")
    contr_str = "controller: { private_dns_name: '%s', private_ip_address: '%s', private_short_name: '%s' }\n\n"
    hosts.write(contr_str % (controller.private_dns_name,
                             controller.private_ip_address,
                             controller.private_short_name))
    hosts.write("injectors:\n")
    for injec in injectors:
        print injec.__dict__
        specs = get_specs(injec.instance_type, region, data)
        injector = {"private_dns_name": str(injec.private_dns_name),
                    "private_ip_address": str(injec.private_ip_address),
                    "private_short_name": str(injec.private_short_name),
                    "instance_type": str(injec.instance_type),
                    "cpu": int(specs['vCPU'])}
        hosts.write(" - {}".format(yaml.dump(injector, encoding='utf-8')))
    hosts.close()


def instance_weights(injectors, region, data):
    """ Define instance weights relative to the smallest instance """
    assw = {}
    weights = []
    for injec in injectors:
        specs = get_specs(injec['instance_type'], region, data)
        weights.append(float(specs['memoryGiB']))
    minweight = min(weights)
    for injec in injectors:
        specs = get_specs(injec['instance_type'], region, data)
        iid = injec['id']
        assw[iid] = int(round(float(specs['memoryGiB']) / minweight))
    return assw


def parse_instances(instances):
    """ Split running instances into a controller and a list of injectors """
    controller = None
    injectors = []
    for instance in instances:
        inst = instance.instances[0]
        inst.__class__ = Tsing
        if inst.state == 'running':
            tags = inst.tags
            if 'tsung_role' in tags:
                if tags['tsung_role'] == 'controller':
                    controller = inst
                else:
                    injectors.append(inst)
            else:
                injectors.append(inst)
    return controller, injectors


def cloud_connect(region):
    """ Connect to the EC2 region """
    print "connect on {}...".format(region)
    conn = boto.ec2.connect_to_region(region)
    return conn


def write_ini(injectors, controller):
    """ Write ansible .ini file """
    templateLoader = jinja2.FileSystemLoader(searchpath=".")
    templateEnv = jinja2.Environment(loader=templateLoader)
    templateVars = {"injectors": injectors,
                    "controller": controller}
    #
    # Configure the cluster
    #
    template = templateEnv.get_template("cluster.j2")
    clients = open("cluster.ini", 'w')
    clients.write(template.render(templateVars))
    clients.close()


if __name__ == "__main__":
    try:
        region = sys.argv[1]
    except IndexError:
        print "usage: ec2tool.py REGION"
        sys.exit(1)
    conn = cloud_connect(region)
    print "connected"
    instances = conn.get_all_instances()
    controller, injectors = parse_instances(instances)
    print "found\n {} injectors".format(len(injectors))
    if controller is None:
        print "ERROR didn't find any controller"
        sys.exit(1)
    else:
        print " controller : tsung@{} ".format(controller.ip_address)
    #
    #
    with open("linux-od.json") as data_file:
        data = json.load(data_file)
    #
    #
    write_nodes(controller, injectors, region, data)
    write_ini(injectors, controller)
    #
    print 'ansible-playbook -i cluster.ini -u ubuntu playbooks/tsung.yml'
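The instance_weights helper above sizes each injector by its memory relative to the smallest instance in the pool. A minimal, dependency-free sketch of that calculation is shown below; the instance ids, instance types and memoryGiB figures are made-up placeholders, not values taken from the AWS pricing file.

# Hedged sketch of the instance_weights() formula on stand-in data.
# All ids, types and memory figures below are illustrative only.
specs_by_type = {
    "m3.medium": {"memoryGiB": 3.75},
    "m3.large": {"memoryGiB": 7.5},
    "m3.2xlarge": {"memoryGiB": 30.0},
}

injectors = [
    {"id": "i-0001", "instance_type": "m3.medium"},
    {"id": "i-0002", "instance_type": "m3.large"},
    {"id": "i-0003", "instance_type": "m3.2xlarge"},
]

memories = [specs_by_type[i["instance_type"]]["memoryGiB"] for i in injectors]
minweight = min(memories)

weights = {
    i["id"]: int(round(specs_by_type[i["instance_type"]]["memoryGiB"] / minweight))
    for i in injectors
}
print(weights)  # {'i-0001': 1, 'i-0002': 2, 'i-0003': 8}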
gpl-3.0
6,011,133,720,753,712,000
24.713568
112
0.560876
false
3.737765
false
false
false
nemesisdesign/openwisp2
openwisp_controller/config/controller/views.py
1
14788
import json
from ipaddress import ip_address

from django.core.exceptions import FieldDoesNotExist, ValidationError
from django.db import transaction
from django.db.models import Q
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from swapper import load_model

from .. import settings as app_settings
from ..signals import checksum_requested, config_download_requested, device_registered
from ..utils import (
    ControllerResponse,
    forbid_unallowed,
    get_object_or_404,
    invalid_response,
    send_device_config,
    send_vpn_config,
    update_last_ip,
)

Device = load_model('config', 'Device')
OrganizationConfigSettings = load_model('config', 'OrganizationConfigSettings')
Vpn = load_model('config', 'Vpn')


class BaseConfigView(SingleObjectMixin, View):
    """
    Base view that implements a ``get_object`` method
    Subclassed by all views dealing with existing objects
    """

    def get_object(self, *args, **kwargs):
        kwargs['config__isnull'] = False
        return get_object_or_404(self.model, *args, **kwargs)


class CsrfExtemptMixin(object):
    """
    Mixin that makes the view exempt from CSRF protection
    """

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super().dispatch(request, *args, **kwargs)


class UpdateLastIpMixin(object):
    def update_last_ip(self, device, request):
        result = update_last_ip(device, request)
        if result:
            # ensure that no other device in the
            # same org keeps the same management_ip.
            # This can happen when management interfaces are using DHCP
            # and they get a new address which was previously used by another
            # device that may now be offline; without this fix, we would end up
            # with two devices having the same management_ip, which would
            # cause OpenWISP to be confused
            self.model.objects.filter(
                organization=device.organization, management_ip=device.management_ip
            ).exclude(pk=device.pk).update(management_ip='')
            # in the case of last_ip, we take a different approach,
            # because it may be a public IP. If it's a public IP we will
            # allow it to be duplicated
            if ip_address(device.last_ip).is_private:
                Device.objects.filter(
                    organization=device.organization, last_ip=device.last_ip
                ).exclude(pk=device.pk).update(last_ip='')
        return result


class ActiveOrgMixin(object):
    """
    adds check to organization.is_active to ``get_object`` method
    """

    def get_object(self, *args, **kwargs):
        kwargs['organization__is_active'] = True
        return super().get_object(*args, **kwargs)


class DeviceChecksumView(ActiveOrgMixin, UpdateLastIpMixin, BaseConfigView):
    """
    returns device's configuration checksum
    """

    model = Device

    def get(self, request, *args, **kwargs):
        device = self.get_object(*args, **kwargs)
        bad_request = forbid_unallowed(request, 'GET', 'key', device.key)
        if bad_request:
            return bad_request
        self.update_last_ip(device, request)
        checksum_requested.send(
            sender=device.__class__, instance=device, request=request
        )
        return ControllerResponse(device.config.checksum, content_type='text/plain')


class DeviceDownloadConfigView(ActiveOrgMixin, BaseConfigView):
    """
    returns configuration archive as attachment
    """

    model = Device

    def get(self, request, *args, **kwargs):
        device = self.get_object(*args, **kwargs)
        bad_request = forbid_unallowed(request, 'GET', 'key', device.key)
        if bad_request:
            return bad_request
        config_download_requested.send(
            sender=device.__class__, instance=device, request=request
        )
        return send_device_config(device.config, request)


class DeviceUpdateInfoView(ActiveOrgMixin, CsrfExtemptMixin, BaseConfigView):
    """
    updates general information about the device
    """

    model = Device
    UPDATABLE_FIELDS = ['os', 'model', 'system']

    def post(self, request, *args, **kwargs):
        device = self.get_object(*args, **kwargs)
        bad_request = forbid_unallowed(request, 'POST', 'key', device.key)
        if bad_request:
            return bad_request
        # update device information
        for attr in self.UPDATABLE_FIELDS:
            if attr in request.POST:
                setattr(device, attr, request.POST.get(attr))
        # validate and save everything or fail otherwise
        try:
            with transaction.atomic():
                device.full_clean()
                device.save()
        except ValidationError as e:
            # dump message_dict as JSON,
            # this should make it easy to debug
            return ControllerResponse(
                json.dumps(e.message_dict, indent=4, sort_keys=True),
                content_type='text/plain',
                status=400,
            )
        return ControllerResponse('update-info: success', content_type='text/plain')


class DeviceReportStatusView(ActiveOrgMixin, CsrfExtemptMixin, BaseConfigView):
    """
    updates status of config objects
    """

    model = Device

    def post(self, request, *args, **kwargs):
        device = self.get_object(*args, **kwargs)
        config = device.config
        # ensure request is well formed and authorized
        allowed_status = [choices[0] for choices in config.STATUS]
        allowed_status.append('running')  # backward compatibility
        required_params = [('key', device.key), ('status', allowed_status)]
        for key, value in required_params:
            bad_response = forbid_unallowed(request, 'POST', key, value)
            if bad_response:
                return bad_response
        status = request.POST.get('status')
        # maintain backward compatibility with old agents
        # ("running" was changed to "applied")
        status = status if status != 'running' else 'applied'
        # call set_status_{status} method on Config model
        method_name = f'set_status_{status}'
        getattr(config, method_name)()
        return ControllerResponse(
            f'report-result: success\ncurrent-status: {config.status}\n',
            content_type='text/plain',
        )


class DeviceRegisterView(UpdateLastIpMixin, CsrfExtemptMixin, View):
    """
    registers new Config objects
    """

    model = Device
    org_config_settings_model = OrganizationConfigSettings
    UPDATABLE_FIELDS = ['os', 'model', 'system']

    def init_object(self, **kwargs):
        """
        initializes Config object with incoming POST data
        """
        device_model = self.model
        config_model = device_model.get_config_model()
        options = {}
        for attr in kwargs.keys():
            # skip attributes that are not model fields
            try:
                device_model._meta.get_field(attr)
            except FieldDoesNotExist:
                continue
            options[attr] = kwargs.get(attr)
        # do not specify key if:
        # app_settings.CONSISTENT_REGISTRATION is False
        # or if key is ``None`` (it would cause an exception)
        if 'key' in options and (
            app_settings.CONSISTENT_REGISTRATION is False or options['key'] is None
        ):
            del options['key']
        if 'hardware_id' in options and options['hardware_id'] == "":
            options['hardware_id'] = None
        config = config_model(device=device_model(**options), backend=kwargs['backend'])
        config.organization = self.organization
        config.device.organization = self.organization
        return config

    def get_template_queryset(self, config):
        """
        returns Template model queryset
        """
        queryset = config.get_template_model().objects.all()
        # filter templates of the same organization or shared templates
        return queryset.filter(Q(organization=self.organization) | Q(organization=None))

    def add_tagged_templates(self, config, request):
        """
        adds templates specified in incoming POST tag setting
        """
        tags = request.POST.get('tags')
        if not tags:
            return
        # retrieve tags and add them to current config
        tags = tags.split()
        queryset = self.get_template_queryset(config)
        templates = queryset.filter(tags__name__in=tags).only('id').distinct()
        for template in templates:
            config.templates.add(template)

    def invalid(self, request):
        """
        ensures request is well formed
        """
        allowed_backends = [path for path, name in app_settings.BACKENDS]
        required_params = [
            ('secret', None),
            ('name', None),
            ('mac_address', None),
            ('backend', allowed_backends),
        ]
        # validate required params or forbid
        for key, value in required_params:
            invalid_response = forbid_unallowed(request, 'POST', key, value)
            if invalid_response:
                return invalid_response

    def forbidden(self, request):
        """
        ensures request is authorized:
            - secret matches an organization's shared_secret
            - the organization has registration_enabled set to True
        """
        try:
            secret = request.POST.get('secret')
            org_settings = self.org_config_settings_model.objects.select_related(
                'organization'
            ).get(shared_secret=secret, organization__is_active=True)
        except self.org_config_settings_model.DoesNotExist:
            return invalid_response(request, 'error: unrecognized secret', status=403)
        if not org_settings.registration_enabled:
            return invalid_response(request, 'error: registration disabled', status=403)
        # set an organization attribute as a side effect
        # this attribute will be used in ``init_object``
        self.organization = org_settings.organization

    def post(self, request, *args, **kwargs):
        """
        POST logic
        """
        if not app_settings.REGISTRATION_ENABLED:
            return ControllerResponse('error: registration disabled', status=403)
        # ensure request is valid
        bad_response = self.invalid(request)
        if bad_response:
            return bad_response
        # ensure request is allowed
        forbidden = self.forbidden(request)
        if forbidden:
            return forbidden
        # prepare model attributes
        key = None
        if app_settings.CONSISTENT_REGISTRATION:
            key = request.POST.get('key')
        # try retrieving existing Device first
        # (key is not None only if CONSISTENT_REGISTRATION is enabled)
        new = False
        try:
            device = self.model.objects.get(key=key)
            # update hw info
            for attr in self.UPDATABLE_FIELDS:
                if attr in request.POST:
                    setattr(device, attr, request.POST.get(attr))
            config = device.config
        # if the get queryset fails, instantiate a new Device and Config
        except self.model.DoesNotExist:
            if not app_settings.REGISTRATION_SELF_CREATION:
                return ControllerResponse(
                    'Device not found in the system, please create it first.',
                    status=404,
                )
            new = True
            config = self.init_object(**request.POST.dict())
            device = config.device
        # if the get queryset succeeds but the device has no related config,
        # instantiate a new Config but reuse the existing device
        except self.model.config.RelatedObjectDoesNotExist:
            config = self.init_object(**request.POST.dict())
            config.device = device
        # update last_ip field of device
        device.last_ip = request.META.get('REMOTE_ADDR')
        # validate and save everything or fail otherwise
        try:
            with transaction.atomic():
                device.full_clean()
                device.save()
                config.full_clean()
                config.save()
        except ValidationError as e:
            # dump message_dict as JSON,
            # this should make it easy to debug
            return ControllerResponse(
                json.dumps(e.message_dict, indent=4, sort_keys=True),
                content_type='text/plain',
                status=400,
            )
        # add templates specified in tags
        self.add_tagged_templates(config, request)
        # emit device registered signal
        device_registered.send(sender=device.__class__, instance=device, is_new=new)
        # prepare response
        s = (
            'registration-result: success\n'
            'uuid: {id}\n'
            'key: {key}\n'
            'hostname: {name}\n'
            'is-new: {is_new}\n'
        )
        attributes = device.__dict__.copy()
        attributes.update({'id': device.pk.hex, 'key': device.key, 'is_new': int(new)})
        return ControllerResponse(
            s.format(**attributes), content_type='text/plain', status=201
        )


class VpnChecksumView(BaseConfigView):
    """
    returns vpn's configuration checksum
    """

    model = Vpn

    def get(self, request, *args, **kwargs):
        vpn = self.get_object(*args, **kwargs)
        bad_request = forbid_unallowed(request, 'GET', 'key', vpn.key)
        if bad_request:
            return bad_request
        checksum_requested.send(sender=vpn.__class__, instance=vpn, request=request)
        return ControllerResponse(vpn.checksum, content_type='text/plain')


class VpnDownloadConfigView(BaseConfigView):
    """
    returns configuration archive as attachment
    """

    model = Vpn

    def get(self, request, *args, **kwargs):
        vpn = self.get_object(*args, **kwargs)
        bad_request = forbid_unallowed(request, 'GET', 'key', vpn.key)
        if bad_request:
            return bad_request
        config_download_requested.send(
            sender=vpn.__class__, instance=vpn, request=request
        )
        return send_vpn_config(vpn, request)


device_checksum = DeviceChecksumView.as_view()
device_download_config = DeviceDownloadConfigView.as_view()
device_update_info = DeviceUpdateInfoView.as_view()
device_report_status = DeviceReportStatusView.as_view()
device_register = DeviceRegisterView.as_view()
vpn_checksum = VpnChecksumView.as_view()
vpn_download_config = VpnDownloadConfigView.as_view()
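For illustration, an agent registering against DeviceRegisterView POSTs the required parameters checked in ``invalid()`` (secret, name, mac_address, backend) and then parses the plain-text "key: value" reply built in ``post()``. The sketch below uses the requests library; the controller URL, the /controller/register/ path, the secret, the MAC address and the backend dotted path are placeholder assumptions, not values taken from this file.

# Hedged agent-side sketch; URL, path, secret, MAC and backend are placeholders.
import requests

response = requests.post(
    "https://controller.example.com/controller/register/",  # assumed URL layout
    data={
        "secret": "organization-shared-secret",   # placeholder shared secret
        "name": "test-device",
        "mac_address": "00:11:22:33:44:55",       # placeholder MAC
        "backend": "netjsonconfig.OpenWrt",       # must be one of app_settings.BACKENDS
    },
)
response.raise_for_status()

# the view answers with lines such as "registration-result: success",
# "uuid: ...", "key: ...", "hostname: ..." and "is-new: ..."
info = dict(
    line.split(": ", 1)
    for line in response.text.strip().splitlines()
    if ": " in line
)
print(info.get("uuid"), info.get("key"))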
gpl-3.0
6,931,061,912,724,383,000
35.78607
88
0.61719
false
4.388131
true
false
false
pinax/pinax-blog
pinax/blog/admin.py
1
3056
from functools import partial as curry

from django.contrib import admin
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _

from pinax.images.admin import ImageInline
from pinax.images.models import ImageSet

from .conf import settings
from .forms import AdminPostForm
from .models import Blog, Post, ReviewComment, Section


class PostImageSet(ImageSet):
    class Meta:
        proxy = True


class ReviewInline(admin.TabularInline):
    model = ReviewComment


def make_published(modeladmin, request, queryset):
    queryset = queryset.exclude(state=Post.STATE_CHOICES[-1][0], published__isnull=False)
    queryset.update(state=Post.STATE_CHOICES[-1][0])
    queryset.filter(published__isnull=True).update(published=timezone.now())
make_published.short_description = _("Publish selected posts")


class PostAdmin(admin.ModelAdmin):
    list_display = ["title", "state", "section", "published", "show_secret_share_url"]
    list_filter = ["section", "state"]
    form = AdminPostForm
    actions = [make_published]
    fields = [
        "section",
        "title",
        "slug",
        "author",
        "markup",
        "teaser",
        "content",
        "description",
        "sharable_url",
        "state",
        "published",
        "image_set"  # maybe this https://github.com/anziem/django_reverse_admin
    ]
    readonly_fields = ["sharable_url"]
    prepopulated_fields = {"slug": ("title",)}
    inlines = [
        ReviewInline,
    ]

    def show_secret_share_url(self, obj):
        return '<a href="{}">{}</a>'.format(obj.sharable_url, obj.sharable_url)
    show_secret_share_url.short_description = _("Share this url")
    show_secret_share_url.allow_tags = True

    def formfield_for_dbfield(self, db_field, **kwargs):
        request = kwargs.get("request")
        if db_field.name == "author":
            ff = super().formfield_for_dbfield(db_field, **kwargs)
            ff.initial = request.user.id
            return ff
        return super().formfield_for_dbfield(db_field, **kwargs)

    def get_form(self, request, obj=None, **kwargs):
        kwargs.update({
            "formfield_callback": curry(self.formfield_for_dbfield, request=request),
        })
        return super().get_form(request, obj, **kwargs)

    def save_form(self, request, form, change):
        # this is done for explicitness that we want form.save to commit
        # form.save doesn't take a commit kwarg for this reason
        return form.save(Blog.objects.first() if not settings.PINAX_BLOG_SCOPING_MODEL else None)


if settings.PINAX_BLOG_SCOPING_MODEL:
    PostAdmin.fields.insert(0, "blog")
    PostAdmin.list_filter.append("blog__scoper")


class SectionAdmin(admin.ModelAdmin):
    prepopulated_fields = {"slug": ("name",)}


admin.site.register(Post, PostAdmin)
admin.site.register(Section, SectionAdmin)
admin.site.register(
    PostImageSet,
    list_display=["blog_post", "primary_image", "created_by", "created_at"],
    raw_id_fields=["created_by"],
    inlines=[ImageInline],
)
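The get_form override above pre-binds the current request into the formfield callback with functools.partial (imported as curry). A small self-contained sketch of that binding pattern is shown below; the callback and request object are stand-ins, not the Django admin machinery.

# Hedged sketch of the partial-binding pattern used in PostAdmin.get_form;
# the callback and the fake request are illustrative stand-ins.
from functools import partial


def formfield_for_dbfield(db_field, request=None):
    # mimic the real callback: pre-fill the author field with the current user
    if db_field == "author" and request is not None:
        return "initial={}".format(request["user_id"])
    return "default"


fake_request = {"user_id": 42}
callback = partial(formfield_for_dbfield, request=fake_request)

print(callback("author"))  # initial=42
print(callback("title"))   # default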
mit
7,357,990,425,241,163,000
29.56
97
0.659359
false
3.690821
false
false
false